diff --git a/modelarts/api-ref/ALL_META.TXT.json b/modelarts/api-ref/ALL_META.TXT.json new file mode 100644 index 00000000..6b9ee546 --- /dev/null +++ b/modelarts/api-ref/ALL_META.TXT.json @@ -0,0 +1,1872 @@ +[ + { + "uri":"modelarts_03_0139.html", + "product_code":"modelarts", + "code":"1", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Before You Start", + "title":"Before You Start", + "githuburl":"" + }, + { + "uri":"modelarts_03_0001.html", + "product_code":"modelarts", + "code":"2", + "des":"ModelArts is a one-stop AI development platform geared toward developers and data scientists of all skill levels. It enables you to rapidly build, train, and deploy model", + "doc_type":"api", + "kw":"Overview,Before You Start,API Reference", + "title":"Overview", + "githuburl":"" + }, + { + "uri":"modelarts_03_0140.html", + "product_code":"modelarts", + "code":"3", + "des":"ModelArts supports Representational State Transfer (REST) APIs, allowing you to call APIs using HTTPS. For details about API calling, see Calling APIs.", + "doc_type":"api", + "kw":"API Calling,Before You Start,API Reference", + "title":"API Calling", + "githuburl":"" + }, + { + "uri":"modelarts_03_0141.html", + "product_code":"modelarts", + "code":"4", + "des":"Endpoints are request addresses for calling APIs. Endpoints vary depending on services and regions. To obtain the regions and endpoints, contact the enterprise administrato", + "doc_type":"api", + "kw":"Endpoints,Before You Start,API Reference", + "title":"Endpoints", + "githuburl":"" + }, + { + "uri":"modelarts_03_0143.html", + "product_code":"modelarts", + "code":"5", + "des":"Account: An account is created upon successful registration with the cloud platform. The account has full access permissions for all of its cloud services and resources. It", + "doc_type":"api", + "kw":"Basic Concepts,Before You Start,API Reference", + "title":"Basic Concepts", + "githuburl":"" + }, + { + "uri":"modelarts_03_0002.html", + "product_code":"modelarts", + "code":"6", + "des":"All ModelArts APIs are proprietary.You can use these APIs to manage datasets, training jobs, models, and services.Data management APIs include the APIs for managing datas", + "doc_type":"api", + "kw":"API Overview,API Reference", + "title":"API Overview", + "githuburl":"" + }, + { + "uri":"modelarts_03_0144.html", + "product_code":"modelarts", + "code":"7", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Calling APIs", + "title":"Calling APIs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0005.html", + "product_code":"modelarts", + "code":"8", + "des":"This section describes the structure of a REST API request, and uses the IAM API for obtaining a user token as an example to demonstrate how to call an API. 
The obtained ", + "doc_type":"api", + "kw":"Making an API Request,Calling APIs,API Reference", + "title":"Making an API Request", + "githuburl":"" + }, + { + "uri":"modelarts_03_0004.html", + "product_code":"modelarts", + "code":"9", + "des":"Requests for calling an API can be authenticated using either of the following methods: AK/SK-based authentication: Requests are authenticated by encrypting the request b", + "doc_type":"api", + "kw":"Authentication,Calling APIs,API Reference", + "title":"Authentication", + "githuburl":"" + }, + { + "uri":"modelarts_03_0003.html", + "product_code":"modelarts", + "code":"10", + "des":"After sending a request, you will receive a response, including the status code, response header, and response body.A status code is a group of digits, ranging from 1xx t", + "doc_type":"api", + "kw":"Response,Calling APIs,API Reference", + "title":"Response", + "githuburl":"" + }, + { + "uri":"modelarts_03_0108.html", + "product_code":"modelarts", + "code":"11", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"DevEnviron", + "title":"DevEnviron", + "githuburl":"" + }, + { + "uri":"modelarts_03_0109.html", + "product_code":"modelarts", + "code":"12", + "des":"This API is used to query the authentication information of a development environment instance, which is used to open the development environment instance.GET /v1/{projec", + "doc_type":"api", + "kw":"Querying the Authentication Information of a Development Environment Instance,DevEnviron,API Referen", + "title":"Querying the Authentication Information of a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0110.html", + "product_code":"modelarts", + "code":"13", + "des":"This API is used to create a development environment instance for code development.Calling this API is an asynchronous operation. 
The job status can be obtained by callin", + "doc_type":"api", + "kw":"Creating a Development Environment Instance,DevEnviron,API Reference", + "title":"Creating a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0111.html", + "product_code":"modelarts", + "code":"14", + "des":"This API is used to query the development environment instances that meet the search criteria.GET /v1/{project_id}/demanager/instances?de_type={de_type}&provision_type={p", + "doc_type":"api", + "kw":"Querying a List of Development Environment Instances,DevEnviron,API Reference", + "title":"Querying a List of Development Environment Instances", + "githuburl":"" + }, + { + "uri":"modelarts_03_0112.html", + "product_code":"modelarts", + "code":"15", + "des":"This API is used to query the details about a development environment instance.GET /v1/{project_id}/demanager/instances/{instance_id}Table 1 describes the required parame", + "doc_type":"api", + "kw":"Querying the Details About a Development Environment Instance,DevEnviron,API Reference", + "title":"Querying the Details About a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0113.html", + "product_code":"modelarts", + "code":"16", + "des":"This API is used to modify the description of a development environment instance or information about the auto stop function.PUT /v1/{project_id}/demanager/instances/{ins", + "doc_type":"api", + "kw":"Modifying the Description of a Development Environment Instance,DevEnviron,API Reference", + "title":"Modifying the Description of a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0114.html", + "product_code":"modelarts", + "code":"17", + "des":"This API is used to delete a development environment instance.DELETE /v1/{project_id}/demanager/instances/{instance_id}Table 1 describes the required parameters.Parameter", + "doc_type":"api", + "kw":"Deleting a Development Environment Instance,DevEnviron,API Reference", + "title":"Deleting a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0115.html", + "product_code":"modelarts", + "code":"18", + "des":"This API is used to start or stop a notebook instance.POST /v1/{project_id}/demanager/instances/{instance_id}/actionTable 1 describes the required parameters.ParametersPar", + "doc_type":"api", + "kw":"Managing a Development Environment Instance,DevEnviron,API Reference", + "title":"Managing a Development Environment Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0152.html", + "product_code":"modelarts", + "code":"19", + "des":"This API is used to restart an ML Studio development environment instance.POST /v1/{project_id}/demanager/instances/{instance_id}/actionTable 1 describes the required par", + "doc_type":"api", + "kw":"Restarting an ML Studio Instance,DevEnviron,API Reference", + "title":"Restarting an ML Studio Instance", + "githuburl":"" + }, + { + "uri":"modelarts_03_0202.html", + "product_code":"modelarts", + "code":"20", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Data Management", + "title":"Data Management", + "githuburl":"" + }, + { + "uri":"modelarts_03_0299.html", + "product_code":"modelarts", + "code":"21", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Data Management APIs,Data Management,API Reference", + "title":"Data Management APIs", + "githuburl":"" + }, + { + "uri":"dataset_management.html", + "product_code":"modelarts", + "code":"22", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Dataset Management", + "title":"Dataset Management", + "githuburl":"" + }, + { + "uri":"ListDatasets.html", + "product_code":"modelarts", + "code":"23", + "des":"This API is used to query the created datasets that meet the search criteria by page.GET /v2/{project_id}/datasetsNoneStatus code: 200Querying the Dataset ListStatus code", + "doc_type":"api", + "kw":"Querying the Dataset List,Dataset Management,API Reference", + "title":"Querying the Dataset List", + "githuburl":"" + }, + { + "uri":"CreateDataset.html", + "product_code":"modelarts", + "code":"24", + "des":"This API is used to create a dataset.POST /v2/{project_id}/datasetsStatus code: 201Creating an Image Classification Dataset{\n \"workspace_id\" : \"0\",\n \"dataset_name\" : \"d", + "doc_type":"api", + "kw":"Creating a Dataset,Dataset Management,API Reference", + "title":"Creating a Dataset", + "githuburl":"" + }, + { + "uri":"DescDataset.html", + "product_code":"modelarts", + "code":"25", + "des":"This API is used to query details about a dataset.GET /v2/{project_id}/datasets/{dataset_id}NoneStatus code: 200Querying Details About a DatasetStatus code: 200OKSee Erro", + "doc_type":"api", + "kw":"Querying Details About a Dataset,Dataset Management,API Reference", + "title":"Querying Details About a Dataset", + "githuburl":"" + }, + { + "uri":"UpdateDataset.html", + "product_code":"modelarts", + "code":"26", + "des":"This API is used to modify basic information about a dataset, such as the dataset name, description, current version, and labels.PUT /v2/{project_id}/datasets/{dataset_id", + "doc_type":"api", + "kw":"Modifying a Dataset,Dataset Management,API Reference", + "title":"Modifying a Dataset", + "githuburl":"" + }, + { + "uri":"DeleteDataset.html", + "product_code":"modelarts", + "code":"27", + "des":"This API is used to delete a dataset without deleting the source data of the dataset.DELETE /v2/{project_id}/datasets/{dataset_id}NoneNoneDeleting a DatasetStatus code: 2", + "doc_type":"api", + "kw":"Deleting a Dataset,Dataset Management,API Reference", + "title":"Deleting a Dataset", + "githuburl":"" + }, + { + "uri":"ListStats.html", + "product_code":"modelarts", + "code":"28", + "des":"This API is used to query dataset statistics.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/statsNoneStatus code: 200Querying Dataset StatisticsStatus code: ", + "doc_type":"api", + "kw":"Querying Dataset 
Statistics,Dataset Management,API Reference", + "title":"Querying Dataset Statistics", + "githuburl":"" + }, + { + "uri":"GetDatasetMetrics.html", + "product_code":"modelarts", + "code":"29", + "des":"This API is used to query the monitoring data of a dataset within a specified time range.GET /v2/{project_id}/datasets/{dataset_id}/metricsNoneStatus code: 200Querying th", + "doc_type":"api", + "kw":"Querying the Monitoring Data of a Dataset,Dataset Management,API Reference", + "title":"Querying the Monitoring Data of a Dataset", + "githuburl":"" + }, + { + "uri":"ListWorkforceTaskStats.html", + "product_code":"modelarts", + "code":"30", + "des":"This API is used to query details about team labeling task statistics.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/stat", + "doc_type":"api", + "kw":"Querying Details About Team Labeling Task Statistics,Dataset Management,API Reference", + "title":"Querying Details About Team Labeling Task Statistics", + "githuburl":"" + }, + { + "uri":"GetWorkforceTaskMetrics.html", + "product_code":"modelarts", + "code":"31", + "des":"This API is used to query details about the progress of a team labeling task member.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/metrics", + "doc_type":"api", + "kw":"Querying Details About the Progress of a Team Labeling Task Member,Dataset Management,API Reference", + "title":"Querying Details About the Progress of a Team Labeling Task Member", + "githuburl":"" + }, + { + "uri":"dataset_version_management.html", + "product_code":"modelarts", + "code":"32", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Dataset Version Management", + "title":"Dataset Version Management", + "githuburl":"" + }, + { + "uri":"ListDatasetVersions.html", + "product_code":"modelarts", + "code":"33", + "des":"This API is used to query the version list of a specific dataset.GET /v2/{project_id}/datasets/{dataset_id}/versionsNoneStatus code: 200Querying the Version List of a Spe", + "doc_type":"api", + "kw":"Querying the Dataset Version List,Dataset Version Management,API Reference", + "title":"Querying the Dataset Version List", + "githuburl":"" + }, + { + "uri":"CreateDatasetVersion.html", + "product_code":"modelarts", + "code":"34", + "des":"This API is used to create a dataset labeling version.POST /v2/{project_id}/datasets/{dataset_id}/versionsStatus code: 201Creating a Dataset Labeling VersionStatus code: ", + "doc_type":"api", + "kw":"Creating a Dataset Labeling Version,Dataset Version Management,API Reference", + "title":"Creating a Dataset Labeling Version", + "githuburl":"" + }, + { + "uri":"DescribeDatasetVersion.html", + "product_code":"modelarts", + "code":"35", + "des":"This API is used to query the details about a dataset version.GET /v2/{project_id}/datasets/{dataset_id}/versions/{version_id}NoneStatus code: 200Querying Details About a", + "doc_type":"api", + "kw":"Querying Details About a Dataset Version,Dataset Version Management,API Reference", + "title":"Querying Details About a Dataset Version", + "githuburl":"" + }, + { + "uri":"DeleteDatasetVersion.html", + "product_code":"modelarts", + "code":"36", + "des":"This API is used to delete a dataset labeling version.DELETE 
/v2/{project_id}/datasets/{dataset_id}/versions/{version_id}NoneNoneDeleting a Dataset Labeling VersionStatus", + "doc_type":"api", + "kw":"Deleting a Dataset Labeling Version,Dataset Version Management,API Reference", + "title":"Deleting a Dataset Labeling Version", + "githuburl":"" + }, + { + "uri":"sample_management.html", + "product_code":"modelarts", + "code":"37", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Sample Management", + "title":"Sample Management", + "githuburl":"" + }, + { + "uri":"ListSamples.html", + "product_code":"modelarts", + "code":"38", + "des":"This API is used to query the sample list by page.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samplesNoneStatus code: 200Querying the Sample List by PageS", + "doc_type":"api", + "kw":"Querying the Sample List,Sample Management,API Reference", + "title":"Querying the Sample List", + "githuburl":"" + }, + { + "uri":"UploadSamplesJson.html", + "product_code":"modelarts", + "code":"39", + "des":"This API is used to add samples in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samplesStatus code: 200Adding Samples in BatchesStatus code: 200OK", + "doc_type":"api", + "kw":"Adding Samples in Batches,Sample Management,API Reference", + "title":"Adding Samples in Batches", + "githuburl":"" + }, + { + "uri":"DeleteSamples.html", + "product_code":"modelarts", + "code":"40", + "des":"This API is used to delete samples in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/deleteStatus code: 200Deleting Samples in BatchesStatus", + "doc_type":"api", + "kw":"Deleting Samples in Batches,Sample Management,API Reference", + "title":"Deleting Samples in Batches", + "githuburl":"" + }, + { + "uri":"DescribeSample.html", + "product_code":"modelarts", + "code":"41", + "des":"Query details about a sample.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/{sample_id}NoneStatus code: 200Querying Details About a SampleStatus code", + "doc_type":"api", + "kw":"Querying Details About a Sample,Sample Management,API Reference", + "title":"Querying Details About a Sample", + "githuburl":"" + }, + { + "uri":"ListSearch.html", + "product_code":"modelarts", + "code":"42", + "des":"This API is used to obtain sample search condition.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/search-conditionNoneStatus code: 200Obtaining Sample Search", + "doc_type":"api", + "kw":"Obtaining Sample Search Condition,Sample Management,API Reference", + "title":"Obtaining Sample Search Condition", + "githuburl":"" + }, + { + "uri":"ListWorkforceTaskSamples.html", + "product_code":"modelarts", + "code":"43", + "des":"This API is used to query the sample list of a team labeling task by page.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/", + "doc_type":"api", + "kw":"Querying the Sample List of a Team Labeling Task by Page,Sample Management,API Reference", + "title":"Querying the Sample List of a Team Labeling Task by Page", + "githuburl":"" + }, + { + "uri":"DescribeWorkforceTaskSample.html", + "product_code":"modelarts", + "code":"44", + "des":"This API is used to query details about team labeling samples.GET 
/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples/{sam", + "doc_type":"api", + "kw":"Querying Details About Team Labeling Samples,Sample Management,API Reference", + "title":"Querying Details About Team Labeling Samples", + "githuburl":"" + }, + { + "uri":"label_management.html", + "product_code":"modelarts", + "code":"45", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Label Management", + "title":"Label Management", + "githuburl":"" + }, + { + "uri":"ListLabels.html", + "product_code":"modelarts", + "code":"46", + "des":"This API is used to query all labels of a dataset.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsNoneStatus code: 200Querying All Labels of a DatasetSt", + "doc_type":"api", + "kw":"Querying the Dataset Label List,Label Management,API Reference", + "title":"Querying the Dataset Label List", + "githuburl":"" + }, + { + "uri":"CreateLabels.html", + "product_code":"modelarts", + "code":"47", + "des":"This API is used to create a dataset label.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsStatus code: 200Creating a Dataset LabelStatus code: 200OKSe", + "doc_type":"api", + "kw":"Creating a Dataset Label,Label Management,API Reference", + "title":"Creating a Dataset Label", + "githuburl":"" + }, + { + "uri":"UpdateLabels.html", + "product_code":"modelarts", + "code":"48", + "des":"This API is used to modify labels in batches.PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsStatus code: 200Modifying Labels in BatchesStatus code: 200", + "doc_type":"api", + "kw":"Modifying Labels in Batches,Label Management,API Reference", + "title":"Modifying Labels in Batches", + "githuburl":"" + }, + { + "uri":"DeleteLabels.html", + "product_code":"modelarts", + "code":"49", + "des":"This API is used to delete labels in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/deleteStatus code: 200Deleting Labels in BatchesStatus co", + "doc_type":"api", + "kw":"Deleting Labels in Batches,Label Management,API Reference", + "title":"Deleting Labels in Batches", + "githuburl":"" + }, + { + "uri":"UpdateLabel.html", + "product_code":"modelarts", + "code":"50", + "des":"This API is used to update a label by label names.PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}Status code: 204Updating a Label by Label", + "doc_type":"api", + "kw":"Updating a Label by Label Names,Label Management,API Reference", + "title":"Updating a Label by Label Names", + "githuburl":"" + }, + { + "uri":"DeleteLabelAndSamples.html", + "product_code":"modelarts", + "code":"51", + "des":"This API is used to delete a label and the files that only contain this label.DELETE /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}NoneStatus", + "doc_type":"api", + "kw":"Deleting a Label and the Files that Only Contain the Label,Label Management,API Reference", + "title":"Deleting a Label and the Files that Only Contain the Label", + "githuburl":"" + }, + { + "uri":"manual_annotation_management.html", + "product_code":"modelarts", + "code":"52", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Manual Labeling", + "title":"Manual Labeling", + "githuburl":"" + }, + { + "uri":"UpdateSamples.html", + "product_code":"modelarts", + "code":"53", + "des":"This API is used to update sample labels in batches, including adding, modifying, and deleting sample labels. If the parameter Labels of a sample in the request body is n", + "doc_type":"api", + "kw":"Updating Sample Labels in Batches,Manual Labeling,API Reference", + "title":"Updating Sample Labels in Batches", + "githuburl":"" + }, + { + "uri":"label_task_management.html", + "product_code":"modelarts", + "code":"54", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Labeling Task Management", + "title":"Labeling Task Management", + "githuburl":"" + }, + { + "uri":"ListWorkforceTasks.html", + "product_code":"modelarts", + "code":"55", + "des":"This API is used to query the team labeling task list of a dataset.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasksNoneStatus code: 200Querying the Team Labelin", + "doc_type":"api", + "kw":"Querying the Team Labeling Task List of a Dataset,Labeling Task Management,API Reference", + "title":"Querying the Team Labeling Task List of a Dataset", + "githuburl":"" + }, + { + "uri":"CreateWorkforceTask.html", + "product_code":"modelarts", + "code":"56", + "des":"This API is used to create a team labeling task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasksStatus code: 200Creating a Team Labeling TaskStatus code: 200OK", + "doc_type":"api", + "kw":"Creating a Team Labeling Task,Labeling Task Management,API Reference", + "title":"Creating a Team Labeling Task", + "githuburl":"" + }, + { + "uri":"DescWorkforceTask.html", + "product_code":"modelarts", + "code":"57", + "des":"This API is used to query the details about a team labeling task.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneStatus code: 200Queryin", + "doc_type":"api", + "kw":"Querying Details About a Team Labeling Task,Labeling Task Management,API Reference", + "title":"Querying Details About a Team Labeling Task", + "githuburl":"" + }, + { + "uri":"StartWorkforceTask.html", + "product_code":"modelarts", + "code":"58", + "des":"This API is used to start a team labeling task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneStarting a Team Labeling TaskStatus code", + "doc_type":"api", + "kw":"Starting a Team Labeling Task,Labeling Task Management,API Reference", + "title":"Starting a Team Labeling Task", + "githuburl":"" + }, + { + "uri":"UpdateWorkforceTask.html", + "product_code":"modelarts", + "code":"59", + "des":"This API is used to update a team labeling task.PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneUpdating a Team Labeling TaskStatus code", + "doc_type":"api", + "kw":"Updating a Team Labeling Task,Labeling Task Management,API Reference", + "title":"Updating a Team Labeling Task", + "githuburl":"" + }, + { + "uri":"DeleteWorkforceTask.html", + "product_code":"modelarts", + "code":"60", + "des":"This API is used to delete a team labeling task.DELETE 
/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneNoneDeleting a Team Labeling TaskStat", + "doc_type":"api", + "kw":"Deleting a Team Labeling Task,Labeling Task Management,API Reference", + "title":"Deleting a Team Labeling Task", + "githuburl":"" + }, + { + "uri":"StartWorkforceSamplingTask.html", + "product_code":"modelarts", + "code":"61", + "des":"This API is used to create a team labeling acceptance task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptanceStatus code: 200Creat", + "doc_type":"api", + "kw":"Creating a Team Labeling Acceptance Task,Labeling Task Management,API Reference", + "title":"Creating a Team Labeling Acceptance Task", + "githuburl":"" + }, + { + "uri":"GetWorkforceSamplingTask.html", + "product_code":"modelarts", + "code":"62", + "des":"This API is used to query the report of a team labeling acceptance task.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/reportNo", + "doc_type":"api", + "kw":"Querying the Report of a Team Labeling Acceptance Task,Labeling Task Management,API Reference", + "title":"Querying the Report of a Team Labeling Acceptance Task", + "githuburl":"" + }, + { + "uri":"UpdateWorkforceSamplingTask.html", + "product_code":"modelarts", + "code":"63", + "des":"This API is used to update the sample status by confirming the acceptance scope and whether the labeled data is overwritten before the acceptance of the team labeling tas", + "doc_type":"api", + "kw":"Updating the Status of a Team Labeling Acceptance Task,Labeling Task Management,API Reference", + "title":"Updating the Status of a Team Labeling Acceptance Task", + "githuburl":"" + }, + { + "uri":"ListWorkerTasks.html", + "product_code":"modelarts", + "code":"64", + "des":"This API is used to query the team labeling task list by a team member.GET /v2/{project_id}/workforces/worker-tasksNoneStatus code: 200Querying the Team Labeling Task Lis", + "doc_type":"api", + "kw":"Querying the Team Labeling Task List by a Team Member,Labeling Task Management,API Reference", + "title":"Querying the Team Labeling Task List by a Team Member", + "githuburl":"" + }, + { + "uri":"workforce_process_management.html", + "product_code":"modelarts", + "code":"65", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Team Labeling Process Management", + "title":"Team Labeling Process Management", + "githuburl":"" + }, + { + "uri":"AcceptSamples.html", + "product_code":"modelarts", + "code":"66", + "des":"This API is used to submit sample review comments of an acceptance task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/batch-c", + "doc_type":"api", + "kw":"Submitting Sample Review Comments of an Acceptance Task,Team Labeling Process Management,API Referen", + "title":"Submitting Sample Review Comments of an Acceptance Task", + "githuburl":"" + }, + { + "uri":"ReviewSamples.html", + "product_code":"modelarts", + "code":"67", + "des":"This API is used to review team labeling results.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/reviewNoneReviewing Team", + "doc_type":"api", + "kw":"Reviewing Team Labeling Results,Team Labeling Process Management,API Reference", + "title":"Reviewing Team Labeling Results", + "githuburl":"" + }, + { + "uri":"UpdateWorkforceTaskSamples.html", + "product_code":"modelarts", + "code":"68", + "des":"This API is used to update labels of team labeling samples in batches.PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samp", + "doc_type":"api", + "kw":"Updating Labels of Team Labeling Samples in Batches,Team Labeling Process Management,API Reference", + "title":"Updating Labels of Team Labeling Samples in Batches", + "githuburl":"" + }, + { + "uri":"workforce_management.html", + "product_code":"modelarts", + "code":"69", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Labeling Team Management", + "title":"Labeling Team Management", + "githuburl":"" + }, + { + "uri":"ListWorkforces.html", + "product_code":"modelarts", + "code":"70", + "des":"This API is used to query the labeling team list.GET /v2/{project_id}/workforcesNoneStatus code: 200Querying the Labeling Team ListStatus code: 200OKSee Error Codes.", + "doc_type":"api", + "kw":"Querying the Labeling Team List,Labeling Team Management,API Reference", + "title":"Querying the Labeling Team List", + "githuburl":"" + }, + { + "uri":"CreateWorkforce.html", + "product_code":"modelarts", + "code":"71", + "des":"This API is used to create a labeling team.POST /v2/{project_id}/workforcesStatus code: 201Creating a Labeling TeamStatus code: 201CreatedSee Error Codes.", + "doc_type":"api", + "kw":"Creating a Labeling Team,Labeling Team Management,API Reference", + "title":"Creating a Labeling Team", + "githuburl":"" + }, + { + "uri":"DescWorkforce.html", + "product_code":"modelarts", + "code":"72", + "des":"This API is used to query the details about a labeling team.GET /v2/{project_id}/workforces/{workforce_id}NoneStatus code: 200Querying Details About a Labeling TeamStatus", + "doc_type":"api", + "kw":"Querying Details About a Labeling Team,Labeling Team Management,API Reference", + "title":"Querying Details About a Labeling Team", + "githuburl":"" + }, + { + "uri":"UpdateWorkforce.html", + "product_code":"modelarts", + "code":"73", + "des":"This API is used to update a labeling team.PUT /v2/{project_id}/workforces/{workforce_id}NoneUpdating a Labeling TeamStatus code: 200OKSee Error Codes.", + "doc_type":"api", + "kw":"Updating a Labeling Team,Labeling Team Management,API Reference", + "title":"Updating a Labeling Team", + "githuburl":"" + }, + { + "uri":"DeleteWorkforce.html", + "product_code":"modelarts", + "code":"74", + "des":"This API is used to delete a labeling team.DELETE /v2/{project_id}/workforces/{workforce_id}NoneStatus code: 204Deleting a Labeling TeamStatus code: 204No ContentSee Erro", + "doc_type":"api", + "kw":"Deleting a Labeling Team,Labeling Team Management,API Reference", + "title":"Deleting a Labeling Team", + "githuburl":"" + }, + { + "uri":"workforce_worker_management.html", + "product_code":"modelarts", + "code":"75", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Labeling Team Member Management", + "title":"Labeling Team Member Management", + "githuburl":"" + }, + { + "uri":"SendEmails.html", + "product_code":"modelarts", + "code":"76", + "des":"This API is used to send an email to a labeling team member.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/notifyStatus code: 200Sending ", + "doc_type":"api", + "kw":"Sending an Email to a Labeling Team Member,Labeling Team Member Management,API Reference", + "title":"Sending an Email to a Labeling Team Member", + "githuburl":"" + }, + { + "uri":"ListAllWorkers.html", + "product_code":"modelarts", + "code":"77", + "des":"This API is used to query the list of all labeling team members.GET /v2/{project_id}/workforces/workersNoneStatus code: 200Querying All Labeling Team AdministratorsStatus", + "doc_type":"api", + "kw":"Querying the List of All Labeling Team Members,Labeling Team Member Management,API Reference", + "title":"Querying the List of All Labeling Team Members", + "githuburl":"" + }, + { + "uri":"ListWorkers.html", + "product_code":"modelarts", + "code":"78", + "des":"This API is used to query the list of labeling team members.GET /v2/{project_id}/workforces/{workforce_id}/workersNoneStatus code: 200Querying the List of Labeling Team M", + "doc_type":"api", + "kw":"Querying the List of Labeling Team Members,Labeling Team Member Management,API Reference", + "title":"Querying the List of Labeling Team Members", + "githuburl":"" + }, + { + "uri":"CreateWorker.html", + "product_code":"modelarts", + "code":"79", + "des":"This API is used to create a labeling team member.POST /v2/{project_id}/workforces/{workforce_id}/workersNoneCreating a Labeling Team MemberStatus code: 201CreatedSee Err", + "doc_type":"api", + "kw":"Creating a Labeling Team Member,Labeling Team Member Management,API Reference", + "title":"Creating a Labeling Team Member", + "githuburl":"" + }, + { + "uri":"DeleteWorkers.html", + "product_code":"modelarts", + "code":"80", + "des":"This API is used to delete labeling team members in batches.POST /v2/{project_id}/workforces/{workforce_id}/workers/batch-deleteStatus code: 200Deleting Labeling Team Mem", + "doc_type":"api", + "kw":"Deleting Labeling Team Members in Batches,Labeling Team Member Management,API Reference", + "title":"Deleting Labeling Team Members in Batches", + "githuburl":"" + }, + { + "uri":"DescWorker.html", + "product_code":"modelarts", + "code":"81", + "des":"This API is used to query details about labeling team members.GET /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneStatus code: 200Querying Details About", + "doc_type":"api", + "kw":"Querying Details About Labeling Team Members,Labeling Team Member Management,API Reference", + "title":"Querying Details About Labeling Team Members", + "githuburl":"" + }, + { + "uri":"UpdateWorker.html", + "product_code":"modelarts", + "code":"82", + "des":"This API is used to update a labeling team member.PUT /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneUpdating a Labeling Team MemberStatus code: 200OKS", + "doc_type":"api", + "kw":"Updating a Labeling Team Member,Labeling Team Member Management,API Reference", + "title":"Updating a Labeling Team Member", + "githuburl":"" + }, + { + "uri":"DeleteWorker.html", + "product_code":"modelarts", + "code":"83", + "des":"This API is used to delete a 
labeling team member.DELETE /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneNoneDeleting a Labeling Team MemberStatus code:", + "doc_type":"api", + "kw":"Deleting a Labeling Team Member,Labeling Team Member Management,API Reference", + "title":"Deleting a Labeling Team Member", + "githuburl":"" + }, + { + "uri":"data_import.html", + "product_code":"modelarts", + "code":"84", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Data Import Task", + "title":"Data Import Task", + "githuburl":"" + }, + { + "uri":"ListImportTasks.html", + "product_code":"modelarts", + "code":"85", + "des":"This API is used to query the dataset import task list by page.GET /v2/{project_id}/datasets/{dataset_id}/import-tasksNoneStatus code: 200Obtaining the Dataset Import Tas", + "doc_type":"api", + "kw":"Querying the Dataset Import Task List,Data Import Task,API Reference", + "title":"Querying the Dataset Import Task List", + "githuburl":"" + }, + { + "uri":"ImportTask.html", + "product_code":"modelarts", + "code":"86", + "des":"This API is used to create a dataset import task to import samples and labels from the storage system to the dataset.POST /v2/{project_id}/datasets/{dataset_id}/import-ta", + "doc_type":"api", + "kw":"Creating an Import Task,Data Import Task,API Reference", + "title":"Creating an Import Task", + "githuburl":"" + }, + { + "uri":"DescImportTask.html", + "product_code":"modelarts", + "code":"87", + "des":"This API is used to query details about a dataset import task.GET /v2/{project_id}/datasets/{dataset_id}/import-tasks/{task_id}NoneStatus code: 200Querying Details About ", + "doc_type":"api", + "kw":"Querying Details About a Dataset Import Task,Data Import Task,API Reference", + "title":"Querying Details About a Dataset Import Task", + "githuburl":"" + }, + { + "uri":"data_export.html", + "product_code":"modelarts", + "code":"88", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Data Export Task", + "title":"Data Export Task", + "githuburl":"" + }, + { + "uri":"GetExportTasksStatusOfDataset.html", + "product_code":"modelarts", + "code":"89", + "des":"This API is used to query the dataset export task list by page.GET /v2/{project_id}/datasets/{dataset_id}/export-tasksNoneStatus code: 200Querying the Export Task List by", + "doc_type":"api", + "kw":"Querying the Dataset Export Task List,Data Export Task,API Reference", + "title":"Querying the Dataset Export Task List", + "githuburl":"" + }, + { + "uri":"ExportTask.html", + "product_code":"modelarts", + "code":"90", + "des":"This API is used to create a dataset export task to export a dataset to OBS or new datasets.POST /v2/{project_id}/datasets/{dataset_id}/export-tasksStatus code: 200Creati", + "doc_type":"api", + "kw":"Creating a Dataset Export Task,Data Export Task,API Reference", + "title":"Creating a Dataset Export Task", + "githuburl":"" + }, + { + "uri":"GetExportTaskStatusOfDataset.html", + "product_code":"modelarts", + "code":"91", + "des":"This API is used to query the status of a dataset export task.GET /v2/{project_id}/datasets/{resource_id}/export-tasks/{task_id}NoneStatus code: 200Querying the Status of", + "doc_type":"api", + "kw":"Querying the Status of a Dataset Export Task,Data Export Task,API Reference", + "title":"Querying the Status of a Dataset Export Task", + "githuburl":"" + }, + { + "uri":"data_sync.html", + "product_code":"modelarts", + "code":"92", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Data Synchronization Task", + "title":"Data Synchronization Task", + "githuburl":"" + }, + { + "uri":"SyncDataSource.html", + "product_code":"modelarts", + "code":"93", + "des":"This API is used to synchronize samples and labeling information from the input dataset path to the dataset.POST /v2/{project_id}/datasets/{dataset_id}/sync-dataNoneNoneS", + "doc_type":"api", + "kw":"Synchronizing a Dataset,Data Synchronization Task,API Reference", + "title":"Synchronizing a Dataset", + "githuburl":"" + }, + { + "uri":"SyncDataSourceState.html", + "product_code":"modelarts", + "code":"94", + "des":"This API is used to query the status of a dataset synchronization task.GET /v2/{project_id}/datasets/{dataset_id}/sync-data/statusNoneStatus code: 200Obtaining the Status", + "doc_type":"api", + "kw":"Querying the Status of a Dataset Synchronization Task,Data Synchronization Task,API Reference", + "title":"Querying the Status of a Dataset Synchronization Task", + "githuburl":"" + }, + { + "uri":"auto_task.html", + "product_code":"modelarts", + "code":"95", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Intelligent Task", + "title":"Intelligent Task", + "githuburl":"" + }, + { + "uri":"ListAutoAnnotationSamples.html", + "product_code":"modelarts", + "code":"96", + "des":"This API is used to query auto labeling samples in a dataset.GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samplesNoneStatus code: 200Querying Auto Labeling", + "doc_type":"api", + "kw":"Querying Auto Labeling Sample List,Intelligent Task,API Reference", + "title":"Querying Auto Labeling Sample List", + "githuburl":"" + }, + { + "uri":"DescribeAutoAnnotationSample.html", + "product_code":"modelarts", + "code":"97", + "des":"This API is used to query details about an auto labeling sample.GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples/{sample_id}NoneStatus code: 200Queryin", + "doc_type":"api", + "kw":"Querying Details About an Auto Labeling Sample,Intelligent Task,API Reference", + "title":"Querying Details About an Auto Labeling Sample", + "githuburl":"" + }, + { + "uri":"ListTasks.html", + "product_code":"modelarts", + "code":"98", + "des":"This API is used to query the intelligent task list by page, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the type parame", + "doc_type":"api", + "kw":"Querying the Intelligent Task List by Page,Intelligent Task,API Reference", + "title":"Querying the Intelligent Task List by Page", + "githuburl":"" + }, + { + "uri":"CreateTask.html", + "product_code":"modelarts", + "code":"99", + "des":"This API is used to start an intelligent task, which can be an auto labeling task or an auto grouping task. You can specify task_type in the request body to start a type ", + "doc_type":"api", + "kw":"Starting Intelligent Tasks,Intelligent Task,API Reference", + "title":"Starting Intelligent Tasks", + "githuburl":"" + }, + { + "uri":"AutoAnnotationProgress.html", + "product_code":"modelarts", + "code":"100", + "des":"This API is used to obtain information about intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the task_id ", + "doc_type":"api", + "kw":"Obtaining Information About Intelligent Tasks,Intelligent Task,API Reference", + "title":"Obtaining Information About Intelligent Tasks", + "githuburl":"" + }, + { + "uri":"StopAutoAnnotation.html", + "product_code":"modelarts", + "code":"101", + "des":"This API is used to stop intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the task_id parameter to stop a ", + "doc_type":"api", + "kw":"Stopping an Intelligent Task,Intelligent Task,API Reference", + "title":"Stopping an Intelligent Task", + "githuburl":"" + }, + { + "uri":"process_task.html", + "product_code":"modelarts", + "code":"102", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Processing Task", + "title":"Processing Task", + "githuburl":"" + }, + { + "uri":"ListProcessorTasks.html", + "product_code":"modelarts", + "code":"103", + "des":"This API is used to query the list of a processing task. You can query the feature analysis tasks and data processing tasks. 
You can specify the task_type parameter to qu", + "doc_type":"api", + "kw":"Querying the List of a Processing Task,Processing Task,API Reference", + "title":"Querying the List of a Processing Task", + "githuburl":"" + }, + { + "uri":"CreateProcessorTask.html", + "product_code":"modelarts", + "code":"104", + "des":"This API is used to create a processing task. You can create feature analysis tasks and data processing tasks. You can specify the id field of template composite paramete", + "doc_type":"api", + "kw":"Creating a Processing Task,Processing Task,API Reference", + "title":"Creating a Processing Task", + "githuburl":"" + }, + { + "uri":"GetProcessorTaskItems.html", + "product_code":"modelarts", + "code":"105", + "des":"This API is used to query the algorithm type for data processing.GET /v2/{project_id}/processor-tasks/itemsNoneStatus code: 200Querying the List of the Algorithm Type for", + "doc_type":"api", + "kw":"Querying the Algorithm Type for Data Processing,Processing Task,API Reference", + "title":"Querying the Algorithm Type for Data Processing", + "githuburl":"" + }, + { + "uri":"DescribeProcessorTask.html", + "product_code":"modelarts", + "code":"106", + "des":"This API is used to query the details about processing tasks. You can query feature analysis tasks and data processing tasks. You can specify the task_id parameter to que", + "doc_type":"api", + "kw":"Querying Details About a Processing Task,Processing Task,API Reference", + "title":"Querying Details About a Processing Task", + "githuburl":"" + }, + { + "uri":"UpdateProcessorTask.html", + "product_code":"modelarts", + "code":"107", + "des":"This API is used to update a processing task. You can update feature analysis tasks and data processing tasks. Only the description of updated tasks is supported. You can", + "doc_type":"api", + "kw":"Updating a Processing Task,Processing Task,API Reference", + "title":"Updating a Processing Task", + "githuburl":"" + }, + { + "uri":"DeleteProcessorTask.html", + "product_code":"modelarts", + "code":"108", + "des":"This API is used to delete a processing task. You can delete feature analysis tasks and data processing tasks. 
A specific task can be deleted by specifying the task_id pa", + "doc_type":"api", + "kw":"Deleting a Processing Task,Processing Task,API Reference", + "title":"Deleting a Processing Task", + "githuburl":"" + }, + { + "uri":"ListProcessorTaskVersions.html", + "product_code":"modelarts", + "code":"109", + "des":"This API is used to query the version list of a data processing task.GET /v2/{project_id}/processor-tasks/{task_id}/versionsNoneStatus code: 200Querying the Version List ", + "doc_type":"api", + "kw":"Querying the Version List of a Data Processing Task,Processing Task,API Reference", + "title":"Querying the Version List of a Data Processing Task", + "githuburl":"" + }, + { + "uri":"CreateProcessorTaskVersion.html", + "product_code":"modelarts", + "code":"110", + "des":"This API is used to create a data processing task version.POST /v2/{project_id}/processor-tasks/{task_id}/versionsStatus code: 200Creating a Data Validation Task VersionS", + "doc_type":"api", + "kw":"Creating a Data Processing Task Version,Processing Task,API Reference", + "title":"Creating a Data Processing Task Version", + "githuburl":"" + }, + { + "uri":"DescProcessorTaskVersion.html", + "product_code":"modelarts", + "code":"111", + "des":"This API is used to query the details about the version of a data processing task.GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}NoneStatus code: 200", + "doc_type":"api", + "kw":"Querying the Details About the Version of a Data Processing Task,Processing Task,API Reference", + "title":"Querying the Details About the Version of a Data Processing Task", + "githuburl":"" + }, + { + "uri":"DeleteProcessorTaskVersion.html", + "product_code":"modelarts", + "code":"112", + "des":"This API is used to delete a data processing task version.DELETE /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}NoneNoneDeleting a Data Processing Task V", + "doc_type":"api", + "kw":"Deleting a Data Processing Task Version,Processing Task,API Reference", + "title":"Deleting a Data Processing Task Version", + "githuburl":"" + }, + { + "uri":"ListProcessorTaskVersionResults.html", + "product_code":"modelarts", + "code":"113", + "des":"This API is used to query the result of a data processing task version.GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/resultsNoneStatus code: 200Que", + "doc_type":"api", + "kw":"Querying the Result of a Data Processing Task Version,Processing Task,API Reference", + "title":"Querying the Result of a Data Processing Task Version", + "githuburl":"" + }, + { + "uri":"StopProcessorTaskVersion.html", + "product_code":"modelarts", + "code":"114", + "des":"This API is used to stop the version of a data processing task.POST /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/stopNoneNoneThis API is used to stop ", + "doc_type":"api", + "kw":"Stopping the Version of a Data Processing Task,Processing Task,API Reference", + "title":"Stopping the Version of a Data Processing Task", + "githuburl":"" + }, + { + "uri":"modelarts_03_0043.html", + "product_code":"modelarts", + "code":"115", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Training Management", + "title":"Training Management", + "githuburl":"" + }, + { + "uri":"modelarts_03_0044.html", + "product_code":"modelarts", + "code":"116", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Training Jobs", + "title":"Training Jobs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0045.html", + "product_code":"modelarts", + "code":"117", + "des":"This API is used to create a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Traini", + "doc_type":"api", + "kw":"Creating a Training Job,Training Jobs,API Reference", + "title":"Creating a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0046.html", + "product_code":"modelarts", + "code":"118", + "des":"This API is used to query the created training jobs that meet the search criteria.GET /v1/{project_id}/training-jobsTable 1 describes the required parameters.ParametersPa", + "doc_type":"api", + "kw":"Querying a Training Job List,Training Jobs,API Reference", + "title":"Querying a Training Job List", + "githuburl":"" + }, + { + "uri":"modelarts_03_0047.html", + "product_code":"modelarts", + "code":"119", + "des":"This API is used to query the details about a specified training job based on the job ID.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}Table 1 describe", + "doc_type":"api", + "kw":"Querying the Details About a Training Job Version,Training Jobs,API Reference", + "title":"Querying the Details About a Training Job Version", + "githuburl":"" + }, + { + "uri":"modelarts_03_0048.html", + "product_code":"modelarts", + "code":"120", + "des":"This API is used to delete a version of a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Quer", + "doc_type":"api", + "kw":"Deleting a Version of a Training Job,Training Jobs,API Reference", + "title":"Deleting a Version of a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0049.html", + "product_code":"modelarts", + "code":"121", + "des":"This API is used to query the version of a specified training job based on the job ID.GET /v1/{project_id}/training-jobs/{job_id}/versionsTable 1 describes the required p", + "doc_type":"api", + "kw":"Querying a List of Training Job Versions,Training Jobs,API Reference", + "title":"Querying a List of Training Job Versions", + "githuburl":"" + }, + { + "uri":"modelarts_03_0050.html", + "product_code":"modelarts", + "code":"122", + "des":"This API is used to create a version of a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Quer", + "doc_type":"api", + "kw":"Creating a Version of a Training Job,Training Jobs,API Reference", + "title":"Creating a Version of a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0051.html", + "product_code":"modelarts", + "code":"123", + "des":"This API is used to stop a training job.Calling this API is an asynchronous operation. 
The job status can be obtained by calling the APIs described in Querying a Training", + "doc_type":"api", + "kw":"Stopping a Training Job,Training Jobs,API Reference", + "title":"Stopping a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0052.html", + "product_code":"modelarts", + "code":"124", + "des":"This API is used to modify the description of a training job.PUT /v1/{project_id}/training-jobs/{job_id}Table 1 describes the required parameters.ParametersParameterManda", + "doc_type":"api", + "kw":"Modifying the Description of a Training Job,Training Jobs,API Reference", + "title":"Modifying the Description of a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0053.html", + "product_code":"modelarts", + "code":"125", + "des":"This API is used to delete a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Traini", + "doc_type":"api", + "kw":"Deleting a Training Job,Training Jobs,API Reference", + "title":"Deleting a Training Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0054.html", + "product_code":"modelarts", + "code":"126", + "des":"This API is used to obtain the name of a training job log file.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/log/file-namesTable 1 describes the requi", + "doc_type":"api", + "kw":"Obtaining the Name of a Training Job Log File,Training Jobs,API Reference", + "title":"Obtaining the Name of a Training Job Log File", + "githuburl":"" + }, + { + "uri":"modelarts_03_0056.html", + "product_code":"modelarts", + "code":"127", + "des":"This API is used to query the details about a built-in model.GET /v1/{project_id}/built-in-algorithmsTable 1 describes the required parameters.ParametersParameterMandator", + "doc_type":"api", + "kw":"Querying a Built-in Algorithm,Training Jobs,API Reference", + "title":"Querying a Built-in Algorithm", + "githuburl":"" + }, + { + "uri":"modelarts_03_0149.html", + "product_code":"modelarts", + "code":"128", + "des":"This API is used to query detailed information about training job logs by row.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/aom-logTable 1 describes t", + "doc_type":"api", + "kw":"Querying Training Job Logs,Training Jobs,API Reference", + "title":"Querying Training Job Logs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0150.html", + "product_code":"modelarts", + "code":"129", + "des":"This API is used to query monitoring information about a single container of a job.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/pod/{pod_name}/metr", + "doc_type":"api", + "kw":"Querying Monitoring Information About a Single Container of a Job,Training Jobs,API Reference", + "title":"Querying Monitoring Information About a Single Container of a Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0151.html", + "product_code":"modelarts", + "code":"130", + "des":"This API is used to query monitoring information about resource pool nodes.GET /v1/{project_id}/pools/{pool_id}/nodes/{node_ip}/metric-statisticTable 1 describes the re", + "doc_type":"api", + "kw":"Querying Monitoring Information About Resource Pool Nodes,Training Jobs,API Reference", + "title":"Querying Monitoring Information About Resource Pool Nodes", + "githuburl":"" + }, + { + "uri":"en-us_topic_0000001147936839.html", + "product_code":"modelarts", + "code":"131", + "des":"You can use this API to query the overview information about all job versions created by a user based on specified 
conditions, including the statuses and GPU quantity of ", + "doc_type":"api", + "kw":"Querying the Statuses and GPU Quantity of All Job Versions,Training Jobs,API Reference", + "title":"Querying the Statuses and GPU Quantity of All Job Versions", + "githuburl":"" + }, + { + "uri":"modelarts_03_0057.html", + "product_code":"modelarts", + "code":"132", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Training Job Parameter Configuration", + "title":"Training Job Parameter Configuration", + "githuburl":"" + }, + { + "uri":"modelarts_03_0058.html", + "product_code":"modelarts", + "code":"133", + "des":"This API is used to create a training job configuration.POST /v1/{project_id}/training-job-configsTable 1 describes the required parameters.ParametersParameterMandatoryTy", + "doc_type":"api", + "kw":"Creating a Training Job Configuration,Training Job Parameter Configuration,API Reference", + "title":"Creating a Training Job Configuration", + "githuburl":"" + }, + { + "uri":"modelarts_03_0059.html", + "product_code":"modelarts", + "code":"134", + "des":"This API is used to query the created training job configurations that meet the search criteria.GET /v1/{project_id}/training-job-configsTable 1 describes the required pa", + "doc_type":"api", + "kw":"Querying a List of Training Job Configurations,Training Job Parameter Configuration,API Reference", + "title":"Querying a List of Training Job Configurations", + "githuburl":"" + }, + { + "uri":"modelarts_03_0060.html", + "product_code":"modelarts", + "code":"135", + "des":"This API is used to modify a training job configuration.PUT /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required parameters.ParametersParamet", + "doc_type":"api", + "kw":"Modifying a Training Job Configuration,Training Job Parameter Configuration,API Reference", + "title":"Modifying a Training Job Configuration", + "githuburl":"" + }, + { + "uri":"modelarts_03_0061.html", + "product_code":"modelarts", + "code":"136", + "des":"This API is used to delete a training job configuration.DELETE /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required parameters.Parameter desc", + "doc_type":"api", + "kw":"Deleting a Training Job Configuration,Training Job Parameter Configuration,API Reference", + "title":"Deleting a Training Job Configuration", + "githuburl":"" + }, + { + "uri":"modelarts_03_0062.html", + "product_code":"modelarts", + "code":"137", + "des":"This API is used to query the details about a specified training job configuration.GET /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required p", + "doc_type":"api", + "kw":"Querying the Details About a Training Job Configuration,Training Job Parameter Configuration,API Ref", + "title":"Querying the Details About a Training Job Configuration", + "githuburl":"" + }, + { + "uri":"modelarts_03_0063.html", + "product_code":"modelarts", + "code":"138", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Visualization Jobs", + "title":"Visualization Jobs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0064.html", + "product_code":"modelarts", + "code":"139", + "des":"This API is used to create a visualization job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a V", + "doc_type":"api", + "kw":"Creating a Visualization Job,Visualization Jobs,API Reference", + "title":"Creating a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0065.html", + "product_code":"modelarts", + "code":"140", + "des":"This API is used to query the visualization jobs that meet the search criteria.GET /v1/{project_id}/visualization-jobsTable 1 describes the required parameters.Parameters", + "doc_type":"api", + "kw":"Querying a Visualization Job List,Visualization Jobs,API Reference", + "title":"Querying a Visualization Job List", + "githuburl":"" + }, + { + "uri":"modelarts_03_0066.html", + "product_code":"modelarts", + "code":"141", + "des":"This API is used to query the details about a specified visualization job based on the job name.GET /v1/{project_id}/visualization-jobs/{job_id}Table 1 describes the requ", + "doc_type":"api", + "kw":"Querying the Details About a Visualization Job,Visualization Jobs,API Reference", + "title":"Querying the Details About a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0067.html", + "product_code":"modelarts", + "code":"142", + "des":"This API is used to modify the description of a visualization job.PUT /v1/{project_id}/visualization-jobs/{job_id}Table 1 describes the required parameters.ParametersPara", + "doc_type":"api", + "kw":"Modifying the Description of a Visualization Job,Visualization Jobs,API Reference", + "title":"Modifying the Description of a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0068.html", + "product_code":"modelarts", + "code":"143", + "des":"This API is used to delete a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a ", + "doc_type":"api", + "kw":"Deleting a Visualization Job,Visualization Jobs,API Reference", + "title":"Deleting a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0069.html", + "product_code":"modelarts", + "code":"144", + "des":"This API is used to stop a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Vi", + "doc_type":"api", + "kw":"Stopping a Visualization Job,Visualization Jobs,API Reference", + "title":"Stopping a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0070.html", + "product_code":"modelarts", + "code":"145", + "des":"This API is used to restart a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a", + "doc_type":"api", + "kw":"Restarting a Visualization Job,Visualization Jobs,API Reference", + "title":"Restarting a Visualization Job", + "githuburl":"" + }, + { + "uri":"modelarts_03_0071.html", + "product_code":"modelarts", + "code":"146", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Resource and Engine Specifications", + "title":"Resource and Engine Specifications", + "githuburl":"" + }, + { + "uri":"modelarts_03_0072.html", + "product_code":"modelarts", + "code":"147", + "des":"This API is used to query the resource specifications of a specified job.You must specify the resource specifications when creating a training job or an inference job.GET", + "doc_type":"api", + "kw":"Querying Job Resource Specifications,Resource and Engine Specifications,API Reference", + "title":"Querying Job Resource Specifications", + "githuburl":"" + }, + { + "uri":"modelarts_03_0073.html", + "product_code":"modelarts", + "code":"148", + "des":"This API is used to query the engine type and version of a specified job.You must specify the engine specifications when creating a training job or an inference job.GET /", + "doc_type":"api", + "kw":"Querying Job Engine Specifications,Resource and Engine Specifications,API Reference", + "title":"Querying Job Engine Specifications", + "githuburl":"" + }, + { + "uri":"modelarts_03_0074.html", + "product_code":"modelarts", + "code":"149", + "des":"Table 1 describes the job statuses.", + "doc_type":"api", + "kw":"Job Statuses,Training Management,API Reference", + "title":"Job Statuses", + "githuburl":"" + }, + { + "uri":"modelarts_03_0075.html", + "product_code":"modelarts", + "code":"150", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Model Management", + "title":"Model Management", + "githuburl":"" + }, + { + "uri":"modelarts_03_0153.html", + "product_code":"modelarts", + "code":"151", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Models", + "title":"Models", + "githuburl":"" + }, + { + "uri":"modelarts_03_0076.html", + "product_code":"modelarts", + "code":"152", + "des":"You can use the API to import a model.Ensure that the execution code and model have been uploaded to OBS. 
By default, the models generated by a training job are stored in", + "doc_type":"api", + "kw":"Importing a Model,Models,API Reference", + "title":"Importing a Model", + "githuburl":"" + }, + { + "uri":"modelarts_03_0077.html", + "product_code":"modelarts", + "code":"153", + "des":"This API is used to query the models that meet the search criteria.GET /v1/{project_id}/modelsTable 1 describes the required parameters.ParametersParameterMandatoryTypeDe", + "doc_type":"api", + "kw":"Querying a Model List,Models,API Reference", + "title":"Querying a Model List", + "githuburl":"" + }, + { + "uri":"modelarts_03_0078.html", + "product_code":"modelarts", + "code":"154", + "des":"This API is used to query details about a model based on the model ID.GET /v1/{project_id}/models/{model_id}Table 1 describes the required parameters.ParametersParameterM", + "doc_type":"api", + "kw":"Querying the Details About a Model,Models,API Reference", + "title":"Querying the Details About a Model", + "githuburl":"" + }, + { + "uri":"modelarts_03_0079.html", + "product_code":"modelarts", + "code":"155", + "des":"This API is used to delete a model based on the model ID. When cascade is set to true, the model specified by the model ID and models of different versions with the same ", + "doc_type":"api", + "kw":"Deleting a Model,Models,API Reference", + "title":"Deleting a Model", + "githuburl":"" + }, + { + "uri":"modelarts_03_0081.html", + "product_code":"modelarts", + "code":"156", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Service Management", + "title":"Service Management", + "githuburl":"" + }, + { + "uri":"modelarts_03_0082.html", + "product_code":"modelarts", + "code":"157", + "des":"This API is used to deploy a model as a service.POST /v1/{project_id}/servicesTable 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionproject", + "doc_type":"api", + "kw":"Deploying a Model as a Service,Service Management,API Reference", + "title":"Deploying a Model as a Service", + "githuburl":"" + }, + { + "uri":"modelarts_03_0083.html", + "product_code":"modelarts", + "code":"158", + "des":"This API is used to obtain model services.GET /v1/{project_id}/servicesTable 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionproject_idYesS", + "doc_type":"api", + "kw":"Querying a Service List,Service Management,API Reference", + "title":"Querying a Service List", + "githuburl":"" + }, + { + "uri":"modelarts_03_0084.html", + "product_code":"modelarts", + "code":"159", + "des":"This API is used to query the details about a model service based on the service ID.GET /v1/{project_id}/services/{service_id}Table 1 describes the required parameters.Pa", + "doc_type":"api", + "kw":"Querying the Details About a Service,Service Management,API Reference", + "title":"Querying the Details About a Service", + "githuburl":"" + }, + { + "uri":"modelarts_03_0086.html", + "product_code":"modelarts", + "code":"160", + "des":"This API is used to update configurations of a model service. 
It can also be used to start or stop a service.PUT /v1/{project_id}/services/{service_id}Table 1 describes t", + "doc_type":"api", + "kw":"Updating Service Configurations,Service Management,API Reference", + "title":"Updating Service Configurations", + "githuburl":"" + }, + { + "uri":"modelarts_03_0087.html", + "product_code":"modelarts", + "code":"161", + "des":"This API is used to query service monitoring information.GET /v1/{project_id}/services/{service_id}/monitorTable 1 describes the required parameters.ParametersParameterMa", + "doc_type":"api", + "kw":"Querying Service Monitoring Information,Service Management,API Reference", + "title":"Querying Service Monitoring Information", + "githuburl":"" + }, + { + "uri":"modelarts_03_0088.html", + "product_code":"modelarts", + "code":"162", + "des":"This API is used to query the update logs of a real-time service.GET /v1/{project_id}/services/{service_id}/logsTable 1 describes the required parameters.ParametersParame", + "doc_type":"api", + "kw":"Querying Service Update Logs,Service Management,API Reference", + "title":"Querying Service Update Logs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0155.html", + "product_code":"modelarts", + "code":"163", + "des":"This API is used to query service event logs, including service operation records, key actions during deployment, and deployment failure causes.GET /v1/{project_id}/servi", + "doc_type":"api", + "kw":"Querying Service Event Logs,Service Management,API Reference", + "title":"Querying Service Event Logs", + "githuburl":"" + }, + { + "uri":"modelarts_03_0089.html", + "product_code":"modelarts", + "code":"164", + "des":"This API is used to delete a model service. You can delete your own services only.Table 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionpro", + "doc_type":"api", + "kw":"Deleting a Service,Service Management,API Reference", + "title":"Deleting a Service", + "githuburl":"" + }, + { + "uri":"modelarts_03_0200.html", + "product_code":"modelarts", + "code":"165", + "des":"This API is used to query supported service deployment specifications.URIGET /v1/{project_id}/services/specificationsNoneSample requestGET https://endpoint/v1/{project", + "doc_type":"api", + "kw":"Querying Supported Service Deployment Specifications,Service Management,API Reference", + "title":"Querying Supported Service Deployment Specifications", + "githuburl":"" + }, + { + "uri":"authorization.html", + "product_code":"modelarts", + "code":"166", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Authorization Management", + "title":"Authorization Management", + "githuburl":"" + }, + { + "uri":"CreateAuthorization.html", + "product_code":"modelarts", + "code":"167", + "des":"This API is used to configure ModelArts authorization. 
ModelArts functions such as training management, development environment, data management, and real-time services c", + "doc_type":"api", + "kw":"Configuring Authorization,Authorization Management,API Reference", + "title":"Configuring Authorization", + "githuburl":"" + }, + { + "uri":"GetAuthorizations.html", + "product_code":"modelarts", + "code":"168", + "des":"This API is used to view an authorization list.GET /v2/{project_id}/authorizationsNoneStatus code: 200View an authorization list.Status code: 200OKSee Error Codes.", + "doc_type":"api", + "kw":"Viewing an Authorization List,Authorization Management,API Reference", + "title":"Viewing an Authorization List", + "githuburl":"" + }, + { + "uri":"DeleteAuthorizations.html", + "product_code":"modelarts", + "code":"169", + "des":"This API is used to delete the authorization of a specified user or all users.DELETE /v2/{project_id}/authorizationsNoneNoneDelete the authorization of a specified user.S", + "doc_type":"api", + "kw":"Deleting Authorization,Authorization Management,API Reference", + "title":"Deleting Authorization", + "githuburl":"" + }, + { + "uri":"CreateModelArtsAgency.html", + "product_code":"modelarts", + "code":"170", + "des":"This API is used to create an agency so that ModelArts can access dependent services such as OBS, SWR, and IEF.POST /v2/{project_id}/agencyNoneCreate a ModelArts agency.S", + "doc_type":"api", + "kw":"Creating a ModelArts Agency,Authorization Management,API Reference", + "title":"Creating a ModelArts Agency", + "githuburl":"" + }, + { + "uri":"workspace.html", + "product_code":"modelarts", + "code":"171", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Workspace Management", + "title":"Workspace Management", + "githuburl":"" + }, + { + "uri":"CreateWorkspace.html", + "product_code":"modelarts", + "code":"172", + "des":"This API is used to create a workspace. 
The name of the created workspace cannot be default, which is the name of the default workspace reserved by the system.POST /v1/{p", + "doc_type":"api", + "kw":"Creating a Workspace,Workspace Management,API Reference", + "title":"Creating a Workspace", + "githuburl":"" + }, + { + "uri":"ListWorkspaces.html", + "product_code":"modelarts", + "code":"173", + "des":"This API is used to obtain a workspace list with detailed information contained in the response body.GET /v1/{project_id}/workspacesNoneStatus code: 200Querying a Workspa", + "doc_type":"api", + "kw":"Querying a Workspace List,Workspace Management,API Reference", + "title":"Querying a Workspace List", + "githuburl":"" + }, + { + "uri":"DeleteWorkspace.html", + "product_code":"modelarts", + "code":"174", + "des":"This API is used to delete a workspace.DELETE /v1/{project_id}/workspaces/{workspace_id}NoneStatus code: 200Deleting a WorkspaceStatus code: 200OKSee Error Codes.", + "doc_type":"api", + "kw":"Deleting a Workspace,Workspace Management,API Reference", + "title":"Deleting a Workspace", + "githuburl":"" + }, + { + "uri":"ShowWorkspaceInfo.html", + "product_code":"modelarts", + "code":"175", + "des":"This API is used to obtain details about a workspace.GET /v1/{project_id}/workspaces/{workspace_id}NoneStatus code: 200Querying Details About a WorkspaceStatus code: 200O", + "doc_type":"api", + "kw":"Querying Details About a Workspace,Workspace Management,API Reference", + "title":"Querying Details About a Workspace", + "githuburl":"" + }, + { + "uri":"UpdateWorkspace.html", + "product_code":"modelarts", + "code":"176", + "des":"This API is used to modify a workspace.PUT /v1/{project_id}/workspaces/{workspace_id}Status code: 200Modifying a WorkspaceStatus code: 200OKSee Error Codes.", + "doc_type":"api", + "kw":"Modifying a Workspace,Workspace Management,API Reference", + "title":"Modifying a Workspace", + "githuburl":"" + }, + { + "uri":"ListWorkspaceQuotas.html", + "product_code":"modelarts", + "code":"177", + "des":"This API is used to obtain workspace quotas.GET /v1/{project_id}/workspaces/{workspace_id}/quotasNoneStatus code: 200Querying Workspace QuotasStatus code: 200OKSee Error ", + "doc_type":"api", + "kw":"Querying a Workspace Quota,Workspace Management,API Reference", + "title":"Querying a Workspace Quota", + "githuburl":"" + }, + { + "uri":"UpdateWorkspaceQuotas.html", + "product_code":"modelarts", + "code":"178", + "des":"This API is used to modify a workspace quota.PUT /v1/{project_id}/workspaces/{workspace_id}/quotasStatus code: 200Modifying Workspace QuotasStatus code: 200OKSee Error Co", + "doc_type":"api", + "kw":"Modifying a Workspace Quota,Workspace Management,API Reference", + "title":"Modifying a Workspace Quota", + "githuburl":"" + }, + { + "uri":"modelarts_03_0400.html", + "product_code":"modelarts", + "code":"179", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Application Cases", + "title":"Application Cases", + "githuburl":"" + }, + { + "uri":"modelarts_03_0401.html", + "product_code":"modelarts", + "code":"180", + "des":"This section describes how to train a model on ModelArts by calling a series of APIs.The process for creating a training job using the TensorFlow framework is as follows:", + "doc_type":"api", + "kw":"Creating a Training Job Using the TensorFlow Framework,Application Cases,API Reference", + "title":"Creating a Training Job Using the TensorFlow Framework", + "githuburl":"" + }, + { + "uri":"modelarts_03_0093.html", + "product_code":"modelarts", + "code":"181", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Common Parameters", + "title":"Common Parameters", + "githuburl":"" + }, + { + "uri":"modelarts_03_0094.html", + "product_code":"modelarts", + "code":"182", + "des":"Table 1 describes the status codes.", + "doc_type":"api", + "kw":"Status Code,Common Parameters,API Reference", + "title":"Status Code", + "githuburl":"" + }, + { + "uri":"modelarts_03_0095.html", + "product_code":"modelarts", + "code":"183", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Error Codes,Common Parameters,API Reference", + "title":"Error Codes", + "githuburl":"" + }, + { + "uri":"modelarts_03_0147.html", + "product_code":"modelarts", + "code":"184", + "des":"A project ID or name is required for some requests when an API is called. Therefore, obtain the project ID and name before calling the API. Use either of the following me", + "doc_type":"api", + "kw":"Obtaining a Project ID and Name,Common Parameters,API Reference", + "title":"Obtaining a Project ID and Name", + "githuburl":"" + }, + { + "uri":"modelarts_03_0148.html", + "product_code":"modelarts", + "code":"185", + "des":"When you call APIs, certain requests require the account name and ID. To obtain an account name and ID, do as follows:Sign up and log in to the console.Hover the cursor o", + "doc_type":"api", + "kw":"Obtaining an Account Name and ID,Common Parameters,API Reference", + "title":"Obtaining an Account Name and ID", + "githuburl":"" + }, + { + "uri":"modelarts_03_0006.html", + "product_code":"modelarts", + "code":"186", + "des":"When you call APIs, certain requests require the username and ID. To obtain a username and ID, do as follows:Log in to the management console after registration.In the up", + "doc_type":"api", + "kw":"Obtaining a Username and ID,Common Parameters,API Reference", + "title":"Obtaining a Username and ID", + "githuburl":"" + }, + { + "uri":"modelarts_03_0097.html", + "product_code":"modelarts", + "code":"187", + "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "doc_type":"api", + "kw":"Change History,API Reference", + "title":"Change History", + "githuburl":"" + } +] \ No newline at end of file diff --git a/modelarts/api-ref/AcceptSamples.html b/modelarts/api-ref/AcceptSamples.html new file mode 100644 index 00000000..b5f53ad7 --- /dev/null +++ b/modelarts/api-ref/AcceptSamples.html @@ -0,0 +1,191 @@ + + +

Submitting Sample Review Comments of an Acceptance Task

+

Function

This API is used to submit sample review comments of an acceptance task.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/batch-comment

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

comments

+

No

+

Array of SampleComment objects

+

Review comment list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SampleComment

Parameter

+

Mandatory

+

Type

+

Description

+

accept

+

Yes

+

Boolean

+

Whether the sample passes the acceptance review. The options are as follows:

+
  • true: passed

    +
  • false: not passed

    +
+

comment

+

No

+

String

+

Review comment, which contains 0 to 256 characters, excluding special characters (!<>=&"').

+

sample_id

+

No

+

String

+

Sample ID.

+

score

+

No

+

String

+

Review score, whose value can be A, B, C, or D, in descending order.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Submitting Sample Review Comments of an Acceptance Task

+
{
+  "comments" : [ {
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "sample_id" : "09ac49d5b06385849c8769fdcf0f6d60",
+    "accept" : true,
+    "comment" : "",
+    "score" : "A"
+  } ]
+}
+
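For reference, the request above could be sent with a short Python sketch like the following. The endpoint, project ID, dataset ID, labeling task ID, and token values are placeholders, not real values; obtaining the token is described in Authentication.

# Minimal sketch: submit acceptance review comments. All values below are placeholders.
import json
import urllib.request

endpoint = "https://modelarts.example.com"   # hypothetical endpoint; use your region's endpoint
project_id = "your-project-id"
dataset_id = "your-dataset-id"
workforce_task_id = "your-workforce-task-id"
token = "your-iam-token"                     # obtained as described in Authentication

url = (endpoint + "/v2/" + project_id + "/datasets/" + dataset_id
       + "/workforce-tasks/" + workforce_task_id + "/acceptance/batch-comment")
body = {
    "comments": [{
        "worker_id": "8c15ad080d3eabad14037b4eb00d6a6f",
        "sample_id": "09ac49d5b06385849c8769fdcf0f6d60",
        "accept": True,
        "comment": "",
        "score": "A"
    }]
}
request = urllib.request.Request(
    url,
    data=json.dumps(body).encode("utf-8"),
    headers={"Content-Type": "application/json", "X-Auth-Token": token},
    method="POST",
)
with urllib.request.urlopen(request) as response:
    print(response.status)  # 200 indicates that the comments were submitted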
+

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/AutoAnnotationProgress.html b/modelarts/api-ref/AutoAnnotationProgress.html new file mode 100644 index 00000000..f239323e --- /dev/null +++ b/modelarts/api-ref/AutoAnnotationProgress.html @@ -0,0 +1,1728 @@ + + +

Obtaining Information About Intelligent Tasks

+

Function

This API is used to obtain information about intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the task_id parameter to query the details about a specific task.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

Task ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

code

+

String

+

Task running status code.

+

config

+

SmartTaskConfig object

+

Task configuration.

+

create_time

+

String

+

Task creation time.

+

elapsed_time

+

Long

+

Execution time.

+

error_code

+

String

+

Error code.

+

error_detail

+

String

+

Error details.

+

error_msg

+

String

+

Error message.

+

message

+

String

+

Task running information.

+

progress

+

Float

+

Task progress percentage.

+

resource_id

+

String

+

Resource ID.

+

result

+

Result object

+

Task result.

+

status

+

Integer

+

Task status. Options:

+
  • -1: queuing

    +
  • 0: initialized

    +
  • 1: running

    +
  • 2: failed

    +
  • 3: succeeded

    +
  • 4: stopping

    +
  • 5: stopped

    +
+

task_id

+

String

+

Task ID.

+

task_name

+

String

+

Task name.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SmartTaskConfig

Parameter

+

Type

+

Description

+

algorithm_type

+

String

+

Algorithm type for auto labeling. Options:

+
  • fast: Only labeled samples are used for training. This type of algorithm achieves faster labeling.

    +
  • accurate: In addition to labeled samples, unlabeled samples are used for semi-supervised training. This type of algorithm achieves more accurate labeling.

    +
+

ambiguity

+

Boolean

+

Whether to perform clustering based on the image blurring degree.

+

annotation_output

+

String

+

Output path of the active learning labeling result.

+

collect_rule

+

String

+

Sample collection rule. The default value is all, indicating full collection. Currently, only the value all is supported.

+

collect_sample

+

Boolean

+

Whether to enable sample collection. The options are as follows:

+
  • true: Enable sample collection. (Default value)

    +
  • false: Do not enable sample collection.

    +
+

confidence_scope

+

String

+

Confidence range of key samples. The minimum and maximum values are separated by hyphens (-). Example: 0.10-0.90.

+

description

+

String

+

Task description.

+

engine_name

+

String

+

Engine name.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg, dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg, 2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

flavor

+

Flavor object

+

Training resource flavor.

+

image_brightness

+

Boolean

+

Whether to perform clustering based on the image brightness.

+

image_colorfulness

+

Boolean

+

Whether to perform clustering based on the image color.

+

inf_cluster_id

+

String

+

ID of a dedicated cluster. This parameter is left blank by default, indicating that a dedicated cluster is not used. When using the dedicated cluster to deploy services, ensure that the cluster status is normal. After this parameter is set, the network configuration of the cluster is used, and the vpc_id parameter does not take effect.

+

inf_config_list

+

Array of InfConfig objects

+

Configuration list required for running an inference task, which is optional and left blank by default.

+

inf_output

+

String

+

Output path of inference in active learning.

+

infer_result_output_dir

+

String

+

OBS directory for storing sample prediction results. This parameter is optional. The {service_id}-infer-result subdirectory in the output_dir directory is used by default.

+

key_sample_output

+

String

+

Output path of hard examples in active learning.

+

log_url

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank.

+

manifest_path

+

String

+

Path of the manifest file, which is used as the input for training and inference.

+

model_id

+

String

+

Model ID.

+

model_name

+

String

+

Model name.

+

model_parameter

+

String

+

Model parameter.

+

model_version

+

String

+

Model version.

+

n_clusters

+

Integer

+

Number of clusters.

+

name

+

String

+

Task name.

+

output_dir

+

String

+

Sample output path. The format is as follows: Dataset output path/Dataset name-Dataset ID/annotation/auto-deploy/. Example: /test/work_1608083108676/dataset123-g6IO9qSu6hoxwCAirfm/annotation/auto-deploy/.

+

parameters

+

Array of TrainingParameter objects

+

Running parameters of a training job.

+

pool_id

+

String

+

ID of a resource pool.

+

property

+

String

+

Attribute name.

+

req_uri

+

String

+

Inference path of a batch job.

+

result_type

+

Integer

+

Processing mode of auto grouping results. The options are as follows:

+
  • 0: Save to OBS.

    +
  • 1: Save to samples.

    +
+

samples

+

Array of SampleLabels objects

+

List of labeling information for samples to be auto labeled.

+

stop_time

+

Integer

+

Timeout interval, in minutes. The default value is 15 minutes. This parameter is used only in the scenario of auto labeling for videos.

+

time

+

String

+

Timestamp in active learning.

+

train_data_path

+

String

+

Path for storing existing training datasets.

+

train_url

+

String

+

OBS URL where the output files of a training job are stored. By default, this parameter is left blank.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

worker_server_num

+

Integer

+

Number of workers in a training job.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 ExportParams

Parameter

+

Type

+

Description

+

clear_hard_property

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

String

+

Ratio for splitting the training set and validation set when the specified version is released. The default value is 1.00, indicating that all samples in the released version are used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples generated from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 6 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 8 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+ +
+ + + + + + + + + +
Table 9 Flavor

Parameter

+

Type

+

Description

+

code

+

String

+

Attribute code of a resource specification, which is used for task creation.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 InfConfig

Parameter

+

Type

+

Description

+

envs

+

Map<String,String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank. To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

instance_count

+

Integer

+

Number of instances for model deployment, that is, the number of compute nodes.

+

model_id

+

String

+

Model ID.

+

specification

+

String

+

Resource specifications of real-time services. For details, see Deploying Services.

+

weight

+

Integer

+

Traffic weight allocated to a model. This parameter is mandatory only when infer_type is set to real-time. The sum of the weights must be 100.

+
+
+ +
+ + + + + + + + + + + + + +
Table 11 TrainingParameter

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name.

+

value

+

String

+

Parameter value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 Result

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Integer

+

Number of labeled samples.

+

confidence_scope

+

String

+

Confidence range.

+

dataset_name

+

String

+

Dataset name.

+

dataset_type

+

String

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

description

+

String

+

Description.

+

dlf_model_job_name

+

String

+

Name of a DLF model inference job.

+

dlf_service_job_name

+

String

+

Name of a DLF real-time service job.

+

dlf_train_job_name

+

String

+

Name of a DLF training job.

+

events

+

Array of Event objects

+

Event.

+

hard_example_path

+

String

+

Path for storing hard examples.

+

hard_select_tasks

+

Array of HardSelectTask objects

+

List of hard example filtering tasks.

+

manifest_path

+

String

+

Path for storing the manifest files.

+

model_id

+

String

+

Model ID.

+

model_name

+

String

+

Model name.

+

model_version

+

String

+

Model version.

+

samples

+

Array of SampleLabels objects

+

Inference result of the real-time video service.

+

service_id

+

String

+

ID of a real-time service.

+

service_name

+

String

+

Name of a real-time service.

+

service_resource

+

String

+

ID of the real-time service bound to a user.

+

total_sample_count

+

Integer

+

Total number of samples.

+

train_data_path

+

String

+

Path for storing training data.

+

train_job_id

+

String

+

ID of a training job.

+

train_job_name

+

String

+

Name of a training job.

+

unconfirmed_sample_count

+

Integer

+

Number of samples to be confirmed.

+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 13 Event

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when an event is created.

+

description

+

String

+

Description.

+

elapsed_time

+

Long

+

Time taken to execute the event.

+

error_code

+

String

+

Error code.

+

error_message

+

String

+

Error message.

+

events

+

Array of Event objects

+

Subevent list.

+

level

+

Integer

+

Event severity.

+

name

+

String

+

Event name.

+

ordinal

+

Integer

+

Sequence number.

+

parent_name

+

String

+

Parent event name.

+

status

+

String

+

Status. The options are as follows:

+
  • waiting: waiting

    +
  • running: running

    +
  • failed: failed

    +
  • success: successful

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 HardSelectTask

Parameter

+

Type

+

Description

+

create_at

+

Long

+

Creation time.

+

dataset_id

+

String

+

Dataset ID.

+

dataset_name

+

String

+

Dataset name.

+

hard_select_task_id

+

String

+

ID of a hard example filtering task.

+

task_status

+

String

+

Task status.

+

time

+

Long

+

Execution time.

+

update_at

+

Long

+

Update time.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 15 SampleLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SampleLabel objects

+

Sample label list. If this parameter is left blank, all sample labels are deleted.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

sample_id

+

String

+

Sample ID.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

sample_usage

+

String

+

Sample usage. The options are as follows:

+
  • TRAIN: training

    +
  • EVAL: evaluation

    +
  • TEST: test

    +
  • INFERENCE: inference

    +
+

source

+

String

+

Source address of sample data.

+

worker_id

+

String

+

ID of a labeling team member.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 16 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 17 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Whether the sample is labeled as a hard sample, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Separate multiple hard sample reason IDs with hyphens (-), for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 18 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

IDs of the reasons why the sample is a hard sample, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

Array of objects

Image size (width, height, and depth of the image), which is a default attribute of the List type. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank; the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains an object detection label.
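To make these sample-level defaults concrete, the sketch below combines the attributes documented in Table 18 into a single Python dictionary; the values are illustrative only.

# Illustrative values only, combining the sample-level default attributes
# documented in Table 18.
sample_metadata = {
    "@modelarts:hard": 1,                    # 1: hard sample, 0: non-hard sample
    "@modelarts:hard_coefficient": 0.8,      # difficulty coefficient in [0, 1]
    "@modelarts:hard_reasons": [1, 5],       # hard sample reason IDs from the list above
    "@modelarts:size": [100, 200, 3],        # [width, height, depth]; depth may be omitted
}

print(sample_metadata)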

Example Requests

Obtaining Information About an Auto Labeling (Active Learning) Task

GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}
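If you prefer calling this API from Python instead of issuing the raw request above, a minimal sketch might look like the following. The endpoint, project/dataset/task IDs, and token are placeholders, and the X-Auth-Token header follows the token-based authentication described earlier in this reference.

import requests

# Placeholders: substitute your own endpoint, project/dataset/task IDs,
# and a valid IAM token.
endpoint = "https://modelarts.example.com"      # hypothetical endpoint
project_id = "your-project-id"
dataset_id = "your-dataset-id"
task_id = "your-task-id"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}"
resp = requests.get(url, headers={"X-Auth-Token": "your-iam-token"})
resp.raise_for_status()
print(resp.json())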

Example Responses

Status code: 200

OK
{
  "resource_id" : "XGrRZuCV1qmMxnsmD5u",
  "create_time" : "2020-11-23 11:08:20",
  "progress" : 10.0,
  "status" : 1,
  "message" : "Start to export annotations. Export task id is jMZGm2SBp4Ymr2wrhAK",
  "code" : "ModelArts.4902",
  "elapsed_time" : 0,
  "result" : {
    "total_sample_count" : 49,
    "annotated_sample_count" : 30,
    "continuity" : false
  },
  "export_type" : 0,
  "config" : {
    "ambiguity" : false,
    "worker_server_num" : 0,
    "collect_sample" : false,
    "algorithm_type" : "fast",
    "image_brightness" : false,
    "image_colorfulness" : false
  }
}
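The fields in this example response can be read programmatically. The short sketch below uses the values from the example above (reduced to the fields it needs) to report the task progress and the annotation counts from the result block.

# A sketch using the fields shown in the example response above.
task_info = {
    "progress": 10.0,
    "status": 1,
    "result": {"total_sample_count": 49, "annotated_sample_count": 30},
}

result = task_info.get("result", {})
total = result.get("total_sample_count", 0)
annotated = result.get("annotated_sample_count", 0)
print("reported progress:", task_info.get("progress"))
if total:
    print(f"annotated {annotated} of {total} samples")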

Status Codes

  • 200: OK
  • 401: Unauthorized
  • 403: Forbidden
  • 404: Not Found

Error Codes

See Error Codes.

+ diff --git a/modelarts/api-ref/CLASS.TXT.json b/modelarts/api-ref/CLASS.TXT.json new file mode 100644 index 00000000..832da40f --- /dev/null +++ b/modelarts/api-ref/CLASS.TXT.json @@ -0,0 +1,1685 @@ +[ + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Before You Start", + "uri":"modelarts_03_0139.html", + "doc_type":"api", + "p_code":"", + "code":"1" + }, + { + "desc":"ModelArts is a one-stop AI development platform geared toward developers and data scientists of all skill levels. It enables you to rapidly build, train, and deploy model", + "product_code":"modelarts", + "title":"Overview", + "uri":"modelarts_03_0001.html", + "doc_type":"api", + "p_code":"1", + "code":"2" + }, + { + "desc":"ModelArts supports Representational State Transfer (REST) APIs, allowing you to call APIs using HTTPS. For details about API calling, see Calling APIs.", + "product_code":"modelarts", + "title":"API Calling", + "uri":"modelarts_03_0140.html", + "doc_type":"api", + "p_code":"1", + "code":"3" + }, + { + "desc":"Endpoints are request address for calling APIs. Endpoints vary depending on services and regions. To obtain the regions and endpoints, contact the enterprise administrato", + "product_code":"modelarts", + "title":"Endpoints", + "uri":"modelarts_03_0141.html", + "doc_type":"api", + "p_code":"1", + "code":"4" + }, + { + "desc":"AccountAn account is created upon successful registration with the cloud platform. The account has full access permissions for all of its cloud services and resources. It", + "product_code":"modelarts", + "title":"Basic Concepts", + "uri":"modelarts_03_0143.html", + "doc_type":"api", + "p_code":"1", + "code":"5" + }, + { + "desc":"All ModelArts APIs are proprietary.You can use these APIs to manage datasets, training jobs, models, and services.Data management APIs include the APIs for managing datas", + "product_code":"modelarts", + "title":"API Overview", + "uri":"modelarts_03_0002.html", + "doc_type":"api", + "p_code":"", + "code":"6" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Calling APIs", + "uri":"modelarts_03_0144.html", + "doc_type":"api", + "p_code":"", + "code":"7" + }, + { + "desc":"This section describes the structure of a REST API request, and uses the IAM API for obtaining a user token as an example to demonstrate how to call an API. 
The obtained ", + "product_code":"modelarts", + "title":"Making an API Request", + "uri":"modelarts_03_0005.html", + "doc_type":"api", + "p_code":"7", + "code":"8" + }, + { + "desc":"Requests for calling an API can be authenticated using either of the following methods: AK/SK-based authentication: Requests are authenticated by encrypting the request b", + "product_code":"modelarts", + "title":"Authentication", + "uri":"modelarts_03_0004.html", + "doc_type":"api", + "p_code":"7", + "code":"9" + }, + { + "desc":"After sending a request, you will receive a response, including the status code, response header, and response body.A status code is a group of digits, ranging from 1xx t", + "product_code":"modelarts", + "title":"Response", + "uri":"modelarts_03_0003.html", + "doc_type":"api", + "p_code":"7", + "code":"10" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"DevEnviron", + "uri":"modelarts_03_0108.html", + "doc_type":"api", + "p_code":"", + "code":"11" + }, + { + "desc":"This API is used to query the authentication information of a development environment instance, which is used to open the development environment instance.GET /v1/{projec", + "product_code":"modelarts", + "title":"Querying the Authentication Information of a Development Environment Instance", + "uri":"modelarts_03_0109.html", + "doc_type":"api", + "p_code":"11", + "code":"12" + }, + { + "desc":"This API is used to create a development environment instance for code development.Calling this API is an asynchronous operation. 
The job status can be obtained by callin", + "product_code":"modelarts", + "title":"Creating a Development Environment Instance", + "uri":"modelarts_03_0110.html", + "doc_type":"api", + "p_code":"11", + "code":"13" + }, + { + "desc":"This API is used to query the development environment instances that meet the search criteria.GET /v1/{project_id}/demanager/instances?de_type={de_type}&provision_type={p", + "product_code":"modelarts", + "title":"Querying a List of Development Environment Instances", + "uri":"modelarts_03_0111.html", + "doc_type":"api", + "p_code":"11", + "code":"14" + }, + { + "desc":"This API is used to query the details about a development environment instance.GET /v1/{project_id}/demanager/instances/{instance_id}Table 1 describes the required parame", + "product_code":"modelarts", + "title":"Querying the Details About a Development Environment Instance", + "uri":"modelarts_03_0112.html", + "doc_type":"api", + "p_code":"11", + "code":"15" + }, + { + "desc":"This API is used to modify the description of a development environment instance or information about the auto stop function.PUT /v1/{project_id}/demanager/instances/{ins", + "product_code":"modelarts", + "title":"Modifying the Description of a Development Environment Instance", + "uri":"modelarts_03_0113.html", + "doc_type":"api", + "p_code":"11", + "code":"16" + }, + { + "desc":"This API is used to delete a development environment instance.DELETE /v1/{project_id}/demanager/instances/{instance_id}Table 1 describes the required parameters.Parameter", + "product_code":"modelarts", + "title":"Deleting a Development Environment Instance", + "uri":"modelarts_03_0114.html", + "doc_type":"api", + "p_code":"11", + "code":"17" + }, + { + "desc":"This API is used to startor stop a notebook instance.POST /v1/{project_id}/demanager/instances/{instance_id}/actionTable 1 describes the required parameters.ParametersPar", + "product_code":"modelarts", + "title":"Managing a Development Environment Instance", + "uri":"modelarts_03_0115.html", + "doc_type":"api", + "p_code":"11", + "code":"18" + }, + { + "desc":"This API is used to restart an ML Studio development environment instance.POST /v1/{project_id}/demanager/instances/{instance_id}/actionTable 1 describes the required par", + "product_code":"modelarts", + "title":"Restarting an ML Studio Instance", + "uri":"modelarts_03_0152.html", + "doc_type":"api", + "p_code":"11", + "code":"19" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Data Management", + "uri":"modelarts_03_0202.html", + "doc_type":"api", + "p_code":"", + "code":"20" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Data Management APIs", + "uri":"modelarts_03_0299.html", + "doc_type":"api", + "p_code":"20", + "code":"21" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Dataset Management", + "uri":"dataset_management.html", + "doc_type":"api", + "p_code":"20", + "code":"22" + }, + { + "desc":"This API is used to query the created datasets that meet the search criteria by page.GET /v2/{project_id}/datasetsNoneStatus code: 200Querying the Dataset ListStatus code", + "product_code":"modelarts", + "title":"Response body for querying the dataset list.", + "uri":"ListDatasets.html", + "doc_type":"api", + "p_code":"22", + "code":"23" + }, + { + "desc":"This API is used to create a dataset.POST /v2/{project_id}/datasetsStatus code: 201Creating an Image Classification Dataset{\n \"workspace_id\" : \"0\",\n \"dataset_name\" : \"d", + "product_code":"modelarts", + "title":"Creating a Dataset", + "uri":"CreateDataset.html", + "doc_type":"api", + "p_code":"22", + "code":"24" + }, + { + "desc":"This API is used to query details about a dataset.GET /v2/{project_id}/datasets/{dataset_id}NoneStatus code: 200Querying Details About a DatasetStatus code: 200OKSee Erro", + "product_code":"modelarts", + "title":"Querying Details About a Dataset", + "uri":"DescDataset.html", + "doc_type":"api", + "p_code":"22", + "code":"25" + }, + { + "desc":"This API is used to modify basic information about a dataset, such as the dataset name, description, current version, and labels.PUT /v2/{project_id}/datasets/{dataset_id", + "product_code":"modelarts", + "title":"Modifying a Dataset", + "uri":"UpdateDataset.html", + "doc_type":"api", + "p_code":"22", + "code":"26" + }, + { + "desc":"This API is used to delete a dataset without deleting the source data of the dataset.DELETE /v2/{project_id}/datasets/{dataset_id}NoneNoneDeleting a DatasetStatus code: 2", + "product_code":"modelarts", + "title":"Deleting a Dataset", + "uri":"DeleteDataset.html", + "doc_type":"api", + "p_code":"22", + "code":"27" + }, + { + "desc":"This API is used to query dataset statistics.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/statsNoneStatus code: 200Querying Dataset StatisticsStatus code: ", + "product_code":"modelarts", + "title":"Querying Dataset Statistics", + "uri":"ListStats.html", + "doc_type":"api", + "p_code":"22", + "code":"28" + }, + { + "desc":"This API is used to query the monitoring data of a dataset within a specified time range.GET /v2/{project_id}/datasets/{dataset_id}/metricsNoneStatus code: 200Querying th", + "product_code":"modelarts", + "title":"Querying the Monitoring Data of a Dataset", + "uri":"GetDatasetMetrics.html", + "doc_type":"api", + "p_code":"22", + "code":"29" + }, + { + "desc":"This API is used to query details about team labeling task statistics.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/stat", + "product_code":"modelarts", + "title":"Querying Details About Team Labeling Task Statistics", + "uri":"ListWorkforceTaskStats.html", + "doc_type":"api", + "p_code":"22", + "code":"30" + }, + { + "desc":"This API is used to query details about the progress of a team labeling task member.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/metrics", + "product_code":"modelarts", + "title":"Querying Details About the Progress of a Team Labeling Task Member", + "uri":"GetWorkforceTaskMetrics.html", + "doc_type":"api", + "p_code":"22", + "code":"31" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical 
documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Dataset Version Management", + "uri":"dataset_version_management.html", + "doc_type":"api", + "p_code":"20", + "code":"32" + }, + { + "desc":"This API is used to query the version list of a specific dataset.GET /v2/{project_id}/datasets/{dataset_id}/versionsNoneStatus code: 200Querying the Version List of a Spe", + "product_code":"modelarts", + "title":"Querying the Dataset Version List", + "uri":"ListDatasetVersions.html", + "doc_type":"api", + "p_code":"32", + "code":"33" + }, + { + "desc":"This API is used to create a dataset labeling version.POST /v2/{project_id}/datasets/{dataset_id}/versionsStatus code: 201Creating a Dataset Labeling VersionStatus code: ", + "product_code":"modelarts", + "title":"Creating a Dataset Labeling Version", + "uri":"CreateDatasetVersion.html", + "doc_type":"api", + "p_code":"32", + "code":"34" + }, + { + "desc":"This API is used to query the details about a dataset version.GET /v2/{project_id}/datasets/{dataset_id}/versions/{version_id}NoneStatus code: 200Querying Details About a", + "product_code":"modelarts", + "title":"Querying Details About a Dataset Version", + "uri":"DescribeDatasetVersion.html", + "doc_type":"api", + "p_code":"32", + "code":"35" + }, + { + "desc":"This API is used to delete a dataset labeling version.DELETE /v2/{project_id}/datasets/{dataset_id}/versions/{version_id}NoneNoneDeleting a Dataset Labeling VersionStatus", + "product_code":"modelarts", + "title":"Deleting a Dataset Labeling Version", + "uri":"DeleteDatasetVersion.html", + "doc_type":"api", + "p_code":"32", + "code":"36" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Sample Management", + "uri":"sample_management.html", + "doc_type":"api", + "p_code":"20", + "code":"37" + }, + { + "desc":"This API is used to query the sample list by page.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samplesNoneStatus code: 200Querying the Sample List by PageS", + "product_code":"modelarts", + "title":"Querying the Sample List", + "uri":"ListSamples.html", + "doc_type":"api", + "p_code":"37", + "code":"38" + }, + { + "desc":"This API is used to add samples in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samplesStatus code: 200Adding Samples in BatchesStatus code: 200OK", + "product_code":"modelarts", + "title":"Adding Samples in Batches", + "uri":"UploadSamplesJson.html", + "doc_type":"api", + "p_code":"37", + "code":"39" + }, + { + "desc":"This API is used to delete samples in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/deleteStatus code: 200Deleting Samples in BatchesStatus", + "product_code":"modelarts", + "title":"Deleting Samples in Batches", + "uri":"DeleteSamples.html", + "doc_type":"api", + "p_code":"37", + "code":"40" + }, + { + "desc":"Query details about a sample.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/{sample_id}NoneStatus code: 200Querying Details About a SampleStatus code", + "product_code":"modelarts", + "title":"Querying Details About a Sample", + "uri":"DescribeSample.html", + "doc_type":"api", + "p_code":"37", + "code":"41" + }, + { + "desc":"This API is used to obtain sample search condition.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/search-conditionNoneStatus code: 200Obtaining Sample Search", + "product_code":"modelarts", + "title":"Obtaining Sample Search Condition", + "uri":"ListSearch.html", + "doc_type":"api", + "p_code":"37", + "code":"42" + }, + { + "desc":"This API is used to query the sample list of a team labeling task by page.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/", + "product_code":"modelarts", + "title":"Querying the Sample List of a Team Labeling Task by Page", + "uri":"ListWorkforceTaskSamples.html", + "doc_type":"api", + "p_code":"37", + "code":"43" + }, + { + "desc":"This API is used to query details about team labeling samples.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples/{sam", + "product_code":"modelarts", + "title":"Querying Details About Team Labeling Samples", + "uri":"DescribeWorkforceTaskSample.html", + "doc_type":"api", + "p_code":"37", + "code":"44" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Label Management", + "uri":"label_management.html", + "doc_type":"api", + "p_code":"20", + "code":"45" + }, + { + "desc":"This API is used to query all labels of a dataset.GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsNoneStatus code: 200Querying All Labels of a DatasetSt", + "product_code":"modelarts", + "title":"Querying the Dataset Label List", + "uri":"ListLabels.html", + "doc_type":"api", + "p_code":"45", + "code":"46" + }, + { + "desc":"This API is used to create a dataset label.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsStatus code: 200Creating a Dataset LabelStatus code: 200OKSe", + "product_code":"modelarts", + "title":"Creating a Dataset Label", + "uri":"CreateLabels.html", + "doc_type":"api", + "p_code":"45", + "code":"47" + }, + { + "desc":"This API is used to modify labels in batches.PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labelsStatus code: 200Modifying Labels in BatchesStatus code: 200", + "product_code":"modelarts", + "title":"Modifying Labels in Batches", + "uri":"UpdateLabels.html", + "doc_type":"api", + "p_code":"45", + "code":"48" + }, + { + "desc":"This API is used to delete labels in batches.POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/deleteStatus code: 200Deleting Labels in BatchesStatus co", + "product_code":"modelarts", + "title":"Deleting Labels in Batches", + "uri":"DeleteLabels.html", + "doc_type":"api", + "p_code":"45", + "code":"49" + }, + { + "desc":"This API is used to update a label by label names.PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}Status code: 204Updating a Label by Label", + "product_code":"modelarts", + "title":"Updating a Label by Label Names", + "uri":"UpdateLabel.html", + "doc_type":"api", + "p_code":"45", + "code":"50" + }, + { + "desc":"This API is used to delete a label and the files that only contain this label.DELETE /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}NoneStatus", + "product_code":"modelarts", + "title":"Deleting a Label and the Files that Only Contain the Label", + "uri":"DeleteLabelAndSamples.html", + "doc_type":"api", + "p_code":"45", + "code":"51" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Manual Labeling", + "uri":"manual_annotation_management.html", + "doc_type":"api", + "p_code":"20", + "code":"52" + }, + { + "desc":"This API is used to update sample labels in batches, including adding, modifying, and deleting sample labels. If the parameter Labels of a sample in the request body is n", + "product_code":"modelarts", + "title":"Updating Sample Labels in Batches", + "uri":"UpdateSamples.html", + "doc_type":"api", + "p_code":"52", + "code":"53" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Labeling Task Management", + "uri":"label_task_management.html", + "doc_type":"api", + "p_code":"20", + "code":"54" + }, + { + "desc":"This API is used to query the team labeling task list of a dataset.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasksNoneStatus code: 200Querying the Team Labelin", + "product_code":"modelarts", + "title":"Querying the Team Labeling Task List of a Dataset", + "uri":"ListWorkforceTasks.html", + "doc_type":"api", + "p_code":"54", + "code":"55" + }, + { + "desc":"This API is used to create a team labeling task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasksStatus code: 200Creating a Team Labeling TaskStatus code: 200OK", + "product_code":"modelarts", + "title":"Creating a Team Labeling Task", + "uri":"CreateWorkforceTask.html", + "doc_type":"api", + "p_code":"54", + "code":"56" + }, + { + "desc":"This API is used to query the details about a team labeling task.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneStatus code: 200Queryin", + "product_code":"modelarts", + "title":"Querying Details About a Team Labeling Task", + "uri":"DescWorkforceTask.html", + "doc_type":"api", + "p_code":"54", + "code":"57" + }, + { + "desc":"This API is used to start a team labeling task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneStarting a Team Labeling TaskStatus code", + "product_code":"modelarts", + "title":"Starting a Team Labeling Task", + "uri":"StartWorkforceTask.html", + "doc_type":"api", + "p_code":"54", + "code":"58" + }, + { + "desc":"This API is used to update a team labeling task.PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneUpdating a Team Labeling TaskStatus code", + "product_code":"modelarts", + "title":"Updating a Team Labeling Task", + "uri":"UpdateWorkforceTask.html", + "doc_type":"api", + "p_code":"54", + "code":"59" + }, + { + "desc":"This API is used to delete a team labeling task.DELETE /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}NoneNoneDeleting a Team Labeling TaskStat", + "product_code":"modelarts", + "title":"Deleting a Team Labeling Task", + "uri":"DeleteWorkforceTask.html", + "doc_type":"api", + "p_code":"54", + "code":"60" + }, + { + "desc":"This API is used to create a team labeling acceptance task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptanceStatus code: 200Creat", + "product_code":"modelarts", + "title":"Creating a Team Labeling Acceptance Task", + "uri":"StartWorkforceSamplingTask.html", + "doc_type":"api", + "p_code":"54", + "code":"61" + }, + { + "desc":"This API is used to query the report of a team labeling acceptance task.GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/reportNo", + "product_code":"modelarts", + "title":"Querying the Report of a Team Labeling Acceptance Task", + "uri":"GetWorkforceSamplingTask.html", + "doc_type":"api", + "p_code":"54", + "code":"62" + }, + { + "desc":"This API is used to update the sample status by confirming the acceptance scope and whether the labeled data is overwritten before the acceptance of the team labeling tas", + "product_code":"modelarts", + "title":"Updating the Status of a Team Labeling Acceptance Task", + "uri":"UpdateWorkforceSamplingTask.html", + "doc_type":"api", + 
"p_code":"54", + "code":"63" + }, + { + "desc":"This API is used to query the team labeling task list by a team member.GET /v2/{project_id}/workforces/worker-tasksNoneStatus code: 200Querying the Team Labeling Task Lis", + "product_code":"modelarts", + "title":"Querying the Team Labeling Task List by a Team Member", + "uri":"ListWorkerTasks.html", + "doc_type":"api", + "p_code":"54", + "code":"64" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Team Labeling Process Management", + "uri":"workforce_process_management.html", + "doc_type":"api", + "p_code":"20", + "code":"65" + }, + { + "desc":"This API is used to submit sample review comments of an acceptance task.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/batch-c", + "product_code":"modelarts", + "title":"Submitting Sample Review Comments of an Acceptance Task", + "uri":"AcceptSamples.html", + "doc_type":"api", + "p_code":"65", + "code":"66" + }, + { + "desc":"This API is used to review team labeling results.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/reviewNoneReviewing Team", + "product_code":"modelarts", + "title":"Reviewing Team Labeling Results", + "uri":"ReviewSamples.html", + "doc_type":"api", + "p_code":"65", + "code":"67" + }, + { + "desc":"This API is used to update labels of team labeling samples in batches.PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samp", + "product_code":"modelarts", + "title":"Updating Labels of Team Labeling Samples in Batches", + "uri":"UpdateWorkforceTaskSamples.html", + "doc_type":"api", + "p_code":"65", + "code":"68" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Labeling Team Management", + "uri":"workforce_management.html", + "doc_type":"api", + "p_code":"20", + "code":"69" + }, + { + "desc":"This API is used to query the labeling team list.GET /v2/{project_id}/workforcesNoneStatus code: 200Querying the Labeling Team ListStatus code: 200OKSee Error Codes.", + "product_code":"modelarts", + "title":"Querying the Labeling Team List", + "uri":"ListWorkforces.html", + "doc_type":"api", + "p_code":"69", + "code":"70" + }, + { + "desc":"This API is used to create a labeling team.POST /v2/{project_id}/workforcesStatus code: 201Creating a Labeling TeamStatus code: 201CreatedSee Error Codes.", + "product_code":"modelarts", + "title":"Creating a Labeling Team", + "uri":"CreateWorkforce.html", + "doc_type":"api", + "p_code":"69", + "code":"71" + }, + { + "desc":"This API is used to query the details about a labeling team.GET /v2/{project_id}/workforces/{workforce_id}NoneStatus code: 200Querying Details About a Labeling TeamStatus", + "product_code":"modelarts", + "title":"Querying Details About a Labeling Team", + "uri":"DescWorkforce.html", + "doc_type":"api", + "p_code":"69", + "code":"72" + }, + { + "desc":"This API is used to update a labeling team.PUT /v2/{project_id}/workforces/{workforce_id}NoneUpdating a Labeling TeamStatus code: 200OKSee Error Codes.", + "product_code":"modelarts", + "title":"Updating a Labeling Team", + "uri":"UpdateWorkforce.html", + "doc_type":"api", + "p_code":"69", + "code":"73" + }, + { + "desc":"This API is used to delete a labeling team.DELETE /v2/{project_id}/workforces/{workforce_id}NoneStatus code: 204Deleting a Labeling TeamStatus code: 204No ContentSee Erro", + "product_code":"modelarts", + "title":"Deleting a Labeling Team", + "uri":"DeleteWorkforce.html", + "doc_type":"api", + "p_code":"69", + "code":"74" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Labeling Team Member Management", + "uri":"workforce_worker_management.html", + "doc_type":"api", + "p_code":"20", + "code":"75" + }, + { + "desc":"This API is used to send an email to a labeling team member.POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/notifyStatus code: 200Sending ", + "product_code":"modelarts", + "title":"Sending an Email to a Labeling Team Member", + "uri":"SendEmails.html", + "doc_type":"api", + "p_code":"75", + "code":"76" + }, + { + "desc":"This API is used to query the list of all labeling team members.GET /v2/{project_id}/workforces/workersNoneStatus code: 200Querying All Labeling Team AdministratorsStatus", + "product_code":"modelarts", + "title":"Querying the List of All Labeling Team Members", + "uri":"ListAllWorkers.html", + "doc_type":"api", + "p_code":"75", + "code":"77" + }, + { + "desc":"This API is used to query the list of labeling team members.GET /v2/{project_id}/workforces/{workforce_id}/workersNoneStatus code: 200Querying the List of Labeling Team M", + "product_code":"modelarts", + "title":"Querying the List of Labeling Team Members", + "uri":"ListWorkers.html", + "doc_type":"api", + "p_code":"75", + "code":"78" + }, + { + "desc":"This API is used to create a labeling team member.POST /v2/{project_id}/workforces/{workforce_id}/workersNoneCreating a Labeling Team MemberStatus code: 201CreatedSee Err", + "product_code":"modelarts", + "title":"Creating a Labeling Team Member", + "uri":"CreateWorker.html", + "doc_type":"api", + "p_code":"75", + "code":"79" + }, + { + "desc":"This API is used to delete labeling team members in batches.POST /v2/{project_id}/workforces/{workforce_id}/workers/batch-deleteStatus code: 200Deleting Labeling Team Mem", + "product_code":"modelarts", + "title":"Deleting Labeling Team Members in Batches", + "uri":"DeleteWorkers.html", + "doc_type":"api", + "p_code":"75", + "code":"80" + }, + { + "desc":"This API is used to query details about labeling team members.GET /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneStatus code: 200Querying Details About", + "product_code":"modelarts", + "title":"Querying Details About Labeling Team Members", + "uri":"DescWorker.html", + "doc_type":"api", + "p_code":"75", + "code":"81" + }, + { + "desc":"This API is used to update a labeling team member.PUT /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneUpdating a Labeling Team MemberStatus code: 200OKS", + "product_code":"modelarts", + "title":"Updating a Labeling Team Member", + "uri":"UpdateWorker.html", + "doc_type":"api", + "p_code":"75", + "code":"82" + }, + { + "desc":"This API is used to delete a labeling team member.DELETE /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}NoneNoneDeleting a Labeling Team MemberStatus code:", + "product_code":"modelarts", + "title":"Deleting a Labeling Team Member", + "uri":"DeleteWorker.html", + "doc_type":"api", + "p_code":"75", + "code":"83" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Data Import Task", + "uri":"data_import.html", + "doc_type":"api", + "p_code":"20", + "code":"84" + }, + { + "desc":"This API is used to query the dataset import task list by page.GET /v2/{project_id}/datasets/{dataset_id}/import-tasksNoneStatus code: 200Obtaining the Dataset Import Tas", + "product_code":"modelarts", + "title":"Querying the Dataset Import Task List", + "uri":"ListImportTasks.html", + "doc_type":"api", + "p_code":"84", + "code":"85" + }, + { + "desc":"This API is used to create a dataset import task to import samples and labels from the storage system to the dataset.POST /v2/{project_id}/datasets/{dataset_id}/import-ta", + "product_code":"modelarts", + "title":"Creating an Import Task", + "uri":"ImportTask.html", + "doc_type":"api", + "p_code":"84", + "code":"86" + }, + { + "desc":"This API is used to query details about a dataset import task.GET /v2/{project_id}/datasets/{dataset_id}/import-tasks/{task_id}NoneStatus code: 200Querying Details About ", + "product_code":"modelarts", + "title":"Querying Details About a Dataset Import Task", + "uri":"DescImportTask.html", + "doc_type":"api", + "p_code":"84", + "code":"87" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Data Export Task", + "uri":"data_export.html", + "doc_type":"api", + "p_code":"20", + "code":"88" + }, + { + "desc":"This API is used to query the dataset export task list by page.GET /v2/{project_id}/datasets/{dataset_id}/export-tasksNoneStatus code: 200Querying the Export Task List by", + "product_code":"modelarts", + "title":"Querying the Dataset Export Task List", + "uri":"GetExportTasksStatusOfDataset.html", + "doc_type":"api", + "p_code":"88", + "code":"89" + }, + { + "desc":"This API is used to create a dataset export task to export a dataset to OBS or new datasets.POST /v2/{project_id}/datasets/{dataset_id}/export-tasksStatus code: 200Creati", + "product_code":"modelarts", + "title":"Creating a Dataset Export Task", + "uri":"ExportTask.html", + "doc_type":"api", + "p_code":"88", + "code":"90" + }, + { + "desc":"This API is used to query the status of a dataset export task.GET /v2/{project_id}/datasets/{resource_id}/export-tasks/{task_id}NoneStatus code: 200Querying the Status of", + "product_code":"modelarts", + "title":"Querying the Status of a Dataset Export Task", + "uri":"GetExportTaskStatusOfDataset.html", + "doc_type":"api", + "p_code":"88", + "code":"91" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Data Synchronization Task", + "uri":"data_sync.html", + "doc_type":"api", + "p_code":"20", + "code":"92" + }, + { + "desc":"This API is used to synchronize samples and labeling information from the input dataset path to the dataset.POST /v2/{project_id}/datasets/{dataset_id}/sync-dataNoneNoneS", + "product_code":"modelarts", + "title":"Synchronizing a Dataset", + "uri":"SyncDataSource.html", + "doc_type":"api", + "p_code":"92", + "code":"93" + }, + { + "desc":"This API is used to query the status of a dataset synchronization task.GET /v2/{project_id}/datasets/{dataset_id}/sync-data/statusNoneStatus code: 200Obtaining the Status", + "product_code":"modelarts", + "title":"Querying the Status of a Dataset Synchronization Task", + "uri":"SyncDataSourceState.html", + "doc_type":"api", + "p_code":"92", + "code":"94" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Intelligent Task", + "uri":"auto_task.html", + "doc_type":"api", + "p_code":"20", + "code":"95" + }, + { + "desc":"This API is used to query auto labeling samples in a dataset.GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samplesNoneStatus code: 200Querying Auto Labeling", + "product_code":"modelarts", + "title":"Querying Auto Labeling Sample List", + "uri":"ListAutoAnnotationSamples.html", + "doc_type":"api", + "p_code":"95", + "code":"96" + }, + { + "desc":"This API is used to query details about an auto labeling sample.GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples/{sample_id}NoneStatus code: 200Queryin", + "product_code":"modelarts", + "title":"Querying Details About an Auto Labeling Sample", + "uri":"DescribeAutoAnnotationSample.html", + "doc_type":"api", + "p_code":"95", + "code":"97" + }, + { + "desc":"This API is used to query the intelligent task list by page, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the type parame", + "product_code":"modelarts", + "title":"Querying the Intelligent Task List by Page", + "uri":"ListTasks.html", + "doc_type":"api", + "p_code":"95", + "code":"98" + }, + { + "desc":"This API is used to start an intelligent task, which can be an auto labeling task or an auto grouping task. You can specify task_type in the request body to start a type ", + "product_code":"modelarts", + "title":"Starting Intelligent Tasks", + "uri":"CreateTask.html", + "doc_type":"api", + "p_code":"95", + "code":"99" + }, + { + "desc":"This API is used to obtain information about intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the task_id ", + "product_code":"modelarts", + "title":"Obtaining Information About Intelligent Tasks", + "uri":"AutoAnnotationProgress.html", + "doc_type":"api", + "p_code":"95", + "code":"100" + }, + { + "desc":"This API is used to stop intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. 
You can specify the task_id parameter to stop a ", + "product_code":"modelarts", + "title":"Stopping an Intelligent Task", + "uri":"StopAutoAnnotation.html", + "doc_type":"api", + "p_code":"95", + "code":"101" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Processing Task", + "uri":"process_task.html", + "doc_type":"api", + "p_code":"20", + "code":"102" + }, + { + "desc":"This API is used to query the list of a processing task. You can query the feature analysis tasks and data processing tasks. You can specify the task_type parameter to qu", + "product_code":"modelarts", + "title":"Querying the List of a Processing Task", + "uri":"ListProcessorTasks.html", + "doc_type":"api", + "p_code":"102", + "code":"103" + }, + { + "desc":"This API is used to create a processing task. You can create feature analysis tasks and data processing tasks. You can specify the id field of template composite paramete", + "product_code":"modelarts", + "title":"Creating a Processing Task", + "uri":"CreateProcessorTask.html", + "doc_type":"api", + "p_code":"102", + "code":"104" + }, + { + "desc":"This API is used to query the algorithm type for data processing.GET /v2/{project_id}/processor-tasks/itemsNoneStatus code: 200Querying the List of the Algorithm Type for", + "product_code":"modelarts", + "title":"Querying the Algorithm Type for Data Processing", + "uri":"GetProcessorTaskItems.html", + "doc_type":"api", + "p_code":"102", + "code":"105" + }, + { + "desc":"This API is used to query the details about processing tasks. You can query feature analysis tasks and data processing tasks. You can specify the task_id parameter to que", + "product_code":"modelarts", + "title":"Querying Details About a Processing Task", + "uri":"DescribeProcessorTask.html", + "doc_type":"api", + "p_code":"102", + "code":"106" + }, + { + "desc":"This API is used to update a processing task. You can update feature analysis tasks and data processing tasks. Only the description of updated tasks is supported. You can", + "product_code":"modelarts", + "title":"Updating a Processing Task", + "uri":"UpdateProcessorTask.html", + "doc_type":"api", + "p_code":"102", + "code":"107" + }, + { + "desc":"This API is used to delete a processing task. You can delete feature analysis tasks and data processing tasks. 
A specific task can be deleted by specifying the task_id pa", + "product_code":"modelarts", + "title":"Deleting a Processing Task", + "uri":"DeleteProcessorTask.html", + "doc_type":"api", + "p_code":"102", + "code":"108" + }, + { + "desc":"This API is used to query the version list of a data processing task.GET /v2/{project_id}/processor-tasks/{task_id}/versionsNoneStatus code: 200Querying the Version List ", + "product_code":"modelarts", + "title":"Querying the Version List of a Data Processing Task", + "uri":"ListProcessorTaskVersions.html", + "doc_type":"api", + "p_code":"102", + "code":"109" + }, + { + "desc":"This API is used to create a data processing task version.POST /v2/{project_id}/processor-tasks/{task_id}/versionsStatus code: 200Creating a Data Validation Task VersionS", + "product_code":"modelarts", + "title":"Creating a Data Processing Task Version", + "uri":"CreateProcessorTaskVersion.html", + "doc_type":"api", + "p_code":"102", + "code":"110" + }, + { + "desc":"This API is used to query the details about the version of a data processing task.GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}NoneStatus code: 200", + "product_code":"modelarts", + "title":"Querying the Details About the Version of a Data Processing Task", + "uri":"DescProcessorTaskVersion.html", + "doc_type":"api", + "p_code":"102", + "code":"111" + }, + { + "desc":"This API is used to delete a data processing task version.DELETE /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}NoneNoneDeleting a Data Processing Task V", + "product_code":"modelarts", + "title":"Deleting a Data Processing Task Version", + "uri":"DeleteProcessorTaskVersion.html", + "doc_type":"api", + "p_code":"102", + "code":"112" + }, + { + "desc":"This API is used to query the result of a data processing task version.GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/resultsNoneStatus code: 200Que", + "product_code":"modelarts", + "title":"Querying the Result of a Data Processing Task Version", + "uri":"ListProcessorTaskVersionResults.html", + "doc_type":"api", + "p_code":"102", + "code":"113" + }, + { + "desc":"This API is used to stop the version of a data processing task.POST /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/stopNoneNoneThis API is used to stop ", + "product_code":"modelarts", + "title":"Stopping the Version of a Data Processing Task", + "uri":"StopProcessorTaskVersion.html", + "doc_type":"api", + "p_code":"102", + "code":"114" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Training Management", + "uri":"modelarts_03_0043.html", + "doc_type":"api", + "p_code":"", + "code":"115" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Training Jobs", + "uri":"modelarts_03_0044.html", + "doc_type":"api", + "p_code":"115", + "code":"116" + }, + { + "desc":"This API is used to create a training job.Calling this API is an asynchronous operation. 
The job status can be obtained by calling the APIs described in Querying a Traini", + "product_code":"modelarts", + "title":"Creating a Training Job", + "uri":"modelarts_03_0045.html", + "doc_type":"api", + "p_code":"116", + "code":"117" + }, + { + "desc":"This API is used to query the created training jobs that meet the search criteria.GET /v1/{project_id}/training-jobsTable 1 describes the required parameters.ParametersPa", + "product_code":"modelarts", + "title":"Querying a Training Job List", + "uri":"modelarts_03_0046.html", + "doc_type":"api", + "p_code":"116", + "code":"118" + }, + { + "desc":"This API is used to query the details about a specified training job based on the job ID.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}Table 1 describe", + "product_code":"modelarts", + "title":"Querying the Details About a Training Job Version", + "uri":"modelarts_03_0047.html", + "doc_type":"api", + "p_code":"116", + "code":"119" + }, + { + "desc":"This API is used to delete a version of a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Quer", + "product_code":"modelarts", + "title":"Deleting a Version of a Training Job", + "uri":"modelarts_03_0048.html", + "doc_type":"api", + "p_code":"116", + "code":"120" + }, + { + "desc":"This API is used to query the version of a specified training job based on the job ID.GET /v1/{project_id}/training-jobs/{job_id}/versionsTable 1 describes the required p", + "product_code":"modelarts", + "title":"Querying a List of Training Job Versions", + "uri":"modelarts_03_0049.html", + "doc_type":"api", + "p_code":"116", + "code":"121" + }, + { + "desc":"This API is used to create a version of a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Quer", + "product_code":"modelarts", + "title":"Creating a Version of a Training Job", + "uri":"modelarts_03_0050.html", + "doc_type":"api", + "p_code":"116", + "code":"122" + }, + { + "desc":"This API is used to stop a training job.Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training", + "product_code":"modelarts", + "title":"Stopping a Training Job", + "uri":"modelarts_03_0051.html", + "doc_type":"api", + "p_code":"116", + "code":"123" + }, + { + "desc":"This API is used to modify the description of a training job.PUT /v1/{project_id}/training-jobs/{job_id}Table 1 describes the required parameters.ParametersParameterManda", + "product_code":"modelarts", + "title":"Modifying the Description of a Training Job", + "uri":"modelarts_03_0052.html", + "doc_type":"api", + "p_code":"116", + "code":"124" + }, + { + "desc":"This API is used to delete a training job.Calling this API is an asynchronous operation. 
The job status can be obtained by calling the APIs described in Querying a Traini", + "product_code":"modelarts", + "title":"Deleting a Training Job", + "uri":"modelarts_03_0053.html", + "doc_type":"api", + "p_code":"116", + "code":"125" + }, + { + "desc":"This API is used to obtain the name of a training job log file.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/log/file-namesTable 1 describes the requi", + "product_code":"modelarts", + "title":"Obtaining the Name of a Training Job Log File", + "uri":"modelarts_03_0054.html", + "doc_type":"api", + "p_code":"116", + "code":"126" + }, + { + "desc":"This API is used to query the details about a built-in model.GET /v1/{project_id}/built-in-algorithmsTable 1 describes the required parameters.ParametersParameterMandator", + "product_code":"modelarts", + "title":"Querying a Built-in Algorithm", + "uri":"modelarts_03_0056.html", + "doc_type":"api", + "p_code":"116", + "code":"127" + }, + { + "desc":"This API is used to query detailed information about training job logs by row.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/aom-logTable 1 describes t", + "product_code":"modelarts", + "title":"Querying Training Job Logs", + "uri":"modelarts_03_0149.html", + "doc_type":"api", + "p_code":"116", + "code":"128" + }, + { + "desc":"This API is used to query monitoring information about a single container of a job.GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/pod/{pod_name}/metr", + "product_code":"modelarts", + "title":"Querying Monitoring Information About a Single Container of a Job", + "uri":"modelarts_03_0150.html", + "doc_type":"api", + "p_code":"116", + "code":"129" + }, + { + "desc":"This API is used to query monitoring information about resource pool nodes.GET /v1/{project_id}/pools/{pool_id}/nodes/{node_ip}/metric-statisticTable 1 describes the re", + "product_code":"modelarts", + "title":"Querying Monitoring Information About Resource Pool Nodes", + "uri":"modelarts_03_0151.html", + "doc_type":"api", + "p_code":"116", + "code":"130" + }, + { + "desc":"You can use this API to query the overview information about all job versions created by a user based on specified conditions, including the statuses and GPU quantity of ", + "product_code":"modelarts", + "title":"Querying the Statuses and GPU Quantity of All Job Versions", + "uri":"en-us_topic_0000001147936839.html", + "doc_type":"api", + "p_code":"116", + "code":"131" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Training Job Parameter Configuration", + "uri":"modelarts_03_0057.html", + "doc_type":"api", + "p_code":"115", + "code":"132" + }, + { + "desc":"This API is used to create a training job configuration.POST /v1/{project_id}/training-job-configsTable 1 describes the required parameters.ParametersParameterMandatoryTy", + "product_code":"modelarts", + "title":"Creating a Training Job Configuration", + "uri":"modelarts_03_0058.html", + "doc_type":"api", + "p_code":"132", + "code":"133" + }, + { + "desc":"This API is used to query the created training job configurations that meet the search criteria.GET /v1/{project_id}/training-job-configsTable 1 describes the required pa", + "product_code":"modelarts", + "title":"Querying a List of Training Job Configurations", + "uri":"modelarts_03_0059.html", + "doc_type":"api", + "p_code":"132", + "code":"134" + }, + { + "desc":"This API is used to modify a training job configuration.PUT /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required parameters.ParametersParamet", + "product_code":"modelarts", + "title":"Modifying a Training Job Configuration", + "uri":"modelarts_03_0060.html", + "doc_type":"api", + "p_code":"132", + "code":"135" + }, + { + "desc":"This API is used to delete a training job configuration.DELETE /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required parameters.Parameter desc", + "product_code":"modelarts", + "title":"Deleting a Training Job Configuration", + "uri":"modelarts_03_0061.html", + "doc_type":"api", + "p_code":"132", + "code":"136" + }, + { + "desc":"This API is used to query the details about a specified training job configuration.GET /v1/{project_id}/training-job-configs/{config_name}Table 1 describes the required p", + "product_code":"modelarts", + "title":"Querying the Details About a Training Job Configuration", + "uri":"modelarts_03_0062.html", + "doc_type":"api", + "p_code":"132", + "code":"137" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Visualization Jobs", + "uri":"modelarts_03_0063.html", + "doc_type":"api", + "p_code":"115", + "code":"138" + }, + { + "desc":"This API is used to create a visualization job.Calling this API is an asynchronous operation. 
The job status can be obtained by calling the APIs described in Querying a V", + "product_code":"modelarts", + "title":"Creating a Visualization Job", + "uri":"modelarts_03_0064.html", + "doc_type":"api", + "p_code":"138", + "code":"139" + }, + { + "desc":"This API is used to query the visualization jobs that meet the search criteria.GET /v1/{project_id}/visualization-jobsTable 1 describes the required parameters.Parameters", + "product_code":"modelarts", + "title":"Querying a Visualization Job List", + "uri":"modelarts_03_0065.html", + "doc_type":"api", + "p_code":"138", + "code":"140" + }, + { + "desc":"This API is used to query the details about a specified visualization job based on the job name.GET /v1/{project_id}/visualization-jobs/{job_id}Table 1 describes the requ", + "product_code":"modelarts", + "title":"Querying the Details About a Visualization Job", + "uri":"modelarts_03_0066.html", + "doc_type":"api", + "p_code":"138", + "code":"141" + }, + { + "desc":"This API is used to modify the description of a visualization job.PUT /v1/{project_id}/visualization-jobs/{job_id}Table 1 describes the required parameters.ParametersPara", + "product_code":"modelarts", + "title":"Modifying the Description of a Visualization Job", + "uri":"modelarts_03_0067.html", + "doc_type":"api", + "p_code":"138", + "code":"142" + }, + { + "desc":"This API is used to delete a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a ", + "product_code":"modelarts", + "title":"Deleting a Visualization Job", + "uri":"modelarts_03_0068.html", + "doc_type":"api", + "p_code":"138", + "code":"143" + }, + { + "desc":"This API is used to stop a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Vi", + "product_code":"modelarts", + "title":"Stopping a Visualization Job", + "uri":"modelarts_03_0069.html", + "doc_type":"api", + "p_code":"138", + "code":"144" + }, + { + "desc":"This API is used to restart a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a", + "product_code":"modelarts", + "title":"Restarting a Visualization Job", + "uri":"modelarts_03_0070.html", + "doc_type":"api", + "p_code":"138", + "code":"145" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Resource and Engine Specifications", + "uri":"modelarts_03_0071.html", + "doc_type":"api", + "p_code":"115", + "code":"146" + }, + { + "desc":"This API is used to query the resource specifications of a specified job.You must specify the resource specifications when creating a training job or an inference job.GET", + "product_code":"modelarts", + "title":"Querying Job Resource Specifications", + "uri":"modelarts_03_0072.html", + "doc_type":"api", + "p_code":"146", + "code":"147" + }, + { + "desc":"This API is used to query the engine type and version of a specified job.You must specify the engine specifications when creating a training job or an inference job.GET /", + "product_code":"modelarts", + "title":"Querying Job Engine Specifications", + "uri":"modelarts_03_0073.html", + "doc_type":"api", + "p_code":"146", + "code":"148" + }, + { + "desc":"Table 1 describes the job statuses.", + "product_code":"modelarts", + "title":"Job Statuses", + "uri":"modelarts_03_0074.html", + "doc_type":"api", + "p_code":"115", + "code":"149" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Model Management", + "uri":"modelarts_03_0075.html", + "doc_type":"api", + "p_code":"", + "code":"150" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Models", + "uri":"modelarts_03_0153.html", + "doc_type":"api", + "p_code":"150", + "code":"151" + }, + { + "desc":"You can use the API to import a model.Ensure that the execution code and model have been uploaded to OBS. By default, the models generated by a training job are stored in", + "product_code":"modelarts", + "title":"Importing a Model", + "uri":"modelarts_03_0076.html", + "doc_type":"api", + "p_code":"151", + "code":"152" + }, + { + "desc":"This API is used to query the models that meet the search criteria.GET /v1/{project_id}/modelsTable 1 describes the required parameters.ParametersParameterMandatoryTypeDe", + "product_code":"modelarts", + "title":"Querying a Model List", + "uri":"modelarts_03_0077.html", + "doc_type":"api", + "p_code":"151", + "code":"153" + }, + { + "desc":"This API is used to query details about a model based on the model ID.GET /v1/{project_id}/models/{model_id}Table 1 describes the required parameters.ParametersParameterM", + "product_code":"modelarts", + "title":"Querying the Details About a Model", + "uri":"modelarts_03_0078.html", + "doc_type":"api", + "p_code":"151", + "code":"154" + }, + { + "desc":"This API is used to delete a model based on the model ID. 
When cascade is set to true, the model specified by the model ID and models of different versions with the same ", + "product_code":"modelarts", + "title":"Deleting a Model", + "uri":"modelarts_03_0079.html", + "doc_type":"api", + "p_code":"151", + "code":"155" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Service Management", + "uri":"modelarts_03_0081.html", + "doc_type":"api", + "p_code":"", + "code":"156" + }, + { + "desc":"This API is used to deploy a model as a service.POST /v1/{project_id}/servicesTable 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionproject", + "product_code":"modelarts", + "title":"Deploying a Model as a Service", + "uri":"modelarts_03_0082.html", + "doc_type":"api", + "p_code":"156", + "code":"157" + }, + { + "desc":"This API is used to obtain model services.GET /v1/{project_id}/servicesTable 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionproject_idYesS", + "product_code":"modelarts", + "title":"Querying a Service List", + "uri":"modelarts_03_0083.html", + "doc_type":"api", + "p_code":"156", + "code":"158" + }, + { + "desc":"This API is used to query the details about a model service based on the service ID.GET /v1/{project_id}/services/{service_id}Table 1 describes the required parameters.Pa", + "product_code":"modelarts", + "title":"Querying the Details About a Service", + "uri":"modelarts_03_0084.html", + "doc_type":"api", + "p_code":"156", + "code":"159" + }, + { + "desc":"This API is used to update configurations of a model service. It can also be used to start or stop a service.PUT /v1/{project_id}/services/{service_id}Table 1 describes t", + "product_code":"modelarts", + "title":"Updating Service Configurations", + "uri":"modelarts_03_0086.html", + "doc_type":"api", + "p_code":"156", + "code":"160" + }, + { + "desc":"This API is used to query service monitoring information.GET /v1/{project_id}/services/{service_id}/monitorTable 1 describes the required parameters.ParametersParameterMa", + "product_code":"modelarts", + "title":"Querying Service Monitoring Information", + "uri":"modelarts_03_0087.html", + "doc_type":"api", + "p_code":"156", + "code":"161" + }, + { + "desc":"This API is used to query the update logs of a real-time service.GET /v1/{project_id}/services/{service_id}/logsTable 1 describes the required parameters.ParametersParame", + "product_code":"modelarts", + "title":"Querying Service Update Logs", + "uri":"modelarts_03_0088.html", + "doc_type":"api", + "p_code":"156", + "code":"162" + }, + { + "desc":"This API is used to query service event logs, including service operation records, key actions during deployment, and deployment failure causes.GET /v1/{project_id}/servi", + "product_code":"modelarts", + "title":"Querying Service Event Logs", + "uri":"modelarts_03_0155.html", + "doc_type":"api", + "p_code":"156", + "code":"163" + }, + { + "desc":"This API is used to delete a model service. 
You can delete your own services only.Table 1 describes the required parameters.ParametersParameterMandatoryTypeDescriptionpro", + "product_code":"modelarts", + "title":"Deleting a Service", + "uri":"modelarts_03_0089.html", + "doc_type":"api", + "p_code":"156", + "code":"164" + }, + { + "desc":"This API is used to query supported service deployment specifications.URIGET /v1/{project_id}/services/specificationsNoneSample requestGET https://endpoint/v1/{project", + "product_code":"modelarts", + "title":"Querying Supported Service Deployment Specifications", + "uri":"modelarts_03_0200.html", + "doc_type":"api", + "p_code":"156", + "code":"165" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Authorization Management", + "uri":"authorization.html", + "doc_type":"api", + "p_code":"", + "code":"166" + }, + { + "desc":"This API is used to configure ModelArts authorization. ModelArts functions such as training management, development environment, data management, and real-time services c", + "product_code":"modelarts", + "title":"Configuring Authorization", + "uri":"CreateAuthorization.html", + "doc_type":"api", + "p_code":"166", + "code":"167" + }, + { + "desc":"This API is used to view an authorization list.GET /v2/{project_id}/authorizationsNoneStatus code: 200View an authorization list.Status code: 200OKSee Error Codes.", + "product_code":"modelarts", + "title":"Viewing an Authorization List", + "uri":"GetAuthorizations.html", + "doc_type":"api", + "p_code":"166", + "code":"168" + }, + { + "desc":"This API is used to delete the authorization of a specified user or all users.DELETE /v2/{project_id}/authorizationsNoneNoneDelete the authorization of a specified user.S", + "product_code":"modelarts", + "title":"Deleting Authorization", + "uri":"DeleteAuthorizations.html", + "doc_type":"api", + "p_code":"166", + "code":"169" + }, + { + "desc":"This API is used to create an agency so that ModelArts can access dependent services such as OBS, SWR, and IEF.POST /v2/{project_id}/agencyNoneCreate a ModelArts agency.S", + "product_code":"modelarts", + "title":"Creating a ModelArts Agency", + "uri":"CreateModelArtsAgency.html", + "doc_type":"api", + "p_code":"166", + "code":"170" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Workspace Management", + "uri":"workspace.html", + "doc_type":"api", + "p_code":"", + "code":"171" + }, + { + "desc":"This API is used to create a workspace. 
The name of the created workspace cannot be default, which is the name of the default workspace reserved by the system.POST /v1/{p", + "product_code":"modelarts", + "title":"Creating a Workspace", + "uri":"CreateWorkspace.html", + "doc_type":"api", + "p_code":"171", + "code":"172" + }, + { + "desc":"This API is used to obtain a workspace list with detailed information contained in the response body.GET /v1/{project_id}/workspacesNoneStatus code: 200Querying a Workspa", + "product_code":"modelarts", + "title":"Querying a Workspace List", + "uri":"ListWorkspaces.html", + "doc_type":"api", + "p_code":"171", + "code":"173" + }, + { + "desc":"This API is used to delete a workspace.DELETE /v1/{project_id}/workspaces/{workspace_id}NoneStatus code: 200Deleting a WorkspaceStatus code: 200OKSee Error Codes.", + "product_code":"modelarts", + "title":"Deleting a Workspace", + "uri":"DeleteWorkspace.html", + "doc_type":"api", + "p_code":"171", + "code":"174" + }, + { + "desc":"This API is used to obtain details about a workspace.GET /v1/{project_id}/workspaces/{workspace_id}NoneStatus code: 200Querying Details About a WorkspaceStatus code: 200O", + "product_code":"modelarts", + "title":"Querying Details About a Workspace", + "uri":"ShowWorkspaceInfo.html", + "doc_type":"api", + "p_code":"171", + "code":"175" + }, + { + "desc":"This API is used to modify a workspace.PUT /v1/{project_id}/workspaces/{workspace_id}Status code: 200Modifying a WorkspaceStatus code: 200OKSee Error Codes.", + "product_code":"modelarts", + "title":"Modifying a Workspace", + "uri":"UpdateWorkspace.html", + "doc_type":"api", + "p_code":"171", + "code":"176" + }, + { + "desc":"This API is used to obtain workspace quotas.GET /v1/{project_id}/workspaces/{workspace_id}/quotasNoneStatus code: 200Querying Workspace QuotasStatus code: 200OKSee Error ", + "product_code":"modelarts", + "title":"Querying a Workspace Quota", + "uri":"ListWorkspaceQuotas.html", + "doc_type":"api", + "p_code":"171", + "code":"177" + }, + { + "desc":"This API is used to modify a workspace quota.PUT /v1/{project_id}/workspaces/{workspace_id}/quotasStatus code: 200Modifying Workspace QuotasStatus code: 200OKSee Error Co", + "product_code":"modelarts", + "title":"Modifying a Workspace Quota", + "uri":"UpdateWorkspaceQuotas.html", + "doc_type":"api", + "p_code":"171", + "code":"178" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Application Cases", + "uri":"modelarts_03_0400.html", + "doc_type":"api", + "p_code":"", + "code":"179" + }, + { + "desc":"This section describes how to train a model on ModelArts by calling a series of APIs.The process for creating a training job using the TensorFlow framework is as follows:", + "product_code":"modelarts", + "title":"Creating a Training Job Using the TensorFlow Framework", + "uri":"modelarts_03_0401.html", + "doc_type":"api", + "p_code":"179", + "code":"180" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Common Parameters", + "uri":"modelarts_03_0093.html", + "doc_type":"api", + "p_code":"", + "code":"181" + }, + { + "desc":"Table 1 describes the status codes.", + "product_code":"modelarts", + "title":"Status Code", + "uri":"modelarts_03_0094.html", + "doc_type":"api", + "p_code":"181", + "code":"182" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Error Codes", + "uri":"modelarts_03_0095.html", + "doc_type":"api", + "p_code":"181", + "code":"183" + }, + { + "desc":"A project ID or name is required for some requests when an API is called. Therefore, obtain the project ID and name before calling the API. Use either of the following me", + "product_code":"modelarts", + "title":"Obtaining a Project ID and Name", + "uri":"modelarts_03_0147.html", + "doc_type":"api", + "p_code":"181", + "code":"184" + }, + { + "desc":"When you call APIs, certain requests require the account name and ID. To obtain an account name and ID, do as follows:Sign up and log in to the console.Hover the cursor o", + "product_code":"modelarts", + "title":"Obtaining an Account Name and ID", + "uri":"modelarts_03_0148.html", + "doc_type":"api", + "p_code":"181", + "code":"185" + }, + { + "desc":"When you call APIs, certain requests require the username and ID. To obtain a username and ID, do as follows:Log in to the management console after registration.In the up", + "product_code":"modelarts", + "title":"Obtaining a Username and ID", + "uri":"modelarts_03_0006.html", + "doc_type":"api", + "p_code":"181", + "code":"186" + }, + { + "desc":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", + "product_code":"modelarts", + "title":"Change History", + "uri":"modelarts_03_0097.html", + "doc_type":"api", + "p_code":"", + "code":"187" + } +] \ No newline at end of file diff --git a/modelarts/api-ref/CreateAuthorization.html b/modelarts/api-ref/CreateAuthorization.html new file mode 100644 index 00000000..6dc0a29f --- /dev/null +++ b/modelarts/api-ref/CreateAuthorization.html @@ -0,0 +1,153 @@ + + +

Configuring Authorization

+

Function

This API is used to configure ModelArts authorization. ModelArts functions such as training management, development environment, data management, and real-time services can be properly used only after required permissions are assigned. The administrator can use this API to set an agency for IAM users and set the access key of the current user.

+
+

URI

POST /v2/{project_id}/authorizations

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain a project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

user_id

+

No

+

String

+

User ID. For details about how to obtain a user ID, see Obtaining a User ID.

+

If user_id is set to all, all IAM users are authorized. If some IAM users have been authorized, the authorization setting will be updated.

+

This parameter is mandatory only if the authorization type is set to agency.

+

type

+

No

+

String

+

Authorization type. Agency is recommended.

+

Options:

+
  • agency: authorization through an agency

    +
  • credential: authorization through an access key (AK/SK)

    +
+

Default: agency

+

content

+

Yes

+

String

+

Authorization content.

+
  • If Authorization Type is set to Agency, this field indicates the agency name.

    +
  • If Authorization Type is set to AK/SK, this field indicates the access key ID (AK).

    +
+

secret_key

+

No

+

String

+

Secret access key (SK). This field is mandatory only when type is set to credential (AK/SK authorization).

+
+
+
+

Response Parameters

None

+
+

Example Requests

Configure authorization.

+
POST https://{endpoint}/v2/{project_id}/authorizations
+
+{
+  "user_id" : "****d80fb058844ae8b82aa66d9fe****",
+  "type" : "agency",
+  "content" : "modelarts_agency"
+}
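As described in Table 2, access key (AK/SK) authorization is also supported. The following is a minimal sketch of such a request with placeholder credentials; replace {AK} and {SK} with your own access key ID and secret access key.

POST https://{endpoint}/v2/{project_id}/authorizations

{
  "user_id" : "****d80fb058844ae8b82aa66d9fe****",
  "type" : "credential",
  "content" : "{AK}",
  "secret_key" : "{SK}"
}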
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "result" : "true"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

Bad Request

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateDataset.html b/modelarts/api-ref/CreateDataset.html new file mode 100644 index 00000000..f5ddfa70 --- /dev/null +++ b/modelarts/api-ref/CreateDataset.html @@ -0,0 +1,1207 @@ + + +

Creating a Dataset

+

Function

This API is used to create a dataset.

+
+

URI

POST /v2/{project_id}/datasets

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

data_format

+

No

+

String

+

Data format. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
+

data_sources

+

No

+

Array of DataSource objects

+

Input dataset path, which is used to synchronize source data (such as images, text files, and audio files) in the directory and its subdirectories to the dataset. For a table dataset, this parameter indicates the import directory. The work directory of a table dataset cannot be an OBS path in a KMS-encrypted bucket.

+

dataset_name

+

Yes

+

String

+

Dataset name. The value contains 1 to 100 characters. Only letters, digits, underscores (_), and hyphens (-) are allowed, for example, dataset-9f3b.

+

dataset_type

+

No

+

Integer

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

description

+

No

+

String

+

Dataset description. The value is empty by default. The description contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

import_annotations

+

No

+

Boolean

+

Whether to automatically import the labeling information in the input directory. This is supported for object detection, image classification, and text classification. The options are as follows:

+
  • true: Import labeling information in the input directory. (Default value)

    +
  • false: Do not import labeling information in the input directory.

    +
+

import_data

+

No

+

Boolean

+

Whether to import data. This parameter is used only for table datasets. The options are as follows:

+
  • true: Import data when creating a dataset.

    +
  • false: Do not import data when creating a dataset. (Default value)

    +
+

label_format

+

No

+

LabelFormat object

+

Label format information. This parameter is used only for text datasets.

+

labels

+

No

+

Array of Label objects

+

Dataset label list.

+

managed

+

No

+

Boolean

+

Whether to host a dataset. The options are as follows:

+
  • true: Host a dataset.

    +
  • false: Do not host a dataset. (Default value)

    +
+

schema

+

No

+

Array of Field objects

+

Schema list.

+

work_path

+

Yes

+

String

+

Output dataset path, which is used to store output files such as label files.

+
  • The format is /Bucket name/File path, for example, /obs-bucket/flower/rose/. (The directory is used as the path.)

    +
  • A bucket cannot be directly used as a path.

    +
  • The output dataset path must be different from the input dataset path and its subdirectories.

    +
  • The value contains 3 to 700 characters.

    +
+

work_path_type

+

Yes

+

Integer

+

Type of the dataset output path. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
+

workforce_information

+

No

+

WorkforceInformation object

+

Team labeling information.

+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 DataSource

Parameter

+

Mandatory

+

Type

+

Description

+

data_path

+

No

+

String

+

Data source path.

+

data_type

+

No

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

No

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

No

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

No

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 SchemaMap

Parameter

+

Mandatory

+

Type

+

Description

+

dest_name

+

No

+

String

+

Name of the destination column.

+

src_name

+

No

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SourceInfo

Parameter

+

Mandatory

+

Type

+

Description

+

cluster_id

+

No

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

No

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

No

+

String

+

Name of an MRS cluster.

+

database_name

+

No

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

No

+

String

+

HDFS path of a table dataset.

+

ip

+

No

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

No

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

No

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

No

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

No

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

No

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

No

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

No

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelFormat

Parameter

+

Mandatory

+

Type

+

Description

+

label_type

+

No

+

String

+

Label type of text classification. The options are as follows:

+
  • 0: The label is separated from the text, and they are distinguished by the fixed suffix _result. For example, the text file is abc.txt, and the label file is abc_result.txt.

    +
  • 1: Default value. Labels and texts are stored in the same file and separated by separators. You can use text_sample_separator to specify the separator between the text and label and text_label_separator to specify the separator between labels.

    +
+

text_label_separator

+

No

+

String

+

Separator between labels. By default, the comma (,) is used as the separator. The separator needs to be escaped. Only one character can be used as the separator, and it must be a letter, a digit, or one of the following special characters (!@#$%^&*_=|?/':.;,).

+

text_sample_separator

+

No

+

String

+

Separator between the text and label. By default, the Tab key is used as the separator. The separator needs to be escaped. Only one character can be used as the separator, and it must be a letter, a digit, or one of the following special characters (!@#$%^&*_=|?/':.;,).

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 LabelAttribute

Parameter

+

Mandatory

+

Type

+

Description

+

default_value

+

No

+

String

+

Default value of a label attribute.

+

id

+

No

+

String

+

Label attribute ID.

+

name

+

No

+

String

+

Label attribute name.

+

type

+

No

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

No

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 9 LabelAttributeValue

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Label attribute value ID.

+

value

+

No

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 LabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 11 Field

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Schema description.

+

name

+

No

+

String

+

Schema name.

+

schema_id

+

No

+

Integer

+

Schema ID.

+

type

+

No

+

String

+

Schema value type.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 WorkforceInformation

Parameter

+

Mandatory

+

Type

+

Description

+

data_sync_type

+

No

+

Integer

+

Synchronization type. The options are as follows:

+
  • 0: not to be synchronized

    +
  • 1: data to be synchronized

    +
  • 2: label to be synchronized

    +
  • 3: data and label to be synchronized

    +
+

repetition

+

No

+

Integer

+

Number of persons who label each sample. The minimum value is 1.

+

synchronize_auto_labeling_data

+

No

+

Boolean

+

Whether to synchronously update auto labeling data. The options are as follows:

+
  • true: Update auto labeling data synchronously.

    +
  • false: Do not update auto labeling data synchronously.

    +
+

synchronize_data

+

No

+

Boolean

+

Whether to synchronize updated data, such as uploading files, synchronizing data sources, and assigning imported unlabeled files to team members. The options are as follows:

+
  • true: Synchronize updated data to team members.

    +
  • false: Do not synchronize updated data to team members.

    +
+

task_id

+

No

+

String

+

ID of a team labeling task.

+

task_name

+

Yes

+

String

+

Name of a team labeling task. The value contains 1 to 64 characters, including only letters, digits, underscores (_), and hyphens (-).

+

workforces_config

+

No

+

WorkforcesConfig object

+

Worker assignment for a team labeling task. You can either delegate the assignment to the team administrator or assign team members yourself.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 13 WorkforcesConfig

Parameter

+

Mandatory

+

Type

+

Description

+

agency

+

No

+

String

+

Administrator.

+

workforces

+

No

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 14 WorkforceConfig

Parameter

+

Mandatory

+

Type

+

Description

+

workers

+

No

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+

workforce_name

+

No

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 15 Worker

Parameter

+

Mandatory

+

Type

+

Description

+

create_time

+

No

+

Long

+

Creation time.

+

description

+

No

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

No

+

String

+

Email address of a labeling team member.

+

role

+

No

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

No

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

No

+

Long

+

Update time.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+
+
+
+

Response Parameters

Status code: 201

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 16 Response body parameters

Parameter

+

Type

+

Description

+

dataset_id

+

String

+

Dataset ID.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

import_task_id

+

String

+

ID of an import task.

+
+
+
+

Example Requests

+
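The following is an illustrative request body for creating an image classification dataset. The OBS bucket, paths, and label name are placeholders and must be replaced with your own values; see the parameter tables above for the full set of options.

POST https://{endpoint}/v2/{project_id}/datasets

{
  "workspace_id" : "0",
  "dataset_name" : "dataset-flower",
  "dataset_type" : 0,
  "data_sources" : [ {
    "data_type" : 0,
    "data_path" : "/obs-bucket/flower/rose/"
  } ],
  "description" : "",
  "work_path" : "/obs-bucket/flower/output/",
  "work_path_type" : 0,
  "labels" : [ {
    "name" : "Rose",
    "type" : 0,
    "property" : {
      "@modelarts:color" : "#3399ff"
    }
  } ]
}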
+

Example Responses

Status code: 201

+

Created

+
{
+  "dataset_id" : "WxCREuCkBSAlQr9xrde"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

201

+

Created

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateDatasetVersion.html b/modelarts/api-ref/CreateDatasetVersion.html new file mode 100644 index 00000000..e124cc91 --- /dev/null +++ b/modelarts/api-ref/CreateDatasetVersion.html @@ -0,0 +1,217 @@ + + +

Creating a Dataset Labeling Version

+

Function

This API is used to create a dataset labeling version.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/versions

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

clear_hard_property

+

No

+

Boolean

+

Whether to clear hard example properties. The options are as follows:

+
  • true: Clear hard example properties. (Default value)

    +
  • false: Do not clear hard example properties.

    +
+

description

+

No

+

String

+

Version description. The value is empty by default. The description contains 0 to 256 characters and does not support the following special characters: !<>=&"'

+

export_images

+

No

+

Boolean

+

Whether to export images to the version output directory during release. The options are as follows:

+
  • true: Export images to the version output directory.

    +
  • false: Do not export images to the version output directory. (Default value)

    +
+

remove_sample_usage

+

No

+

Boolean

+

Whether to clear the existing usage information of a dataset during release. The options are as follows:

+
  • true: Clear the existing usage information of a dataset. (Default value)

    +
  • false: Do not clear the existing usage information of a dataset.

    +
+

train_evaluate_sample_ratio

+

No

+

String

+

Ratio for splitting labeled samples into training and validation sets when the version is released. The default value is 1.00, indicating that all labeled samples are assigned to the training set.

+

version_format

+

No

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_name

+

No

+

String

+

Version name. The value contains 1 to 32 characters. Only letters, digits, underscores (_), and hyphens (-) are allowed.

+

with_column_header

+

No

+

Boolean

+

Whether to write the column name in the first line of the CSV file during release. This field is valid for the table dataset. The options are as follows:

+
  • true: Write the column name in the first line of the CSV file. (Default value)

    +
  • false: Do not write the column name in the first line of the CSV file.

    +
+
+
+
+

Response Parameters

Status code: 201

+ +
+ + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

version_id

+

String

+

Dataset version ID.

+
+
+
+

Example Requests

Creating a Dataset Labeling Version

+
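For completeness, the request body below is sent to the versions URI of the target dataset; dataset_id is a placeholder for your own dataset ID.

POST https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions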
{
+  "version_name" : "V004",
+  "version_format" : "Default",
+  "description" : "",
+  "clear_hard_property" : true
+}
+
+

Example Responses

Status code: 201

+

Created

+
{
+  "version_id" : "sntOdOuB0D9C6fC4TXs"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

201

+

Created

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateLabels.html b/modelarts/api-ref/CreateLabels.html new file mode 100644 index 00000000..31a02955 --- /dev/null +++ b/modelarts/api-ref/CreateLabels.html @@ -0,0 +1,432 @@ + + +

Creating a Dataset Label

+

Function

This API is used to create a dataset label.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of Label objects

+

List of labels to be created.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelAttribute

Parameter

+

Mandatory

+

Type

+

Description

+

default_value

+

No

+

String

+

Default value of a label attribute.

+

id

+

No

+

String

+

Label attribute ID.

+

name

+

No

+

String

+

Label attribute name.

+

type

+

No

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

No

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 LabelAttributeValue

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Label attribute value ID.

+

value

+

No

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response body for creating a label.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Creating a Dataset Label

+
{
+  "labels" : [ {
+    "name" : "Cat",
+    "property" : {
+      "@modelarts:color" : "#3399ff",
+      "@modelarts:default_shape" : "bndbox"
+    }
+  } ]
+}
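As a further illustration, the sketch below creates a label that carries a single-choice attribute. The label name, attribute name, and attribute values are hypothetical and follow the Label, LabelAttribute, and LabelAttributeValue structures described above.

{
  "labels" : [ {
    "name" : "Music",
    "property" : {
      "@modelarts:color" : "#93c47d"
    },
    "attributes" : [ {
      "name" : "Style",
      "type" : "select",
      "default_value" : "Jazz",
      "values" : [ {
        "value" : "Jazz"
      }, {
        "value" : "Classical"
      } ]
    } ]
  } ]
}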
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateModelArtsAgency.html b/modelarts/api-ref/CreateModelArtsAgency.html new file mode 100644 index 00000000..fa3aa8e4 --- /dev/null +++ b/modelarts/api-ref/CreateModelArtsAgency.html @@ -0,0 +1,118 @@ + + +

Creating a ModelArts Agency

+

Function

This API is used to create an agency so that ModelArts can access dependent services such as OBS, SWR, and IEF.

+
+

URI

POST /v2/{project_id}/agency

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain a project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

agency_name_suffix

+

No

+

String

+

Agency name suffix.

+

The parameter contains a maximum of 50 characters.

+

The agency name prefix is always ma_agency.

+

For example, if the value of this parameter is iam-user01, the name of the created agency is ma_agency_iam-user01.

+

The value of this parameter is left blank by default, indicating that an agency named modelarts_agency is created.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Create a ModelArts agency.

+
POST https://{endpoint}/v2/{project_id}/agency
+
+{
+  "agency_name_suffix" : "iam-user01"
+}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "agency_name" : "ma_agency_iam-user01"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

Bad Request

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateProcessorTask.html b/modelarts/api-ref/CreateProcessorTask.html new file mode 100644 index 00000000..8b241c5f --- /dev/null +++ b/modelarts/api-ref/CreateProcessorTask.html @@ -0,0 +1,587 @@ + + +

Creating a Processing Task

+

Function

This API is used to create a processing task, which can be a feature analysis task or a data processing task. Specify the id field of the template composite parameter in the request body to select the task type.

+ + +
+

URI

POST /v2/{project_id}/processor-tasks

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

create_version

+

No

+

Boolean

+

Whether to synchronously create a task version when creating a task. Set this parameter to true only when creating a data processing task. For other types of tasks, this parameter is set to false or left blank. The options are as follows:

+
  • true: Create a task version when creating a task.

    +
  • false: Do not create a task version when creating a task. (Default value)

    +
+

data_source

+

No

+

ProcessorDataSource object

+

Data source. Either this parameter or inputs is used. A data source path cannot be an OBS path in a KMS-encrypted bucket.

+

description

+

No

+

String

+

Description of a data processing task. The description contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

inputs

+

No

+

Array of ProcessorDataSource objects

+

Data sources. Either this parameter or data_source is used. A data source path cannot be an OBS path in a KMS-encrypted bucket.

+

name

+

Yes

+

String

+

Name of a data processing task.

+

template

+

No

+

TemplateParam object

+

Data processing template, such as the algorithm ID and parameters.

+

version_id

+

No

+

String

+

Dataset version ID.

+

work_path

+

No

+

WorkPath object

+

Work directory of a data processing task. A work directory cannot be an OBS path in a KMS-encrypted bucket.

+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ProcessorDataSource

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Dataset name.

+

source

+

No

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

No

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

No

+

String

+

Version of a dataset.

+

version_name

+

No

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 TemplateParam

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

No

+

String

+

Template name.

+

operator_params

+

No

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 OperatorParam

Parameter

+

Mandatory

+

Type

+

Description

+

advanced_params_switch

+

No

+

Boolean

+

Advanced parameter switch.

+

id

+

No

+

String

+

ID of an operator.

+

name

+

No

+

String

+

Name of an operator.

+

params

+

No

+

Object

+

Operator parameter. The parameter type is map<string,object>. Currently, object supports only the Boolean, Integer, Long, String, List, and Map<String,String> types. For the two special scenarios of object detection and image classification in a data preprocessing task, set task_type to object_detection or image_classification, respectively.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 WorkPath

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Dataset name.

+

output_path

+

No

+

String

+

Output path.

+

path

+

No

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, path is an OBS path.

    +
  • If type is set to DATASET, path is a dataset ID.

    +
+

type

+

No

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

No

+

String

+

Version of a dataset.

+

version_name

+

No

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

task_id

+

String

+

ID of a data processing task.

+
+
+
+

Example Requests

+
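The following is an illustrative request body for creating a data validation task, modeled on the data validation template used later in this document for task versions; the dataset ID, version ID, and version name are placeholders for your own values.

POST https://{endpoint}/v2/{project_id}/processor-tasks

{
  "name" : "PRE-e77c",
  "create_version" : true,
  "inputs" : [ {
    "type" : "DATASET",
    "source" : "PYc9H2HGv5BJNwBGXyK",
    "version_id" : "Osc8SZ7TZStiRV4vYkZ"
  } ],
  "work_path" : {
    "type" : "DATASET",
    "path" : "PYc9H2HGv5BJNwBGXyK",
    "version_name" : "V001"
  },
  "description" : "",
  "template" : {
    "id" : "sys_data_validation",
    "operator_params" : [ {
      "name" : "MetaValidation",
      "advanced_params_switch" : false,
      "params" : {
        "task_type" : "image_classification",
        "dataset_type" : "manifest",
        "source_service" : "select",
        "filter_func" : "data_validation_select",
        "image_max_width" : "1920",
        "image_max_height" : "1920",
        "total_status" : "[0,1,2]"
      }
    } ]
  },
  "workspace_id" : "0"
}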
+

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "SNEJua7qdZZN8GvkcEr"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateProcessorTaskVersion.html b/modelarts/api-ref/CreateProcessorTaskVersion.html new file mode 100644 index 00000000..d0d29198 --- /dev/null +++ b/modelarts/api-ref/CreateProcessorTaskVersion.html @@ -0,0 +1,487 @@ + + +

Creating a Data Processing Task Version

+

Function

This API is used to create a data processing task version.

+
+

URI

POST /v2/{project_id}/processor-tasks/{task_id}/versions

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

create_version

+

No

+

Boolean

+

Whether to synchronously create a task version when creating a task. The options are as follows:

+
  • true: Create a task version when creating a task.

    +
  • false: Do not create a task version when creating a task. (Default value)

    +
+

data_source

+

No

+

ProcessorDataSource object

+

Data source. Either this parameter or inputs is used.

+

description

+

No

+

String

+

Description of a data processing task. The description contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

inputs

+

No

+

Array of ProcessorDataSource objects

+

Data source list. Either this parameter or data_source is used.

+

name

+

Yes

+

String

+

Name of a data processing task.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+

template

+

No

+

TemplateParam object

+

Data processing template, such as the algorithm ID and parameters.

+

version_id

+

No

+

String

+

Dataset version ID.

+

work_path

+

No

+

WorkPath object

+

Working directory of a data processing task.

+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ProcessorDataSource

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Dataset name.

+

source

+

No

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

No

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

No

+

String

+

Version of a dataset.

+

version_name

+

No

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 TemplateParam

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

No

+

String

+

Template name.

+

operator_params

+

No

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 OperatorParam

Parameter

+

Mandatory

+

Type

+

Description

+

advanced_params_switch

+

No

+

Boolean

+

Advanced parameter switch.

+

id

+

No

+

String

+

ID of an operator.

+

name

+

No

+

String

+

Name of an operator.

+

params

+

No

+

Object

+

Operator parameter. The parameter type is map<string,object>. Currently, object supports only the Boolean, Integer, Long, String, List, and Map<String,String> types. For the two special scenarios of object detection and image classification in a data preprocessing task, set task_type to object_detection or image_classification, respectively.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 WorkPath

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Dataset name.

+

output_path

+

No

+

String

+

Output path.

+

path

+

No

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, path is an OBS path.

    +
  • If type is set to DATASET, path is a dataset ID.

    +
+

type

+

No

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

No

+

String

+

Version of a dataset.

+

version_name

+

No

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

version_id

+

String

+

Version ID of a data processing task.

+
+
+
+

Example Requests

Creating a Data Validation Task Version

+
{
+  "name" : "PRE-e77c",
+  "inputs" : [ {
+    "type" : "DATASET",
+    "source" : "PYc9H2HGv5BJNwBGXyK",
+    "version_id" : "Osc8SZ7TZStiRV4vYkZ"
+  } ],
+  "work_path" : {
+    "type" : "DATASET",
+    "path" : "PYc9H2HGv5BJNwBGXyK",
+    "version_name" : "V0011"
+  },
+  "description" : "",
+  "template" : {
+    "id" : "sys_data_validation",
+    "operator_params" : [ {
+      "name" : "MetaValidation",
+      "advanced_params_switch" : false,
+      "params" : {
+        "task_type" : "image_classification",
+        "dataset_type" : "manifest",
+        "source_service" : "select",
+        "filter_func" : "data_validation_select",
+        "image_max_width" : "1920",
+        "image_max_height" : "1920",
+        "total_status" : "[0,1,2]"
+      }
+    } ]
+  },
+  "workspace_id" : "0"
+}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "version_id" : "qSaudx2sbPvthHygckA"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateTask.html b/modelarts/api-ref/CreateTask.html new file mode 100644 index 00000000..bc88b7c2 --- /dev/null +++ b/modelarts/api-ref/CreateTask.html @@ -0,0 +1,1573 @@ + + +

Starting Intelligent Tasks

+

Function

This API is used to start an intelligent task, which can be an auto labeling task or an auto grouping task. Specify task_type in the request body to select the type of task to start. Datasets whose data path or work path is an OBS path in a KMS-encrypted bucket support pre-labeling but do not support active learning or auto grouping.

+

Auto labeling: Learning and training are performed based on the selected labels and images, and an existing model is used to quickly label the remaining images. Auto labeling includes active learning and pre-labeling. Active learning: The system uses semi-supervised learning and hard example filtering to perform auto labeling, reducing the manual labeling workload and helping you find hard examples. Pre-labeling: A model selected on the Model Management page is used for auto labeling.

+ +
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

collect_key_sample

+

No

+

Boolean

+

Whether to collect key samples. The options are as follows:

+
  • true: Collect key samples.

    +
  • false: Do not collect key samples. (Default value)

    +
+

config

+

No

+

SmartTaskConfig object

+

Task configuration.

+

model_id

+

No

+

String

+

Model ID.

+

task_type

+

No

+

String

+

Task type. The options are as follows:

+
  • auto-label: active learning

    +
  • pre-label: pre-labeling

    +
  • auto-grouping: auto grouping

    +
  • auto-deploy: one-click model deployment

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SmartTaskConfig

Parameter

+

Mandatory

+

Type

+

Description

+

algorithm_type

+

No

+

String

+

Algorithm type for auto labeling. Options:

+
  • fast: Only labeled samples are used for training. This type of algorithm achieves faster labeling.

    +
  • accurate: In addition to labeled samples, unlabeled samples are used for semi-supervised training. This type of algorithm achieves more accurate labeling.

    +
+

ambiguity

+

No

+

Boolean

+

Whether to perform clustering based on the image blurring degree.

+

annotation_output

+

No

+

String

+

Output path of the active learning labeling result.

+

collect_rule

+

No

+

String

+

Sample collection rule. The default value is all, indicating full collection. Currently, only the value all is supported.

+

collect_sample

+

No

+

Boolean

+

Whether to enable sample collection. The options are as follows:

+
  • true: Enable sample collection. (Default value)

    +
  • false: Do not enable sample collection.

    +
+

confidence_scope

+

No

+

String

+

Confidence range of key samples. The minimum and maximum values are separated by hyphens (-). Example: 0.10-0.90.

+

description

+

No

+

String

+

Task description.

+

engine_name

+

No

+

String

+

Engine name.

+

export_format

+

No

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

No

+

ExportParams object

+

Parameters of a dataset export task.

+

flavor

+

No

+

Flavor object

+

Training resource flavor.

+

image_brightness

+

No

+

Boolean

+

Whether to perform clustering based on the image brightness.

+

image_colorfulness

+

No

+

Boolean

+

Whether to perform clustering based on the image color.

+

inf_cluster_id

+

No

+

String

+

ID of a dedicated cluster. This parameter is left blank by default, indicating that a dedicated cluster is not used. When using the dedicated cluster to deploy services, ensure that the cluster status is normal. After this parameter is set, the network configuration of the cluster is used, and the vpc_id parameter does not take effect.

+

inf_config_list

+

No

+

Array of InfConfig objects

+

Configuration list required for running an inference task, which is optional and left blank by default.

+

inf_output

+

No

+

String

+

Output path of inference in active learning.

+

infer_result_output_dir

+

No

+

String

+

OBS directory for storing sample prediction results. This parameter is optional. The {service_id}-infer-result subdirectory in the output_dir directory is used by default.

+

key_sample_output

+

No

+

String

+

Output path of hard examples in active learning.

+

log_url

+

No

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank.

+

manifest_path

+

No

+

String

+

Path of the manifest file, which is used as the input for training and inference.

+

model_id

+

No

+

String

+

Model ID.

+

model_name

+

No

+

String

+

Model name.

+

model_parameter

+

No

+

String

+

Model parameter.

+

model_version

+

No

+

String

+

Model version.

+

n_clusters

+

No

+

Integer

+

Number of clusters.

+

name

+

No

+

String

+

Task name.

+

output_dir

+

No

+

String

+

Sample output path. The format is as follows: Dataset output path/Dataset name-Dataset ID/annotation/auto-deploy/. Example: /test/work_1608083108676/dataset123-g6IO9qSu6hoxwCAirfm/annotation/auto-deploy/.

+

parameters

+

No

+

Array of TrainingParameter objects

+

Running parameters of a training job.

+

pool_id

+

No

+

String

+

ID of a resource pool.

+

property

+

No

+

String

+

Attribute name.

+

req_uri

+

No

+

String

+

Inference path of a batch job.

+

result_type

+

No

+

Integer

+

Processing mode of auto grouping results. The options are as follows:

+
  • 0: Save to OBS.

    +
  • 1: Save to samples.

    +
+

samples

+

No

+

Array of SampleLabels objects

+

List of labeling information for samples to be auto labeled.

+

stop_time

+

No

+

Integer

+

Timeout interval, in minutes. The default value is 15 minutes. This parameter is used only in the scenario of auto labeling for videos.

+

time

+

No

+

String

+

Timestamp in active learning.

+

train_data_path

+

No

+

String

+

Path for storing existing training datasets.

+

train_url

+

No

+

String

+

OBS URL to which the output files of a training job are written. By default, this parameter is left blank.

+

version_format

+

No

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

worker_server_num

+

No

+

Integer

+

Number of workers in a training job.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 ExportParams

Parameter

+

Mandatory

+

Type

+

Description

+

clear_hard_property

+

No

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

No

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

No

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

No

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

No

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

No

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

No

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

No

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

No

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

No

+

String

+

Split ratio between the training set and validation set when a version is released. The default value is 1.00, indicating that the entire released version is used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SearchCondition

Parameter

+

Mandatory

+

Type

+

Description

+

coefficient

+

No

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

No

+

Integer

+

A frame in the video.

+

hard

+

No

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

No

+

String

+

Filter by data source.

+

kvp

+

No

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

No

+

SearchLabels object

+

Label search criteria.

+

labeler

+

No

+

String

+

Labeler.

+

metadata

+

No

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

No

+

String

+

Parent sample ID.

+

sample_dir

+

No

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

No

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

No

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples generated from September 1 to September 15, 2019 are searched.

    +
+

score

+

No

+

String

+

Search by confidence.

+

slice_thickness

+

No

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

No

+

String

+

DICOM scanning time.

+

time_in_video

+

No

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 6 SearchLabels

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

No

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SearchLabel

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Label name.

+

op

+

No

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

No

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 8 SearchProp

Parameter

+

Mandatory

+

Type

+

Description

+

op

+

No

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

No

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+ +
+ + + + + + + + + + + +
Table 9 Flavor

Parameter

+

Mandatory

+

Type

+

Description

+

code

+

No

+

String

+

Attribute code of a resource specification, which is used for task creation.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 InfConfig

Parameter

+

Mandatory

+

Type

+

Description

+

envs

+

No

+

Map<String,String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank. To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

instance_count

+

No

+

Integer

+

Number of instances for model deployment, that is, the number of compute nodes.

+

model_id

+

No

+

String

+

Model ID.

+

specification

+

No

+

String

+

Resource specifications of real-time services. For details, see Deploying Services.

+

weight

+

No

+

Integer

+

Traffic weight allocated to a model. This parameter is mandatory only when infer_type is set to real-time. The sum of the weights must be 100.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 11 TrainingParameter

Parameter

+

Mandatory

+

Type

+

Description

+

label

+

No

+

String

+

Parameter name.

+

value

+

No

+

String

+

Parameter value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 SampleLabels

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of SampleLabel objects

+

Sample label list. If this parameter is left blank, all sample labels are deleted.

+

metadata

+

No

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

sample_id

+

No

+

String

+

Sample ID.

+

sample_type

+

No

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

sample_usage

+

No

+

String

+

Sample usage. The options are as follows:

+
  • TRAIN: training

    +
  • EVAL: evaluation

    +
  • TEST: test

    +
  • INFERENCE: inference

    +
+

source

+

No

+

String

+

Source address of sample data.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 13 SampleLabel

Parameter

+

Mandatory

+

Type

+

Description

+

annotated_by

+

No

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

No

+

String

+

Label ID.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

No

+

Float

+

Confidence.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 SampleLabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:content

+

No

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

No

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

No

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

No

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

No

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

No

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

No

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Separate multiple hard sample reason IDs with hyphens (-), for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

No

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

No

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

No

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

No

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

No

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 15 SampleMetadata

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:hard

+

No

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

No

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

No

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 16 Response body parameters

Parameter

+

Type

+

Description

+

task_id

+

String

+

Task ID.

+
+
+
+

Example Requests

+
+
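
As a hypothetical illustration (not an official example), the following Python sketch starts a pre-labeling task using the parameters documented in Table 2 and Table 3. The endpoint, project ID, dataset ID, model ID, and token are placeholders, and the X-Auth-Token header assumes token-based authentication as described in Authentication.

import requests  # third-party: pip install requests

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

# Request body built from Table 2 (task_type, model_id, collect_key_sample, config)
# and Table 3 (SmartTaskConfig); all values are illustrative placeholders.
body = {
    "task_type": "pre-label",              # auto-label | pre-label | auto-grouping | auto-deploy
    "model_id": "<model ID from Model Management>",
    "collect_key_sample": True,
    "config": {"confidence_scope": "0.10-0.90"}
}

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks"
resp = requests.post(url, json=body, headers=headers)
print(resp.status_code, resp.json())       # 200 returns {"task_id": "..."}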

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "r0jT2zwxBDKf8KEnSuZ"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateWorker.html b/modelarts/api-ref/CreateWorker.html new file mode 100644 index 00000000..fb3e41eb --- /dev/null +++ b/modelarts/api-ref/CreateWorker.html @@ -0,0 +1,138 @@ + + +

Creating a Labeling Team Member

+

Function

This API is used to create a labeling team member.

+
+

URI

POST /v2/{project_id}/workforces/{workforce_id}/workers

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Member description. The description contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

emails

+

Yes

+

String

+

Email address of a labeling team member.

+

role

+

Yes

+

Integer

+

Member role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
+
+
+
+

Response Parameters

None

+
+

Example Requests

Creating a Labeling Team Member

+
{
+  "emails" : "xxx@xxx.com",
+  "description" : "",
+  "role" : "2"
+}
+
+

Example Responses

Status code: 201

+

Created

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

201

+

Created

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateWorkforce.html b/modelarts/api-ref/CreateWorkforce.html new file mode 100644 index 00000000..a6c65e93 --- /dev/null +++ b/modelarts/api-ref/CreateWorkforce.html @@ -0,0 +1,136 @@ + + +

Creating a Labeling Team

+

Function

This API is used to create a labeling team.

+
+

URI

POST /v2/{project_id}/workforces

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Labeling team description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

workforce_name

+

Yes

+

String

+

Labeling team name. The value contains 1 to 64 characters, including only letters, digits, underscores (_), and hyphens (-).

+
+
+
+

Response Parameters

Status code: 201

+ +
+ + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Creating a Labeling Team

+
{
+  "workforce_name" : "team-123",
+  "description" : "my team"
+}
+
+

Example Responses

Status code: 201

+

Created

+
{
+  "workforce_id" : "ZUH8gqkjuaib8pxkDdz"
+}
+
+
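
The workforce_id returned here is the path parameter required by Creating a Labeling Team Member. The following hypothetical Python sketch chains the two calls: it creates a team and then adds a member to it. The endpoint, project ID, e-mail address, and token are placeholders; the request bodies follow the tables in both topics.

import requests  # third-party: pip install requests

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

# Step 1: create the labeling team (POST /v2/{project_id}/workforces).
team = requests.post(f"{endpoint}/v2/{project_id}/workforces",
                     json={"workforce_name": "team-123", "description": "my team"},
                     headers=headers).json()
workforce_id = team["workforce_id"]        # returned with status code 201

# Step 2: add a member to the team (POST /v2/{project_id}/workforces/{workforce_id}/workers).
member = {"emails": "xxx@xxx.com", "role": 2, "description": ""}  # role 2: team administrator
resp = requests.post(f"{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers",
                     json=member, headers=headers)
print(resp.status_code)                    # 201 Created on success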

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

201

+

Created

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateWorkforceTask.html b/modelarts/api-ref/CreateWorkforceTask.html new file mode 100644 index 00000000..117a30a7 --- /dev/null +++ b/modelarts/api-ref/CreateWorkforceTask.html @@ -0,0 +1,392 @@ + + +

Creating a Team Labeling Task

+

Function

This API is used to create a team labeling task.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

auto_sync_dataset

+

No

+

Boolean

+

Whether to automatically synchronize the result of a team labeling task to the dataset. The options are as follows:

+
  • true: Automatically synchronize the result of a team labeling task to the dataset. (Default value)

    +
  • false: Do not automatically synchronize the result of a team labeling task to the dataset.

    +
+

data_sync_type

+

No

+

Integer

+

Synchronization type. The options are as follows:

+
  • 0: not to be synchronized

    +
  • 1: data to be synchronized

    +
  • 2: label to be synchronized

    +
  • 3: data and label to be synchronized

    +
+

repetition

+

No

+

Integer

+

Number of persons who label each sample in a team labeling task. The minimum value is 1.

+

sample_conditions

+

No

+

String

+

Search conditions of dataset samples. Samples that meet the conditions are filtered for team labeling.

+

synchronize_auto_labeling_data

+

No

+

Boolean

+

Whether to synchronize the auto labeling result of a team labeling task. The options are as follows:

+
  • true: Synchronize the results to be confirmed to team members after auto labeling is complete.

    +
  • false: Do not synchronize the auto labeling results. (Default value)

    +
+

workforces_config

+

No

+

WorkforcesConfig object

+

Team labeling task information: Tasks can be assigned by the team administrator or a specified team.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 WorkforcesConfig

Parameter

+

Mandatory

+

Type

+

Description

+

agency

+

No

+

String

+

Administrator.

+

workforces

+

No

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 WorkforceConfig

Parameter

+

Mandatory

+

Type

+

Description

+

workers

+

No

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+

workforce_name

+

No

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Worker

Parameter

+

Mandatory

+

Type

+

Description

+

create_time

+

No

+

Long

+

Creation time.

+

description

+

No

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

No

+

String

+

Email address of a labeling team member.

+

role

+

No

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

No

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

No

+

Long

+

Update time.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 6 Response body parameters

Parameter

+

Type

+

Description

+

task_id

+

String

+

ID of a team labeling task.

+
+
+
+

Example Requests

Creating a Team Labeling Task

+
{
+  "workspace_id" : "0",
+  "task_name" : "task-eb17",
+  "task_type" : 0,
+  "description" : "",
+  "version_id" : "",
+  "labels" : [ {
+    "name" : "Cat",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  }, {
+    "name" : "Dog",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  } ],
+  "synchronize_data" : false,
+  "synchronize_auto_labeling_data" : false,
+  "workforces_config" : {
+    "workforces" : [ {
+      "workforce_id" : "feSUo5NUIUnQAQNNTiS",
+      "workers" : [ {
+        "email" : "xxx@xxx.com"
+      }, {
+        "email" : "xxx@xxx.com"
+      }, {
+        "email" : "xxx@xxx.com"
+      } ]
+    } ]
+  },
+  "auto_sync_dataset" : false
+}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "6phXEto29utpaMwbQkg"
+}
+
+
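
As a hypothetical supplement to the example above, the following Python sketch builds a request body using only the parameters documented in Tables 2 to 5 and submits it. The endpoint, project ID, dataset ID, labeling team ID, e-mail address, and token are placeholders.

import requests  # third-party: pip install requests

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

body = {
    "repetition": 1,                               # each sample is labeled by one person
    "synchronize_auto_labeling_data": False,
    "auto_sync_dataset": False,
    "workforces_config": {                         # WorkforcesConfig (Table 3)
        "workforces": [{                           # WorkforceConfig (Table 4)
            "workforce_id": "<labeling team ID>",
            "workers": [{"email": "xxx@xxx.com"}]  # Worker (Table 5)
        }]
    }
}

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks"
resp = requests.post(url, json=body, headers=headers)
print(resp.status_code, resp.json())               # 200 returns {"task_id": "..."}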

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/CreateWorkspace.html b/modelarts/api-ref/CreateWorkspace.html new file mode 100644 index 00000000..f481abfe --- /dev/null +++ b/modelarts/api-ref/CreateWorkspace.html @@ -0,0 +1,329 @@ + + +

Creating a Workspace

+

Function

This API is used to create a workspace. The name of the created workspace cannot be default, which is the name of the default workspace reserved by the system.

+
+

URI

POST /v1/{project_id}/workspaces

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

Yes

+

String

+

Workspace name, encoded using UTF-8. Enter 4 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed. In addition, default is the name of the default workspace reserved by the system. You are not allowed to create a workspace named default.

+

description

+

No

+

String

+

Workspace description. By default, this parameter is left blank. Enter 0 to 256 characters.

+

enterprise_project_id

+

No

+

String

+

Enterprise project ID. The method of obtaining an enterprise project ID is the same as that of obtaining a project ID.

+

Default: 0

+

auth_type

+

No

+

String

+

Authorization type. Options:

+
  • PUBLIC: public access of tenants (default value)

    +
  • PRIVATE: accessible only to the creator and primary account

    +
  • INTERNAL: accessible to the creator, primary account, and specified IAM users. This parameter must be used together with grants.

    +
+

grants

+

No

+

Array of grants objects

+

List of authorized users, which is left blank by default. This parameter must be used together with auth_type and takes effect only when auth_type is set to INTERNAL.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 grants

Parameter

+

Mandatory

+

Type

+

Description

+

user_id

+

No

+

String

+

User ID. For details about how to obtain a user ID, see Obtaining a User ID. Either this parameter or user_name must be set. If both of them are set, user_id is used preferentially.

+

user_name

+

No

+

String

+

IAM username. Either this parameter or user_id must be set.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Response body parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID, which is a 32-character UUID generated by the system without hyphens (-). The ID of the default workspace is 0.

+

name

+

String

+

Workspace name

+

description

+

String

+

Workspace description. Enter 0 to 256 characters.

+

owner

+

String

+

Creator name. Enter 0 to 64 characters.

+

create_time

+

Number

+

Time when a workspace was created, in UTC format

+

update_time

+

Number

+

Last modification time, in UTC format

+

auth_type

+

String

+

Authorization type. Options:

+
  • PUBLIC: public access of tenants (default value)

    +
  • PRIVATE: accessible only to the creator and primary account

    +
  • INTERNAL: accessible to the creator, primary account, and specified IAM users. This parameter must be used together with grants.

    +
+

enterprise_project_id

+

String

+

Enterprise project ID

+

enterprise_project_name

+

String

+

Name of an enterprise project

+

status

+

String

+

Workspace status. Options:

+
  • CREATE_FAILED: Creating the workspace failed.

    +
  • NORMAL: The workspace is running properly.

    +
  • DELETING: The workspace is being deleted.

    +
  • DELETE_FAILED: Deleting the workspace failed.

    +
+

status_info

+

String

+

Status description. By default, this parameter is left blank. This parameter is used to show detailed information about a status. If a deletion failed, you can use this parameter to obtain the failure cause.

+

grants

+

Array of grants objects

+

List of authorized users, which is left blank by default. This parameter must be used together with auth_type and takes effect only when auth_type is set to INTERNAL.

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 grants

Parameter

+

Type

+

Description

+

user_id

+

String

+

User ID. Either this parameter or user_name must be set. If both of them are set, user_id is used preferentially.

+

user_name

+

String

+

IAM username. Either this parameter or user_id must be set.

+
+
+
+

Example Requests

Creating a Workspace

+
POST https://{endpoint}/v1/{project_id}/workspaces
+
+{
+  "name" : "test-workspace",
+  "description" : "It is a test project",
+  "enterprise_project_id" : "***b0091-887f-4839-9929-cbc884f1e***",
+  "auth_type" : "internal",
+  "grants" : [ {
+    "user_name" : "test"
+  } ]
+}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "id" : "**d05d1a553b4e188ea878e7dcb85e**",
+  "name" : "test-workspace",
+  "description" : "It is a test project'",
+  "owner" : "testUser",
+  "create_time" : 1470000020000,
+  "update_time" : 1470000030000,
+  "enterprise_project_id" : "***b0091-887f-4839-9929-cbc884f1e***",
+  "enterprise_project_name" : "test-eps",
+  "auth_type" : "public",
+  "status" : "NORMAL",
+  "status_info" : ""
+}
+
+
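
As a hypothetical variant of the example above, the following Python sketch creates a workspace that is accessible only to specified IAM users (auth_type INTERNAL used together with grants, as documented in Table 2) and reads the fields returned in Table 4. The endpoint, project ID, user name, and token are placeholders; the uppercase auth_type value follows Table 2, whereas the example request above uses lowercase.

import requests  # third-party: pip install requests

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

body = {
    "name": "test-workspace",
    "description": "It is a test project",
    "auth_type": "INTERNAL",               # accessible to the creator, primary account, and grants
    "grants": [{"user_name": "test"}]      # takes effect only when auth_type is INTERNAL
}

resp = requests.post(f"{endpoint}/v1/{project_id}/workspaces", json=body, headers=headers)
ws = resp.json()
print(ws["id"], ws["status"])              # e.g. a system-generated ID and NORMAL / CREATE_FAILED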

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteAuthorizations.html b/modelarts/api-ref/DeleteAuthorizations.html new file mode 100644 index 00000000..7450e651 --- /dev/null +++ b/modelarts/api-ref/DeleteAuthorizations.html @@ -0,0 +1,111 @@ + + +

Deleting Authorization

+

Function

This API is used to delete the authorization of a specified user or all users.

+
+

URI

DELETE /v2/{project_id}/authorizations

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain a project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

user_id

+

No

+

String

+

User ID. If this parameter is set to all, the authorization of all IAM users will be deleted.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Delete the authorization of a specified user.

+
DELETE https://{endpoint}/v2/{project_id}/authorizations?user_id=****d80fb058844ae8b82aa66d9fe****
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "result" : "true"
+}
+
+
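
The same request can be issued programmatically. The following hypothetical Python sketch deletes the authorization of one user; passing user_id set to all instead deletes the authorization of all IAM users, as described in Table 2. The endpoint, project ID, user ID, and token are placeholders.

import requests  # third-party: pip install requests

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

# Delete one user's authorization; use {"user_id": "all"} to delete all users' authorization.
resp = requests.delete(f"{endpoint}/v2/{project_id}/authorizations",
                       params={"user_id": "<IAM user ID>"},
                       headers=headers)
print(resp.status_code, resp.json())       # 200 with {"result": "true"} on success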

Status Codes

+
+ + + + + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

Bad Request

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteDataset.html b/modelarts/api-ref/DeleteDataset.html new file mode 100644 index 00000000..c72e82e0 --- /dev/null +++ b/modelarts/api-ref/DeleteDataset.html @@ -0,0 +1,85 @@ + + +

Deleting a Dataset

+

Function

This API is used to delete a dataset without deleting the source data of the dataset.

+
+

URI

DELETE /v2/{project_id}/datasets/{dataset_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Dataset

+
DELETE https://{endpoint}/v2/{project_id}/datasets/{dataset_id}
+
+

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteDatasetVersion.html b/modelarts/api-ref/DeleteDatasetVersion.html new file mode 100644 index 00000000..594fa7b9 --- /dev/null +++ b/modelarts/api-ref/DeleteDatasetVersion.html @@ -0,0 +1,94 @@ + + +

Deleting a Dataset Labeling Version

+

Function

This API is used to delete a dataset labeling version.

+
+

URI

DELETE /v2/{project_id}/datasets/{dataset_id}/versions/{version_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

version_id

+

Yes

+

String

+

Dataset version ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Dataset Labeling Version

+
DELETE https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions/{version_id}
+
+

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteLabelAndSamples.html b/modelarts/api-ref/DeleteLabelAndSamples.html new file mode 100644 index 00000000..30e41ee1 --- /dev/null +++ b/modelarts/api-ref/DeleteLabelAndSamples.html @@ -0,0 +1,178 @@ + + +

Deleting a Label and the Files that Only Contain the Label

+

Function

This API is used to delete a label and the files that only contain this label.

+
+

URI

DELETE /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

label_name

+

Yes

+

String

+

Label name.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

delete_source

+

No

+

Boolean

+

Whether to delete the sample source files. The options are as follows:

+
  • true: Delete the sample source files.

    +
  • false: Do not delete the sample source files. (Default value)

    +
+

label_type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 204

+ +
+ + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Deleting a Label and the Files that Only Contain the Label

+
DELETE https://{endpoint}/v2/{project_id}/datasets/WxCREuCkBSAlQr9xrde/data-annotations/labels/%E8%8D%89%E8%8E%93
+
+

Example Responses

Status code: 204

+

No Content

+
{
+  "success" : true
+}
+
+
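
The label name in the example URL above is percent-encoded (the path segment %E8%8D%89%E8%8E%93 is the UTF-8 encoding of a non-ASCII label name). The following hypothetical Python sketch shows one way to build such a request, including the optional delete_source and label_type query parameters from Table 2; the endpoint, project ID, dataset ID, label name, and token are placeholders.

import requests                 # third-party: pip install requests
from urllib.parse import quote  # standard library; percent-encodes the label name for the path

endpoint = "https://{endpoint}"            # replace with your region endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
headers = {"X-Auth-Token": "<IAM token>"}  # token-based authentication, see Authentication

label_name = quote("<label name>", safe="")  # non-ASCII names must be percent-encoded
url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/data-annotations/labels/{label_name}")

resp = requests.delete(url,
                       params={"delete_source": "false", "label_type": 0},
                       headers=headers)
print(resp.status_code, resp.json())       # 204 with {"success": true} on success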

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteLabels.html b/modelarts/api-ref/DeleteLabels.html new file mode 100644 index 00000000..f38bed67 --- /dev/null +++ b/modelarts/api-ref/DeleteLabels.html @@ -0,0 +1,455 @@ + + +

Deleting Labels in Batches

+

Function

This API is used to delete labels in batches.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/delete

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

delete_policy

+

No

+

Integer

+

Whether to delete a label and samples containing the label. The options are as follows:

+
  • 0: Delete the label.

    +
  • 1: Delete the label and the samples that only contain this label, but do not delete source files.

    +
  • 2: Delete the label and the samples that only contain this label and also delete source files.

    +
+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 3 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of Label objects

+

List of labels to be deleted.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 LabelAttribute

Parameter

+

Mandatory

+

Type

+

Description

+

default_value

+

No

+

String

+

Default value of a label attribute.

+

id

+

No

+

String

+

Label attribute ID.

+

name

+

No

+

String

+

Label attribute name.

+

type

+

No

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

No

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 6 LabelAttributeValue

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Label attribute value ID.

+

value

+

No

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 LabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response body for deleting a label.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 9 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Deleting Labels in Batches

+
{
+  "labels" : [ {
+    "name" : "strawberry"
+  } ]
+}
+
+
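
The request body above can be sent with any HTTP client. Below is a minimal Python sketch using the requests library; it assumes this API is called with POST against the URI given earlier in this topic (not repeated here), that delete_policy is passed as a query parameter as described above, and that a valid IAM token is supplied in the X-Auth-Token header. The endpoint and IDs are placeholders.

import requests

# Placeholders and assumptions: fill in your own values. The resource path is
# not repeated here; use the URI defined in the URI section of this topic.
endpoint = "https://modelarts.example.com"        # assumed region endpoint
url = endpoint + "<URI of this API>"              # includes project and dataset IDs
headers = {
    "X-Auth-Token": "<IAM token>",                # token-based authentication
    "Content-Type": "application/json",
}
body = {"labels": [{"name": "strawberry"}]}       # labels to delete, as in the example above

# delete_policy=1 would also delete samples that only contain this label.
resp = requests.post(url, json=body, params={"delete_policy": 0}, headers=headers)
print(resp.status_code, resp.json())              # expect 200 and {"success": true}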

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteProcessorTask.html b/modelarts/api-ref/DeleteProcessorTask.html new file mode 100644 index 00000000..b8e1ce40 --- /dev/null +++ b/modelarts/api-ref/DeleteProcessorTask.html @@ -0,0 +1,85 @@ + + +

Deleting a Processing Task

+

Function

This API is used to delete a processing task. You can delete feature analysis tasks and data processing tasks. A specific task can be deleted by specifying the task_id path parameter.

+
+

URI

DELETE /v2/{project_id}/processor-tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Data Processing Task

+
DELETE https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}
+
+
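
For reference, the same call as a minimal Python sketch using the requests library. The endpoint and IDs are placeholders, and the X-Auth-Token header is assumed to carry a valid IAM token obtained as described in the Authentication section of this reference.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
task_id = "<task_id>"

url = f"{endpoint}/v2/{project_id}/processor-tasks/{task_id}"
headers = {"X-Auth-Token": "<IAM token>"}    # token-based authentication

resp = requests.delete(url, headers=headers)
# A successful deletion returns status code 200 with an empty JSON body ({ }).
print(resp.status_code, resp.text)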

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteProcessorTaskVersion.html b/modelarts/api-ref/DeleteProcessorTaskVersion.html new file mode 100644 index 00000000..3882ec9a --- /dev/null +++ b/modelarts/api-ref/DeleteProcessorTaskVersion.html @@ -0,0 +1,94 @@ + + +

Deleting a Data Processing Task Version

+

Function

This API is used to delete a data processing task version.

+
+

URI

DELETE /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+

version_id

+

Yes

+

String

+

Version ID of a data processing task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Data Processing Task Version

+
DELETE https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}
+
+

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteSamples.html b/modelarts/api-ref/DeleteSamples.html new file mode 100644 index 00000000..8d3e810f --- /dev/null +++ b/modelarts/api-ref/DeleteSamples.html @@ -0,0 +1,207 @@ + + +

Deleting Samples in Batches

+

Function

This API is used to delete samples in batches.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/delete

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

delete_source

+

No

+

Boolean

+

Whether to delete the source file. This field is valid only for non-text datasets. (For a text dataset, the source is the entire text file, so deleting an individual sample does not affect the source text.) The options are as follows:

+
  • false: Do not delete the source file. (Default value)

    +
  • true: Delete the source file. (Note: This may affect dataset versions or other datasets that use these files, causing abnormal page display, training, or inference.)

    +
+

samples

+

No

+

Array of strings

+

Sample ID list.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response list for deleting samples in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Deleting Samples in Batches

+
{
+  "samples" : [ "9cb9bc9b34bf53b6ec9a84998b1711bf", "9ea63ef78d8c9037c9bcb12b477821bf" ]
+}
+
+
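
A minimal Python sketch of this request using the requests library is shown below; it also sets the optional delete_source flag described above. The endpoint, project ID, dataset ID, and token are placeholders.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
dataset_id = "<dataset_id>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/delete"
headers = {"X-Auth-Token": "<IAM token>", "Content-Type": "application/json"}
body = {
    "samples": ["9cb9bc9b34bf53b6ec9a84998b1711bf", "9ea63ef78d8c9037c9bcb12b477821bf"],
    "delete_source": False,   # keep the source files (default behavior)
}

resp = requests.post(url, json=body, headers=headers)
result = resp.json()
print(result.get("success"))          # True if all samples were deleted
for item in result.get("results", []):
    if not item.get("success"):
        print(item.get("error_code"), item.get("error_msg"))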

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteWorker.html b/modelarts/api-ref/DeleteWorker.html new file mode 100644 index 00000000..4a73aa29 --- /dev/null +++ b/modelarts/api-ref/DeleteWorker.html @@ -0,0 +1,94 @@ + + +

Deleting a Labeling Team Member

+

Function

This API is used to delete a labeling team member.

+
+

URI

DELETE /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

worker_id

+

Yes

+

String

+

ID of a labeling team member.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Labeling Team Member

+
DELETE https://{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}
+
+

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteWorkers.html b/modelarts/api-ref/DeleteWorkers.html new file mode 100644 index 00000000..ec7325cb --- /dev/null +++ b/modelarts/api-ref/DeleteWorkers.html @@ -0,0 +1,200 @@ + + +

Deleting Labeling Team Members in Batches

+

Function

This API is used to delete labeling team members in batches.

+
+

URI

POST /v2/{project_id}/workforces/{workforce_id}/workers/batch-delete

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

workers

+

No

+

Array of strings

+

Team member ID list.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Result of deleting team members in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Deleting Labeling Team Members in Batches

+
{
+  "workers" : [ "89d4ae38431b8905449821605abdc3a9", "a2abd3f27b4e92c593c15282f8b6bd29" ]
+}
+
+
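
The sketch below (Python, requests library) submits the member IDs from the example above and then reads the per-member results. Note that pairing each result with the submitted ID by position is an assumption; the response itself only returns the results array. Endpoint and IDs are placeholders.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
workforce_id = "<workforce_id>"
worker_ids = ["89d4ae38431b8905449821605abdc3a9", "a2abd3f27b4e92c593c15282f8b6bd29"]

url = f"{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers/batch-delete"
headers = {"X-Auth-Token": "<IAM token>", "Content-Type": "application/json"}

resp = requests.post(url, json={"workers": worker_ids}, headers=headers)
body = resp.json()
# Assumption: the results array follows the order of the submitted member IDs.
for worker_id, result in zip(worker_ids, body.get("results", [])):
    print(worker_id, "deleted" if result.get("success") else result.get("error_msg"))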

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true,
+  "results" : [ {
+    "success" : true
+  }, {
+    "success" : true
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteWorkforce.html b/modelarts/api-ref/DeleteWorkforce.html new file mode 100644 index 00000000..b6859e90 --- /dev/null +++ b/modelarts/api-ref/DeleteWorkforce.html @@ -0,0 +1,164 @@ + + +

Deleting a Labeling Team

+

Function

This API is used to delete a labeling team.

+
+

URI

DELETE /v2/{project_id}/workforces/{workforce_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 204

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Result of deleting team members in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 3 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Deleting a Labeling Team

+
DELETE https://{endpoint}/v2/{project_id}/workforces/{workforce_id}
+
+

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteWorkforceTask.html b/modelarts/api-ref/DeleteWorkforceTask.html new file mode 100644 index 00000000..f75e1906 --- /dev/null +++ b/modelarts/api-ref/DeleteWorkforceTask.html @@ -0,0 +1,94 @@ + + +

Deleting a Team Labeling Task

+

Function

This API is used to delete a team labeling task.

+
+

URI

DELETE /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Deleting a Team Labeling Task

+
DELETE https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}
+
+
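
A minimal Python sketch of this call is shown below; a successful deletion returns 204 No Content with an empty body. Endpoint and IDs are placeholders, and the X-Auth-Token header is assumed to carry a valid IAM token.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
dataset_id = "<dataset_id>"
workforce_task_id = "<workforce_task_id>"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}")
headers = {"X-Auth-Token": "<IAM token>"}

resp = requests.delete(url, headers=headers)
if resp.status_code == 204:
    print("Team labeling task deleted")       # 204 No Content on success
else:
    print("Deletion failed:", resp.status_code, resp.text)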

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DeleteWorkspace.html b/modelarts/api-ref/DeleteWorkspace.html new file mode 100644 index 00000000..90736505 --- /dev/null +++ b/modelarts/api-ref/DeleteWorkspace.html @@ -0,0 +1,111 @@ + + +

Deleting a Workspace

+

Function

This API is used to delete a workspace.

+
+

URI

DELETE /v1/{project_id}/workspaces/{workspace_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workspace_id

+

Yes

+

String

+

Workspace ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

workspace_id

+

String

+

Workspace ID, which is a 32-character UUID generated by the system, without hyphens (-). The ID of the default workspace is 0.

+
+
+
+

Example Requests

Deleting a Workspace

+
DELETE https://{endpoint}/v1/{project_id}/workspaces/{workspace_id}
+
+
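
A minimal Python sketch using the requests library; on success the service returns the ID of the deleted workspace, as in the example response below. Endpoint and IDs are placeholders.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
workspace_id = "<workspace_id>"

url = f"{endpoint}/v1/{project_id}/workspaces/{workspace_id}"
headers = {"X-Auth-Token": "<IAM token>"}

resp = requests.delete(url, headers=headers)
resp.raise_for_status()                       # 400/403/500 indicate a failure
print("Deleted workspace:", resp.json()["workspace_id"])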

Example Responses

Status code: 200

+

OK

+
{
+  "workspace_id" : "***05d1a553b4e188ea878e7dcb85***"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescDataset.html b/modelarts/api-ref/DescDataset.html new file mode 100644 index 00000000..cd6450a6 --- /dev/null +++ b/modelarts/api-ref/DescDataset.html @@ -0,0 +1,1624 @@ + + +

Querying Details About a Dataset

+

Function

This API is used to query details about a dataset.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

check_running_task

+

No

+

Boolean

+

Whether to detect tasks (including initialization tasks) that are running in a dataset. The options are as follows:

+
  • true: Detect tasks (including initialization tasks) that are running in the dataset.

    +
  • false: Do not detect tasks that are running in the dataset. (Default value)

    +
+

running_task_type

+

No

+

Integer

+

Type of the running tasks (including initialization tasks) to be detected. The options are as follows:

+
  • 0: auto labeling

    +
  • 1: pre-labeling

    +
  • 2: export

    +
  • 3: version switch

    +
  • 4: manifest file export

    +
  • 5: manifest file import

    +
  • 6: version publishing

    +
  • 7: auto grouping

    +
  • 10: one-click model deployment (default value)

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Integer

+

Number of labeled samples in a dataset.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

content_labeling

+

Boolean

+

Whether to enable content labeling for the speech paragraph labeling dataset. This function is enabled by default.

+

create_time

+

Long

+

Time when a dataset is created.

+

current_version_id

+

String

+

Current version ID of a dataset.

+

current_version_name

+

String

+

Current version name of a dataset.

+

data_format

+

String

+

Data format.

+

data_sources

+

Array of DataSource objects

+

Data source list.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata.

+

data_update_time

+

Long

+

Time when a sample and a label are updated.

+

dataset_format

+

Integer

+

Dataset format. The options are as follows:

+
  • 0: file

    +
  • 1: table

    +
+

dataset_id

+

String

+

Dataset ID.

+

dataset_name

+

String

+

Dataset name.

+

dataset_tags

+

Array of strings

+

Key identifier list of a dataset, for example, ["Image","Object detection"].

+

dataset_type

+

Integer

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

dataset_version_count

+

Integer

+

Number of dataset versions.

+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Dataset description.

+

enterprise_project_id

+

String

+

Enterprise project ID.

+

exist_running_task

+

Boolean

+

Whether the dataset contains running (including initialization) tasks. The options are as follows:

+
  • true: The dataset contains running tasks.

    +
  • false: The dataset does not contain running tasks.

    +
+

exist_workforce_task

+

Boolean

+

Whether the dataset contains team labeling tasks. The options are as follows:

+
  • true: The dataset contains team labeling tasks.

    +
  • false: The dataset does not contain team labeling tasks.

    +
+

feature_supports

+

Array of strings

+

List of features supported by the dataset. Currently, only the value 0 is supported, indicating that the OBS file size is limited.

+

import_data

+

Boolean

+

Whether to import data. The options are as follows:

+
  • true: Import data.

    +
  • false: Do not import data.

    +
+

import_task_id

+

String

+

ID of an import task.

+

inner_annotation_path

+

String

+

Path for storing the labeling result of a dataset.

+

inner_data_path

+

String

+

Path for storing the internal data of a dataset.

+

inner_log_path

+

String

+

Path for storing internal logs of a dataset.

+

inner_task_path

+

String

+

Path for internal task of a dataset.

+

inner_temp_path

+

String

+

Path for storing internal temporary files of a dataset.

+

inner_work_path

+

String

+

Output directory of a dataset.

+

label_task_count

+

Integer

+

Number of labeling tasks.

+

labels

+

Array of Label objects

+

Dataset label list.

+

loading_sample_count

+

Integer

+

Number of loading samples.

+

managed

+

Boolean

+

Whether a dataset is hosted. The options are as follows:

+
  • true: The dataset is hosted.

    +
  • false: The dataset is not hosted.

    +
+

next_version_num

+

Integer

+

Number of next versions of a dataset.

+

running_tasks_id

+

Array of strings

+

ID list of running (including initialization) tasks.

+

schema

+

Array of Field objects

+

Schema list.

+

status

+

Integer

+

Dataset status. The options are as follows:

+
  • 0: creating dataset

    +
  • 1: normal dataset

    +
  • 2: deleting dataset

    +
  • 3: deleted dataset

    +
  • 4: abnormal dataset

    +
  • 5: synchronizing dataset

    +
  • 6: releasing dataset

    +
  • 7: dataset in version switching

    +
  • 8: importing dataset

    +
+

third_path

+

String

+

Third-party path.

+

total_sample_count

+

Integer

+

Total number of dataset samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples. For example, for a video labeling dataset, the total number of key frame images extracted from the videos is the number of subsamples.

+

unconfirmed_sample_count

+

Integer

+

Number of auto labeling samples to be confirmed.

+

update_time

+

Long

+

Time when a dataset is updated.

+

versions

+

Array of DatasetVersion objects

+

Dataset version information. Currently, only the current version information of a dataset is recorded.

+

work_path

+

String

+

Output dataset path, which is used to store output files such as label files. The path is an OBS path in the format of /Bucket name/File path. For example: /obs-bucket.

+

work_path_type

+

Integer

+

Type of the dataset output path. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
+

workforce_descriptor

+

WorkforceDescriptor object

+

Team labeling information.

+

workforce_task_count

+

Integer

+

Number of team labeling tasks of a dataset.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DataSource

Parameter

+

Type

+

Description

+

data_path

+

String

+

Data source path.

+

data_type

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 5 SchemaMap

Parameter

+

Type

+

Description

+

dest_name

+

String

+

Name of the destination column.

+

src_name

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SourceInfo

Parameter

+

Type

+

Description

+

cluster_id

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

String

+

Name of an MRS cluster.

+

database_name

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

String

+

HDFS path of a table dataset.

+

ip

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 Label

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 Field

Parameter

+

Type

+

Description

+

description

+

String

+

Schema description.

+

name

+

String

+

Schema name.

+

schema_id

+

Integer

+

Schema ID.

+

type

+

String

+

Schema value type.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 DatasetVersion

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added samples.

+

annotated_sample_count

+

Integer

+

Number of labeled samples in the version.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

clear_hard_property

+

Boolean

+

Whether to clear hard example properties during release. The options are as follows:

+
  • true: Clear hard example properties. (Default value)

    +
  • false: Do not clear hard example properties.

    +
+

code

+

String

+

Status code of a preprocessing task such as rotation and cropping.

+

create_time

+

Long

+

Time when a version is created.

+

crop

+

Boolean

+

Whether to crop the image. This field is valid only for the object detection dataset whose labeling box is in the rectangle shape. The options are as follows:

+
  • true: Crop the image.

    +
  • false: Do not crop the image. (Default value)

    +
+

crop_path

+

String

+

Path for storing cropped files.

+

crop_rotate_cache_path

+

String

+

Temporary directory for executing the rotation and cropping task.

+

data_path

+

String

+

Path for storing data.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata in JSON format.

+

data_validate

+

Boolean

+

Whether data is validated by the validation algorithm before release. The options are as follows:

+
  • true: The data has been validated.

    +
  • false: The data has not been validated.

    +
+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Description of a version.

+

export_images

+

Boolean

+

Whether to export images to the version output directory during release. The options are as follows:

+
  • true: Export images to the version output directory.

    +
  • false: Do not export images to the version output directory. (Default value)

    +
+

extract_serial_number

+

Boolean

+

Whether to parse the subsample number during release. The field is valid for the healthcare dataset. The options are as follows:

+
  • true: Parse the subsample number.

    +
  • false: Do not parse the subsample number. (Default value)

    +
+

include_dataset_data

+

Boolean

+

Whether to include the source data of a dataset during release. The options are as follows:

+
  • true: The source data of a dataset is included.

    +
  • false: The source data of a dataset is not included.

    +
+

is_current

+

Boolean

+

Whether the current dataset version is used. The options are as follows:

+
  • true: The current dataset version is used.

    +
  • false: The current dataset version is not used.

    +
+

label_stats

+

Array of LabelStats objects

+

Label statistics list of a released version.

+

label_type

+

String

+

Label type of a released version. The options are as follows:

+
  • multi: Multi-label samples are included.

    +
  • single: All samples are single-labeled.

    +
+

manifest_cache_input_path

+

String

+

Input path for the manifest file cache during version release.

+

manifest_path

+

String

+

Path for storing the manifest file with the released version.

+

message

+

String

+

Task information recorded during release (for example, error information).

+

modified_sample_count

+

Integer

+

Number of modified samples.

+

previous_annotated_sample_count

+

Integer

+

Number of labeled samples of parent versions.

+

previous_total_sample_count

+

Integer

+

Total samples of parent versions.

+

previous_version_id

+

String

+

Parent version ID.

+

processor_task_id

+

String

+

ID of a preprocessing task such as rotation and cropping.

+

processor_task_status

+

Integer

+

Status of a preprocessing task such as rotation and cropping. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
  • 5: timeout

    +
  • 6: deletion failed

    +
  • 7: stop failed

    +
+

remove_sample_usage

+

Boolean

+

Whether to clear the existing usage information of a dataset during release. The options are as follows:

+
  • true: Clear the existing usage information of a dataset. (Default value)

    +
  • false: Do not clear the existing usage information of a dataset.

    +
+

rotate

+

Boolean

+

Whether to rotate the image. The options are as follows:

+
  • true: Rotate the image.

    +
  • false: Do not rotate the image. (Default value)

    +
+

rotate_path

+

String

+

Path for storing the rotated file.

+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

status

+

Integer

+

Status of a dataset version. The options are as follows:

+
  • 0: creating

    +
  • 1: running

    +
  • 2: deleting

    +
  • 3: deleted

    +
  • 4: error

    +
+

tags

+

Array of strings

+

Key identifier list of the dataset. The labeling type is used as the default label when the labeling task releases a version. For example, ["Image","Object detection"].

+

task_type

+

Integer

+

Labeling task type of the released version, which is the same as the dataset type.

+

total_sample_count

+

Integer

+

Total number of version samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples.

+

train_evaluate_sample_ratio

+

String

+

Ratio for splitting labeled samples into training and validation sets during version release. The default value is 1.00, indicating that all labeled samples are assigned to the training set.

+

update_time

+

Long

+

Time when a version is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

with_column_header

+

Boolean

+

Whether the first row in the released CSV file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the released CSV file is a column name.

    +
  • false: The first row in the released CSV file is not a column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 11 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 13 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 WorkforceDescriptor

Parameter

+

Type

+

Description

+

current_task_id

+

String

+

ID of a team labeling task.

+

current_task_name

+

String

+

Name of a team labeling task.

+

reject_num

+

Integer

+

Number of rejected samples.

+

repetition

+

Integer

+

Number of persons who label each sample. The minimum value is 1.

+

is_synchronize_auto_labeling_data

+

Boolean

+

Whether to synchronously update auto labeling data. The options are as follows:

+
  • true: Update auto labeling data synchronously.

    +
  • false: Do not update auto labeling data synchronously.

    +
+

is_synchronize_data

+

Boolean

+

Whether to synchronize updated data, such as uploading files, synchronizing data sources, and assigning imported unlabeled files to team members. The options are as follows:

+
  • true: Synchronize updated data to team members.

    +
  • false: Do not synchronize updated data to team members.

    +
+

workers

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 15 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying Details About a Dataset

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}
+
+
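
The sketch below (Python, requests library) issues this GET request with the optional check_running_task query parameter and reads a few of the response fields listed above. Endpoint and IDs are placeholders, and the token is assumed valid.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
dataset_id = "<dataset_id>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
headers = {"X-Auth-Token": "<IAM token>"}
# Optionally ask the service to detect running tasks in the dataset.
params = {"check_running_task": "true"}

dataset = requests.get(url, headers=headers, params=params).json()
print(dataset["dataset_name"], "status:", dataset["status"])
print("labeled:", dataset["annotated_sample_count"], "/", dataset["total_sample_count"])
for label in dataset.get("labels", []):
    print(label["name"], label.get("property", {}).get("@modelarts:color"))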

Example Responses

Status code: 200

+

OK

+
{
+  "dataset_id" : "gfghHSokody6AJigS5A",
+  "dataset_name" : "dataset-f9e8",
+  "dataset_type" : 0,
+  "data_format" : "Default",
+  "next_version_num" : 4,
+  "status" : 1,
+  "data_sources" : [ {
+    "data_type" : 0,
+    "data_path" : "/test-obs/classify/input/catDog4/"
+  } ],
+  "create_time" : 1605690595404,
+  "update_time" : 1605690595404,
+  "description" : "",
+  "current_version_id" : "54IXbeJhfttGpL46lbv",
+  "current_version_name" : "V003",
+  "total_sample_count" : 10,
+  "annotated_sample_count" : 10,
+  "unconfirmed_sample_count" : 0,
+  "work_path" : "/test-obs/classify/output/",
+  "inner_work_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/",
+  "inner_annotation_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/",
+  "inner_data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/data/",
+  "inner_log_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/logs/",
+  "inner_temp_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/temp/",
+  "inner_task_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/task/",
+  "work_path_type" : 0,
+  "workspace_id" : "0",
+  "enterprise_project_id" : "0",
+  "workforce_task_count" : 0,
+  "feature_supports" : [ "0" ],
+  "managed" : false,
+  "import_data" : false,
+  "ai_project" : "default-ai-project",
+  "label_task_count" : 1,
+  "dataset_format" : 0,
+  "dataset_version_count" : 3,
+  "dataset_version" : "v1",
+  "content_labeling" : true,
+  "labels" : [ {
+    "name" : "Cat",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  }, {
+    "name" : "Dog",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescImportTask.html b/modelarts/api-ref/DescImportTask.html new file mode 100644 index 00000000..d254ef05 --- /dev/null +++ b/modelarts/api-ref/DescImportTask.html @@ -0,0 +1,540 @@ + + +

Querying Details About a Dataset Import Task

+

Function

This API is used to query details about a dataset import task.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/import-tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

Task ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Long

+

Number of labeled samples.

+

create_time

+

Long

+

Time when a task is created.

+

data_source

+

DataSource object

+

Data source.

+

dataset_id

+

String

+

Dataset ID.

+

elapsed_time

+

Long

+

Task running time, in seconds.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

file_statistics

+

FileCopyProgress object

+

Progress of file copy.

+

finished_file_count

+

Long

+

Number of files that have been transferred.

+

finished_file_size

+

Long

+

Size of the file that has been transferred, in bytes.

+

import_path

+

String

+

OBS path or manifest path to be imported.

+
  • When importing a manifest file, ensure that the path points to the manifest file itself.

    +
  • When importing a directory, the dataset type can only be image classification, object detection, text classification, or sound classification.

    +
+

import_type

+

Integer

+

Import mode. The options are as follows:

+
  • 0: Import by directory.

    +
  • 1: Import by manifest file.

    +
+

imported_sample_count

+

Long

+

Number of imported samples.

+

imported_sub_sample_count

+

Long

+

Number of imported subsamples.

+

processor_task_id

+

String

+

ID of a preprocessing task.

+

processor_task_status

+

Integer

+

Status of a preprocessing task.

+

status

+

String

+

Status of an import task. The options are as follows:

+
  • QUEUING: queuing

    +
  • STARTING: execution started

    +
  • RUNNING: running

    +
  • COMPLETED: completed

    +
  • FAILED: failed

    +
  • NOT_EXIST: not found

    +
+

task_id

+

String

+

Task ID.

+

total_file_count

+

Long

+

Total number of files.

+

total_file_size

+

Long

+

Total file size, in bytes.

+

total_sample_count

+

Long

+

Total number of samples.

+

total_sub_sample_count

+

Long

+

Total number of subsamples generated from the parent samples.

+

unconfirmed_sample_count

+

Long

+

Number of samples to be confirmed.

+

update_ms

+

Long

+

Time when a task is updated.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 DataSource

Parameter

+

Type

+

Description

+

data_path

+

String

+

Data source path.

+

data_type

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 4 SchemaMap

Parameter

+

Type

+

Description

+

dest_name

+

String

+

Name of the destination column.

+

src_name

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SourceInfo

Parameter

+

Type

+

Description

+

cluster_id

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

String

+

Name of an MRS cluster.

+

database_name

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

String

+

HDFS path of a table dataset.

+

ip

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 FileCopyProgress

Parameter

+

Type

+

Description

+

file_num_finished

+

Long

+

Number of files that have been transferred.

+

file_num_total

+

Long

+

Total number of files.

+

file_size_finished

+

Long

+

Size of the file that has been transferred, in bytes.

+

file_size_total

+

Long

+

Total file size, in bytes.

+
+
+
+

Example Requests

Querying Details About an Import Task

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/import-tasks/{task_id}
+
+
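
Because an import task runs asynchronously, a caller typically polls this API until the task reaches a terminal status. The following Python sketch illustrates that pattern; the polling interval, endpoint, and IDs are placeholders, and the token is assumed valid.

import time
import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
dataset_id = "<dataset_id>"
task_id = "<task_id>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/import-tasks/{task_id}"
headers = {"X-Auth-Token": "<IAM token>"}

# Poll until the import task reaches a terminal status.
while True:
    task = requests.get(url, headers=headers).json()
    status = task["status"]
    if status in ("COMPLETED", "FAILED", "NOT_EXIST"):
        break
    time.sleep(10)

print(status, "imported:", task.get("imported_sample_count"),
      "of", task.get("total_sample_count"))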

Example Responses

Status code: 200

+

OK

+
{
+  "status" : "COMPLETED",
+  "task_id" : "gfghHSokody6AJigS5A_RHJ1zOkIoI3Nzwxj8nh",
+  "dataset_id" : "gfghHSokody6AJigS5A",
+  "import_path" : "obs://test-obs/daoLu_images/cat-dog/",
+  "import_type" : 0,
+  "total_sample_count" : 20,
+  "imported_sample_count" : 20,
+  "annotated_sample_count" : 20,
+  "total_sub_sample_count" : 0,
+  "imported_sub_sample_count" : 0,
+  "total_file_size" : 0,
+  "finished_file_count" : 0,
+  "finished_file_size" : 0,
+  "total_file_count" : 0,
+  "update_ms" : 1606114833955,
+  "create_time" : 1606114833874,
+  "elapsed_time" : 2
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescProcessorTaskVersion.html b/modelarts/api-ref/DescProcessorTaskVersion.html new file mode 100644 index 00000000..32ef813b --- /dev/null +++ b/modelarts/api-ref/DescProcessorTaskVersion.html @@ -0,0 +1,464 @@ + + +

Querying the Details About the Version of a Data Processing Task

+

Function

This API is used to query the details about the version of a data processing task.

+
+

URI

GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+

version_id

+

Yes

+

String

+

Version ID of a data processing task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added images after processing.

+

create_time

+

Long

+

Time when a data processing task is created.

+

deleted_sample_count

+

Integer

+

Number of deleted images after processing.

+

description

+

String

+

Version description of a data processing task.

+

duration_seconds

+

Integer

+

Running time of a data processing task, in seconds.

+

inputs

+

Array of ProcessorDataSource objects

+

Input channel of a data processing task.

+

modified_sample_count

+

Integer

+

Number of modified images after processing.

+

origin_sample_count

+

Integer

+

Number of images before processing.

+

status

+

Integer

+

Status of a data processing task. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
+

task_id

+

String

+

ID of a data processing task.

+

task_version_id

+

String

+

Version ID of a data processing task.

+

template

+

TemplateParam object

+

Algorithm template, such as the algorithm ID and parameters.

+

unmodified_sample_count

+

Integer

+

Number of unmodified images after processing.

+

update_time

+

Long

+

Time when a data processing task is updated.

+

version_name

+

String

+

Version name of a data processing task.

+

work_path

+

WorkPath object

+

Output channel of a data processing task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ProcessorDataSource

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

source

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 TemplateParam

Parameter

+

Type

+

Description

+

id

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

String

+

Template name.

+

operator_params

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 OperatorParam

Parameter

+

Type

+

Description

+

advanced_params_switch

+

Boolean

+

Advanced parameter switch.

+

id

+

String

+

ID of an operator.

+

name

+

String

+

Name of an operator.

+

params

+

Object

+

Operator parameter. The parameter type is map<string,object>. Currently, object only supports the types of Boolean, Integer, Long, String, List and Map<String,String>. For two special scenarios of object detection and image classification in a data preprocessing task, the value of task_type is object_detection or image_classification.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 WorkPath

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

output_path

+

String

+

Output path.

+

path

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, path is an OBS path.

    +
  • If type is set to DATASET, path is a dataset ID.

    +
+

type

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Example Requests

Querying Details About a Data Processing Task Version

+
GET https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}
+
+
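
A minimal Python sketch that retrieves a task version and prints the template and operator parameters from the response. Endpoint and IDs are placeholders, and the token is assumed valid.

import requests

endpoint = "https://modelarts.example.com"   # placeholder region endpoint
project_id = "<project_id>"
task_id = "<task_id>"
version_id = "<version_id>"

url = f"{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}"
headers = {"X-Auth-Token": "<IAM token>"}

version = requests.get(url, headers=headers).json()
print(version["version_name"], "status:", version["status"])
template = version.get("template", {})
print("template:", template.get("id"))
for op in template.get("operator_params", []):
    print(" operator:", op.get("name"), op.get("params"))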

Example Responses

Status code: 200

+

OK

+
{
+  "task_version_id" : "qSaudx2sbPvthHygckA",
+  "task_id" : "kM7j9TSa611ZzBThzSr",
+  "version_name" : "V002",
+  "description" : "",
+  "status" : 0,
+  "create_time" : 1606377874450,
+  "inputs" : [ {
+    "type" : "DATASET",
+    "source" : "PYc9H2HGv5BJNwBGXyK",
+    "version_id" : "Osc8SZ7TZStiRV4vYkZ",
+    "name" : "dataset-test",
+    "version_name" : "V0010"
+  } ],
+  "work_path" : {
+    "type" : "DATASET",
+    "path" : "PYc9H2HGv5BJNwBGXyK",
+    "name" : "dataset-test",
+    "version_name" : "V0011",
+    "output_path" : "/test-obs/classify/output/qSaudx2sbPvthHygckA/"
+  },
+  "template" : {
+    "id" : "sys_data_validation",
+    "name" : "name to translate",
+    "operator_params" : [ {
+      "name" : "MetaValidation",
+      "advanced_params_switch" : false,
+      "params" : {
+        "task_type" : "image_classification",
+        "dataset_type" : "manifest",
+        "source_service" : "select",
+        "filter_func" : "data_validation_select",
+        "image_max_width" : "1920",
+        "image_max_height" : "1920",
+        "total_status" : "[0,1,2]"
+      }
+    } ]
+  },
+  "duration_seconds" : 312
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescWorker.html b/modelarts/api-ref/DescWorker.html new file mode 100644 index 00000000..3aea055f --- /dev/null +++ b/modelarts/api-ref/DescWorker.html @@ -0,0 +1,186 @@ + + +

Querying Details About Labeling Team Members

+

Function

This API is used to query details about labeling team members.

+
+

URI

GET /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

worker_id

+

Yes

+

String

+

ID of a labeling team member.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying Details About Labeling Team Members

+
GET https://{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}
+
+
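A minimal Python sketch of this call, shown only for illustration: it assumes X-Auth-Token authentication and the requests library, uses placeholder IDs, and reacts to the status codes listed in the Status Codes table below.

import requests

# Placeholders; replace with real values.
endpoint = "https://modelarts.{region}.example.com"
project_id, workforce_id, worker_id = "{project_id}", "{workforce_id}", "{worker_id}"
token = "<IAM token>"

url = f"{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}"
response = requests.get(url, headers={"X-Auth-Token": token})

if response.status_code == 200:
    print(response.json()["email"])
elif response.status_code == 404:
    print("Labeling team member not found")  # see the Status Codes table below
else:
    response.raise_for_status()  # for example 401 Unauthorized or 403 Forbidden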

Example Responses

Status code: 200

+

OK

+
{
+  "email" : "xxx@xxx.com",
+  "worker_id" : "b1e4054407ecb36a7bcde70f52ba37f2",
+  "workforce_id" : "gyb7IaAvkLc5IhEY2dv",
+  "status" : 0,
+  "role" : 2,
+  "description" : "",
+  "create_time" : 1606356324223,
+  "update_time" : 1606356324223
+}
+
+
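To make the numeric role and status fields easier to read, a small mapping such as the sketch below can be applied to the parsed response. The mappings simply restate the enumerations in Table 2, and the sample values are trimmed from the example response above; this is not an official helper.

ROLE_NAMES = {0: "labeling personnel", 1: "reviewer", 2: "team administrator", 3: "dataset owner"}
STATUS_NAMES = {
    0: "invitation email not sent",
    1: "invitation sent, user has not logged in",
    2: "user has logged in",
    3: "member deleted",
}

worker = {"email": "xxx@xxx.com", "role": 2, "status": 0}  # trimmed from the example response
print(f'{worker["email"]}: {ROLE_NAMES[worker["role"]]}, {STATUS_NAMES[worker["status"]]}')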

Status Codes

+

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
diff --git a/modelarts/api-ref/DescWorkforce.html b/modelarts/api-ref/DescWorkforce.html
new file mode 100644
index 00000000..bf04d119
--- /dev/null
+++ b/modelarts/api-ref/DescWorkforce.html
@@ -0,0 +1,158 @@

Querying Details About a Labeling Team

+

Function

This API is used to query the details about a labeling team.

+
+

URI

GET /v2/{project_id}/workforces/{workforce_id}

+ +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a labeling team is created.

+

description

+

String

+

Description of a labeling team.

+

update_time

+

Long

+

Time when a labeling team is updated.

+

worker_count

+

Integer

+

Total number of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+
+

Example Requests

Querying Details About a Labeling Team

+
GET https://{endpoint}/v2/{project_id}/workforces/{workforce_id}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "workforce_id" : "gyb7IaAvkLc5IhEY2dv",
+  "workforce_name" : "team-aed7",
+  "description" : "",
+  "worker_count" : 2,
+  "create_time" : 1575104620882,
+  "update_time" : 1575104620882
+}
+
+

Status Codes

+

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
diff --git a/modelarts/api-ref/DescWorkforceTask.html b/modelarts/api-ref/DescWorkforceTask.html
new file mode 100644
index 00000000..2463b15d
--- /dev/null
+++ b/modelarts/api-ref/DescWorkforceTask.html
@@ -0,0 +1,1375 @@

Querying Details About a Team Labeling Task

+

Function

This API is used to query the details about a team labeling task.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}

+ +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+ +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label_stats

+

No

+

Boolean

+

Whether to return label statistics. The options are as follows:

+
  • true: Return label statistics.

    +
  • false: Do not return label statistics. (Default value)

    +
+

sample_stats

+

No

+

Boolean

+

Whether to return sample statistics. The options are as follows:

+
  • true: Return sample statistics.

    +
  • false: Do not return sample statistics. (Default value)

    +
+

workforce_stats

+

No

+

Boolean

+

Whether to return personnel statistics. The options are as follows:

+
  • true: Return personnel statistics.

    +
  • false: Do not return personnel statistics. (Default value)

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

auto_sync_dataset

+

Boolean

+

Whether to automatically synchronize the result of a team labeling task to the dataset. The options are as follows:

+
  • true: Automatically synchronize the result of a team labeling task to the dataset.

    +
  • false: Do not automatically synchronize the result of a team labeling task to the dataset.

    +
+

check_rate

+

Double

+

Acceptance ratio of a team labeling task.

+

checking_task_desc

+

WorkforceSamplingTaskDesc object

+

Details about the current acceptance task of a team labeling task.

+

create_time

+

Long

+

Time when a labeling task is created.

+

dataset_id

+

String

+

Dataset ID.

+

description

+

String

+

Description of a labeling task.

+

label_stats

+

Array of LabelStats objects

+

Label statistics of a labeling task.

+

pass_rate

+

Double

+

Acceptance pass rate of a team labeling task.

+

repetition

+

Integer

+

Number of persons who label each sample in a team labeling task. The minimum value is 1.

+

sample_search_conditions

+

Array of SearchCondition objects

+

Sample search conditions when creating a task.

+

sample_stats

+

SampleStats object

+

Sample statistics of a labeling task.

+

score

+

Double

+

Average acceptance score of a team labeling task.

+

status

+

Integer

+

Status of a team labeling task. The options are as follows:

+
  • 6: created. The owner has created a task but does not start it. Only the owner and manager can view the task list.

    +
  • 0: starting. The owner or manager starts the task and assigns the files to be labeled. The owner, manager, labeler, and reviewer can view the task list. If the task assignment is not complete, a new task cannot be started.

    +
  • 1: running. The labeler labels the task and the reviewer reviews it, and the owner accepts the reviewed results. If auto labeling files are added or synchronized, or unlabeled files are imported, the new files need to be assigned again.

    +
  • 2: under acceptance. The owner initiates an acceptance task but does not complete it. In this state, a new acceptance task cannot be initiated until the current one is completed.

    +
  • 3: passed. The team labeling task has been completed.

    +
  • 4: rejected. In this state, the manager starts the task again and assigns it for labeling and reviewing.

    +
  • 5: synchronizing acceptance result. This state is displayed when acceptance tasks are changed to be asynchronous. In this state, new acceptance tasks cannot be initiated and the current acceptance task cannot be continued. In the task name area, a message is displayed, indicating that the acceptance result is being synchronized.

    +
  • 7: acceptance sampling. This state is displayed when acceptance tasks are changed to be asynchronous. In this state, new acceptance tasks cannot be initiated and the current acceptance task cannot be continued. In the task name area, a message is displayed, indicating that the acceptance sampling is in progress.

    +
+

synchronize_auto_labeling_data

+

Boolean

+

Whether to synchronize the auto labeling result of a team labeling task. The options are as follows:

+
  • true: Synchronize the results to be confirmed to team members after auto labeling is complete.

    +
  • false: Do not synchronize the auto labeling results. (Default value)

    +
+

synchronize_data

+

Boolean

+

Whether to synchronize the added data of a team labeling task. The options are as follows:

+
  • true: Upload files, synchronize data sources, and synchronize imported unlabeled files to team members.

    +
  • false: Do not synchronize the added data. (Default value)

    +
+

task_id

+

String

+

ID of a labeling task.

+

task_name

+

String

+

Name of a labeling task.

+

update_time

+

Long

+

Time when a labeling task is updated.

+

version_id

+

String

+

Version ID of the dataset associated with a labeling task.

+

worker_stats

+

Array of WorkerTask objects

+

Labeling progress statistics on team labeling task members.

+

workforce_stats

+

WorkforceStats object

+

Statistics on team labeling task members.

+

workforces_config

+

WorkforcesConfig object

+

Team labeling task information: Tasks can be assigned by the team administrator or a specified team.

+
+
+ +
Table 4 WorkforceSamplingTaskDesc

Parameter

+

Type

+

Description

+

action

+

Integer

+

Action after the acceptance. The options are as follows:

+
  • 0: Pass all samples when the acceptance is completed (including single-rejected samples)

    +
  • 1: Reject all samples when the acceptance is completed (including single-accepted samples)

    +
  • 4: Pass only single-accepted samples and unaccepted samples.

    +
  • 5: Reject only single-rejected samples and unaccepted samples.

    +
+

checking_stats

+

CheckTaskStats object

+

Real-time report of acceptance tasks.

+

checking_task_id

+

String

+

ID of the current acceptance task.

+

overwrite_last_result

+

Boolean

+

Whether to use the acceptance result to overwrite the labeled result if a sample has been labeled during acceptance. The options are as follows:

+
  • true: Overwrite the labeled result.

    +
  • false: Do not overwrite the labeled result. (Default value)

    +
+

total_stats

+

CheckTaskStats object

+

Overall report of historical acceptance tasks.

+
+
+ +
Table 5 CheckTaskStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Accepted samples.

+

checked_sample_count

+

Integer

+

Checked samples.

+

pass_rate

+

Double

+

Pass rate of samples.

+

rejected_sample_count

+

Integer

+

Rejected samples.

+

sampled_sample_count

+

Integer

+

Number of sampled samples.

+

sampling_num

+

Integer

+

Number of samples in an acceptance task.

+

sampling_rate

+

Double

+

Sampling rate of an acceptance task.

+

score

+

String

+

Acceptance score.

+

task_id

+

String

+

ID of an acceptance task.

+

total_sample_count

+

Integer

+

Total samples.

+

total_score

+

Long

+

Total acceptance score.

+

unchecked_sample_count

+

Integer

+

Unchecked samples.

+
+
+ +
Table 6 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
Table 7 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
Table 8 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
Table 9 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
Table 10 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples generated from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
Table 11 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
Table 12 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
Table 13 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+ +
Table 14 SampleStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Number of samples accepted by the owner.

+

auto_annotation_sample_count

+

Integer

+

Number of samples to be confirmed after intelligent labeling.

+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

rejected_sample_count

+

Integer

+

Number of samples that failed to pass the owner acceptance.

+

sampled_sample_count

+

Integer

+

Number of samples that are to be accepted by the owner and sampled.

+

total_sample_count

+

Integer

+

Total number of samples.

+

unannotated_sample_count

+

Integer

+

Number of unlabeled samples.

+

uncheck_sample_count

+

Integer

+

Number of samples that have been approved by the reviewer and are to be accepted by the owner.

+

unreviewed_sample_count

+

Integer

+

Number of samples that have been labeled by the labeler but have not been reviewed by the reviewer.

+
+
+ +
Table 15 WorkerTask

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a labeling team member's task is created.

+

dataset_id

+

String

+

ID of a dataset associated with a labeling team member's task.

+

dataset_type

+

Integer

+

Labeling type of a team member's task.

+

email

+

String

+

Email address of a labeling team member.

+

email_status

+

Integer

+

Email notification status of a labeling team member's labeling task. The options are as follows:

+
  • 0: The email has not been sent.

    +
  • 1: The email format is incorrect.

    +
  • 2: The email address is unreachable.

    +
  • 3: The email has been sent.

    +
+

last_notify_time

+

Long

+

Timestamp of the latest notification email sent to a labeling team member.

+

pass_rate

+

Double

+

Pass rate of task acceptance review for a labeling team member.

+

role

+

Integer

+

Role of a labeling team member.

+

sample_stats

+

SampleStats object

+

Sample statistics of a labeling team member's task.

+

score

+

Double

+

Average acceptance score of labeling team members' task samples.

+

task_id

+

String

+

Team labeling task ID associated with a member's task.

+

task_status

+

Integer

+

Task status of a labeling team member. The options are as follows:

+
  • 6: created

    +
  • 0: starting

    +
  • 1: running

    +
  • 2: under acceptance

    +
  • 3: approved, indicating the team labeling task is complete

    +
  • 4: rejected, indicating that the task needs to be labeled and reviewed again

    +
+

update_time

+

Long

+

Time when a labeling team member's task is updated.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_task_name

+

String

+

Team labeling task name associated with a member's task.

+
+
+ +
Table 16 WorkforceStats

Parameter

+

Type

+

Description

+

labeler_count

+

Integer

+

Number of labeling persons.

+

reviewer_count

+

Integer

+

Number of reviewers.

+

workforce_count

+

Integer

+

Number of teams.

+
+
+ +
Table 17 WorkforcesConfig

Parameter

+

Type

+

Description

+

agency

+

String

+

Administrator.

+

workforces

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
Table 18 WorkforceConfig

Parameter

+

Type

+

Description

+

workers

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
Table 19 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying Details About a Team Labeling Task

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}
+
+
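The three query parameters in Table 2 are booleans that enable the optional statistics blocks in the response. The Python sketch below is illustrative only (requests library and X-Auth-Token authentication assumed, placeholder IDs) and requests all three statistics at once.

import requests

# Placeholders; replace with real values.
endpoint = "https://modelarts.{region}.example.com"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"
token = "<IAM token>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}"
params = {"label_stats": "true", "sample_stats": "true", "workforce_stats": "true"}
response = requests.get(url, headers={"X-Auth-Token": token}, params=params)
response.raise_for_status()
task = response.json()
print(task["task_name"], "status:", task["status"])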

Example Responses

Status code: 200

+

OK

+
{
+  "dataset_id" : "WxCREuCkBSAlQr9xrde",
+  "task_id" : "iYZx7gScPUozOXner9k",
+  "task_name" : "task-e63f",
+  "status" : 1,
+  "create_time" : 1606184400278,
+  "update_time" : 1606184400278,
+  "repetition" : 1,
+  "workforces_config" : {
+    "workforces" : [ {
+      "workforce_id" : "q3ZFSwORu1ztKljDLYQ",
+      "workers" : [ {
+        "email" : "xxx@xxx.com",
+        "worker_id" : "afdda13895bc66322ffbf36ae833bcf0",
+        "role" : 0
+      } ]
+    } ]
+  },
+  "synchronize_data" : false,
+  "synchronize_auto_labeling_data" : false,
+  "workforce_stats" : {
+    "workforce_count" : 1,
+    "labeler_count" : 1,
+    "reviewer_count" : 0
+  },
+  "sample_stats" : {
+    "total_sample_count" : 317,
+    "unannotated_sample_count" : 310,
+    "unreviewed_sample_count" : 0,
+    "uncheck_sample_count" : 0,
+    "sampled_sample_count" : 0,
+    "rejected_sample_count" : 0,
+    "accepted_sample_count" : 7,
+    "auto_annotation_sample_count" : 0
+  },
+  "checking_task_desc" : {
+    "checking_task_id" : "onSbri2oqYOmDjDyW17",
+    "action" : 0,
+    "overwrite_last_result" : false
+  },
+  "auto_check_samples" : true,
+  "auto_sync_dataset" : true,
+  "worker_stats" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "afdda13895bc66322ffbf36ae833bcf0",
+    "role" : 0,
+    "task_id" : "iYZx7gScPUozOXner9k",
+    "workforce_task_name" : "task-e63f",
+    "dataset_id" : "WxCREuCkBSAlQr9xrde",
+    "sample_stats" : {
+      "total_sample_count" : 317,
+      "unannotated_sample_count" : 310,
+      "unreviewed_sample_count" : 0,
+      "uncheck_sample_count" : 0,
+      "sampled_sample_count" : 0,
+      "rejected_sample_count" : 0,
+      "accepted_sample_count" : 7,
+      "auto_annotation_sample_count" : 0
+    },
+    "create_time" : 1606184400278,
+    "update_time" : 1606184795050,
+    "email_status" : 3,
+    "last_notify_time" : 0,
+    "user" : {
+      "domainId" : "04f924738800d3270fc0c013a47363a0",
+      "domainName" : "test_123",
+      "projectId" : "04f924739300d3272fc3c013e36bb4b8",
+      "userId" : "04f924743b00d4331f31c0131ada6769",
+      "userName" : "test_123"
+    }
+  } ]
+}
+
+
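As a worked example of interpreting sample_stats, the snippet below computes the labeling progress from the counts in the example response above (317 samples in total, 310 of them still unlabeled). It is only a sketch of one possible calculation, not an official metric.

sample_stats = {  # trimmed from the example response above
    "total_sample_count": 317,
    "unannotated_sample_count": 310,
    "accepted_sample_count": 7,
}

total = sample_stats["total_sample_count"]
labeled = total - sample_stats["unannotated_sample_count"]
progress = labeled / total if total else 0.0
print(f"Labeled {labeled}/{total} samples ({progress:.1%})")  # Labeled 7/317 samples (2.2%)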

Status Codes

+

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
diff --git a/modelarts/api-ref/DescribeAutoAnnotationSample.html b/modelarts/api-ref/DescribeAutoAnnotationSample.html
new file mode 100644
index 00000000..c2c092ae
--- /dev/null
+++ b/modelarts/api-ref/DescribeAutoAnnotationSample.html
@@ -0,0 +1,741 @@

Querying Details About an Auto Labeling Sample

+

Function

This API is used to query details about an auto labeling sample.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples/{sample_id}

+ +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

sample_id

+

Yes

+

String

+

Sample ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about difficulties, including description, causes, and suggestions of difficult problems.

+

labelers

+

Array of Worker objects

+

List of labeling personnel to whom the sample is assigned. For team labeling, labelers records the team members to which the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, that is, the time when the sample file was last modified on OBS.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
Table 3 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
Table 4 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
Table 5 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
Table 6 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
Table 7 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying Details About an Auto Labeling Sample

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples/{sample_id}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "sample_id" : "0059c1b9458a2da9443af684b5099b4e",
+  "sample_type" : 0,
+  "labels" : [ {
+    "name" : "Dog",
+    "type" : 0,
+    "id" : "1",
+    "property" : {
+      "@modelarts:hard_coefficient" : "0.0",
+      "@modelarts:hard" : "false"
+    },
+    "score" : 0.66
+  } ],
+  "source" : "https://test-obs.obs.xxx.com:443/data/3_1597649054631.jpg?AccessKeyId=RciyO7RHmhNTfOZVryUH&Expires=1606299474&Signature=anOLGOHPSrj3WXUHVc70tAxWlf4%3D",
+  "metadata" : {
+    "test" : "1",
+    "@modelarts:hard_coefficient" : 0.0,
+    "@modelarts:hard" : false,
+    "@modelarts:import_origin" : 0,
+    "@modelarts:hard_reasons" : [ ]
+  },
+  "sample_time" : 1600490651933,
+  "sample_status" : "UN_ANNOTATION",
+  "annotated_by" : ""
+}
+
+
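Since each returned label carries a confidence score, a caller will often keep only high-confidence auto labels. The snippet below is a hedged sketch of that filtering step, using the label list from the example response above; the 0.5 threshold is an arbitrary assumption, not a recommended value.

labels = [  # trimmed from the example response above
    {"name": "Dog", "type": 0, "score": 0.66},
]

CONFIDENCE_THRESHOLD = 0.5  # assumption; choose a threshold that suits your data
confident = [label for label in labels if label.get("score", 0.0) >= CONFIDENCE_THRESHOLD]
print([label["name"] for label in confident])  # ['Dog']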

Status Codes

+

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
diff --git a/modelarts/api-ref/DescribeDatasetVersion.html b/modelarts/api-ref/DescribeDatasetVersion.html
new file mode 100644
index 00000000..daf3e7c0
--- /dev/null
+++ b/modelarts/api-ref/DescribeDatasetVersion.html
@@ -0,0 +1,733 @@

Querying Details About a Dataset Version

+

Function

This API is used to query the details about a dataset version.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/versions/{version_id}

+ +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

version_id

+

Yes

+

String

+

Dataset version ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added samples.

+

annotated_sample_count

+

Integer

+

Number of samples with labeled versions.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

clear_hard_property

+

Boolean

+

Whether to clear hard example properties during release. The options are as follows:

+
  • true: Clear hard example properties. (Default value)

    +
  • false: Do not clear hard example properties.

    +
+

code

+

String

+

Status code of a preprocessing task such as rotation and cropping.

+

create_time

+

Long

+

Time when a version is created.

+

crop

+

Boolean

+

Whether to crop the image. This field is valid only for the object detection dataset whose labeling box is in the rectangle shape. The options are as follows:

+
  • true: Crop the image.

    +
  • false: Do not crop the image. (Default value)

    +
+

crop_path

+

String

+

Path for storing cropped files.

+

crop_rotate_cache_path

+

String

+

Temporary directory for executing the rotation and cropping task.

+

data_path

+

String

+

Path for storing data.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata in JSON format.

+

data_validate

+

Boolean

+

Whether data is validated by the validation algorithm before release. The options are as follows:

+
  • true: The data has been validated.

    +
  • false: The data has not been validated.

    +
+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Description of a version.

+

export_images

+

Boolean

+

Whether to export images to the version output directory during release. The options are as follows:

+
  • true: Export images to the version output directory.

    +
  • false: Do not export images to the version output directory. (Default value)

    +
+

extract_serial_number

+

Boolean

+

Whether to parse the subsample number during release. The field is valid for the healthcare dataset. The options are as follows:

+
  • true: Parse the subsample number.

    +
  • false: Do not parse the subsample number. (Default value)

    +
+

include_dataset_data

+

Boolean

+

Whether to include the source data of a dataset during release. The options are as follows:

+
  • true: The source data of a dataset is included.

    +
  • false: The source data of a dataset is not included.

    +
+

is_current

+

Boolean

+

Whether the current dataset version is used. The options are as follows:

+
  • true: The current dataset version is used.

    +
  • false: The current dataset version is not used.

    +
+

label_stats

+

Array of LabelStats objects

+

Label statistics list of a released version.

+

label_type

+

String

+

Label type of a released version. The options are as follows:

+
  • multi: Multi-label samples are included.

    +
  • single: All samples are single-labeled.

    +
+

manifest_cache_input_path

+

String

+

Input path for the manifest file cache during version release.

+

manifest_path

+

String

+

Path for storing the manifest file with the released version.

+

message

+

String

+

Task information recorded during release (for example, error information).

+

modified_sample_count

+

Integer

+

Number of modified samples.

+

previous_annotated_sample_count

+

Integer

+

Number of labeled samples of parent versions.

+

previous_total_sample_count

+

Integer

+

Total samples of parent versions.

+

previous_version_id

+

String

+

Parent version ID.

+

processor_task_id

+

String

+

ID of a preprocessing task such as rotation and cropping.

+

processor_task_status

+

Integer

+

Status of a preprocessing task such as rotation and cropping. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
  • 5: timeout

    +
  • 6: deletion failed

    +
  • 7: stop failed

    +
+

remove_sample_usage

+

Boolean

+

Whether to clear the existing usage information of a dataset during release. The options are as follows:

+
  • true: Clear the existing usage information of a dataset. (Default value)

    +
  • false: Do not clear the existing usage information of a dataset.

    +
+

rotate

+

Boolean

+

Whether to rotate the image. The options are as follows:

+
  • true: Rotate the image.

    +
  • false: Do not rotate the image. (Default value)

    +
+

rotate_path

+

String

+

Path for storing the rotated file.

+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

status

+

Integer

+

Status of a dataset version. The options are as follows:

+
  • 0: creating

    +
  • 1: running

    +
  • 2: deleting

    +
  • 3: deleted

    +
  • 4: error

    +
+

tags

+

Array of strings

+

Key identifier list of the dataset. The labeling type is used as the default label when the labeling task releases a version. For example, ["Image","Object detection"].

+

task_type

+

Integer

+

Labeling task type of the released version, which is the same as the dataset type.

+

total_sample_count

+

Integer

+

Total number of version samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples.

+

train_evaluate_sample_ratio

+

String

+

Split training and verification ratio during version release. The default value is 1.00, indicating that all labeled samples are split into the training set.

+

update_time

+

Long

+

Time when a version is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

with_column_header

+

Boolean

+

Whether the first row in the released CSV file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the released CSV file is a column name.

    +
  • false: The first row in the released CSV file is not a column name.

    +
+
+
+ +
Table 3 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
Table 4 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
Table 5 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
Table 6 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Example Requests

Querying Details About a Dataset Version

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions/{version_id}
+
+
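The following Python sketch shows one way to issue this request, assuming the third-party requests library, a valid IAM token, and placeholder values for the endpoint, project ID, dataset ID, and version ID; it simply mirrors the GET request above and reads a few of the response fields described earlier (version_name, status, label_stats).

# Minimal sketch: query details about a dataset version (all identifiers are placeholders).
import requests

endpoint = "https://modelarts.example-region.example.com"   # hypothetical endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
version_id = "{version_id}"
token = "<IAM token obtained separately>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions/{version_id}"
response = requests.get(url, headers={"X-Auth-Token": token})
response.raise_for_status()
version = response.json()

# Print the version name, status, and per-label statistics (label_stats).
print(version.get("version_name"), version.get("status"))
for stat in version.get("label_stats", []):
    print(stat["name"], stat.get("count"), stat.get("sample_count"))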

Example Responses

Status code: 200

+

OK

+
{
+  "version_id" : "eSOKEQaXhKzxN00WKoV",
+  "version_name" : "V002",
+  "version_format" : "Default",
+  "previous_version_id" : "vlGvUqOcxxGPIB0ugeE",
+  "status" : 1,
+  "create_time" : 1605691027084,
+  "total_sample_count" : 10,
+  "annotated_sample_count" : 10,
+  "total_sub_sample_count" : 0,
+  "annotated_sub_sample_count" : 0,
+  "manifest_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V002/V002.manifest",
+  "data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V002/data/",
+  "is_current" : true,
+  "train_evaluate_sample_ratio" : "0.9999",
+  "remove_sample_usage" : false,
+  "export_images" : false,
+  "description" : "",
+  "label_stats" : [ {
+    "name" : "Cat",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    },
+    "count" : 5,
+    "sample_count" : 5
+  }, {
+    "name" : "Dog",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    },
+    "count" : 5,
+    "sample_count" : 5
+  } ],
+  "label_type" : "single",
+  "task_type" : 0,
+  "extract_serial_number" : false
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescribeProcessorTask.html b/modelarts/api-ref/DescribeProcessorTask.html new file mode 100644 index 00000000..7db6560a --- /dev/null +++ b/modelarts/api-ref/DescribeProcessorTask.html @@ -0,0 +1,464 @@ + + +

Querying Details About a Processing Task

+

Function

This API is used to query the details about a processing task, which can be either a feature analysis task or a data processing task. Specify the task_id parameter to query the details about a specific task.

+
+

URI

GET /v2/{project_id}/processor-tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a data processing task is created.

+

data_source

+

ProcessorDataSource object

+

Input of a data processing task. Either this parameter or inputs is delivered.

+

description

+

String

+

Description of a data processing task.

+

duration_seconds

+

Integer

+

Running time of data processing, in seconds.

+

error_msg

+

String

+

Error message. This field is displayed when the value of status is 3.

+

inputs

+

Array of ProcessorDataSource objects

+

Input channel list of a data processing task. Either this parameter or data_source is delivered.

+

is_current

+

Boolean

+

Whether the current task is the latest task of this type for this dataset version.

+

name

+

String

+

Name of a data processing task.

+

result

+

Object

+

Output result of a data processing task. This field is displayed when status is set to 2 and is valid only for feature analysis tasks.

+

status

+

Integer

+

Status of a data processing task. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
+

task_id

+

String

+

ID of a data processing task.

+

template

+

TemplateParam object

+

Data processing template, such as the algorithm ID and parameters.

+

version_count

+

Integer

+

Version number of a data processing task.

+

version_id

+

String

+

Dataset version ID corresponding to a data processing task.

+

version_name

+

String

+

Dataset version name corresponding to a data processing task.

+

work_path

+

WorkPath object

+

Working directory of a data processing task.

+

workspace_id

+

String

+

Workspace ID of a data processing task. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ProcessorDataSource

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

source

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 TemplateParam

Parameter

+

Type

+

Description

+

id

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

String

+

Template name.

+

operator_params

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 OperatorParam

Parameter

+

Type

+

Description

+

advanced_params_switch

+

Boolean

+

Advanced parameter switch.

+

id

+

String

+

ID of an operator.

+

name

+

String

+

Name of an operator.

+

params

+

Object

+

Operator parameters, of type map<string,object>. Currently, object supports only the Boolean, Integer, Long, String, List, and Map<String,String> types. For the two special scenarios of object detection and image classification in a data preprocessing task, set task_type to object_detection or image_classification, respectively.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 WorkPath

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

output_path

+

String

+

Output path.

+

path

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, path is an OBS path.

    +
  • If type is set to DATASET, path is a dataset ID.

    +
+

type

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Example Requests

Querying Details About a Data Processing Task

+
GET https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}
+
+
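As a hedged illustration of how a client might use the status values described in Table 2 (0: initialized, 1: running, 2: completed, 3: failed, 4: stopped), the Python sketch below issues the GET request above and polls until the task reaches a terminal state; the endpoint, project ID, task ID, token, and polling interval are all placeholder assumptions.

# Minimal sketch: poll a data processing task until it completes, fails, or stops.
import time
import requests

endpoint = "https://modelarts.example-region.example.com"   # hypothetical endpoint
project_id = "{project_id}"
task_id = "{task_id}"
token = "<IAM token obtained separately>"

url = f"{endpoint}/v2/{project_id}/processor-tasks/{task_id}"
status_names = {0: "initialized", 1: "running", 2: "completed", 3: "failed", 4: "stopped"}

while True:
    task = requests.get(url, headers={"X-Auth-Token": token}).json()
    status = task.get("status")
    print("task", task.get("name"), "is", status_names.get(status, status))
    if status in (2, 3, 4):              # terminal states listed in Table 2
        if status == 3:
            print("error:", task.get("error_msg"))
        break
    time.sleep(30)                       # arbitrary polling interval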

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "SSzH9AdmHTvIBeihArb",
+  "name" : "PRE-6c83",
+  "description" : "test",
+  "inputs" : [ {
+    "type" : "DATASET",
+    "source" : "qjHAs14pRu4n2so1Qlb",
+    "version_id" : "cUELhTAYGIR36YpTE5Y",
+    "name" : "dataset-dba1",
+    "version_name" : "V001"
+  } ],
+  "work_path" : {
+    "type" : "DATASET",
+    "path" : "qjHAs14pRu4n2so1Qlb",
+    "name" : "dataset-dba1",
+    "version_name" : "V002",
+    "output_path" : "/test-lxm/data-out/EnyHCFzjTFY20U3sYSE/"
+  },
+  "template" : {
+    "id" : "sys_data_validation",
+    "name" : "data validation template name",
+    "operator_params" : [ {
+      "name" : "MetaValidation",
+      "advanced_params_switch" : false,
+      "params" : {
+        "task_type" : "image_classification",
+        "dataset_type" : "manifest",
+        "source_service" : "select",
+        "filter_func" : "data_validation_select",
+        "image_max_width" : "-1",
+        "image_max_height" : "-1",
+        "total_status" : "[0,1,2]"
+      }
+    } ]
+  },
+  "status" : 2,
+  "duration_seconds" : 277,
+  "create_time" : 1614245065569,
+  "workspace_id" : "0",
+  "version_count" : 1,
+  "ai_project" : ""
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescribeSample.html b/modelarts/api-ref/DescribeSample.html new file mode 100644 index 00000000..15add5d9 --- /dev/null +++ b/modelarts/api-ref/DescribeSample.html @@ -0,0 +1,795 @@ + + +

Querying Details About a Sample

+

Function

This API is used to query details about a sample.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/{sample_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

sample_id

+

Yes

+

String

+

Sample ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about hard examples, including the description, cause, and handling suggestion of each difficult problem.

+

labelers

+

Array of Worker objects

+

List of labeling personnel to whom the sample is assigned. For team labeling, this field records the team members to which the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, that is, the time when the OBS file was last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the x coordinate of the first point must be smaller than that of the second point, and the y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
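To make the @modelarts:feature and @modelarts:shape encoding in Table 7 concrete, the snippet below builds a single object detection label as a Python dictionary; the label name and coordinates are invented for illustration and follow the bndbox convention (upper-left point first, lower-right point second, origin at the upper-left corner of the image).

# Minimal sketch: one object detection label with a rectangular (bndbox) shape.
import json

label = {
    "name": "car",                          # hypothetical label name
    "type": 1,                              # 1: object detection (see Table 6)
    "property": {
        "@modelarts:shape": "bndbox",
        # Two points: upper-left [x1, y1] followed by lower-right [x2, y2].
        "@modelarts:feature": [[0, 10], [50, 95]],
        "@modelarts:hard": "false"          # not a hard example
    }
}
print(json.dumps(label, indent=2))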
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying Details About a Sample

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/{sample_id}
+
+
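The Python sketch below shows one way to send this request together with the optional query parameters from Table 2; the endpoint, project ID, dataset ID, sample ID, and token are placeholders rather than values from a real project.

# Minimal sketch: query details about a single sample, filtering by sample state.
import requests

endpoint = "https://modelarts.example-region.example.com"   # hypothetical endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
sample_id = "{sample_id}"
token = "<IAM token obtained separately>"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/samples/{sample_id}"
params = {"locale": "en-us", "sample_state": "ALL"}          # optional query parameters (Table 2)
sample = requests.get(url, params=params, headers={"X-Auth-Token": token}).json()

print(sample.get("sample_status"), sample.get("sample_type"))
for label in sample.get("labels", []):
    print(label.get("name"), label.get("type"))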

Example Responses

Status code: 200

+

OK

+
{
+  "sample_id" : "012f99f3cf405860130b6ed2350c2228",
+  "sample_type" : 0,
+  "labels" : [ {
+    "name" : "car",
+    "type" : 0,
+    "property" : { }
+  } ],
+  "source" : "https://test-obs.obs.xxx.com:443/image/aifood/%E5%86%B0%E6%BF%80%E5%87%8C/36502.jpg?AccessKeyId=RciyO7RHmhNTfOZVryUH&Expires=1606297079&Signature=Ju4FYpEu973ii%2FAdUVLTfpLCTbg%3D",
+  "metadata" : {
+    "@modelarts:import_origin" : 0
+  },
+  "sample_time" : 1589190552106,
+  "sample_status" : "MANUAL_ANNOTATION",
+  "annotated_by" : "human/test_123/test_123",
+  "labelers" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "5d8d4033b428fed5ac158942c33940a2",
+    "role" : 0
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/DescribeWorkforceTaskSample.html b/modelarts/api-ref/DescribeWorkforceTaskSample.html new file mode 100644 index 00000000..2094fc5c --- /dev/null +++ b/modelarts/api-ref/DescribeWorkforceTaskSample.html @@ -0,0 +1,801 @@ + + +

Querying Details About Team Labeling Samples

+

Function

This API is used to query details about team labeling samples.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples/{sample_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

sample_id

+

Yes

+

String

+

Sample ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a labeling task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about hard examples, including the description, cause, and handling suggestion of each difficult problem.

+

labelers

+

Array of Worker objects

+

List of labeling personnel to whom the sample is assigned. For team labeling, this field records the team members to which the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, that is, the time when the OBS file was last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the x coordinate of the first point must be smaller than that of the second point, and the y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying Details About Team Labeling Samples

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples/{sample_id}
+
+
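A hedged Python sketch of the same call for a team labeling task is shown below; it differs from the single-sample example only in the workforce task segment of the URI, and again uses placeholder identifiers and a placeholder token.

# Minimal sketch: query one sample that belongs to a team labeling (workforce) task.
import requests

endpoint = "https://modelarts.example-region.example.com"   # hypothetical endpoint
project_id = "{project_id}"
dataset_id = "{dataset_id}"
workforce_task_id = "{workforce_task_id}"
sample_id = "{sample_id}"
token = "<IAM token obtained separately>"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/data-annotations/samples/{sample_id}")
sample = requests.get(url, headers={"X-Auth-Token": token}).json()

# labelers lists the team members the sample is assigned to (see Table 5).
for worker in sample.get("labelers", []):
    print(worker.get("email"), worker.get("worker_id"), worker.get("role"))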

Example Responses

Status code: 200

+

OK

+
{
+  "sample_id" : "26c6dd793d80d3274eb89349ec76d678",
+  "sample_type" : 0,
+  "labels" : [ ],
+  "source" : "https://test-obs.obs.xxx.com:443/detect/data/dataset-car-and-person/IMG_kitti_0000_000016.png?AccessKeyId=4D34AYDTK93HUY79NSD7&Expires=1606300437&x-obs-security-token=gQpjbi1ub3J0aC03jELficNKUP87aSTIhvsHQAvImcyVbXlYTrU2XJIc28F7kiXanJ3TyJV39iXl0yi5mzQ...",
+  "metadata" : {
+    "@modelarts:import_origin" : 0,
+    "@modelarts:size" : [ 1242, 375, 3 ]
+  },
+  "sample_time" : 1598263639997,
+  "sample_status" : "UN_ANNOTATION",
+  "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+  "labelers" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "afdda13895bc66322ffbf36ae833bcf0",
+    "role" : 0
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ExportTask.html b/modelarts/api-ref/ExportTask.html new file mode 100644 index 00000000..b8355ae7 --- /dev/null +++ b/modelarts/api-ref/ExportTask.html @@ -0,0 +1,1218 @@ + + +

Creating a Dataset Export Task

+

Function

This API is used to create a dataset export task to export a dataset to OBS or new datasets.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/export-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

annotation_format

+

No

+

String

+

Labeling format. The options are as follows:

+
  • VOC: VOC

    +
  • COCO: COCO

    +
+

dataset_id

+

No

+

String

+

Dataset ID.

+

dataset_type

+

No

+

Integer

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

export_format

+

No

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

No

+

ExportParams object

+

Parameters of a dataset export task.

+

export_type

+

No

+

Integer

+

Export type. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

path

+

No

+

String

+

Export output path.

+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

source_type_header

+

No

+

String

+

Prefix of the OBS path in the exported labeling file. The default value is obs://; it can also be set to s3://. Because an image path starting with obs cannot be parsed during training, set the path prefix in the exported manifest file to s3:// in that scenario.

+

status

+

No

+

Integer

+

Task status.

+

task_id

+

No

+

String

+

Task ID.

+

version_format

+

No

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

No

+

String

+

Dataset version ID.

+

with_column_header

+

No

+

Boolean

+

Whether to write the column name in the first line of the CSV file during export. This field is valid for the table dataset. The options are as follows:

+
  • true: Write the column name in the first line of the CSV file. (Default value)

    +
  • false: Do not write the column name in the first line of the CSV file.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ExportParams

Parameter

+

Mandatory

+

Type

+

Description

+

clear_hard_property

+

No

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

No

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

No

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

No

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

No

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

No

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

No

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

No

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

No

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

No

+

String

+

Split ratio of the training set and validation set when a dataset version is released. The default value is 1.00, indicating that all samples in the released version are used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 SearchCondition

Parameter

+

Mandatory

+

Type

+

Description

+

coefficient

+

No

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

No

+

Integer

+

A frame in the video.

+

hard

+

No

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

No

+

String

+

Filter by data source.

+

kvp

+

No

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

No

+

SearchLabels object

+

Label search criteria.

+

labeler

+

No

+

String

+

Labeler.

+

metadata

+

No

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

No

+

String

+

Parent sample ID.

+

sample_dir

+

No

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

No

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

No

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples added from September 1 to September 15, 2019 are searched.

    +
+

score

+

No

+

String

+

Search by confidence.

+

slice_thickness

+

No

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

No

+

String

+

DICOM scanning time.

+

time_in_video

+

No

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 SearchLabels

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

No

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SearchLabel

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Label name.

+

op

+

No

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

No

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 7 SearchProp

Parameter

+

Mandatory

+

Type

+

Description

+

op

+

No

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

No

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a task is created.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

export_type

+

Integer

+

Export type. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

finished_sample_count

+

Integer

+

Number of completed samples.

+

path

+

String

+

Export output path.

+

progress

+

Float

+

Percentage of current task progress.

+

status

+

String

+

Task status. The options are as follows:

+
  • INIT: initialized

    +
  • RUNNING: running

    +
  • FAILED: failed

    +
  • SUCCESSED: completed

    +
+

task_id

+

String

+

Task ID.

+

total_sample_count

+

Integer

+

Total number of samples.

+

update_time

+

Long

+

Time when a task is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 ExportParams

Parameter

+

Type

+

Description

+

clear_hard_property

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

String

+

Split ratio of the training set and validation set when a dataset version is released. The default value is 1.00, indicating that all samples in the released version are used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples added from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 11 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 12 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 13 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+
+

Example Requests

+
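As a rough illustration only, the Python sketch below shows how a request body might be assembled from the fields in Table 2 and Table 3 to export labeled samples to an OBS directory. The endpoint, project ID, dataset ID, token, and output path are placeholders, and token-based authentication via the X-Auth-Token header is assumed.

# Illustrative sketch only: create an export task that writes labeled samples
# to an OBS directory. All IDs, the token, and the path are placeholders.
import requests

endpoint = "{endpoint}"          # see Endpoints
project_id = "{project_id}"
dataset_id = "{dataset_id}"
token = "{IAM token}"            # assumption: token-based authentication

url = f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/export-tasks"
body = {
    "path": "/your-bucket/export-output/",   # export output path (placeholder)
    "export_type": 0,                        # 0: export labeled samples
    "export_format": 2,                      # 2: tile structure
    "export_params": {"export_dest": "DIR"}  # DIR: export to OBS
}
resp = requests.post(url, json=body, headers={"X-Auth-Token": token})
print(resp.status_code, resp.json())         # a 200 response carries a task_id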
+

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "rF9NNoB56k5rtYKg2Y7"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetAuthorizations.html b/modelarts/api-ref/GetAuthorizations.html new file mode 100644 index 00000000..4b7c1955 --- /dev/null +++ b/modelarts/api-ref/GetAuthorizations.html @@ -0,0 +1,239 @@ + + +

Viewing an Authorization List

+

Function

This API is used to view an authorization list.

+
+

URI

GET /v2/{project_id}/authorizations

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain a project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

sort_by

+

No

+

String

+

Sorting field.

+

Options:

+
  • user_name: IAM user

    +
  • create_time: creation time

    +
+

Default: user_name

+

order

+

No

+

String

+

Sorting method.

+

Options:

+
  • asc: ascending order

    +
  • desc: descending order

    +
+

Default: asc

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. Default value: 1000

+

The value ranges from 1 to 1000.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

total_count

+

Number

+

Total number of authorization records.

+

auth

+

Array of AuthorizationResponse objects

+

Authorization information list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 AuthorizationResponse

Parameter

+

Type

+

Description

+

user_id

+

String

+

User ID. For details about how to obtain a user ID, see Obtaining a User ID.

+

If user_id is set to all, all IAM users are authorized. If some IAM users have been authorized, the authorization setting will be updated.

+

This parameter is mandatory only if the authorization type is set to agency.

+

type

+

String

+

Authorization type. Agency is recommended.

+

Options:

+
  • agency: authorization through an agency

    +
  • credential: authorization through an access key (AK/SK)

    +
+

Default: agency

+

content

+

String

+

Authorization content.

+
  • If Authorization Type is set to Agency, this field indicates the agency name.

    +
  • If Authorization Type is set to AK/SK, this field indicates the access key ID (AK).

    +
+

secret_key

+

String

+

Secret access key (SK). This field is required only when Authorization Type is set to AK/SK.

+

create_time

+

Long

+

Timestamp when the authorization was created.

+
+
+
+

Example Requests

View an authorization list.

+
GET https://{endpoint}/v2/{project_id}/authorizations
+
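For reference, a minimal Python sketch of the same call that also passes the optional query parameters from Table 2; the endpoint, project ID, and token are placeholders, and the X-Auth-Token header is assumed for authentication.

# Illustrative sketch: list authorizations, newest first, 100 per page.
import requests

endpoint, project_id, token = "{endpoint}", "{project_id}", "{IAM token}"  # placeholders
url = f"https://{endpoint}/v2/{project_id}/authorizations"
params = {"sort_by": "create_time", "order": "desc", "limit": 100, "offset": 0}
body = requests.get(url, params=params, headers={"X-Auth-Token": token}).json()
print(body["total_count"])
for auth in body.get("auth", []):
    print(auth["user_id"], auth["type"], auth["content"])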
+

Example Responses

Status code: 200

+

OK

+
{
+  "total_count" : 1,
+  "auth" : [ {
+    "user_id" : "****d80fb058844ae8b82aa66d9fe****",
+    "user_name" : "iam-user01",
+    "type" : "agency",
+    "content" : "modelarts_agency",
+    "create_time" : 15657747821288
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

Bad Request

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetDatasetMetrics.html b/modelarts/api-ref/GetDatasetMetrics.html new file mode 100644 index 00000000..8d85fec1 --- /dev/null +++ b/modelarts/api-ref/GetDatasetMetrics.html @@ -0,0 +1,171 @@ + + +

Querying the Monitoring Data of a Dataset

+

Function

This API is used to query the monitoring data of a dataset within a specified time range.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/metrics

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

end_time

+

Yes

+

Long

+

End time of the monitoring information.

+

start_time

+

Yes

+

Long

+

Start time of the monitoring information.

+

workforce_task_id

+

No

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

metrics

+

Map<String,Map<String,Integer>>

+

Dataset monitoring information.

+
+
+
+

Example Requests

Querying the Monitoring Data of a Dataset

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/metrics
+
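Note that start_time and end_time in Table 2 are mandatory even though they are omitted from the URL above. The hedged Python sketch below queries the last 24 hours; millisecond timestamps are an assumption based on the keys in the example response, and the endpoint, IDs, and token are placeholders.

# Illustrative sketch: fetch dataset monitoring data for the last 24 hours.
# Millisecond timestamps are assumed from the example response keys.
import time
import requests

endpoint, project_id, dataset_id, token = "{endpoint}", "{project_id}", "{dataset_id}", "{IAM token}"
now_ms = int(time.time() * 1000)
params = {"start_time": now_ms - 24 * 3600 * 1000, "end_time": now_ms}
url = f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/metrics"
metrics = requests.get(url, params=params, headers={"X-Auth-Token": token}).json()
for name, points in metrics.get("metrics", {}).items():
    print(name, points)   # e.g. total: {"1606233612612": 16, ...}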
+

Example Responses

Status code: 200

+

OK

+
{
+  "metrics" : {
+    "un_annotation" : {
+      "1606233612612" : 16,
+      "1606320012681" : 16
+    },
+    "failed_user" : { },
+    "total" : {
+      "1606233612612" : 16,
+      "1606320012681" : 16
+    },
+    "queuing" : { },
+    "success" : { },
+    "unfinished" : { },
+    "manual_annotation" : {
+      "1606233612612" : 0,
+      "1606320012681" : 0
+    },
+    "failed" : { },
+    "failed_system" : { }
+  }
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetExportTaskStatusOfDataset.html b/modelarts/api-ref/GetExportTaskStatusOfDataset.html new file mode 100644 index 00000000..31228880 --- /dev/null +++ b/modelarts/api-ref/GetExportTaskStatusOfDataset.html @@ -0,0 +1,601 @@ + + +

Querying the Status of a Dataset Export Task

+

Function

This API is used to query the status of a dataset export task.

+
+

URI

GET /v2/{project_id}/datasets/{resource_id}/export-tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

resource_id

+

Yes

+

String

+

Resource ID. Currently, the dataset ID can be specified.

+

task_id

+

Yes

+

String

+

ID of an export task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a task is created.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

export_type

+

Integer

+

Export type. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

finished_sample_count

+

Integer

+

Number of completed samples.

+

path

+

String

+

Export output path.

+

progress

+

Float

+

Percentage of current task progress.

+

status

+

String

+

Task status.

+

task_id

+

String

+

Task ID.

+

total_sample_count

+

Integer

+

Total number of samples.

+

update_time

+

Long

+

Time when a task is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 ExportParams

Parameter

+

Type

+

Description

+

clear_hard_property

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

String

+

Split ratio of the training set and validation set when a dataset version is released. The default value is 1.00, indicating that all samples in the released version are used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples added from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 7 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+
+

Example Requests

Querying the Status of an Export Task (Exporting Data to OBS)

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/export-tasks/{task_id}
+
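An export task runs asynchronously, so callers usually poll this API until the status reaches a terminal value. The Python sketch below is illustrative only; it assumes the SUCCESSED and FAILED values shown elsewhere in this document are the terminal states, uses placeholders for the endpoint, IDs, and token, and authenticates with the X-Auth-Token header.

# Illustrative sketch: poll an export task until it reaches a terminal state.
import time
import requests

endpoint, project_id, dataset_id, task_id, token = "{endpoint}", "{project_id}", "{dataset_id}", "{task_id}", "{IAM token}"
url = f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/export-tasks/{task_id}"
while True:
    task = requests.get(url, headers={"X-Auth-Token": token}).json()
    print(task.get("status"), task.get("progress"))
    if task.get("status") in ("SUCCESSED", "FAILED"):   # assumed terminal states
        break
    time.sleep(10)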
+

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "TZMuy7OKbClkGCAc3gb",
+  "path" : "/test-obs/daoChu/",
+  "export_type" : 3,
+  "version_format" : "Default",
+  "export_format" : 2,
+  "export_params" : {
+    "sample_state" : "",
+    "export_dest" : "DIR",
+    "clear_hard_property" : true,
+    "clear_difficult" : false,
+    "train_sample_ratio" : 1.0,
+    "ratio_sample_usage" : false
+  },
+  "status" : "RUNNING",
+  "progress" : 0.0,
+  "create_time" : 1606103424662,
+  "update_time" : 1606103494124
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetExportTasksStatusOfDataset.html b/modelarts/api-ref/GetExportTasksStatusOfDataset.html new file mode 100644 index 00000000..51df5161 --- /dev/null +++ b/modelarts/api-ref/GetExportTasksStatusOfDataset.html @@ -0,0 +1,808 @@ + + +

Querying the Dataset Export Task List

+

Function

This API is used to query the dataset export task list by page.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/export-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

export_type

+

No

+

Integer

+

Export type. If this parameter is not specified, all types of export tasks are queried by default. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a task is created.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

export_tasks

+

Array of ExportTaskStatusResp objects

+

Export task list

+

export_type

+

Integer

+

Export type. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

finished_sample_count

+

Integer

+

Number of completed samples.

+

path

+

String

+

Export output path.

+

progress

+

Float

+

Percentage of current task progress.

+

status

+

String

+

Task status.

+

task_id

+

String

+

Task ID.

+

total_count

+

Integer

+

Total number of export tasks

+

total_sample_count

+

Integer

+

Total number of samples.

+

update_time

+

Long

+

Time when a task is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 ExportParams

Parameter

+

Type

+

Description

+

clear_hard_property

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

String

+

Split ratio of the training set and validation set when a dataset version is released. The default value is 1.00, indicating that all samples in the released version are used as the training set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 ExportTaskStatusResp

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a task is created.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

export_type

+

Integer

+

Export type. The options are as follows:

+
  • 0: labeled

    +
  • 1: unlabeled

    +
  • 2: all

    +
  • 3: conditional search

    +
+

finished_sample_count

+

Integer

+

Number of completed samples.

+

path

+

String

+

Export output path.

+

progress

+

Float

+

Percentage of current task progress.

+

status

+

String

+

Task status.

+

task_id

+

String

+

Task ID.

+

total_sample_count

+

Integer

+

Total number of samples.

+

update_time

+

Long

+

Time when a task is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples added from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 9 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+
+

Example Requests

Querying the Export Task List by Page

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/export-tasks
+
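A hedged Python sketch of paging through the task list follows. Table 2 describes offset as the start page, so the sketch treats it as a page index; the endpoint, IDs, and token are placeholders and the X-Auth-Token header is assumed.

# Illustrative sketch: page through all export tasks, 10 per page.
import requests

endpoint, project_id, dataset_id, token = "{endpoint}", "{project_id}", "{dataset_id}", "{IAM token}"
url = f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/export-tasks"
page_index, limit = 0, 10
while True:
    page = requests.get(url, params={"offset": page_index, "limit": limit},
                        headers={"X-Auth-Token": token}).json()
    tasks = page.get("export_tasks", [])
    for task in tasks:
        print(task["task_id"], task["status"], task["path"])
    page_index += 1
    if not tasks or page_index * limit >= page.get("total_count", 0):
        break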
+

Example Responses

Status code: 200

+

OK

+
{
+  "total_count" : 2,
+  "export_tasks" : [ {
+    "task_id" : "rF9NNoB56k5rtYKg2Y7",
+    "path" : "/test-obs/classify/input/",
+    "export_type" : 3,
+    "version_format" : "Default",
+    "export_format" : 2,
+    "export_params" : {
+      "sample_state" : "",
+      "export_dest" : "NEW_DATASET",
+      "export_new_dataset_name" : "dataset-export-test",
+      "export_new_dataset_work_path" : "/test-obs/classify/output/",
+      "clear_hard_property" : true,
+      "clear_difficult" : false,
+      "train_sample_ratio" : 1.0,
+      "ratio_sample_usage" : false
+    },
+    "status" : "SUCCESSED",
+    "progress" : 100.0,
+    "total_sample_count" : 20,
+    "finished_sample_count" : 20,
+    "create_time" : 1606103820120,
+    "update_time" : 1606103824823
+  }, {
+    "task_id" : "TZMuy7OKbClkGCAc3gb",
+    "path" : "/test-obs/daoChu/",
+    "export_type" : 3,
+    "version_format" : "Default",
+    "export_format" : 2,
+    "export_params" : {
+      "sample_state" : "",
+      "export_dest" : "DIR",
+      "clear_hard_property" : true,
+      "clear_difficult" : false,
+      "train_sample_ratio" : 1.0,
+      "ratio_sample_usage" : false
+    },
+    "status" : "SUCCESSED",
+    "progress" : 100.0,
+    "total_sample_count" : 20,
+    "finished_sample_count" : 20,
+    "create_time" : 1606103424662,
+    "update_time" : 1606103497519
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetProcessorTaskItems.html b/modelarts/api-ref/GetProcessorTaskItems.html new file mode 100644 index 00000000..95fba2f9 --- /dev/null +++ b/modelarts/api-ref/GetProcessorTaskItems.html @@ -0,0 +1,159 @@ + + +

Querying the Algorithm Type for Data Processing

+

Function

This API is used to query the algorithm type for data processing.

+
+

URI

GET /v2/{project_id}/processor-tasks/items

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

items

+

Array of ProcessorTaskItem objects

+

Algorithm type list.

+

total

+

Integer

+

Total number.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 3 ProcessorTaskItem

Parameter

+

Type

+

Description

+

label_en

+

String

+

English name of an algorithm type.

+

label_zh

+

String

+

Chinese name of an algorithm type.

+

template_id

+

String

+

Algorithm type ID.

+
+
+
+

Example Requests

Querying the List of Algorithm Types for Data Processing

+
GET https://{endpoint}/v2/{project_id}/processor-tasks/items
+
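As an illustration only, the Python sketch below makes the same call and prints each algorithm type ID with its English name; placeholders and the X-Auth-Token header are assumed as in the other sketches.

# Illustrative sketch: list the data-processing algorithm types.
import requests

endpoint, project_id, token = "{endpoint}", "{project_id}", "{IAM token}"  # placeholders
url = f"https://{endpoint}/v2/{project_id}/processor-tasks/items"
body = requests.get(url, headers={"X-Auth-Token": token}).json()
for item in body.get("items", []):
    print(item["template_id"], item["label_en"])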
+

Example Responses

Status code: 200

+

OK

+
{
+  "total" : 4,
+  "items" : [ {
+    "template_id" : "sys_data_cleaning",
+    "label_zh" : "label_zh to translate",
+    "label_en" : "data cleaning"
+  }, {
+    "template_id" : "sys_data_validation",
+    "label_zh" : "label_zh to translate",
+    "label_en" : "data validation"
+  }, {
+    "template_id" : "sys_data_selection",
+    "label_zh" : "label_zh to translate",
+    "label_en" : "data selection"
+  }, {
+    "template_id" : "sys_data_augmentation",
+    "label_zh" : "label_zh to translate",
+    "label_en" : "data augmentation"
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetWorkforceSamplingTask.html b/modelarts/api-ref/GetWorkforceSamplingTask.html new file mode 100644 index 00000000..9478ce1d --- /dev/null +++ b/modelarts/api-ref/GetWorkforceSamplingTask.html @@ -0,0 +1,258 @@ + + +

Querying the Report of a Team Labeling Acceptance Task

+

Function

This API is used to query the report of a team labeling acceptance task.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/report

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

checking_task_id

+

Yes

+

String

+

ID of the task that is being checked.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

checking_stats

+

CheckTaskStats object

+

Real-time acceptance statistics.

+

total_stats

+

CheckTaskStats object

+

Historical statistics.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 CheckTaskStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Accepted samples.

+

checked_sample_count

+

Integer

+

Checked samples.

+

pass_rate

+

Double

+

Pass rate of samples.

+

rejected_sample_count

+

Integer

+

Rejected samples.

+

sampled_sample_count

+

Integer

+

Number of sampled samples.

+

sampling_num

+

Integer

+

Number of samples in an acceptance task.

+

sampling_rate

+

Double

+

Sampling rate of an acceptance task.

+

score

+

String

+

Acceptance score.

+

task_id

+

String

+

ID of an acceptance task.

+

total_sample_count

+

Integer

+

Total samples.

+

total_score

+

Long

+

Total acceptance score.

+

unchecked_sample_count

+

Integer

+

Unchecked samples.

+
+
+
+

Example Requests

Querying the Report of a Team Labeling Acceptance Task

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/report
+
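The checking_task_id query parameter in Table 2 is mandatory even though the URL above omits it. The Python sketch below is illustrative only, with placeholders for all IDs and the token, and assumes the X-Auth-Token header for authentication.

# Illustrative sketch: fetch the acceptance report for a team labeling task,
# passing the mandatory checking_task_id query parameter.
import requests

endpoint, project_id, dataset_id, workforce_task_id, token = (
    "{endpoint}", "{project_id}", "{dataset_id}", "{workforce_task_id}", "{IAM token}")
checking_task_id = "{checking_task_id}"   # ID of the task being checked
url = (f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/acceptance/report")
report = requests.get(url, params={"checking_task_id": checking_task_id},
                      headers={"X-Auth-Token": token}).json()
stats = report.get("total_stats", {})
print(stats.get("score"), stats.get("pass_rate"), stats.get("sampled_sample_count"))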
+

Example Responses

Status code: 200

+

OK

+
{
+  "total_stats" : {
+    "sampling_rate" : 1.0,
+    "sampling_num" : 3,
+    "pass_rate" : 0.0,
+    "score" : "E",
+    "total_score" : 0,
+    "total_sample_count" : 3,
+    "sampled_sample_count" : 3,
+    "unchecked_sample_count" : 3,
+    "checked_sample_count" : 0,
+    "accepted_sample_count" : 0,
+    "rejected_sample_count" : 0
+  }
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/GetWorkforceTaskMetrics.html b/modelarts/api-ref/GetWorkforceTaskMetrics.html new file mode 100644 index 00000000..de738263 --- /dev/null +++ b/modelarts/api-ref/GetWorkforceTaskMetrics.html @@ -0,0 +1,129 @@ + + +

Querying Details About the Progress of a Team Labeling Task Member

+

Function

This API is used to query details about the progress of a team labeling task member.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/metrics

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

sample_stats

+

Map<String,Map<String,Integer>>

+

Statistics on team labeling task members.

+
+
+
+

Example Requests

Querying Details About the Progress of a Team Labeling Task Member

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/metrics
+
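As a rough illustration, the Python sketch below makes the same call and prints each member's counters from sample_stats; placeholders and the X-Auth-Token header are assumed.

# Illustrative sketch: print per-member progress of a team labeling task.
import requests

endpoint, project_id, dataset_id, workforce_task_id, token = (
    "{endpoint}", "{project_id}", "{dataset_id}", "{workforce_task_id}", "{IAM token}")
url = (f"https://{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/metrics")
body = requests.get(url, headers={"X-Auth-Token": token}).json()
for member, stats in body.get("sample_stats", {}).items():
    print(member, stats)   # e.g. xxx@xxx.com {'un_annotation': 51, ...}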
+

Example Responses

Status code: 200

+

OK

+
{
+  "sample_stats" : {
+    "xxx@xxx.com" : {
+      "un_annotation" : 51,
+      "rejected" : 0,
+      "unreviewed" : 0,
+      "accepted" : 0,
+      "auto_annotation" : 0,
+      "uncheck" : 0
+    }
+  }
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ImportTask.html b/modelarts/api-ref/ImportTask.html new file mode 100644 index 00000000..09b41644 --- /dev/null +++ b/modelarts/api-ref/ImportTask.html @@ -0,0 +1,807 @@ + + +

Creating an Import Task

+

Function

This API is used to create a dataset import task to import samples and labels from the storage system to the dataset.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/import-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

annotation_format

+

No

+

String

+

Format of the labeling information. Currently, only object detection is supported. The options are as follows:

+
  • VOC: VOC

    +
  • COCO: COCO

    +
+

data_source

+

No

+

DataSource object

+

Data source.

+

difficult_only

+

No

+

Boolean

+

Whether to import only hard examples. The options are as follows:

+
  • true: Only difficult samples are imported.

    +
  • false: All samples are imported. (Default value)

    +
+

excluded_labels

+

No

+

Array of Label objects

+

Do not import samples containing the specified label.

+

final_annotation

+

No

+

Boolean

+

Whether to import data to the final state. The options are as follows:

+
  • true: Import data to the final state. (Default value)

    +
  • false: Do not import data to the final state.

    +
+

import_annotations

+

No

+

Boolean

+

Whether to import labels. The options are as follows:

+
  • true: Import labels. (Default value)

    +
  • false: Do not import labels.

    +
+

import_folder

+

No

+

String

+

Name of the subdirectory in the dataset storage directory after import. You can specify the same subdirectory for multiple import tasks to avoid repeated import of the same samples. This field is invalid for table datasets.

+

import_origin

+

No

+

String

+

Data source. The options are as follows:

+
  • obs: OBS bucket (default value)

    +
  • dws: GaussDB(DWS)

    +
  • dli: DLI

    +
  • rds: RDS

    +
  • mrs: MRS

    +
  • inference: Inference service

    +
+

import_path

+

No

+

String

+

OBS path or manifest path to be imported.

+
  • When importing a manifest file, ensure that the path points to the manifest file itself.

    +
  • When importing a directory, only image classification, object detection, text classification, and sound classification datasets are supported.

    +
+

import_samples

+

No

+

Boolean

+

Whether to import samples. The options are as follows:

+
  • true: Import samples. (Default value)

    +
  • false: Do not import samples.

    +
+

import_type

+

No

+

String

+

Import mode. The options are as follows:

+
  • 0: Import by directory.

    +
  • 1: Import by manifest file.

    +
+

included_labels

+

No

+

Array of Label objects

+

Import samples containing the specified label.

+

label_format

+

No

+

LabelFormat object

+

Label format. This parameter is used only for text datasets.

+

with_column_header

+

No

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name. (Default value)

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 DataSource

Parameter

+

Mandatory

+

Type

+

Description

+

data_path

+

No

+

String

+

Data source path.

+

data_type

+

No

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

No

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

No

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

No

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 SchemaMap

Parameter

+

Mandatory

+

Type

+

Description

+

dest_name

+

No

+

String

+

Name of the destination column.

+

src_name

+

No

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SourceInfo

Parameter

+

Mandatory

+

Type

+

Description

+

cluster_id

+

No

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

No

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

No

+

String

+

Name of an MRS cluster.

+

database_name

+

No

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

No

+

String

+

HDFS path of a table dataset.

+

ip

+

No

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

No

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

No

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

No

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

No

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

No

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

No

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

No

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
Table 7 LabelAttribute

Parameter | Mandatory | Type | Description
default_value | No | String | Default value of a label attribute.
id | No | String | Label attribute ID.
name | No | String | Label attribute name.
type | No | String | Label attribute type. Options: text: text; select: single-choice drop-down list.
values | No | Array of LabelAttributeValue objects | List of label attribute values.
Table 8 LabelAttributeValue

Parameter | Mandatory | Type | Description
id | No | String | Label attribute value ID.
value | No | String | Label attribute value.
Table 9 LabelProperty

Parameter | Mandatory | Type | Description
@modelarts:color | No | String | Default attribute: label color, which is a hexadecimal color code. By default, this parameter is left blank. Example: #FFFFF0.
@modelarts:default_shape | No | String | Default attribute: default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. Options: bndbox: rectangle; polygon: polygon; circle: circle; line: straight line; dashed: dotted line; point: point; polyline: polyline.
@modelarts:from_type | No | String | Default attribute: type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.
@modelarts:rename_to | No | String | Default attribute: the new name of the label.
@modelarts:shortcut | No | String | Default attribute: label shortcut key. By default, this parameter is left blank. Example: D.
@modelarts:to_type | No | String | Default attribute: type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.
Table 10 LabelFormat

Parameter | Mandatory | Type | Description
label_type | No | String | Label type of text classification. Options: 0: The label is separated from the text, and they are distinguished by the fixed suffix _result. For example, the text file is abc.txt, and the label file is abc_result.txt. 1: Default value. Labels and texts are stored in the same file and separated by separators. You can use text_sample_separator to specify the separator between the text and label, and text_label_separator to specify the separator between labels.
text_label_separator | No | String | Separator between labels. By default, the comma (,) is used as the separator. The separator needs to be escaped. Only one character can be used as the separator. The value must contain letters, digits, and one special character (!@#$%^&*_=|?/':.;,).
text_sample_separator | No | String | Separator between the text and label. By default, the Tab key is used as the separator. The separator needs to be escaped. Only one character can be used as the separator. The value must contain letters, digits, and one special character (!@#$%^&*_=|?/':.;,).
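For illustration only (not part of the API specification): with label_type set to 1 and the default separators, each line of a text classification file holds the sample text, a Tab, and the comma-separated labels. A hypothetical line could be built as follows; the text and label values are placeholders.

# Hypothetical example of one line in a combined text/label file (label_type = 1, default separators).
text = "The new phone has excellent battery life"
labels = ["positive", "hardware"]
line = text + "\t" + ",".join(labels)   # text_sample_separator defaults to Tab, text_label_separator to a comma
print(line)                             # prints the text, a tab character, then "positive,hardware"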

Response Parameters

Status code: 200

Table 11 Response body parameters

Parameter | Type | Description
task_id | String | ID of an import task.

Example Requests

+
+

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "gfghHSokody6AJigS5A_m1dYqOw8vWCAznw1V28"
+}
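The response body carries only the import task ID. The following is a minimal sketch, assuming the response shown above has already been received as a JSON string, of how a caller might keep the ID for later status queries; the value is the placeholder from the example.

# Minimal sketch: extract the import task ID from the response body shown above.
import json

body = '{ "task_id" : "gfghHSokody6AJigS5A_m1dYqOw8vWCAznw1V28" }'
task_id = json.loads(body)["task_id"]
print(task_id)  # keep this ID to query the status of the import task later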
+
+

Status Codes

Status Code | Description
200 | OK
401 | Unauthorized
403 | Forbidden
404 | Not Found

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListAllWorkers.html b/modelarts/api-ref/ListAllWorkers.html new file mode 100644 index 00000000..a4f42dd2 --- /dev/null +++ b/modelarts/api-ref/ListAllWorkers.html @@ -0,0 +1,275 @@ + + +

Querying the List of All Labeling Team Members

+

Function

This API is used to query the list of all labeling team members.

+
+

URI

GET /v2/{project_id}/workforces/workers

Table 1 Path parameters

Parameter | Mandatory | Type | Description
project_id | Yes | String | Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.
Table 2 Query parameters

Parameter | Mandatory | Type | Description
limit | No | Integer | Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.
offset | No | Integer | Start page of the paging list. The default value is 0.
order | No | String | Sorting sequence of the query. Options: asc: ascending order; desc: descending order (default value).
role | No | Integer | Filtering query based on the member role. Options: 0: labeling personnel (default value); 1: reviewer; 2: team administrator.
sort_by | No | String | Sorting mode of the query. Options: create_time: sort by creation time (default value); email: sort by email.

Request Parameters

None

+
+

Response Parameters

Status code: 200

Table 3 Response body parameters

Parameter | Type | Description
total_number | Integer | Total number of labeling team members.
workers | Array of Worker objects | Labeling team member list queried by page.
Table 4 Worker

Parameter | Type | Description
create_time | Long | Creation time.
description | String | Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'
email | String | Email address of a labeling team member.
role | Integer | Role. Options: 0: labeling personnel; 1: reviewer; 2: team administrator; 3: dataset owner.
status | Integer | Current login status of a labeling team member. Options: 0: The invitation email has not been sent. 1: The invitation email has been sent but the user has not logged in. 2: The user has logged in. 3: The labeling team member has been deleted.
update_time | Long | Update time.
worker_id | String | ID of a labeling team member.
workforce_id | String | ID of a labeling team.

Example Requests

Querying All Labeling Team Administrators

+
GET https://{endpoint}/v2/{project_id}/workforces/workers?role=2
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "total_number" : 2,
+  "workers" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "df40e4afcb793d13f01f6c9022341e6f",
+    "workforce_id" : "feSUo5NUIUnQAQNNTiS",
+    "status" : 0,
+    "role" : 2,
+    "description" : "",
+    "create_time" : 1595927749772,
+    "update_time" : 1595927749772
+  }, {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "27906df1d06c0827b7c24f761d618541",
+    "workforce_id" : "XiL5RcHmxyIt3aYIOtI",
+    "status" : 0,
+    "role" : 2,
+    "description" : "",
+    "create_time" : 1590027298717,
+    "update_time" : 1590027298717
+  } ]
+}
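The following is a minimal, illustrative sketch of issuing this request with the Python requests library; it is not part of the API specification. It assumes token-based authentication through the X-Auth-Token header, and the endpoint, project ID, and token are placeholders.

# Minimal sketch: list all team administrators via GET /v2/{project_id}/workforces/workers
# using the query parameters described in Table 2 (placeholders for endpoint, project ID, token).
import requests

endpoint = "https://modelarts.example.com"   # placeholder endpoint
project_id = "your-project-id"               # placeholder project ID
headers = {"X-Auth-Token": "your-token"}     # placeholder IAM token

resp = requests.get(
    f"{endpoint}/v2/{project_id}/workforces/workers",
    headers=headers,
    params={"role": 2, "limit": 10, "offset": 0},  # role=2 filters team administrators
)
resp.raise_for_status()
for worker in resp.json()["workers"]:
    print(worker["email"], worker["worker_id"])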
+
+

Status Codes

Status Code | Description
200 | OK
401 | Unauthorized
403 | Forbidden
404 | Not Found

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListAutoAnnotationSamples.html b/modelarts/api-ref/ListAutoAnnotationSamples.html new file mode 100644 index 00000000..dd788010 --- /dev/null +++ b/modelarts/api-ref/ListAutoAnnotationSamples.html @@ -0,0 +1,869 @@ + + +

Querying Auto Labeling Sample List

+

Function

This API is used to query auto labeling samples in a dataset.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples

Table 1 Path parameters

Parameter | Mandatory | Type | Description
dataset_id | Yes | String | Dataset ID.
project_id | Yes | String | Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.
Table 2 Query parameters

Parameter | Mandatory | Type | Description
high_score | No | String | Upper confidence limit. The default value is 1.
label_name | No | String | Label name.
label_type | No | Integer | Labeling type. Options: 0: image classification; 1: object detection; 100: text classification; 101: named entity recognition; 102: text triplet; 200: sound classification; 201: speech content; 202: speech paragraph labeling; 400: table dataset; 600: video labeling; 900: custom format.
limit | No | Integer | Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.
low_score | No | String | Lower confidence limit. The default value is 0.
offset | No | Integer | Start page of the paging list. The default value is 0.
order | No | String | Sorting sequence of the query. Options: asc: ascending order; desc: descending order (default value).
process_parameter | No | String | Image resizing setting, which is the same as the OBS resizing setting. For example, image/resize,m_lfit,h_200 indicates that the target image is resized proportionally and the height is set to 200 pixels.
search_conditions | No | String | Multi-dimensional search condition after URL encoding. The relationship between multiple search conditions is AND.

Request Parameters

None

+
+

Response Parameters

Status code: 200

Table 3 Response body parameters

Parameter | Type | Description
sample_count | Integer | Number of samples.
samples | Array of DescribeSampleResp objects | Sample list.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescribeSampleResp

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about difficulties, including description, causes, and suggestions of difficult problems.

+

labelers

+

Array of Worker objects

+

Labeling personnel list of sample assignment. The labelers record the team members to which the sample is allocated for team labeling.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, when OBS is last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
Table 5 HardDetail

Parameter | Type | Description
alo_name | String | Alias.
id | Integer | Reason ID.
reason | String | Reason description.
suggestion | String | Handling suggestion.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the second point must be smaller than that of the first point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying Auto Labeling Sample List

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "sample_count" : 1,
+  "samples" : [ {
+    "sample_id" : "10de574cbf0f09d4798b87ba0eb34e37",
+    "sample_type" : 0,
+    "labels" : [ {
+      "name" : "sunflowers",
+      "type" : 0,
+      "id" : "1",
+      "property" : {
+        "@modelarts:hard_coefficient" : "0.0",
+        "@modelarts:hard" : "false"
+      },
+      "score" : 1.0
+    } ],
+    "source" : "https://test-obs.obs.xxx.com:443/dogcat/8_1597649054631.jpeg?AccessKeyId=alRn0xskf5luJaG2jBJe&Expires=1606299230&x-image-process=image%2Fresize%2Cm_lfit%2Ch_200&Signature=MNAAjXz%2Fmwn%2BSabSK9wkaG6b6bU%3D",
+    "metadata" : {
+      "@modelarts:hard_coefficient" : 1.0,
+      "@modelarts:hard" : true,
+      "@modelarts:import_origin" : 0,
+      "@modelarts:hard_reasons" : [ 8, 6, 5, 3 ]
+    },
+    "sample_time" : 1601432758000,
+    "sample_status" : "UN_ANNOTATION"
+  } ]
+}
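The following is a minimal, illustrative sketch of issuing this request with the Python requests library; it is not part of the API specification. It assumes token-based authentication through the X-Auth-Token header, and the endpoint, project ID, dataset ID, and token are placeholders.

# Minimal sketch: list auto labeling samples whose confidence lies between low_score and high_score
# (query parameters described in Table 2; placeholders for endpoint, IDs, and token).
import requests

endpoint = "https://modelarts.example.com"   # placeholder endpoint
project_id = "your-project-id"               # placeholder project ID
dataset_id = "your-dataset-id"               # placeholder dataset ID
headers = {"X-Auth-Token": "your-token"}     # placeholder IAM token

resp = requests.get(
    f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/auto-annotations/samples",
    headers=headers,
    params={"low_score": "0.5", "high_score": "0.9", "limit": 50},
)
resp.raise_for_status()
for sample in resp.json()["samples"]:
    names = [label["name"] for label in sample.get("labels", [])]
    print(sample["sample_id"], sample["sample_status"], names)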
+
+

Status Codes

Status Code | Description
200 | OK
401 | Unauthorized
403 | Forbidden
404 | Not Found

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListDatasetVersions.html b/modelarts/api-ref/ListDatasetVersions.html new file mode 100644 index 00000000..ee8fa84a --- /dev/null +++ b/modelarts/api-ref/ListDatasetVersions.html @@ -0,0 +1,825 @@ + + +

Querying the Dataset Version List

+

Function

This API is used to query the version list of a specific dataset.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/versions

Table 1 Path parameters

Parameter | Mandatory | Type | Description
dataset_id | Yes | String | Dataset ID.
project_id | Yes | String | Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.
Table 2 Query parameters

Parameter | Mandatory | Type | Description
status | No | Integer | Status of a dataset version. Options: 0: creating; 1: running; 2: deleting; 3: deleted; 4: error.
train_evaluate_ratio | No | String | Version split ratio for version filtering. The numbers before and after the comma indicate the minimum and maximum split ratios, and the versions whose split ratios are within the range are filtered out, for example, 0.0,1.0. Note: If this parameter is left blank or unavailable, the system does not filter datasets based on the version split ratio by default.
version_format | No | Integer | Format of a dataset version. Options: 0: default format; 1: CarbonData (supported only by table datasets); 2: CSV.

Request Parameters

None

+
+

Response Parameters

Status code: 200

Table 3 Response body parameters

Parameter | Type | Description
total_number | Integer | Total number of dataset versions.
versions | Array of DatasetVersion objects | Dataset version list.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DatasetVersion

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added samples.

+

annotated_sample_count

+

Integer

+

Number of samples with labeled versions.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

clear_hard_property

+

Boolean

+

Whether to clear hard example properties during release. The options are as follows:

+
  • true: Clear hard example properties. (Default value)

    +
  • false: Do not clear hard example properties.

    +
+

code

+

String

+

Status code of a preprocessing task such as rotation and cropping.

+

create_time

+

Long

+

Time when a version is created.

+

crop

+

Boolean

+

Whether to crop the image. This field is valid only for the object detection dataset whose labeling box is in the rectangle shape. The options are as follows:

+
  • true: Crop the image.

    +
  • false: Do not crop the image. (Default value)

    +
+

crop_path

+

String

+

Path for storing cropped files.

+

crop_rotate_cache_path

+

String

+

Temporary directory for executing the rotation and cropping task.

+

data_path

+

String

+

Path for storing data.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata in JSON format.

+

data_validate

+

Boolean

+

Whether data is validated by the validation algorithm before release. The options are as follows:

+
  • true: The data has been validated.

    +
  • false: The data has not been validated.

    +
+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Description of a version.

+

export_images

+

Boolean

+

Whether to export images to the version output directory during release. The options are as follows:

+
  • true: Export images to the version output directory.

    +
  • false: Do not export images to the version output directory. (Default value)

    +
+

extract_serial_number

+

Boolean

+

Whether to parse the subsample number during release. The field is valid for the healthcare dataset. The options are as follows:

+
  • true: Parse the subsample number.

    +
  • false: Do not parse the subsample number. (Default value)

    +
+

include_dataset_data

+

Boolean

+

Whether to include the source data of a dataset during release. The options are as follows:

+
  • true: The source data of a dataset is included.

    +
  • false: The source data of a dataset is not included.

    +
+

is_current

+

Boolean

+

Whether the current dataset version is used. The options are as follows:

+
  • true: The current dataset version is used.

    +
  • false: The current dataset version is not used.

    +
+

label_stats

+

Array of LabelStats objects

+

Label statistics list of a released version.

+

label_type

+

String

+

Label type of a released version. The options are as follows:

+
  • multi: Multi-label samples are included.

    +
  • single: All samples are single-labeled.

    +
+

manifest_cache_input_path

+

String

+

Input path for the manifest file cache during version release.

+

manifest_path

+

String

+

Path for storing the manifest file with the released version.

+

message

+

String

+

Task information recorded during release (for example, error information).

+

modified_sample_count

+

Integer

+

Number of modified samples.

+

previous_annotated_sample_count

+

Integer

+

Number of labeled samples of parent versions.

+

previous_total_sample_count

+

Integer

+

Total samples of parent versions.

+

previous_version_id

+

String

+

Parent version ID

+

processor_task_id

+

String

+

ID of a preprocessing task such as rotation and cropping.

+

processor_task_status

+

Integer

+

Status of a preprocessing task such as rotation and cropping. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
  • 5: timeout

    +
  • 6: deletion failed

    +
  • 7: stop failed

    +
+

remove_sample_usage

+

Boolean

+

Whether to clear the existing usage information of a dataset during release. The options are as follows:

+
  • true: Clear the existing usage information of a dataset. (Default value)

    +
  • false: Do not clear the existing usage information of a dataset.

    +
+

rotate

+

Boolean

+

Whether to rotate the image. The options are as follows:

+
  • true: Rotate the image.

    +
  • false: Do not rotate the image. (Default value)

    +
+

rotate_path

+

String

+

Path for storing the rotated file.

+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

status

+

Integer

+

Status of a dataset version. The options are as follows:

+
  • 0: creating

    +
  • 1: running

    +
  • 2: deleting

    +
  • 3: deleted

    +
  • 4: error

    +
+

tags

+

Array of strings

+

Key identifier list of the dataset. The labeling type is used as the default label when the labeling task releases a version. For example, ["Image","Object detection"].

+

task_type

+

Integer

+

Labeling task type of the released version, which is the same as the dataset type.

+

total_sample_count

+

Integer

+

Total number of version samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples.

+

train_evaluate_sample_ratio

+

String

+

Split training and verification ratio during version release. The default value is 1.00, indicating that all labeled samples are split into the training set.

+

update_time

+

Long

+

Time when a version is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

with_column_header

+

Boolean

+

Whether the first row in the released CSV file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the released CSV file is a column name.

    +
  • false: The first row in the released CSV file is not a column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Example Requests

Querying the Version List of a Specific Dataset

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "total_number" : 3,
+  "versions" : [ {
+    "version_id" : "54IXbeJhfttGpL46lbv",
+    "version_name" : "V003",
+    "version_format" : "Default",
+    "previous_version_id" : "eSOKEQaXhKzxN00WKoV",
+    "status" : 1,
+    "create_time" : 1605930512183,
+    "total_sample_count" : 10,
+    "annotated_sample_count" : 10,
+    "total_sub_sample_count" : 0,
+    "annotated_sub_sample_count" : 0,
+    "manifest_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V003/V003.manifest",
+    "data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V003/data/",
+    "is_current" : true,
+    "train_evaluate_sample_ratio" : "0.8",
+    "remove_sample_usage" : false,
+    "export_images" : false,
+    "description" : "",
+    "task_type" : 0,
+    "extract_serial_number" : false
+  }, {
+    "version_id" : "eSOKEQaXhKzxN00WKoV",
+    "version_name" : "V002",
+    "version_format" : "Default",
+    "previous_version_id" : "vlGvUqOcxxGPIB0ugeE",
+    "status" : 1,
+    "create_time" : 1605691027084,
+    "total_sample_count" : 10,
+    "annotated_sample_count" : 10,
+    "total_sub_sample_count" : 0,
+    "annotated_sub_sample_count" : 0,
+    "manifest_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V002/V002.manifest",
+    "data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V002/data/",
+    "is_current" : false,
+    "train_evaluate_sample_ratio" : "0.9999",
+    "remove_sample_usage" : false,
+    "export_images" : false,
+    "description" : "",
+    "task_type" : 0,
+    "extract_serial_number" : false
+  }, {
+    "version_id" : "vlGvUqOcxxGPIB0ugeE",
+    "version_name" : "V001",
+    "version_format" : "Default",
+    "status" : 1,
+    "create_time" : 1605690687346,
+    "total_sample_count" : 10,
+    "annotated_sample_count" : 10,
+    "total_sub_sample_count" : 0,
+    "annotated_sub_sample_count" : 0,
+    "manifest_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V001/V001.manifest",
+    "data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/V001/data/",
+    "is_current" : false,
+    "train_evaluate_sample_ratio" : "0.99",
+    "remove_sample_usage" : false,
+    "export_images" : false,
+    "description" : "",
+    "task_type" : 0,
+    "extract_serial_number" : false
+  } ]
+}
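The following is a minimal, illustrative sketch of issuing this request with the Python requests library; it is not part of the API specification. It assumes token-based authentication through the X-Auth-Token header, and the endpoint, project ID, dataset ID, and token are placeholders.

# Minimal sketch: list dataset versions and locate the version currently in use (is_current is true).
import requests

endpoint = "https://modelarts.example.com"   # placeholder endpoint
project_id = "your-project-id"               # placeholder project ID
dataset_id = "your-dataset-id"               # placeholder dataset ID
headers = {"X-Auth-Token": "your-token"}     # placeholder IAM token

resp = requests.get(
    f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/versions",
    headers=headers,
    params={"status": 1},  # 1: running versions only, per Table 2
)
resp.raise_for_status()
body = resp.json()
print("total:", body["total_number"])
current = next((v for v in body["versions"] if v.get("is_current")), None)
if current:
    print("current version:", current["version_name"], current["version_id"])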
+
+

Status Codes

Status Code | Description
200 | OK
401 | Unauthorized
403 | Forbidden
404 | Not Found

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListDatasets.html b/modelarts/api-ref/ListDatasets.html new file mode 100644 index 00000000..6c8723da --- /dev/null +++ b/modelarts/api-ref/ListDatasets.html @@ -0,0 +1,1916 @@ + + +

Querying the Dataset List

+

Function

This API is used to query the created datasets that meet the search criteria by page.

+
+

URI

GET /v2/{project_id}/datasets

Table 1 Path parameters

Parameter | Mandatory | Type | Description
project_id | Yes | String | Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

check_running_task

+

No

+

Boolean

+

Whether to detect tasks (including initialization tasks) that are running in a dataset. The options are as follows:

+
  • true: Detect tasks (including initialization tasks) that are running in the dataset.

    +
  • false: Do not detect tasks (including initialization tasks) that are running in the dataset. (Default value)

    +
+

contain_versions

+

No

+

Boolean

+

Whether the dataset contains a version.

+

dataset_type

+

No

+

Integer

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

file_preview

+

No

+

Boolean

+

Whether a dataset supports preview when it is queried. The options are as follows:

+
  • true: Preview is supported and the list of four dataset files is returned.

    +
  • false: Preview is not supported. (Default value)

    +
+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence of the query. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

running_task_type

+

No

+

Integer

+

Type of the running tasks (including initialization tasks) to be detected. The options are as follows:

+
  • 0: auto labeling

    +
  • 1: pre-labeling

    +
  • 2: export

    +
  • 3: version switch

    +
  • 4: manifest file export

    +
  • 5: manifest file import

    +
  • 6: version publishing

    +
  • 7: auto grouping

    +
  • 10: one-click model deployment (default value)

    +
+

search_content

+

No

+

String

+

Fuzzy search keyword. By default, this parameter is left blank.

+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • dataset_name: Sort by dataset name.

    +
+

support_export

+

No

+

Boolean

+

Whether to filter datasets that can be exported only (including datasets of image classification, object detection, and custom format). If this parameter is left blank or the value is set to false, no filtering is performed. The options are as follows:

+
  • true: Filter datasets that can be exported only.

    +
  • false: Do not filter datasets that can be exported only. (Default value)

    +
+

train_evaluate_ratio

+

No

+

String

+

Version split ratio for dataset filtering. The numbers before and after the comma indicate the minimum and maximum split ratios, and the versions whose split ratios are within the range are filtered out, for example, 0.0,1.0. Note: If this parameter is left blank or unavailable, the system does not filter datasets based on the version split ratio by default.

+

version_format

+

No

+

Integer

+

Dataset version format for dataset filtering. This parameter is used to filter datasets that meet the filter criteria. The options are as follows:

+
  • 0: default format

    +
  • 1: CarbonData (supported only by table datasets)

    +
  • 2: CSV

    +
+

with_labels

+

No

+

Boolean

+

Whether to return dataset labels. The options are as follows:

+
  • true: Return label information.

    +
  • false: Do not return label information. (Default value)

    +
+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+
+
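The following is a minimal, illustrative sketch of combining the query parameters above when issuing this request with the Python requests library; it is not part of the API specification. It assumes token-based authentication through the X-Auth-Token header, and the endpoint, project ID, and token are placeholders.

# Minimal sketch: list object detection datasets page by page, returning their labels,
# using the query parameters described in Table 2 (placeholders for endpoint, project ID, token).
import requests

endpoint = "https://modelarts.example.com"   # placeholder endpoint
project_id = "your-project-id"               # placeholder project ID
headers = {"X-Auth-Token": "your-token"}     # placeholder IAM token

params = {
    "dataset_type": 1,      # 1: object detection
    "with_labels": "true",  # also return label information
    "limit": 20,
    "offset": 0,
    "sort_by": "create_time",
    "order": "desc",
}
resp = requests.get(f"{endpoint}/v2/{project_id}/datasets", headers=headers, params=params)
resp.raise_for_status()
body = resp.json()
print("total:", body["total_number"])
for ds in body["datasets"]:
    print(ds["dataset_id"], ds["dataset_name"], ds.get("total_sample_count"))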

Request Parameters

None

+
+

Response Parameters

Status code: 200

Table 3 Response body parameters

Parameter | Type | Description
datasets | Array of DatasetAndFilePreview objects | Dataset list queried by page.
total_number | Integer | Total number of datasets.
workspaceId | String | Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DatasetAndFilePreview

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Integer

+

Number of labeled samples in a dataset.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

content_labeling

+

Boolean

+

Whether to enable content labeling for the speech paragraph labeling dataset. This function is enabled by default.

+

create_time

+

Long

+

Time when a dataset is created.

+

current_version_id

+

String

+

Current version ID of a dataset.

+

current_version_name

+

String

+

Current version name of a dataset.

+

data_format

+

String

+

Data format.

+

data_sources

+

Array of DataSource objects

+

Data source list.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata in JSON format.

+

data_update_time

+

Long

+

Time when a sample and a label are updated.

+

data_url

+

String

+

Data path for training.

+

dataset_format

+

Integer

+

Dataset format. The options are as follows:

+
  • 0: file

    +
  • 1: table

    +
+

dataset_id

+

String

+

Dataset ID.

+

dataset_name

+

String

+

Dataset name.

+

dataset_tags

+

Array of strings

+

Key identifier list of a dataset, for example, ["Image","Object detection"].

+

dataset_type

+

Integer

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

dataset_version_count

+

Integer

+

Version number of a dataset.

+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Dataset description.

+

enterprise_project_id

+

String

+

Enterprise project ID.

+

exist_running_task

+

Boolean

+

Whether the dataset contains running (including initialization) tasks. The options are as follows:

+
  • true: The dataset contains running tasks.

    +
  • false: The dataset does not contain running tasks.

    +
+

exist_workforce_task

+

Boolean

+

Whether the dataset contains team labeling tasks. The options are as follows:

+
  • true: The dataset contains team labeling tasks.

    +
  • false: The dataset does not contain team labeling tasks.

    +
+

feature_supports

+

Array of strings

+

List of features supported by the dataset. Currently, only the value 0 is supported, indicating that the OBS file size is limited.

+

import_data

+

Boolean

+

Whether to import data. The options are as follows:

+
  • true: Import data.

    +
  • false: Do not import data.

    +
+

import_task_id

+

String

+

ID of an import task.

+

inner_annotation_path

+

String

+

Path for storing the labeling result of a dataset.

+

inner_data_path

+

String

+

Path for storing the internal data of a dataset.

+

inner_log_path

+

String

+

Path for storing internal logs of a dataset.

+

inner_task_path

+

String

+

Path for internal task of a dataset.

+

inner_temp_path

+

String

+

Path for storing internal temporary files of a dataset.

+

inner_work_path

+

String

+

Output directory of a dataset.

+

label_task_count

+

Integer

+

Number of labeling tasks.

+

labels

+

Array of Label objects

+

Dataset label list.

+

loading_sample_count

+

Integer

+

Number of loading samples.

+

managed

+

Boolean

+

Whether a dataset is hosted. The options are as follows:

+
  • true: The dataset is hosted.

    +
  • false: The dataset is not hosted.

    +
+

next_version_num

+

Integer

+

Number of next versions of a dataset.

+

running_tasks_id

+

Array of strings

+

ID list of running (including initialization) tasks.

+

samples

+

Array of AnnotationFile objects

+

Sample list.

+

schema

+

Array of Field objects

+

Schema list.

+

status

+

Integer

+

Dataset status. The options are as follows:

+
  • 0: creating dataset

    +
  • 1: normal dataset

    +
  • 2: deleting dataset

    +
  • 3: deleted dataset

    +
  • 4: abnormal dataset

    +
  • 5: synchronizing dataset

    +
  • 6: releasing dataset

    +
  • 7: dataset in version switching

    +
  • 8: importing dataset

    +
+

third_path

+

String

+

Third-party path.

+

total_sample_count

+

Integer

+

Total number of dataset samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples. For example, the total number of key frame images extracted from the video labeling dataset is that of subsamples.

+

unconfirmed_sample_count

+

Integer

+

Number of auto labeling samples to be confirmed.

+

update_time

+

Long

+

Time when a dataset is updated.

+

versions

+

Array of DatasetVersion objects

+

Dataset version information. Currently, only the current version information of a dataset is recorded.

+

work_path

+

String

+

Output dataset path, which is used to store output files such as label files. The path is an OBS path in the format of /Bucket name/File path. For example: /obs-bucket.

+

work_path_type

+

Integer

+

Type of the dataset output path. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
+

workforce_descriptor

+

WorkforceDescriptor object

+

Team labeling information.

+

workforce_task_count

+

Integer

+

Number of team labeling tasks of a dataset.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 DataSource

Parameter

+

Type

+

Description

+

data_path

+

String

+

Data source path.

+

data_type

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 6 SchemaMap

Parameter

+

Type

+

Description

+

dest_name

+

String

+

Name of the destination column.

+

src_name

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SourceInfo

Parameter

+

Type

+

Description

+

cluster_id

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

String

+

Name of an MRS cluster.

+

database_name

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

String

+

HDFS path of a table dataset.

+

ip

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
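To illustrate how Tables 5 to 7 nest together, the following is a minimal Python sketch of a hypothetical data_sources entry for a table dataset imported from GaussDB(DWS). All values are illustrative placeholders, not output from a real response.

# Hypothetical data_sources entry combining DataSource, SchemaMap, and
# SourceInfo (Tables 5 to 7); every value below is a placeholder.
data_source = {
    "data_type": 1,                      # 1: GaussDB(DWS)
    "with_column_header": True,          # first row contains column names
    "schema_maps": [
        {"src_name": "col_a", "dest_name": "feature_a"},
        {"src_name": "col_b", "dest_name": "label"},
    ],
    "source_info": {
        "ip": "192.0.2.10",              # DWS cluster address (placeholder)
        "port": "8000",
        "database_name": "demo_db",
        "table_name": "demo_table",
        "user_name": "dws_user",
        "user_password": "********",
    },
}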
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 Label

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
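As an illustration of Tables 8 and 9, the following minimal Python sketch shows a hypothetical label object that sets a color, a shortcut key, and a default shape for an object detection label. The values are placeholders chosen for illustration only.

# Hypothetical Label object (Tables 8 and 9); all values are illustrative.
label = {
    "name": "Cat",
    "type": 1,                                 # 1: object detection
    "property": {
        "@modelarts:color": "#3399FF",         # hexadecimal label color
        "@modelarts:shortcut": "D",            # keyboard shortcut
        "@modelarts:default_shape": "bndbox",  # rectangle by default
    },
    "attributes": [],                          # optional multi-dimensional attributes
}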
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 AnnotationFile

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a sample is created.

+

dataset_id

+

String

+

Dataset ID.

+

depth

+

Integer

+

Number of image sample channels.

+

file_Name

+

String

+

Sample name.

+

file_id

+

String

+

Sample ID.

+

file_type

+

String

+

File type.

+

height

+

Integer

+

Image sample height.

+

size

+

Long

+

Image sample size.

+

tags

+

Map<String,String>

+

Label information of a sample.

+

url

+

String

+

OBS address of the preview sample.

+

width

+

Integer

+

Image sample width.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 11 Field

Parameter

+

Type

+

Description

+

description

+

String

+

Schema description.

+

name

+

String

+

Schema name.

+

schema_id

+

Integer

+

Schema ID.

+

type

+

String

+

Schema value type.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 DatasetVersion

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added samples.

+

annotated_sample_count

+

Integer

+

Number of labeled samples in the version.

+

annotated_sub_sample_count

+

Integer

+

Number of labeled subsamples.

+

clear_hard_property

+

Boolean

+

Whether to clear hard example properties during release. The options are as follows:

+
  • true: Clear hard example properties. (Default value)

    +
  • false: Do not clear hard example properties.

    +
+

code

+

String

+

Status code of a preprocessing task such as rotation and cropping.

+

create_time

+

Long

+

Time when a version is created.

+

crop

+

Boolean

+

Whether to crop the image. This field is valid only for object detection datasets whose labeling boxes are rectangles. The options are as follows:

+
  • true: Crop the image.

    +
  • false: Do not crop the image. (Default value)

    +
+

crop_path

+

String

+

Path for storing cropped files.

+

crop_rotate_cache_path

+

String

+

Temporary directory for executing the rotation and cropping task.

+

data_path

+

String

+

Path for storing data.

+

data_statistics

+

Map<String,Object>

+

Sample statistics on a dataset, including the statistics on sample metadata in JSON format.

+

data_validate

+

Boolean

+

Whether data is validated by the validation algorithm before release. The options are as follows:

+
  • true: The data has been validated.

    +
  • false: The data has not been validated.

    +
+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

deletion_stats

+

Map<String,Integer>

+

Deletion reason statistics.

+

description

+

String

+

Description of a version.

+

export_images

+

Boolean

+

Whether to export images to the version output directory during release. The options are as follows:

+
  • true: Export images to the version output directory.

    +
  • false: Do not export images to the version output directory. (Default value)

    +
+

extract_serial_number

+

Boolean

+

Whether to parse the subsample number during release. The field is valid for the healthcare dataset. The options are as follows:

+
  • true: Parse the subsample number.

    +
  • false: Do not parse the subsample number. (Default value)

    +
+

include_dataset_data

+

Boolean

+

Whether to include the source data of a dataset during release. The options are as follows:

+
  • true: The source data of a dataset is included.

    +
  • false: The source data of a dataset is not included.

    +
+

is_current

+

Boolean

+

Whether the current dataset version is used. The options are as follows:

+
  • true: The current dataset version is used.

    +
  • false: The current dataset version is not used.

    +
+

label_stats

+

Array of LabelStats objects

+

Label statistics list of a released version.

+

label_type

+

String

+

Label type of a released version. The options are as follows:

+
  • multi: Multi-label samples are included.

    +
  • single: All samples are single-labeled.

    +
+

manifest_cache_input_path

+

String

+

Input path for the manifest file cache during version release.

+

manifest_path

+

String

+

Path for storing the manifest file of the released version.

+

message

+

String

+

Task information recorded during release (for example, error information).

+

modified_sample_count

+

Integer

+

Number of modified samples.

+

previous_annotated_sample_count

+

Integer

+

Number of labeled samples of parent versions.

+

previous_total_sample_count

+

Integer

+

Total number of samples in parent versions.

+

previous_version_id

+

String

+

Parent version ID.

+

processor_task_id

+

String

+

ID of a preprocessing task such as rotation and cropping.

+

processor_task_status

+

Integer

+

Status of a preprocessing task such as rotation and cropping. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
  • 5: timeout

    +
  • 6: deletion failed

    +
  • 7: stop failed

    +
+

remove_sample_usage

+

Boolean

+

Whether to clear the existing usage information of a dataset during release. The options are as follows:

+
  • true: Clear the existing usage information of a dataset. (Default value)

    +
  • false: Do not clear the existing usage information of a dataset.

    +
+

rotate

+

Boolean

+

Whether to rotate the image. The options are as follows:

+
  • true: Rotate the image.

    +
  • false: Do not rotate the image. (Default value)

    +
+

rotate_path

+

String

+

Path for storing the rotated file.

+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

status

+

Integer

+

Status of a dataset version. The options are as follows:

+
  • 0: creating

    +
  • 1: running

    +
  • 2: deleting

    +
  • 3: deleted

    +
  • 4: error

    +
+

tags

+

Array of strings

+

Key identifier list of the dataset. When a labeling task releases a version, the labeling type is used as the default tag. For example, ["Image","Object detection"].

+

task_type

+

Integer

+

Labeling task type of the released version, which is the same as the dataset type.

+

total_sample_count

+

Integer

+

Total number of version samples.

+

total_sub_sample_count

+

Integer

+

Total number of subsamples generated from the parent samples.

+

train_evaluate_sample_ratio

+

String

+

Ratio for splitting labeled samples into training and validation sets during version release. The default value is 1.00, indicating that all labeled samples are allocated to the training set.

+

update_time

+

Long

+

Time when a version is updated.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

with_column_header

+

Boolean

+

Whether the first row in the released CSV file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the released CSV file is a column name.

    +
  • false: The first row in the released CSV file is not a column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 13 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 15 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 16 WorkforceDescriptor

Parameter

+

Type

+

Description

+

current_task_id

+

String

+

ID of a team labeling task.

+

current_task_name

+

String

+

Name of a team labeling task.

+

reject_num

+

Integer

+

Number of rejected samples.

+

repetition

+

Integer

+

Number of persons who label each sample. The minimum value is 1.

+

is_synchronize_auto_labeling_data

+

Boolean

+

Whether to synchronously update auto labeling data. The options are as follows:

+
  • true: Update auto labeling data synchronously.

    +
  • false: Do not update auto labeling data synchronously.

    +
+

is_synchronize_data

+

Boolean

+

Whether to synchronize updated data, such as uploading files, synchronizing data sources, and assigning imported unlabeled files to team members. The options are as follows:

+
  • true: Synchronize updated data to team members.

    +
  • false: Do not synchronize updated data to team members.

    +
+

workers

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 17 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying the Dataset List

+
GET https://{endpoint}/v2/{project_id}/datasets?offset=0&limit=10&sort_by=create_time&order=desc&dataset_type=0&file_preview=true
+
+
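For reference, the following is a minimal Python sketch of issuing the same request with the third-party requests library, assuming token-based authentication with the X-Auth-Token header (see Calling APIs). The endpoint, project ID, and token values are placeholders that must be replaced with real values.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint
PROJECT_ID = "your-project-id"               # placeholder project ID
TOKEN = "your-iam-token"                     # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets"
params = {
    "offset": 0,
    "limit": 10,
    "sort_by": "create_time",
    "order": "desc",
    "dataset_type": 0,        # dataset type filter, as in the example request above
    "file_preview": "true",
}
resp = requests.get(url, params=params, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()

body = resp.json()
print("total_number:", body["total_number"])
for ds in body["datasets"]:
    print(ds["dataset_id"], ds["dataset_name"], "status:", ds["status"])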

Example Responses

Status code: 200

+

OK

+
{
+  "total_number" : 1,
+  "datasets" : [ {
+    "dataset_id" : "gfghHSokody6AJigS5A",
+    "dataset_name" : "dataset-f9e8",
+    "dataset_type" : 0,
+    "data_format" : "Default",
+    "next_version_num" : 4,
+    "status" : 1,
+    "data_sources" : [ {
+      "data_type" : 0,
+      "data_path" : "/test-obs/classify/input/catDog4/"
+    } ],
+    "create_time" : 1605690595404,
+    "update_time" : 1605690595404,
+    "description" : "",
+    "current_version_id" : "54IXbeJhfttGpL46lbv",
+    "current_version_name" : "V003",
+    "total_sample_count" : 10,
+    "annotated_sample_count" : 10,
+    "work_path" : "/test-obs/classify/output/",
+    "inner_work_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/",
+    "inner_annotation_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/annotation/",
+    "inner_data_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/data/",
+    "inner_log_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/logs/",
+    "inner_temp_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/temp/",
+    "inner_task_path" : "/test-obs/classify/output/dataset-f9e8-gfghHSokody6AJigS5A/task/",
+    "work_path_type" : 0,
+    "workspace_id" : "0",
+    "enterprise_project_id" : "0",
+    "exist_running_task" : false,
+    "exist_workforce_task" : false,
+    "running_tasks_id" : [ ],
+    "workforce_task_count" : 0,
+    "feature_supports" : [ "0" ],
+    "managed" : false,
+    "import_data" : false,
+    "ai_project" : "default-ai-project",
+    "label_task_count" : 1,
+    "dataset_format" : 0,
+    "dataset_version" : "v1",
+    "content_labeling" : true,
+    "samples" : [ {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/15.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=tuUo9jl6lqoMKAwNBz5g8dxO%2FdE%3D",
+      "create_time" : 1605690596035
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/8.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=NITOdBnkUXtdnKuEgDzZpkQzNfM%3D",
+      "create_time" : 1605690596046
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/9.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=%2BwUo1BL38%2F2d7p7anPi4fNzm1VU%3D",
+      "create_time" : 1605690596050
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/7.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=tOrHfcWo%2FEJ0wRzfi1M5Wk2MrXg%3D",
+      "create_time" : 1605690596043
+    } ],
+    "files" : [ {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/15.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=tuUo9jl6lqoMKAwNBz5g8dxO%2FdE%3D",
+      "create_time" : 1605690596035
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/8.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=NITOdBnkUXtdnKuEgDzZpkQzNfM%3D",
+      "create_time" : 1605690596046
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/9.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=%2BwUo1BL38%2F2d7p7anPi4fNzm1VU%3D",
+      "create_time" : 1605690596050
+    }, {
+      "url" : "https://test-obs.obs.xxx.com:443/classify/input/catDog4/7.jpg?AccessKeyId=vprCCTY1NmHudlvC0bXr&Expires=1606100112&Signature=tOrHfcWo%2FEJ0wRzfi1M5Wk2MrXg%3D",
+      "create_time" : 1605690596043
+    } ]
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListImportTasks.html b/modelarts/api-ref/ListImportTasks.html new file mode 100644 index 00000000..5c139b30 --- /dev/null +++ b/modelarts/api-ref/ListImportTasks.html @@ -0,0 +1,591 @@ + + +

Querying the Dataset Import Task List

+

Function

This API is used to query the dataset import task list by page.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/import-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

import_tasks

+

Array of ImportTaskStatusResp objects

+

List of import tasks.

+

total_count

+

Integer

+

Number of import tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 ImportTaskStatusResp

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Long

+

Number of labeled samples.

+

create_time

+

Long

+

Time when a task is created.

+

data_source

+

DataSource object

+

Data source.

+

dataset_id

+

String

+

Dataset ID.

+

elapsed_time

+

Long

+

Task running time, in seconds.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

file_statistics

+

FileCopyProgress object

+

Progress of file copy.

+

finished_file_count

+

Long

+

Number of files that have been transferred.

+

finished_file_size

+

Long

+

Size of the file that has been transferred, in bytes.

+

import_path

+

String

+

OBS path or manifest path to be imported.

+
  • When importing a manifest file, ensure that the path is accurate to the manifest file.

    +
  • When importing a directory, the dataset type can only be image classification, object detection, text classification, or sound classification.

    +
+

import_type

+

Integer

+

Import mode. The options are as follows:

+
  • 0: Import by directory.

    +
  • 1: Import by manifest file.

    +
+

imported_sample_count

+

Long

+

Number of imported samples.

+

imported_sub_sample_count

+

Long

+

Number of imported subsamples.

+

processor_task_id

+

String

+

ID of a preprocessing task.

+

processor_task_status

+

Integer

+

Status of a preprocessing task.

+

status

+

String

+

Status of an import task. The options are as follows:

+
  • QUEUING: queuing

    +
  • STARTING: execution started

    +
  • RUNNING: running

    +
  • COMPLETED: completed

    +
  • FAILED: failed

    +
  • NOT_EXIST: not found

    +
+

task_id

+

String

+

Task ID.

+

total_file_count

+

Long

+

Total number of files.

+

total_file_size

+

Long

+

Total file size, in bytes.

+

total_sample_count

+

Long

+

Total number of samples.

+

total_sub_sample_count

+

Long

+

Total number of subsamples generated from the parent samples.

+

unconfirmed_sample_count

+

Long

+

Number of samples to be confirmed.

+

update_ms

+

Long

+

Time when a task is updated.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 DataSource

Parameter

+

Type

+

Description

+

data_path

+

String

+

Data source path.

+

data_type

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 6 SchemaMap

Parameter

+

Type

+

Description

+

dest_name

+

String

+

Name of the destination column.

+

src_name

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SourceInfo

Parameter

+

Type

+

Description

+

cluster_id

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

String

+

Name of an MRS cluster.

+

database_name

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

String

+

HDFS path of a table dataset.

+

ip

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 FileCopyProgress

Parameter

+

Type

+

Description

+

file_num_finished

+

Long

+

Number of files that have been transferred.

+

file_num_total

+

Long

+

Total number of files.

+

file_size_finished

+

Long

+

Size of the file that has been transferred, in bytes.

+

file_size_total

+

Long

+

Total file size, in bytes.

+
+
+
+

Example Requests

Obtaining the Dataset Import Task List

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/import-tasks
+
+
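For reference, the following is a minimal Python sketch that pages through all import tasks of a dataset with the third-party requests library, assuming token-based authentication. The endpoint, project ID, dataset ID, and token are placeholders; note that offset is documented as the start page of the paging list, so it is incremented by 1 per page.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint
PROJECT_ID = "your-project-id"               # placeholder project ID
DATASET_ID = "your-dataset-id"               # placeholder dataset ID
TOKEN = "your-iam-token"                     # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/import-tasks"
headers = {"X-Auth-Token": TOKEN}

offset, limit, tasks = 0, 100, []
while True:
    resp = requests.get(url, params={"offset": offset, "limit": limit}, headers=headers)
    resp.raise_for_status()
    body = resp.json()
    tasks.extend(body["import_tasks"])
    if len(tasks) >= body["total_count"] or not body["import_tasks"]:
        break
    offset += 1   # next page

for t in tasks:
    print(t["task_id"], t["status"], "imported:", t.get("imported_sample_count"))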

Example Responses

Status code: 200

+

OK

+
{
+  "total_count" : 1,
+  "import_tasks" : [ {
+    "status" : "COMPLETED",
+    "task_id" : "gfghHSokody6AJigS5A_RHJ1zOkIoI3Nzwxj8nh",
+    "dataset_id" : "gfghHSokody6AJigS5A",
+    "import_path" : "obs://test-obs/daoLu_images/cat-dog/",
+    "import_type" : 0,
+    "total_sample_count" : 20,
+    "imported_sample_count" : 20,
+    "annotated_sample_count" : 20,
+    "total_sub_sample_count" : 0,
+    "imported_sub_sample_count" : 0,
+    "total_file_size" : 0,
+    "finished_file_count" : 0,
+    "finished_file_size" : 0,
+    "total_file_count" : 0,
+    "create_time" : 1606114833874,
+    "elapsed_time" : 2
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListLabels.html b/modelarts/api-ref/ListLabels.html new file mode 100644 index 00000000..d9cc1783 --- /dev/null +++ b/modelarts/api-ref/ListLabels.html @@ -0,0 +1,335 @@ + + +

Querying the Dataset Label List

+

Function

This API is used to query all labels of a dataset.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

version_id

+

No

+

String

+

Dataset version ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

labels

+

Array of Label objects

+

Label list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 Label

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 6 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Example Requests

Querying All Labels of a Dataset

+
GET https://{endpoint}/v2/{project_id}/datasets/WxCREuCkBSAlQr9xrde/data-annotations/labels
+
+
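For reference, the following is a minimal Python sketch of retrieving all labels of a dataset and printing their names, types, and colors, using the third-party requests library and token-based authentication. The endpoint, project ID, dataset ID, and token are placeholders.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint
PROJECT_ID = "your-project-id"               # placeholder project ID
DATASET_ID = "your-dataset-id"               # placeholder dataset ID
TOKEN = "your-iam-token"                     # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/data-annotations/labels"
resp = requests.get(url, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()

for label in resp.json()["labels"]:
    color = label.get("property", {}).get("@modelarts:color", "")
    print(f"name={label['name']} type={label['type']} color={color}")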

Example Responses

Status code: 200

+

OK

+
{
+  "labels" : [ {
+    "name" : "Cat",
+    "type" : 1,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  }, {
+    "name" : "Dog",
+    "type" : 1,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListProcessorTaskVersionResults.html b/modelarts/api-ref/ListProcessorTaskVersionResults.html new file mode 100644 index 00000000..f176d8db --- /dev/null +++ b/modelarts/api-ref/ListProcessorTaskVersionResults.html @@ -0,0 +1,293 @@ + + +

Querying the Result of a Data Processing Task Version

+

Function

This API is used to query the result of a data processing task version.

+
+

URI

GET /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/results

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+

version_id

+

Yes

+

String

+

Version ID of a data processing task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

process_parameter

+

No

+

String

+

Image resizing setting, which is the same as the OBS image resizing setting. For example, image/resize,m_lfit,h_200 indicates that the target image is resized proportionally with the height set to 200 pixels.

+

result_property

+

No

+

String

+

Sample status. If this parameter is not delivered or is set to -1, all samples are returned by default. The options are as follows:

+
  • -1: all

    +
  • 0: reserve

    +
  • 1: modify

    +
  • 2: delete

    +
  • 3: add

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

count

+

Integer

+

Total number of results.

+

has_more

+

Boolean

+

Whether all results are returned.

+

results

+

Array of DescProcessorTaskVersionResultsResp objects

+

Results returned on the current page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescProcessorTaskVersionResultsResp

Parameter

+

Type

+

Description

+

new_source

+

String

+

Address of the sample after processing.

+

origin_source

+

String

+

Source address of a sample.

+

result_description

+

Array of objects

+

Processing description of a sample.

+

result_property

+

Integer

+

Processing status of a sample. The options are as follows:

+
  • -1: all

    +
  • 0: reserve

    +
  • 1: modify

    +
  • 2: delete

    +
  • 3: add

    +
+

sample_id

+

String

+

Sample ID, which is generated from the MD5 hash of the OBS path.

+

signed_new_source

+

String

+

Signed URL of the processed sample.

+

signed_origin_source

+

String

+

Signed URL of the source sample.

+

version_id

+

String

+

Version ID of a data processing task.

+
+
+
+

Example Requests

Querying the Result of a Data Processing Task Version

+
GET https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/results?offset=0&limit=14&result_property=-1
+
+
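For reference, the following is a minimal Python sketch of the same request with the third-party requests library and token-based authentication. As a variant of the example above, it sets result_property to 3 to retrieve only the samples added by the task, as described in Table 2. The endpoint, project ID, task ID, version ID, and token are placeholders.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint
PROJECT_ID = "your-project-id"               # placeholder project ID
TASK_ID = "your-task-id"                     # placeholder data processing task ID
VERSION_ID = "your-task-version-id"          # placeholder task version ID
TOKEN = "your-iam-token"                     # placeholder IAM token

url = (f"{ENDPOINT}/v2/{PROJECT_ID}/processor-tasks/"
       f"{TASK_ID}/versions/{VERSION_ID}/results")
params = {"offset": 0, "limit": 14, "result_property": 3}   # 3: added samples
resp = requests.get(url, params=params, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()

body = resp.json()
print("count:", body["count"], "has_more:", body["has_more"])
for r in body["results"]:
    print(r["sample_id"], "->", r["new_source"])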

Example Responses

Status code: 200

+

OK

+
{
+  "count" : 3,
+  "results" : [ {
+    "sample_id" : "0ac9aee517acbef965f547bb5a3268af",
+    "version_id" : "7PoIhUzSk92OglQrTxr",
+    "origin_source" : "s3://test-obs/classify/data/cat-dog/8.jpg",
+    "new_source" : "obs://test-obs/classify/output/7PoIhUzSk92OglQrTxr/Data/8.jpg",
+    "signed_origin_source" : "https://test-obs.obs.xxx.com:443/classify/data/cat-dog/8.jpg?AccessKeyId=I5IZ9R29S1W9WACNJJ0J&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jQ5yFSR1TfKXjeawutgyAnMrdoGNaSkeSBOKK...&Signature=GbnVBZ5JxUWhiAulUzpV9TD835Q%3D",
+    "signed_new_source" : "https://test-obs.obs.xxx.com:443/classify/output/7PoIhUzSk92OglQrTxr/Data/8.jpg?AccessKeyId=I5IZ9R29S1W9WACNJJ0J&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jQ5yFSR1TfKXjeawutgyAnMrdoGNaSkeSBOKK...&Signature=Q5stFFFfVx9kykR49S8PPBlFqe0%3D",
+    "result_property" : 3,
+    "result_description" : [ [ "use AddNoise augmentation" ], [ "result_description to translate" ] ]
+  }, {
+    "sample_id" : "196799b2d731727b1800b70851fc60b0",
+    "version_id" : "7PoIhUzSk92OglQrTxr",
+    "origin_source" : "s3://test-obs/classify/data/cat-dog/2.jpg",
+    "new_source" : "obs://test-obs/classify/output/7PoIhUzSk92OglQrTxr/Data/2.jpg",
+    "signed_origin_source" : "https://test-obs.obs.xxx.com:443/classify/data/cat-dog/2.jpg?AccessKeyId=QEKFB6WFGZWC2YUP2JPK&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jdUZcXVRCNOHjWNNWiuu2E9Q...&Signature=6yvhJufi5kQO6UjToQgR0ztP%2Bis%3D",
+    "signed_new_source" : "https://test-obs.obs.xxx.com:443/classify/output/7PoIhUzSk92OglQrTxr/Data/2.jpg?AccessKeyId=QEKFB6WFGZWC2YUP2JPK&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jdUZcXVRCNOHjWNNWiuu2E...&Signature=Zr%2BAEBDJwKS%2FpS6vzxK7MSzjblA%3D",
+    "result_property" : 3,
+    "result_description" : [ [ "use AddNoise augmentation" ], [ "result_description to translate" ] ]
+  }, {
+    "sample_id" : "1dc7351b78dcb24850f71d20267edd0e",
+    "version_id" : "7PoIhUzSk92OglQrTxr",
+    "origin_source" : "s3://test-obs/classify/data/cat-dog/import_1603716822103/test-obs/classify/output/E8ZLnTQvPBVtbZ6QsAp/Data/13.jpg",
+    "new_source" : "obs://test-obs/classify/output/7PoIhUzSk92OglQrTxr/Data/13.jpg",
+    "signed_origin_source" : "https://test-obs.obs.xxx.com:443/classify/data/cat-dog/import_1603716822103/test-obs/classify/output/E8ZLnTQvPBVtbZ6QsAp/Data/13.jpg?AccessKeyId=W6TSX9F1BRS8AUBDYKPY&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jVVFic8iObvdqZLuWxyIHlAjbJPCTX...&Signature=WV73XnoMkBDoSuVe%2BFSUaP1GxKw%3D",
+    "signed_new_source" : "https://test-obs.obs.xxx.com:443/classify/output/7PoIhUzSk92OglQrTxr/Data/13.jpg?AccessKeyId=W6TSX9F1BRS8AUBDYKPY&Expires=1606380154&x-obs-security-token=gQpjbi1ub3J0aC03jVVFic8iObvdqZLuWxyIHlAjbJPCTXeYXkQh8z...&Signature=%2FYsgrsbyrz5ZQrndrQ9QyoHluYQ%3D",
+    "result_property" : 3,
+    "result_description" : [ [ "use AddNoise augmentation" ], [ "result_description to translate" ] ]
+  } ],
+  "has_more" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListProcessorTaskVersions.html b/modelarts/api-ref/ListProcessorTaskVersions.html new file mode 100644 index 00000000..a5fd694f --- /dev/null +++ b/modelarts/api-ref/ListProcessorTaskVersions.html @@ -0,0 +1,585 @@ + + +

Querying the Version List of a Data Processing Task

+

Function

This API is used to query the version list of a data processing task.

+
+

URI

GET /v2/{project_id}/processor-tasks/{task_id}/versions

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 100.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence of the query. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • version_name: Sort by task version name.

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

count

+

Integer

+

Total number of versions.

+

versions

+

Array of DescTaskVersionResp objects

+

Version list of a data processing task queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescTaskVersionResp

Parameter

+

Type

+

Description

+

add_sample_count

+

Integer

+

Number of added images after processing.

+

create_time

+

Long

+

Time when a data processing task is created.

+

deleted_sample_count

+

Integer

+

Number of deleted images after processing.

+

description

+

String

+

Version description of a data processing task.

+

duration_seconds

+

Integer

+

Running time of a data processing task, in seconds.

+

inputs

+

Array of ProcessorDataSource objects

+

Input channel of a data processing task.

+

modified_sample_count

+

Integer

+

Number of modified images after processing.

+

origin_sample_count

+

Integer

+

Number of images before processing.

+

status

+

Integer

+

Status of a data processing task. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
+

task_id

+

String

+

ID of a data processing task.

+

task_version_id

+

String

+

Version ID of a data processing task.

+

template

+

TemplateParam object

+

Algorithm template, such as the algorithm ID and parameters.

+

unmodified_sample_count

+

Integer

+

Number of unmodified images after processing.

+

update_time

+

Long

+

Time when a data processing task is updated.

+

version_name

+

String

+

Version name of a data processing task.

+

work_path

+

WorkPath object

+

Output channel of a data processing task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 ProcessorDataSource

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

source

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 6 TemplateParam

Parameter

+

Type

+

Description

+

id

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

String

+

Template name.

+

operator_params

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 OperatorParam

Parameter

+

Type

+

Description

+

advanced_params_switch

+

Boolean

+

Advanced parameter switch.

+

id

+

String

+

ID of an operator.

+

name

+

String

+

Name of an operator.

+

params

+

Object

+

Operator parameters. The parameter type is map<string,object>. Currently, object can only be of type Boolean, Integer, Long, String, List, or Map<String,String>. For the two special scenarios of object detection and image classification in a data preprocessing task, the value of task_type is object_detection or image_classification.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 WorkPath

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

output_path

+

String

+

Output path.

+

path

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
+

type

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Example Requests

Querying the Version List of a Data Processing Task

+
GET https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions?offset=0&limit=5
+
+
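For reference, the following is a minimal Python sketch of listing the versions of a data processing task sorted by creation time, using the third-party requests library and token-based authentication. The endpoint, project ID, task ID, and token are placeholders.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint
PROJECT_ID = "your-project-id"               # placeholder project ID
TASK_ID = "your-task-id"                     # placeholder data processing task ID
TOKEN = "your-iam-token"                     # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/processor-tasks/{TASK_ID}/versions"
params = {"offset": 0, "limit": 5, "sort_by": "create_time", "order": "desc"}
resp = requests.get(url, params=params, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()

for v in resp.json()["versions"]:
    print(v["version_name"], "status:", v["status"],
          "duration(s):", v.get("duration_seconds"))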

Example Responses

Status code: 200

+

OK

+
{
+  "count" : 2,
+  "versions" : [ {
+    "task_version_id" : "qSaudx2sbPvthHygckA",
+    "task_id" : "kM7j9TSa611ZzBThzSr",
+    "version_name" : "V002",
+    "description" : "",
+    "status" : 0,
+    "create_time" : 1606377874450,
+    "inputs" : [ {
+      "type" : "DATASET",
+      "source" : "PYc9H2HGv5BJNwBGXyK",
+      "version_id" : "Osc8SZ7TZStiRV4vYkZ",
+      "name" : "dataset-test",
+      "version_name" : "V0010"
+    } ],
+    "work_path" : {
+      "type" : "DATASET",
+      "path" : "PYc9H2HGv5BJNwBGXyK",
+      "name" : "dataset-test",
+      "version_name" : "V0011",
+      "output_path" : "/test-obs/classify/output/qSaudx2sbPvthHygckA/"
+    },
+    "template" : {
+      "id" : "sys_data_validation",
+      "name" : "name to translate",
+      "operator_params" : [ {
+        "name" : "MetaValidation",
+        "advanced_params_switch" : false,
+        "params" : {
+          "task_type" : "image_classification",
+          "dataset_type" : "manifest",
+          "source_service" : "select",
+          "filter_func" : "data_validation_select",
+          "image_max_width" : "1920",
+          "image_max_height" : "1920",
+          "total_status" : "[0,1,2]"
+        }
+      } ]
+    },
+    "duration_seconds" : 151
+  }, {
+    "task_version_id" : "YHFWU18zXuNbqxtzegG",
+    "task_id" : "kM7j9TSa611ZzBThzSr",
+    "version_name" : "V001",
+    "description" : "",
+    "status" : 2,
+    "create_time" : 1606375407276,
+    "inputs" : [ {
+      "type" : "DATASET",
+      "source" : "PYc9H2HGv5BJNwBGXyK",
+      "version_id" : "yoJ5ssClpNlOrsjjFDa",
+      "name" : "dataset-test",
+      "version_name" : "V009"
+    } ],
+    "work_path" : {
+      "type" : "DATASET",
+      "path" : "PYc9H2HGv5BJNwBGXyK",
+      "name" : "dataset-test",
+      "version_id" : "Osc8SZ7TZStiRV4vYkZ",
+      "version_name" : "V0010",
+      "output_path" : "/test-obs/classify/output/YHFWU18zXuNbqxtzegG/"
+    },
+    "template" : {
+      "id" : "sys_data_validation",
+      "name" : "name to translate",
+      "operator_params" : [ {
+        "name" : "MetaValidation",
+        "advanced_params_switch" : false,
+        "params" : {
+          "task_type" : "image_classification",
+          "dataset_type" : "manifest",
+          "source_service" : "select",
+          "filter_func" : "data_validation_select",
+          "image_max_width" : "1920",
+          "image_max_height" : "1920",
+          "total_status" : "[0,1,2]"
+        }
+      } ]
+    },
+    "duration_seconds" : 812,
+    "origin_sample_count" : 18,
+    "add_sample_count" : 0,
+    "modified_sample_count" : 0,
+    "unmodified_sample_count" : 18,
+    "deleted_sample_count" : 0
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListProcessorTasks.html b/modelarts/api-ref/ListProcessorTasks.html new file mode 100644 index 00000000..9c7a5dfc --- /dev/null +++ b/modelarts/api-ref/ListProcessorTasks.html @@ -0,0 +1,655 @@ + + +

Querying the List of a Processing Task

+

Function

This API is used to query the list of processing tasks, including feature analysis tasks and data processing tasks. You can specify the task_type parameter to query tasks of a specific type.

+ + +
+

URI

GET /v2/{project_id}/processor-tasks

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence of the query. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

query_current

+

No

+

Boolean

+

Whether to query only the latest tasks of the dataset version. The options are as follows:

+
  • true: Query only the latest tasks of the dataset version.

    +
  • false: Query all tasks of the dataset version. (Default value)

    +
+

return_result

+

No

+

Boolean

+

Whether to return the task result. The options are as follows:

+
  • true: Return the task result. (Default value)

    +
  • false: Do not return the task result.

    +
+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • name: Sort by task name.

    +
  • duration_seconds: Sort by running time.

    +
+

source

+

No

+

String

+

Data source path of the query. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

source_type

+

No

+

String

+

Data source type of the query. If this parameter is not specified, all data sources are queried by default. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

status

+

No

+

Integer

+

Task status of the query. If this parameter is not specified, tasks in all states are queried by default. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
+

task_name

+

No

+

String

+

Fuzzy search keyword.

+

task_type

+

No

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

version_id

+

No

+

Array

+

List of version IDs of the dataset to query.

+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

count

+

Integer

+

Total number of data processing tasks.

+

tasks

+

Array of DescribeProcessorTaskResp objects

+

Data processing task list queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescribeProcessorTaskResp

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a data processing task is created.

+

data_source

+

ProcessorDataSource object

+

Input of a data processing task. Either this parameter or inputs is delivered.

+

description

+

String

+

Description of a data processing task.

+

duration_seconds

+

Integer

+

Running time of data processing, in seconds.

+

error_msg

+

String

+

Error message. This field is displayed when the value of status is 3.

+

inputs

+

Array of ProcessorDataSource objects

+

Input channel list of a data processing task. Either this parameter or data_source is delivered.

+

is_current

+

Boolean

+

Whether the current task is the latest task of its type for this dataset version.

+

name

+

String

+

Name of a data processing task.

+

result

+

Object

+

Output result of a data processing task. This field is displayed when status is set to 2 and is valid for a feature analysis task.

+

status

+

Integer

+

Status of a data processing task. The options are as follows:

+
  • 0: initialized

    +
  • 1: running

    +
  • 2: completed

    +
  • 3: failed

    +
  • 4: stopped

    +
+

task_id

+

String

+

ID of a data processing task.

+

template

+

TemplateParam object

+

Data processing template, such as the algorithm ID and parameters.

+

version_count

+

Integer

+

Number of versions of a data processing task.

+

version_id

+

String

+

Dataset version ID corresponding to a data processing task.

+

version_name

+

String

+

Dataset version name corresponding to a data processing task.

+

work_path

+

WorkPath object

+

Working directory of a data processing task.

+

workspace_id

+

String

+

Workspace ID of a data processing task. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 ProcessorDataSource

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

source

+

String

+

Data source path. The options are as follows:

+
  • If type is set to OBS, source is an OBS path.

    +
  • If type is set to TASK, source is a task ID.

    +
  • If type is set to DATASET, source is a dataset ID.

    +
  • If type is set to CUSTOM and the API is called by resource tenants, set source to the project_id of the actual user. Otherwise, this field is left blank.

    +
+

type

+

String

+

Data source type. The options are as follows:

+
  • OBS: Data obtained from OBS

    +
  • TASK: Data processing task

    +
  • DATASET: Dataset

    +
  • CUSTOM: Data called by resource tenants

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Dataset version name.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 6 TemplateParam

Parameter

+

Type

+

Description

+

id

+

String

+

Task type, that is, ID of a data processing template. The options are as follows:

+
  • sys_data_analyse: feature analysis

    +
  • sys_data_cleaning: data cleansing

    +
  • sys_data_augmentation: data augmentation

    +
  • sys_data_validation: data validation

    +
  • sys_data_selection: data selection

    +
+

name

+

String

+

Template name.

+

operator_params

+

Array of OperatorParam objects

+

Operator parameter list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 OperatorParam

Parameter

+

Type

+

Description

+

advanced_params_switch

+

Boolean

+

Advanced parameter switch.

+

id

+

String

+

ID of an operator.

+

name

+

String

+

Name of an operator.

+

params

+

Object

+

Operator parameter. The parameter type is map<string,object>. Currently, object supports only the Boolean, Integer, Long, String, List, and Map<String,String> types. For the two special scenarios of object detection and image classification in a data preprocessing task, set task_type to object_detection or image_classification.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 WorkPath

Parameter

+

Type

+

Description

+

name

+

String

+

Dataset name.

+

output_path

+

String

+

Output path.

+

path

+

String

+

Working path. The options are as follows:

+
  • If type is set to OBS, path is an OBS path.

    +
  • If type is set to DATASET, path is a dataset ID.

    +
+

type

+

String

+

Type of a working path. The options are as follows:

+
  • OBS: OBS path

    +
  • DATASET: dataset

    +
+

version_id

+

String

+

Version of a dataset.

+

version_name

+

String

+

Name of a dataset version. The value can contain 0 to 32 characters. Only digits, letters, underscores (_), and hyphens (-) are allowed.

+
+
+
+

Example Requests

Query historical data validation tasks of a specified dataset.

+
GET https://{endpoint}/v2/{project_id}/processor-tasks?offset=0&limit=10&sort_by=create_time&order=desc&source_type=DATASET&source=qjHAs14pRu4n2so1Qlb&task_type=sys_data_validation&return_result=false
+
+
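For reference, a minimal Python sketch of this call is shown below. It assumes token-based authentication (the X-Auth-Token header) as described in the Authentication section; the endpoint, project ID, and token values are placeholders, and the query parameters mirror the example request above.

import requests

ENDPOINT = "https://{endpoint}"        # placeholder: your region's ModelArts endpoint
PROJECT_ID = "{project_id}"            # placeholder: your project ID
TOKEN = "{token}"                      # placeholder: IAM token for X-Auth-Token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/processor-tasks"
params = {
    "offset": 0,
    "limit": 10,
    "sort_by": "create_time",
    "order": "desc",
    "source_type": "DATASET",
    "source": "qjHAs14pRu4n2so1Qlb",   # dataset ID from the example above
    "task_type": "sys_data_validation",
    "return_result": "false",
}
response = requests.get(url, headers={"X-Auth-Token": TOKEN}, params=params)
response.raise_for_status()
body = response.json()
# Table 3 documents the body as count plus a tasks array.
for task in body.get("tasks", []):
    print(task["task_id"], task["name"], task["status"])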

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "SSzH9AdmHTvIBeihArb",
+  "name" : "PRE-6c83",
+  "description" : "test",
+  "inputs" : [ {
+    "type" : "DATASET",
+    "source" : "qjHAs14pRu4n2so1Qlb",
+    "version_id" : "cUELhTAYGIR36YpTE5Y",
+    "name" : "dataset-dba1",
+    "version_name" : "V001"
+  } ],
+  "work_path" : {
+    "type" : "DATASET",
+    "path" : "qjHAs14pRu4n2so1Qlb",
+    "name" : "dataset-dba1",
+    "version_name" : "V002",
+    "output_path" : "/test-lxm/data-out/EnyHCFzjTFY20U3sYSE/"
+  },
+  "template" : {
+    "id" : "sys_data_validation",
+    "name" : "data validation template name",
+    "operator_params" : [ {
+      "name" : "MetaValidation",
+      "advanced_params_switch" : false,
+      "params" : {
+        "task_type" : "image_classification",
+        "dataset_type" : "manifest",
+        "source_service" : "select",
+        "filter_func" : "data_validation_select",
+        "image_max_width" : "-1",
+        "image_max_height" : "-1",
+        "total_status" : "[0,1,2]"
+      }
+    } ]
+  },
+  "status" : 2,
+  "duration_seconds" : 277,
+  "create_time" : 1614245065569,
+  "workspace_id" : "0",
+  "version_count" : 1,
+  "ai_project" : ""
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListSamples.html b/modelarts/api-ref/ListSamples.html new file mode 100644 index 00000000..cb727b75 --- /dev/null +++ b/modelarts/api-ref/ListSamples.html @@ -0,0 +1,968 @@ + + +

Querying the Sample List

+

Function

This API is used to query the sample list by page.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

email

+

No

+

String

+

Email address of a labeling team member.

+

high_score

+

No

+

String

+

Upper confidence limit. The default value is 1.

+

label_name

+

No

+

String

+

Label name.

+

label_type

+

No

+

Integer

+

Labeling type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+

low_score

+

No

+

String

+

Lower confidence limit. The default value is 0.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence of the query. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

preview

+

No

+

Boolean

+

Whether to support preview. The options are as follows:

+
  • true: Preview is supported.

    +
  • false: Preview is not supported.

    +
+

process_parameter

+

No

+

String

+

Image resizing setting, which is the same as the OBS image resizing setting. For example, image/resize,m_lfit,h_200 indicates that the target image is resized proportionally with the height set to 200 pixels.

+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_type

+

No

+

Integer

+

Sample file type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: audio

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format (default value)

    +
+

search_conditions

+

No

+

String

+

Multi-dimensional search condition after URL encoding. The relationship between multiple search conditions is AND.

+

version_id

+

No

+

String

+

Dataset version ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

sample_count

+

Integer

+

Number of samples.

+

samples

+

Array of DescribeSampleResp objects

+

Sample list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescribeSampleResp

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about hard examples, including the description, cause, and handling suggestion.

+

labelers

+

Array of Worker objects

+

List of labelers to whom the sample is assigned. For team labeling, this field records the team members to whom the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, that is, the time when the sample file in OBS was last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, both the X and Y coordinates of the first point must be smaller than those of the second point. (A construction sketch follows this table.)

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons why the sample is a hard example, which is a default attribute. Separate multiple hard example reason IDs with hyphens (-), for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
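To make the shape attributes concrete, the sketch below builds a single object-detection label entry with a bndbox shape, following the coordinate rules described for @modelarts:feature above. It is an illustrative payload fragment only; the label name and values are examples.

import json

# One SampleLabel entry for an object-detection bounding box.
# The upper left corner of the image is the origin [0,0]; the first point is the
# upper left corner of the box and the second point is the lower right corner.
label = {
    "name": "car",
    "type": 1,                                        # 1: object detection
    "property": {
        "@modelarts:shape": "bndbox",
        "@modelarts:feature": [[0, 10], [50, 95]],
        "@modelarts:hard": "false",                   # not a hard example
        "@modelarts:hard_coefficient": "0.0",
    },
}
print(json.dumps(label, indent=2))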
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying the Sample List by Page

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/samples
+
+
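A minimal Python sketch of the call above follows, adding pagination and an image-preview resize setting; the endpoint, project ID, dataset ID, and token are placeholders, and the X-Auth-Token header assumes token-based authentication as described in the Authentication section.

import requests

ENDPOINT = "https://{endpoint}"      # placeholder
PROJECT_ID = "{project_id}"          # placeholder
DATASET_ID = "{dataset_id}"          # placeholder
TOKEN = "{token}"                    # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/data-annotations/samples"
params = {
    "offset": 0,                                        # start page of the paging list
    "limit": 50,                                        # 1 to 100 records per page
    "sample_state": "ALL",                              # labeled samples only
    "process_parameter": "image/resize,m_lfit,h_200",   # thumbnail previews
}
response = requests.get(url, headers={"X-Auth-Token": TOKEN}, params=params)
response.raise_for_status()
body = response.json()
print("sample_count:", body["sample_count"])
for sample in body["samples"]:
    print(sample["sample_id"], [label["name"] for label in sample.get("labels", [])])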

Example Responses

Status code: 200

+

OK

+
{
+  "sample_count" : 2,
+  "samples" : [ {
+    "sample_id" : "012f99f3cf405860130b6ed2350c2228",
+    "sample_type" : 0,
+    "labels" : [ {
+      "name" : "car",
+      "type" : 0,
+      "property" : { }
+    } ],
+    "source" : "https://test-obs.obs.xxx.com:443/image/aifood/%E5%86%B0%E6%BF%80%E5%87%8C/36502.jpg?AccessKeyId=RciyO7RHmhNTfOZVryUH&Expires=1606296688&x-image-process=image%2Fresize%2Cm_lfit%2Ch_200&Signature=icyvHhFew9vnmy3zh1uZMP15Mbg%3D",
+    "metadata" : {
+      "@modelarts:import_origin" : 0
+    },
+    "sample_time" : 1589190552106,
+    "sample_status" : "MANUAL_ANNOTATION",
+    "annotated_by" : "human/test_123/test_123",
+    "labelers" : [ {
+      "email" : "xxx@xxx.com",
+      "worker_id" : "5d8d4033b428fed5ac158942c33940a2",
+      "role" : 0
+    } ]
+  }, {
+    "sample_id" : "0192f3acfb000666033a0f85c21577c7",
+    "sample_type" : 0,
+    "labels" : [ {
+      "name" : "car",
+      "type" : 0,
+      "property" : { }
+    } ],
+    "source" : "https://test-obs.obs.xxx.com:443/image/aifood/%E5%86%B0%E6%BF%80%E5%87%8C/36139.jpg?AccessKeyId=RciyO7RHmhNTfOZVryUH&Expires=1606296688&x-image-process=image%2Fresize%2Cm_lfit%2Ch_200&Signature=RRr9r2cghLCXk%2B0%2BfHtYJi8eZ4k%3D",
+    "metadata" : {
+      "@modelarts:import_origin" : 0
+    },
+    "sample_time" : 1589190543327,
+    "sample_status" : "MANUAL_ANNOTATION",
+    "annotated_by" : "human/test_123/test_123",
+    "labelers" : [ {
+      "email" : "xxx@xxx.com",
+      "worker_id" : "a2abd3f27b4e92c593c15282f8b6bd29",
+      "role" : 0
+    } ]
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListSearch.html b/modelarts/api-ref/ListSearch.html new file mode 100644 index 00000000..bcf59578 --- /dev/null +++ b/modelarts/api-ref/ListSearch.html @@ -0,0 +1,328 @@ + + +

Obtaining Sample Search Condition

+

Function

This API is used to obtain the sample search conditions.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/search-condition

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

labelers

+

Array of strings

+

List of labeling team members.

+

labels

+

Array of Label objects

+

Label list.

+

metadata

+

Map<String,Array<String>>

+

Attribute key-value pair of a dataset.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Label

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Example Requests

Obtaining Sample Search Condition

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/search-condition
+
+
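The labels and labelers returned by this API are typically used to build search criteria for the sample-list API. The sketch below only demonstrates the URL-encoding step required for the search_conditions query parameter; the condition structure used here (a label_list filter, modeled on the SearchCondition object documented for task export) is an assumption rather than a documented schema for this parameter.

import json
from urllib.parse import quote

# Assumed condition structure; adjust it to whatever your dataset actually accepts.
condition = {
    "label_list": {
        "labels": [{"name": "Cat", "type": 0}],
        "op": ""
    }
}
# search_conditions must be URL-encoded; multiple conditions are ANDed together.
search_conditions = quote(json.dumps(condition))
print(search_conditions)   # pass this value as the search_conditions query parameter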

Example Responses

Status code: 200

+

OK

+
{
+  "labels" : [ {
+    "name" : "Cat",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  }, {
+    "name" : "Dog",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  } ],
+  "metadata" : { },
+  "labelers" : [ "human/test_123/test_123", "human/xxx@xxx.com", "human/xxx@xxx.com" ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListStats.html b/modelarts/api-ref/ListStats.html new file mode 100644 index 00000000..c8b16558 --- /dev/null +++ b/modelarts/api-ref/ListStats.html @@ -0,0 +1,515 @@ + + +

Querying Dataset Statistics

+

Function

This API is used to query dataset statistics.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/data-annotations/stats

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

email

+

No

+

String

+

Email address of a labeling team member.

+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • zh-cn: Chinese

    +
  • en-us: English (default value)

    +
+

sample_state

+

No

+

String

+

Query statistics on samples in a specified state. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

data_path

+

String

+

Path for storing data of a dataset.

+

data_spliting_enable

+

Boolean

+

Whether the dataset can be split into training set and validation set based on the sample labeling statistics. The options are as follows:

+
  • true: The dataset can be split into training set and validation set.

    +
  • false: The dataset cannot be split into training set and validation set.

    +
+

grouped_label_stats

+

Map<String,Array<Object>>

+

Label statistics grouped by labeling type.

+

hard_detail_stats

+

Map<String,Object>

+

Statistics on hard example reasons. The type is Map<Integer, Pair<Integer, HardDetail>>. The key of Map indicates the ID of the hard example reason, the key of Pair indicates the number of times that the hard example reason occurs, and the value of Pair indicates the hard example reason.

+

key_sample_stats

+

Map<String,Integer>

+

Statistics on hard examples.

+

label_stats

+

Array of LabelStats objects

+

List of label statistics.

+

metadata_stats

+

Map<String,Object>

+

Statistics on sample metadata, in JSON format.

+

sample_stats

+

Map<String,Integer>

+

Statistics on sample status.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
+ + + + + + + + + + + + + +
Table 6 PairOfintAndHardDetail

Parameter

+

Type

+

Description

+

key

+

Integer

+

Number of times that a hard example reason occurs.

+

value

+

HardDetail object

+

Reason for a hard example.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 9 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+
+

Example Requests

Querying Dataset Statistics

+
GET https://{endpoint}/v2/{project_id}/datasets/WxCREuCkBSAlQr9xrde/data-annotations/stats
+
+
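As a usage sketch, the returned statistics can be post-processed on the client, for example to compute labeling progress. The field names below follow the example response; the endpoint, project ID, and token are placeholders.

import requests

ENDPOINT = "https://{endpoint}"      # placeholder
PROJECT_ID = "{project_id}"          # placeholder
DATASET_ID = "WxCREuCkBSAlQr9xrde"   # dataset ID from the example above
TOKEN = "{token}"                    # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/data-annotations/stats"
stats = requests.get(url, headers={"X-Auth-Token": TOKEN}).json()

sample_stats = stats.get("sample_stats", {})
total = sample_stats.get("total", 0)
labeled = sample_stats.get("manual_annotation", 0) + sample_stats.get("auto_annotation", 0)
print(f"labeling progress: {labeled}/{total}")
for label in stats.get("label_stats", []):
    print(label["name"], "labels:", label["count"], "samples:", label["sample_count"])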

Example Responses

Status code: 200

+

OK

+
{
+  "label_stats" : [ {
+    "name" : "Dog",
+    "type" : 1,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    },
+    "count" : 8,
+    "sample_count" : 5
+  } ],
+  "sample_stats" : {
+    "un_annotation" : 309,
+    "all" : 317,
+    "total" : 317,
+    "deleted" : 0,
+    "manual_annotation" : 8,
+    "auto_annotation" : 0,
+    "lefted" : 317
+  },
+  "key_sample_stats" : {
+    "total" : 317,
+    "non_key_sample" : 315,
+    "key_sample" : 2
+  },
+  "deletion_stats" : { },
+  "metadata_stats" : { },
+  "data_spliting_enable" : false
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListTasks.html b/modelarts/api-ref/ListTasks.html new file mode 100644 index 00000000..ac1200e0 --- /dev/null +++ b/modelarts/api-ref/ListTasks.html @@ -0,0 +1,1910 @@ + + +

Querying the Intelligent Task List by Page

+

Function

This API is used to query the intelligent task list by page, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the type parameter to query the list of a specific type of tasks.

+ + + +
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

task_name

+

No

+

String

+

Filter tasks by name.

+

type

+

No

+

String

+

Task type. If this parameter is not delivered, the auto labeling (active learning or pre-labeling) task list is returned by default. The options are as follows (a request sketch using this filter follows this table):

+
  • auto-label: active learning

    +
  • pre-label: pre-labeling

    +
  • auto-grouping: auto grouping

    +
  • auto-deploy: one-click model deployment

    +
+
+
+
+
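As a request sketch for the type filter above, the call below lists the auto grouping tasks of a dataset; the endpoint, project ID, dataset ID, and token are placeholders, and the X-Auth-Token header assumes token-based authentication.

import requests

ENDPOINT = "https://{endpoint}"      # placeholder
PROJECT_ID = "{project_id}"          # placeholder
DATASET_ID = "{dataset_id}"          # placeholder
TOKEN = "{token}"                    # placeholder IAM token

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/tasks"
params = {"type": "auto-grouping", "offset": 0, "limit": 10}
response = requests.get(url, headers={"X-Auth-Token": TOKEN}, params=params)
response.raise_for_status()
body = response.json()
print("total_count:", body["total_count"])
for task in body["tasks"]:
    print(task["task_id"], task["task_name"], task["status"])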

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

tasks

+

Array of RunningTask objects

+

Task list.

+

total_count

+

Integer

+

Total number of tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 RunningTask

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Integer

+

Number of labeled samples.

+

code

+

String

+

Error code.

+

config

+

SmartTaskConfig object

+

Task configuration.

+

create_time

+

String

+

Task creation time.

+

dataset_id

+

String

+

Dataset ID.

+

elapsed_time

+

Long

+

Execution time.

+

error_code

+

String

+

Error code.

+

error_detail

+

String

+

Error details.

+

error_msg

+

String

+

Error message.

+

message

+

String

+

Error message.

+

model_id

+

String

+

Model ID.

+

model_name

+

String

+

Model name.

+

model_version

+

String

+

Model version.

+

progress

+

Float

+

Task progress percentage.

+

result

+

Result object

+

Task result.

+

status

+

Integer

+

Task status.

+

task_id

+

String

+

Task ID.

+

task_name

+

String

+

Task name.

+

total_sample_count

+

Integer

+

Total number of samples.

+

type

+

Integer

+

Task type.

+

unconfirmed_sample_count

+

Integer

+

Number of samples to be confirmed.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SmartTaskConfig

Parameter

+

Type

+

Description

+

algorithm_type

+

String

+

Algorithm type for auto labeling. Options:

+
  • fast: Only labeled samples are used for training. This type of algorithm achieves faster labeling.

    +
  • accurate: In addition to labeled samples, unlabeled samples are used for semi-supervised training. This type of algorithm achieves more accurate labeling.

    +
+

ambiguity

+

Boolean

+

Whether to perform clustering based on the image blurring degree.

+

annotation_output

+

String

+

Output path of the active learning labeling result.

+

collect_rule

+

String

+

Sample collection rule. The default value is all, indicating full collection. Currently, only the value all is available.

+

collect_sample

+

Boolean

+

Whether to enable sample collection. The options are as follows:

+
  • true: Enable sample collection. (Default value)

    +
  • false: Do not enable sample collection.

    +
+

confidence_scope

+

String

+

Confidence range of key samples. The minimum and maximum values are separated by hyphens (-). Example: 0.10-0.90.

+

description

+

String

+

Task description.

+

engine_name

+

String

+

Engine name.

+

export_format

+

Integer

+

Format of the exported directory. The options are as follows:

+
  • 1: tree structure. For example: cat/1.jpg,dog/2.jpg.

    +
  • 2: tile structure. For example: 1.jpg, 1.txt; 2.jpg,2.txt.

    +
+

export_params

+

ExportParams object

+

Parameters of a dataset export task.

+

flavor

+

Flavor object

+

Training resource flavor.

+

image_brightness

+

Boolean

+

Whether to perform clustering based on the image brightness.

+

image_colorfulness

+

Boolean

+

Whether to perform clustering based on the image color.

+

inf_cluster_id

+

String

+

ID of a dedicated cluster. This parameter is left blank by default, indicating that a dedicated cluster is not used. When using the dedicated cluster to deploy services, ensure that the cluster status is normal. After this parameter is set, the network configuration of the cluster is used, and the vpc_id parameter does not take effect.

+

inf_config_list

+

Array of InfConfig objects

+

Configuration list required for running an inference task, which is optional and left blank by default.

+

inf_output

+

String

+

Output path of inference in active learning.

+

infer_result_output_dir

+

String

+

OBS directory for storing sample prediction results. This parameter is optional. The {service_id}-infer-result subdirectory in the output_dir directory is used by default.

+

key_sample_output

+

String

+

Output path of hard examples in active learning.

+

log_url

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank.

+

manifest_path

+

String

+

Path of the manifest file, which is used as the input for training and inference.

+

model_id

+

String

+

Model ID.

+

model_name

+

String

+

Model name.

+

model_parameter

+

String

+

Model parameter.

+

model_version

+

String

+

Model version.

+

n_clusters

+

Integer

+

Number of clusters.

+

name

+

String

+

Task name.

+

output_dir

+

String

+

Sample output path. The format is as follows: Dataset output path/Dataset name-Dataset ID/annotation/auto-deploy/. Example: /test/work_1608083108676/dataset123-g6IO9qSu6hoxwCAirfm/annotation/auto-deploy/.

+

parameters

+

Array of TrainingParameter objects

+

Running parameters of a training job.

+

pool_id

+

String

+

ID of a resource pool.

+

property

+

String

+

Attribute name.

+

req_uri

+

String

+

Inference path of a batch job.

+

result_type

+

Integer

+

Processing mode of auto grouping results. The options are as follows:

+
  • 0: Save to OBS.

    +
  • 1: Save to samples.

    +
+

samples

+

Array of SampleLabels objects

+

List of labeling information for samples to be auto labeled.

+

stop_time

+

Integer

+

Timeout interval, in minutes. The default value is 15 minutes. This parameter is used only in the scenario of auto labeling for videos.

+

time

+

String

+

Timestamp in active learning.

+

train_data_path

+

String

+

Path for storing existing training datasets.

+

train_url

+

String

+

OBS URL of the output path of a training job. By default, this parameter is left blank.

+

version_format

+

String

+

Format of a dataset version. The options are as follows:

+
  • Default: default format

    +
  • CarbonData: CarbonData (supported only by table datasets)

    +
  • CSV: CSV

    +
+

worker_server_num

+

Integer

+

Number of workers in a training job.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 ExportParams

Parameter

+

Type

+

Description

+

clear_hard_property

+

Boolean

+

Whether to clear hard example attributes. The options are as follows:

+
  • true: Clear hard example attributes. (Default value)

    +
  • false: Do not clear hard example attributes.

    +
+

export_dataset_version_format

+

String

+

Format of the dataset version to which data is exported.

+

export_dataset_version_name

+

String

+

Name of the dataset version to which data is exported.

+

export_dest

+

String

+

Export destination. The options are as follows:

+
  • DIR: Export data to OBS. (Default value)

    +
  • NEW_DATASET: Export data to a new dataset.

    +
+

export_new_dataset_name

+

String

+

Name of the new dataset to which data is exported.

+

export_new_dataset_work_path

+

String

+

Working directory of the new dataset to which data is exported.

+

ratio_sample_usage

+

Boolean

+

Whether to randomly allocate the training set and validation set based on the specified ratio. The options are as follows:

+
  • true: Allocate the training set and validation set.

    +
  • false: Do not allocate the training set and validation set. (Default value)

    +
+

sample_state

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

samples

+

Array of strings

+

ID list of exported samples.

+

search_conditions

+

Array of SearchCondition objects

+

Exported search conditions. The relationship between multiple search conditions is OR.

+

train_sample_ratio

+

String

+

Split ratio of training set and verification set during specified version release. The default value is 1.00, indicating that all released versions are training sets.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage, filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples generated from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 8 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 10 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+ +
+ + + + + + + + + +
Table 11 Flavor

Parameter

+

Type

+

Description

+

code

+

String

+

Attribute code of a resource specification, which is used for task creation.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 InfConfig

Parameter

+

Type

+

Description

+

envs

+

Map<String,String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank. To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

instance_count

+

Integer

+

Number of instances for model deployment, that is, the number of compute nodes.

+

model_id

+

String

+

Model ID.

+

specification

+

String

+

Resource specifications of real-time services. For details, see Deploying Services.

+

weight

+

Integer

+

Traffic weight allocated to a model. This parameter is mandatory only when infer_type is set to real-time. The sum of the weights must be 100.

+
+
+ +
+ + + + + + + + + + + + + +
Table 13 TrainingParameter

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name.

+

value

+

String

+

Parameter value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 Result

Parameter

+

Type

+

Description

+

annotated_sample_count

+

Integer

+

Number of labeled samples.

+

confidence_scope

+

String

+

Confidence range.

+

dataset_name

+

String

+

Dataset name.

+

dataset_type

+

String

+

Dataset type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

description

+

String

+

Description.

+

dlf_model_job_name

+

String

+

Name of a DLF model inference job.

+

dlf_service_job_name

+

String

+

Name of a DLF real-time service job.

+

dlf_train_job_name

+

String

+

Name of a DLF training job.

+

events

+

Array of Event objects

+

List of events.

+

hard_example_path

+

String

+

Path for storing hard examples.

+

hard_select_tasks

+

Array of HardSelectTask objects

+

List of hard example filtering tasks.

+

manifest_path

+

String

+

Path for storing the manifest files.

+

model_id

+

String

+

Model ID.

+

model_name

+

String

+

Model name.

+

model_version

+

String

+

Model version.

+

samples

+

Array of SampleLabels objects

+

Inference result of the real-time video service.

+

service_id

+

String

+

ID of a real-time service.

+

service_name

+

String

+

Name of a real-time service.

+

service_resource

+

String

+

ID of the real-time service bound to a user.

+

total_sample_count

+

Integer

+

Total number of samples.

+

train_data_path

+

String

+

Path for storing training data.

+

train_job_id

+

String

+

ID of a training job.

+

train_job_name

+

String

+

Name of a training job.

+

unconfirmed_sample_count

+

Integer

+

Number of samples to be confirmed.

+

version_id

+

String

+

Dataset version ID.

+

version_name

+

String

+

Dataset version name.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 15 Event

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when an event is created.

+

description

+

String

+

Description.

+

elapsed_time

+

Long

+

Duration of event execution.

+

error_code

+

String

+

Error code.

+

error_message

+

String

+

Error message.

+

events

+

Array of Event objects

+

Subevent list.

+

level

+

Integer

+

Event severity.

+

name

+

String

+

Event name.

+

ordinal

+

Integer

+

Sequence number.

+

parent_name

+

String

+

Parent event name.

+

status

+

String

+

Status. The options are as follows:

+
  • waiting: waiting

    +
  • running: running

    +
  • failed: failed

    +
  • success: successful

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 16 HardSelectTask

Parameter

+

Type

+

Description

+

create_at

+

Long

+

Creation time.

+

dataset_id

+

String

+

Dataset ID.

+

dataset_name

+

String

+

Dataset name.

+

hard_select_task_id

+

String

+

ID of a hard example filtering task.

+

task_status

+

String

+

Task status.

+

time

+

Long

+

Execution time.

+

update_at

+

Long

+

Update time.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 17 SampleLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SampleLabel objects

+

Sample label list. If this parameter is left blank, all sample labels are deleted.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

sample_id

+

String

+

Sample ID.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

sample_usage

+

String

+

Sample usage. The options are as follows:

+
  • TRAIN: training

    +
  • EVAL: evaluation

    +
  • TEST: test

    +
  • INFERENCE: inference

    +
+

source

+

String

+

Source address of sample data.

+

worker_id

+

String

+

ID of a labeling team member.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 18 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 19 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Whether the sample is labeled as a hard example, which is a default attribute. The options are as follows:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
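To make the default attributes in Table 19 concrete, here is a minimal, purely illustrative object detection label, followed by a one-line helper that splits the hyphen-separated hard example reason IDs. The label name, score, and attribute values are invented for illustration.

# Illustrative SampleLabel with object detection default attributes (values are examples only)
sample_label = {
    "name": "car",                                        # label name
    "type": 1,                                            # 1: object detection
    "score": 0.92,                                        # confidence
    "property": {
        "@modelarts:shape": "bndbox",                     # rectangle
        "@modelarts:feature": [[0, 10], [50, 95]],        # upper left and lower right corners
        "@modelarts:hard": "true",                        # marked as a hard example
        "@modelarts:hard_coefficient": "0.85",            # difficulty in [0,1]
        "@modelarts:hard_reasons": "3-20-21-19"           # hyphen-separated reason IDs
    }
}

# Split the hard example reason string into individual reason IDs
reason_ids = [int(r) for r in sample_label["property"]["@modelarts:hard_reasons"].split("-")]
print(reason_ids)   # [3, 20, 21, 19]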
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 20 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying the List of Auto Labeling, One-Click Model Deployment, or Auto Grouping Tasks by Page

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks?offset=0&limit=10
+
+
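A minimal sketch of issuing this request from Python is shown below. It assumes you already have the regional endpoint, project ID, dataset ID, and a valid IAM token (passed here as an X-Auth-Token header); all of these values are placeholders.

# Minimal sketch: querying the task list by page (endpoint, IDs, and token are placeholders)
import requests

endpoint = "https://modelarts.example.com"     # replace with the actual regional endpoint
project_id = "your-project-id"
dataset_id = "your-dataset-id"
token = "your-iam-token"                       # obtained as described in "Authentication"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks"
resp = requests.get(url, params={"offset": 0, "limit": 10},
                    headers={"X-Auth-Token": token})
resp.raise_for_status()
body = resp.json()
print(body["total_count"], "tasks,", len(body["tasks"]), "returned on this page")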

Example Responses

Status code: 200

+

OK

+
{
+  "tasks" : [ {
+    "dataset_id" : "OBegCXHxTJ2JHRAZWr0",
+    "task_id" : "14cyxyu6UXaNT3lrPFl",
+    "type" : 1,
+    "create_time" : "2020-11-03 15:22:39",
+    "status" : 3,
+    "code" : "ModelArts.4996",
+    "message" : "prelabel task execute successfully.",
+    "elapsed_time" : 531,
+    "result" : {
+      "service_id" : "ee2ade80-0967-4ef3-b6da-e8c873017b9a",
+      "service_name" : "prelabel_infer_1604388201993_xubo_cls_d910_2_993",
+      "hard_select_tasks" : [ {
+        "id" : "887c4ac6-26c8-4ca3-b07a-cd75e16fdc2d",
+        "hard_select_task_id" : "86711ab3-8ceb-4b0e-bd52-8545b184a2a7",
+        "dataset_id" : "OBegCXHxTJ2JHRAZWr0",
+        "dataset_name" : "xubo_cls_d910_2",
+        "task_status" : "import_dataset_completed",
+        "time" : 262,
+        "create_at" : 0,
+        "update_at" : 0
+      } ],
+      "continuity" : false
+    },
+    "export_type" : 0,
+    "progress" : 100.0,
+    "total_sample_count" : 246,
+    "annotated_sample_count" : 38,
+    "unconfirmed_sample_count" : 208,
+    "model_id" : "c717a39f-c64f-45df-a9d3-be9ed79cdcb4",
+    "model_name" : "auto-deploy-50041602581620628",
+    "model_version" : "0.0.1",
+    "config" : {
+      "ambiguity" : false,
+      "name" : "5fXxR01TyUoiobqNEd9",
+      "worker_server_num" : 0,
+      "inf_config_list" : [ {
+        "model_id" : null,
+        "specification" : "modelarts.vm.cpu.2u",
+        "weight" : 0,
+        "instance_count" : 1,
+        "envs" : null
+      } ],
+      "collect_sample" : false,
+      "confidence_scope" : "0.0-0.5",
+      "algorithm_type" : "supervisory",
+      "image_brightness" : false,
+      "image_colorfulness" : false
+    }
+  }, {
+    "dataset_id" : "OBegCXHxTJ2JHRAZWr0",
+    "task_id" : "5QPy73VwnwHi5NqvbcP",
+    "type" : 0,
+    "create_time" : "2020-10-31 16:11:37",
+    "status" : 3,
+    "code" : "ModelArts.4996",
+    "message" : "task executed successfully.",
+    "elapsed_time" : 397,
+    "result" : {
+      "train_job_name" : "BNFURaEyftGNMITaBiv",
+      "train_job_id" : "74679",
+      "version_id" : "89745",
+      "continuity" : false
+    },
+    "export_type" : 0,
+    "progress" : 100.0,
+    "total_sample_count" : 246,
+    "annotated_sample_count" : 38,
+    "unconfirmed_sample_count" : 198,
+    "model_name" : "Supervisory",
+    "model_version" : "0.0.1",
+    "config" : {
+      "ambiguity" : false,
+      "worker_server_num" : 0,
+      "collect_sample" : false,
+      "algorithm_type" : "fast",
+      "image_brightness" : false,
+      "image_colorfulness" : false
+    }
+  } ],
+  "total_count" : 2,
+  "exist_running_deploy_task" : false
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkerTasks.html b/modelarts/api-ref/ListWorkerTasks.html new file mode 100644 index 00000000..15383b6c --- /dev/null +++ b/modelarts/api-ref/ListWorkerTasks.html @@ -0,0 +1,439 @@ + + +

Querying the Team Labeling Task List by a Team Member

+

Function

This API is used to query the team labeling task list by a team member.

+
+

URI

GET /v2/{project_id}/workforces/worker-tasks

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting method. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

search_content

+

No

+

String

+

Fuzzy search keyword. By default, this parameter is left blank.

+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • workforce_task_name: Sort by task name.

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

count

+

Integer

+

Total number of team labeling tasks.

+

worker_tasks

+

Array of WorkerTask objects

+

Team labeling task list queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 WorkerTask

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a labeling team member's task is created.

+

dataset_id

+

String

+

ID of a dataset associated with a labeling team member's task.

+

dataset_type

+

Integer

+

Labeling type of a team member's task.

+

email

+

String

+

Email address of a labeling team member.

+

email_status

+

Integer

+

Email notification status of a labeling team member's labeling task. The options are as follows:

+
  • 0: The email has not been sent.

    +
  • 1: The email format is incorrect.

    +
  • 2: The email address is unreachable.

    +
  • 3: The email has been sent.

    +
+

last_notify_time

+

Long

+

Timestamp of the latest notification email sent to a labeling team member.

+

pass_rate

+

Double

+

Pass rate of task acceptance review for a labeling team member.

+

role

+

Integer

+

Role of a labeling team member.

+

sample_stats

+

SampleStats object

+

Sample statistics of a labeling team member's task.

+

score

+

Double

+

Average acceptance score of labeling team members' task samples.

+

task_id

+

String

+

Team labeling task ID associated with a member's task.

+

task_status

+

Integer

+

Task status of a labeling team member. The options are as follows:

+
  • 6: created

    +
  • 0: starting

    +
  • 1: running

    +
  • 2: under acceptance

    +
  • 3: approved, indicating the team labeling task is complete

    +
  • 4: rejected, indicating that the task needs to be labeled and reviewed again

    +
+

update_time

+

Long

+

Time when a labeling team member's task is updated.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_task_name

+

String

+

Team labeling task name associated with a member's task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SampleStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Number of samples accepted by the owner.

+

auto_annotation_sample_count

+

Integer

+

Number of samples to be confirmed after intelligent labeling.

+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

rejected_sample_count

+

Integer

+

Number of samples that failed to pass the owner acceptance.

+

sampled_sample_count

+

Integer

+

Number of sampled samples that are pending acceptance by the owner.

+

total_sample_count

+

Integer

+

Total number of samples.

+

unannotated_sample_count

+

Integer

+

Number of unlabeled samples.

+

uncheck_sample_count

+

Integer

+

Number of samples that have been approved by the reviewer and are to be accepted by the owner.

+

unreviewed_sample_count

+

Integer

+

Number of samples that have been labeled by the labeler but have not been reviewed by the reviewer.

+
+
+
+

Example Requests

Querying the Team Labeling Task List by a Team Member

+
GET https://{endpoint}/v2/{project_id}/workforces/worker-tasks?offset=0&limit=10&sort_by=create_time&order=desc&filePreview=false
+
+
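For larger teams the results can span several pages; the following sketch pages through them using the offset and limit parameters described in Table 2. The endpoint, project ID, and token are placeholders.

# Minimal sketch: paging through a member's team labeling tasks (placeholders for endpoint, ID, and token)
import requests

endpoint = "https://modelarts.example.com"
project_id = "your-project-id"
token = "your-iam-token"

url = f"{endpoint}/v2/{project_id}/workforces/worker-tasks"
headers = {"X-Auth-Token": token}
offset, limit, tasks = 0, 10, []
while True:
    resp = requests.get(url, headers=headers,
                        params={"offset": offset, "limit": limit,
                                "sort_by": "create_time", "order": "desc"})
    resp.raise_for_status()
    page = resp.json().get("worker_tasks", [])
    tasks.extend(page)
    if len(page) < limit:      # last page reached
        break
    offset += 1                # offset is the start page of the paging list
print(len(tasks), "tasks in total")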

Example Responses

Status code: 200

+

OK

+
{
+  "count" : 2,
+  "worker_tasks" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "role" : 0,
+    "task_id" : "tY330MHxV9dqIPVaTRM",
+    "workforce_task_name" : "task-cd60",
+    "dataset_id" : "WxCREuCkBSAlQr9xrde",
+    "sample_stats" : {
+      "total_sample_count" : 309,
+      "unannotated_sample_count" : 308,
+      "unreviewed_sample_count" : 0,
+      "uncheck_sample_count" : 1,
+      "sampled_sample_count" : 0,
+      "rejected_sample_count" : 0,
+      "accepted_sample_count" : 0,
+      "auto_annotation_sample_count" : 0
+    },
+    "create_time" : 1606224714358,
+    "update_time" : 1606224878490,
+    "email_status" : 3,
+    "last_notify_time" : 0,
+    "dataset_type" : 1,
+    "task_status" : 1,
+    "user" : {
+      "domainId" : "04f924738800d3270fc0c013a47363a0",
+      "domainName" : "test_123",
+      "projectId" : "04f924739300d3272fc3c013e36bb4b8",
+      "userId" : "04f924743b00d4331f31c0131ada6769",
+      "userName" : "test_123"
+    }
+  }, {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "role" : 0,
+    "task_id" : "MJVjCQDMso95a8dvUm4",
+    "workforce_task_name" : "task-2720",
+    "dataset_id" : "OY82gjEHxt9w1efgrhS",
+    "sample_stats" : {
+      "total_sample_count" : 50005,
+      "unannotated_sample_count" : 50005,
+      "unreviewed_sample_count" : 0,
+      "uncheck_sample_count" : 0,
+      "sampled_sample_count" : 0,
+      "rejected_sample_count" : 0,
+      "accepted_sample_count" : 0,
+      "auto_annotation_sample_count" : 0
+    },
+    "create_time" : 1605949737134,
+    "update_time" : 1605949737134,
+    "email_status" : 3,
+    "last_notify_time" : 0,
+    "dataset_type" : 0,
+    "task_status" : 2,
+    "user" : {
+      "domainId" : "04f924738800d3270fc0c013a47363a0",
+      "domainName" : "test_123",
+      "projectId" : "04f924739300d3272fc3c013e36bb4b8",
+      "userId" : "04f924743b00d4331f31c0131ada6769",
+      "userName" : "test_123"
+    }
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkers.html b/modelarts/api-ref/ListWorkers.html new file mode 100644 index 00000000..ccdb5e2d --- /dev/null +++ b/modelarts/api-ref/ListWorkers.html @@ -0,0 +1,280 @@ + + +

Querying the List of Labeling Team Members

+

Function

This API is used to query the list of labeling team members.

+
+

URI

GET /v2/{project_id}/workforces/{workforce_id}/workers

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • email: Sort by email.

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

total_number

+

Integer

+

Total number of labeling team members.

+

workers

+

Array of Worker objects

+

Labeling team member list queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying the List of Labeling Team Members

+
GET https://{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers
+
+
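The role and status fields in the response are integer codes; the short sketch below maps them to the readable names listed in Table 4, using values from the example response that follows.

# Map the integer role and status codes from Table 4 to readable names
ROLE = {0: "labeling personnel", 1: "reviewer", 2: "team administrator", 3: "dataset owner"}
STATUS = {0: "invitation not sent", 1: "invited, not logged in", 2: "logged in", 3: "deleted"}

workers = [                                  # values taken from the example response below
    {"email": "xxx@xxx.com", "role": 2, "status": 0},
    {"email": "xxx@xxx.com", "role": 0, "status": 0},
]
for w in workers:
    print(w["email"], "-", ROLE[w["role"]], "-", STATUS[w["status"]])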

Example Responses

Status code: 200

+

OK

+
{
+  "total_number" : 3,
+  "workers" : [ {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "b1e4054407ecb36a7bcde70f52ba37f2",
+    "workforce_id" : "gyb7IaAvkLc5IhEY2dv",
+    "status" : 0,
+    "role" : 2,
+    "description" : "",
+    "create_time" : 1606356324223,
+    "update_time" : 1606356324223
+  }, {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "workforce_id" : "gyb7IaAvkLc5IhEY2dv",
+    "status" : 0,
+    "role" : 0,
+    "description" : "",
+    "create_time" : 1591783804629,
+    "update_time" : 1591783804629
+  }, {
+    "email" : "xxx@xxx.com",
+    "worker_id" : "2880f38d660dd68ca6ab578b5a1d9558",
+    "workforce_id" : "gyb7IaAvkLc5IhEY2dv",
+    "status" : 0,
+    "role" : 0,
+    "description" : "",
+    "create_time" : 1575104629439,
+    "update_time" : 1575104629439
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkforceTaskSamples.html b/modelarts/api-ref/ListWorkforceTaskSamples.html new file mode 100644 index 00000000..55eb9894 --- /dev/null +++ b/modelarts/api-ref/ListWorkforceTaskSamples.html @@ -0,0 +1,895 @@ + + +

Querying the Sample List of a Team Labeling Task by Page

+

Function

This API is used to query the sample list of a team labeling task by page.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label_name

+

No

+

String

+

Label name.

+

label_type

+

No

+

Integer

+

Labeling type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet

    +
  • 200: sound classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 400: table dataset

    +
  • 600: video labeling

    +
  • 900: custom format

    +
+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

process_parameter

+

No

+

String

+

Image resizing setting, which is consistent with the OBS image resizing setting. For example, image/resize,m_lfit,h_200 indicates that the target image is resized proportionally with the height set to 200 pixels.

+

sample_state

+

No

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

search_conditions

+

No

+

String

+

Multi-dimensional search condition after URL encoding. The relationship between multiple search conditions is AND.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

sample_count

+

Integer

+

Number of samples.

+

samples

+

Array of DescribeSampleResp objects

+

Sample list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 DescribeSampleResp

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about hard examples, including the description, cause, and handling suggestion of each difficulty.

+

labelers

+

Array of Worker objects

+

List of labeling personnel to whom the sample is assigned. For team labeling, this field records the team members to whom the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, that is, the time when the OBS object was last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Whether the sample is labeled as a hard example, which is a default attribute. The options are as follows:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

Querying the Sample List of a Team Labeling Task by Page

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples
+
+
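Several of the query parameters in Table 2 must be URL encoded; the sketch below builds such a query string with Python's standard library. The parameter values are illustrative, and the process_parameter value follows the resize example given above.

# Minimal sketch: building a URL-encoded query string for this API (values are illustrative)
from urllib.parse import urlencode

params = {
    "offset": 0,
    "limit": 10,
    "sample_state": "UNCHECK",                           # samples pending acceptance
    "process_parameter": "image/resize,m_lfit,h_200"     # resize previews to a height of 200 pixels
}
query = urlencode(params)
print(query)
# offset=0&limit=10&sample_state=UNCHECK&process_parameter=image%2Fresize%2Cm_lfit%2Ch_200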

Example Responses

Status code: 200

+

OK

+
{
+  "sample_count" : 2,
+  "samples" : [ {
+    "sample_id" : "26c6dd793d80d3274eb89349ec76d678",
+    "sample_type" : 0,
+    "labels" : [ ],
+    "source" : "https://test-obs.obs.xxx.com:443/detect/data/dataset-car-and-person/IMG_kitti_0000_000016.png?AccessKeyId=P19W9X830R1Z39P5X5M5&Expires=1606300137&x-obs-security-token=gQpjbi1ub3J0aC03jKj8N6gtS4VsdTTW3QFoHMtpMoFLtCa6W_J4DxT0nYIfx...",
+    "metadata" : {
+      "@modelarts:import_origin" : 0,
+      "@modelarts:size" : [ 1242, 375, 3 ]
+    },
+    "sample_time" : 1598263639997,
+    "sample_status" : "UN_ANNOTATION",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "labelers" : [ {
+      "email" : "xxx@xxx.com",
+      "worker_id" : "afdda13895bc66322ffbf36ae833bcf0",
+      "role" : 0
+    } ]
+  }, {
+    "sample_id" : "2971815bbb11a462161b48dddf19344f",
+    "sample_type" : 0,
+    "labels" : [ ],
+    "source" : "https://test-obs.obs.xxx.com:443/detect/data/dataset-car-and-person/IMG_kitti_0000_000011.png?AccessKeyId=P19W9X830R1Z39P5X5M5&Expires=1606300137&x-obs-security-token=gQpjbi1ub3J0aC03jKj8N6gtS4VsdTTW3QFoHMtpMoFLtC...",
+    "metadata" : {
+      "@modelarts:import_origin" : 0,
+      "@modelarts:size" : [ 1242, 375, 3 ]
+    },
+    "sample_time" : 1598263639997,
+    "sample_status" : "UN_ANNOTATION",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "labelers" : [ {
+      "email" : "xxx@xxx.com",
+      "worker_id" : "305595e1901a526017d2e11f3ab0ffe1",
+      "role" : 0
+    } ]
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkforceTaskStats.html b/modelarts/api-ref/ListWorkforceTaskStats.html new file mode 100644 index 00000000..3d12a377 --- /dev/null +++ b/modelarts/api-ref/ListWorkforceTaskStats.html @@ -0,0 +1,511 @@ + + +

Querying Details About Team Labeling Task Statistics

+

Function

This API is used to query details about team labeling task statistics.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/stats

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

sample_state

+

No

+

String

+

Statistics on the specified sample state. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

data_path

+

String

+

Path for storing data of a dataset.

+

data_spliting_enable

+

Boolean

+

Whether the dataset can be split into training set and validation set based on the sample labeling statistics. The options are as follows:

+
  • true: The dataset can be split into training set and validation set.

    +
  • false: The dataset cannot be split into training set and validation set.

    +
+

grouped_label_stats

+

Map<String,Array<Object>>

+

Label statistics grouped by labeling type.

+

hard_detail_stats

+

Map<String,Object>

+

Statistics on hard example reasons, of type Map<Integer, Pair<Integer, HardDetail>>. The map key is the ID of a hard example reason; within each pair, the key is the number of times that reason occurs and the value is the HardDetail describing it.

+

key_sample_stats

+

Map<String,Integer>

+

Statistics on hard examples.

+

label_stats

+

Array of LabelStats objects

+

List of label statistics.

+

metadata_stats

+

Map<String,Object>

+

Statistics on sample metadata, in JSON format.

+

sample_stats

+

Map<String,Integer>

+

Statistics on sample status.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
+ + + + + + + + + + + + + +
Table 6 PairOfintAndHardDetail

Parameter

+

Type

+

Description

+

key

+

Integer

+

Number of times that a hard example reason occurs.

+

value

+

HardDetail object

+

Reason for a hard example.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 9 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+
+

Example Requests

Querying Statistics on Unreviewed Samples of a Team Labeling Task

+
GET https://{endpoint}/v2/{project_id}/datasets/WxCREuCkBSAlQr9xrde/workforce-tasks/tY330MHxV9dqIPVaTRM/data-annotations/stats?sample_state=__unreviewed__
+
+
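An illustrative Python sketch of this query follows, passing sample_state as a query parameter (the value mirrors the example request above). The requests library, endpoint, IDs, and token are assumptions, not part of the API definition.

import requests

# Placeholders: supply your own endpoint, IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/data-annotations/stats")
resp = requests.get(url, headers={"X-Auth-Token": token},
                    params={"sample_state": "__unreviewed__"})  # same value as the example request
resp.raise_for_status()
stats = resp.json()
for label in stats.get("label_stats", []):
    print(label["name"], "labels:", label["count"], "samples:", label["sample_count"])
print("sample_stats:", stats.get("sample_stats", {}))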

Example Responses

Status code: 200

+

OK

+
{
+  "label_stats" : [ {
+    "name" : "Cat",
+    "type" : 1,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    },
+    "count" : 0,
+    "sample_count" : 0
+  }, {
+    "name" : "Dog",
+    "type" : 1,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    },
+    "count" : 1,
+    "sample_count" : 1
+  } ],
+  "sample_stats" : {
+    "un_annotation" : 308,
+    "total" : 309,
+    "rejected" : 0,
+    "unreviewed" : 1,
+    "accepted" : 0,
+    "auto_annotation" : 0,
+    "uncheck" : 0
+  },
+  "key_sample_stats" : {
+    "total" : 309,
+    "non_key_sample" : 309,
+    "key_sample" : 0
+  },
+  "deletion_stats" : { },
+  "metadata_stats" : { },
+  "data_spliting_enable" : false
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkforceTasks.html b/modelarts/api-ref/ListWorkforceTasks.html new file mode 100644 index 00000000..fe719568 --- /dev/null +++ b/modelarts/api-ref/ListWorkforceTasks.html @@ -0,0 +1,1241 @@ + + +

Querying the Team Labeling Task List of a Dataset

+

Function

This API is used to query the team labeling task list of a dataset.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/workforce-tasks

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

count

+

Integer

+

Total number of team labeling tasks.

+

tasks

+

Array of WorkforceTask objects

+

Team labeling task list queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 WorkforceTask

Parameter

+

Type

+

Description

+

auto_sync_dataset

+

Boolean

+

Whether to automatically synchronize the result of a team labeling task to the dataset. The options are as follows:

+
  • true: Automatically synchronize the result of a team labeling task to the dataset.

    +
  • false: Do not automatically synchronize the result of a team labeling task to the dataset.

    +
+

check_rate

+

Double

+

Acceptance ratio of a team labeling task.

+

checking_task_desc

+

WorkforceSamplingTaskDesc object

+

Details about the current acceptance task of a team labeling task.

+

create_time

+

Long

+

Time when a labeling task is created.

+

dataset_id

+

String

+

Dataset ID.

+

description

+

String

+

Description of a labeling task.

+

label_stats

+

Array of LabelStats objects

+

Label statistics of a labeling task.

+

pass_rate

+

Double

+

Acceptance pass rate of a team labeling task.

+

repetition

+

Integer

+

Number of persons who label each sample in a team labeling task. The minimum value is 1.

+

sample_search_conditions

+

Array of SearchCondition objects

+

Sample search criteria when creating a task.

+

sample_stats

+

SampleStats object

+

Sample statistics of a labeling task.

+

score

+

Double

+

Average acceptance score of a team labeling task.

+

status

+

Integer

+

Status of a team labeling task. The options are as follows:

+
  • 6: created. The owner has created a task but does not start it. Only the owner and manager can view the task list.

    +
  • 0: starting. The owner or manager starts the task and assigns the files to be labeled. The owner, manager, labeler, and reviewer can view the task list. If the task assignment is not complete, a new task cannot be started.

    +
  • 1: running. The task is being labeled by the labeler, reviewed by the reviewer, and accepted by the owner. If auto labeling files are added or synchronized, or unlabeled files are imported, the new files need to be assigned again.

    +
  • 2: under acceptance. The owner initiates an acceptance task but does not complete it. In this state, a new acceptance task cannot be initiated until the current one is completed.

    +
  • 3: passed. The team labeling task has been completed.

    +
  • 4: rejected. In this state, the manager starts the task again and assigns it for labeling and reviewing.

    +
  • 5: synchronizing acceptance result. This state is displayed when acceptance tasks are changed to be asynchronous. In this state, new acceptance tasks cannot be initiated and the current acceptance task cannot be continued. In the task name area, a message is displayed, indicating that the acceptance result is being synchronized.

    +
  • 7: acceptance sampling. This state is displayed when acceptance tasks are changed to be asynchronous. In this state, new acceptance tasks cannot be initiated and the current acceptance task cannot be continued. In the task name area, a message is displayed, indicating that the acceptance sampling is in progress.

    +
+

synchronize_auto_labeling_data

+

Boolean

+

Whether to synchronize the auto labeling result of a team labeling task. The options are as follows:

+
  • true: Synchronize the results to be confirmed to team members after auto labeling is complete.

    +
  • false: Do not synchronize the auto labeling results. (Default value)

    +
+

synchronize_data

+

Boolean

+

Whether to synchronize the added data of a team labeling task. The options are as follows:

+
  • true: Synchronize uploaded files, data sources, and imported unlabeled files to team members.

    +
  • false: Do not synchronize the added data. (Default value)

    +
+

task_id

+

String

+

ID of a labeling task.

+

task_name

+

String

+

Name of a labeling task.

+

update_time

+

Long

+

Time when a labeling task is updated.

+

version_id

+

String

+

Version ID of the dataset associated with a labeling task.

+

workforce_stats

+

WorkforceStats object

+

Statistics on team labeling task members.

+

workforces_config

+

WorkforcesConfig object

+

Team labeling task information: Tasks can be assigned by the team administrator or a specified team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 WorkforceSamplingTaskDesc

Parameter

+

Type

+

Description

+

action

+

Integer

+

Action after the acceptance. The options are as follows:

+
  • 0: Pass all samples when the acceptance is completed (including single-rejected samples)

    +
  • 1: Reject all samples when the acceptance is completed (including single-accepted samples)

    +
  • 4: Pass only single-accepted samples and unaccepted samples.

    +
  • 5: Reject only single-rejected samples and unaccepted samples.

    +
+

checking_stats

+

CheckTaskStats object

+

Real-time report of acceptance tasks.

+

checking_task_id

+

String

+

ID of the current acceptance task.

+

overwrite_last_result

+

Boolean

+

Whether to use the acceptance result to overwrite the labeled result if a sample has been labeled during acceptance. The options are as follows:

+
  • true: Overwrite the labeled result.

    +
  • false: Do not overwrite the labeled result. (Default value)

    +
+

total_stats

+

CheckTaskStats object

+

Overall report of historical acceptance tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 CheckTaskStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Accepted samples.

+

checked_sample_count

+

Integer

+

Checked samples.

+

pass_rate

+

Double

+

Pass rate of samples.

+

rejected_sample_count

+

Integer

+

Rejected samples.

+

sampled_sample_count

+

Integer

+

Number of sampled samples.

+

sampling_num

+

Integer

+

Number of samples in an acceptance task.

+

sampling_rate

+

Double

+

Sampling rate of an acceptance task.

+

score

+

String

+

Acceptance score.

+

task_id

+

String

+

ID of an acceptance task.

+

total_sample_count

+

Integer

+

Total samples.

+

total_score

+

Long

+

Total acceptance score.

+

unchecked_sample_count

+

Integer

+

Unchecked samples.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelStats

Parameter

+

Type

+

Description

+

attributes

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

count

+

Integer

+

Number of labels.

+

name

+

String

+

Label name.

+

property

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

sample_count

+

Integer

+

Number of samples containing the label.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 LabelAttribute

Parameter

+

Type

+

Description

+

default_value

+

String

+

Default value of a label attribute.

+

id

+

String

+

Label attribute ID.

+

name

+

String

+

Label attribute name.

+

type

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + +
Table 8 LabelAttributeValue

Parameter

+

Type

+

Description

+

id

+

String

+

Label attribute value ID.

+

value

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 LabelProperty

Parameter

+

Type

+

Description

+

@modelarts:color

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 SearchCondition

Parameter

+

Type

+

Description

+

coefficient

+

String

+

Filter by coefficient of difficulty.

+

frame_in_video

+

Integer

+

A frame in the video.

+

hard

+

String

+

Whether a sample is a hard sample. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

import_origin

+

String

+

Filter by data source.

+

kvp

+

String

+

CT dosage. Samples are filtered by dosage.

+

label_list

+

SearchLabels object

+

Label search criteria.

+

labeler

+

String

+

Labeler.

+

metadata

+

SearchProp object

+

Search by sample attribute.

+

parent_sample_id

+

String

+

Parent sample ID.

+

sample_dir

+

String

+

Directory where data samples are stored (the directory must end with a slash (/)). Only samples in the specified directory are searched for. Recursive search of directories is not supported.

+

sample_name

+

String

+

Search by sample name, including the file name extension.

+

sample_time

+

String

+

When a sample is added to the dataset, an index is created based on the last modification time (accurate to day) of the sample on OBS. You can search for the sample based on the time. The options are as follows:

+
  • month: Search for samples added from 30 days ago to the current day.

    +
  • day: Search for samples added from yesterday (one day ago) to the current day.

    +
  • yyyyMMdd-yyyyMMdd: Search for samples added in a specified period (at most 30 days), in the format of Start date-End date. For example, 20190901-20190915 indicates that samples generated from September 1 to September 15, 2019 are searched.

    +
+

score

+

String

+

Search by confidence.

+

slice_thickness

+

String

+

DICOM layer thickness. Samples are filtered by layer thickness.

+

study_date

+

String

+

DICOM scanning time.

+

time_in_video

+

String

+

A time point in the video.

+
+
+ +
+ + + + + + + + + + + + + +
Table 11 SearchLabels

Parameter

+

Type

+

Description

+

labels

+

Array of SearchLabel objects

+

List of label search criteria.

+

op

+

String

+

If you want to search for multiple labels, op must be specified. If you search for only one label, op can be left blank. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 12 SearchLabel

Parameter

+

Type

+

Description

+

name

+

String

+

Label name.

+

op

+

String

+

Operation type between multiple attributes. The options are as follows:

+
  • OR: OR operation

    +
  • AND: AND operation

    +
+

property

+

Map<String,Array<String>>

+

Label attribute, which is in the Object format and stores any key-value pairs. key indicates the attribute name, and value indicates the value list. If value is null, the search is not performed by value. Otherwise, the search value can be any value in the list.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + +
Table 13 SearchProp

Parameter

+

Type

+

Description

+

op

+

String

+

Relationship between attribute values. The options are as follows:

+
  • AND: AND relationship

    +
  • OR: OR relationship

    +
+

props

+

Map<String,Array<String>>

+

Search criteria of an attribute. Multiple search criteria can be set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 SampleStats

Parameter

+

Type

+

Description

+

accepted_sample_count

+

Integer

+

Number of samples accepted by the owner.

+

auto_annotation_sample_count

+

Integer

+

Number of samples to be confirmed after intelligent labeling.

+

deleted_sample_count

+

Integer

+

Number of deleted samples.

+

rejected_sample_count

+

Integer

+

Number of samples that failed to pass the owner acceptance.

+

sampled_sample_count

+

Integer

+

Number of samples that have been sampled for acceptance and are awaiting owner acceptance.

+

total_sample_count

+

Integer

+

Total number of samples.

+

unannotated_sample_count

+

Integer

+

Number of unlabeled samples.

+

uncheck_sample_count

+

Integer

+

Number of samples that have been approved by the reviewer and are to be accepted by the owner.

+

unreviewed_sample_count

+

Integer

+

Number of samples that have been labeled by the labeler but have not been reviewed by the reviewer.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 15 WorkforceStats

Parameter

+

Type

+

Description

+

labeler_count

+

Integer

+

Number of labeling persons.

+

reviewer_count

+

Integer

+

Number of reviewers.

+

workforce_count

+

Integer

+

Number of teams.

+
+
+ +
+ + + + + + + + + + + + + +
Table 16 WorkforcesConfig

Parameter

+

Type

+

Description

+

agency

+

String

+

Administrator.

+

workforces

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 17 WorkforceConfig

Parameter

+

Type

+

Description

+

workers

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 18 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+
+

Example Requests

Querying the Team Labeling Task List of a Dataset

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks
+
+
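A minimal Python sketch of this call, listing each task and its status, is shown below. Everything except the URI path is a placeholder or an assumption (requests library, IAM token).

import requests

# Placeholders: supply your own endpoint, project/dataset IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id = "{project_id}", "{dataset_id}"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/workforce-tasks"
resp = requests.get(url, headers={"X-Auth-Token": token})
resp.raise_for_status()
body = resp.json()
print("total team labeling tasks:", body["count"])
for task in body.get("tasks", []):
    # status 1 means the task is running; see the status list in Table 3.
    print(task["task_id"], task["task_name"], "status:", task["status"])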

Example Responses

Status code: 200

+

OK

+
{
+  "count" : 2,
+  "tasks" : [ {
+    "dataset_id" : "WxCREuCkBSAlQr9xrde",
+    "task_id" : "tY330MHxV9dqIPVaTRM",
+    "task_name" : "task-cd60",
+    "status" : 1,
+    "create_time" : 1606224714358,
+    "update_time" : 1606224714358,
+    "repetition" : 1,
+    "workforces_config" : {
+      "workforces" : [ {
+        "workforce_id" : "0PfqwXA8M59pppYBx4k",
+        "workforce_name" : "team-123",
+        "workers" : [ {
+          "email" : "xxx@xxx.com",
+          "worker_id" : "6db04ae0afb54d7274a2982255516c29",
+          "role" : 2
+        }, {
+          "email" : "xxx@xxx.com",
+          "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+          "role" : 0
+        } ]
+      } ]
+    },
+    "synchronize_data" : true,
+    "synchronize_auto_labeling_data" : true,
+    "workforce_stats" : {
+      "workforce_count" : 1,
+      "labeler_count" : 1,
+      "reviewer_count" : 0
+    },
+    "sample_stats" : {
+      "total_sample_count" : 309,
+      "unannotated_sample_count" : 308,
+      "unreviewed_sample_count" : 0,
+      "uncheck_sample_count" : 1,
+      "sampled_sample_count" : 0,
+      "rejected_sample_count" : 0,
+      "accepted_sample_count" : 0,
+      "auto_annotation_sample_count" : 0
+    },
+    "auto_check_samples" : true,
+    "auto_sync_dataset" : true,
+    "project_id" : "04f924739300d3272fc3c013e36bb4b8",
+    "task_type" : 1,
+    "dataset_name" : "dataset-95a6",
+    "total_sample_count" : 309,
+    "annotated_sample_count" : 0,
+    "feature_supports" : [ "0" ],
+    "label_task_status" : 1,
+    "sync_labels" : true,
+    "workforce_task" : true
+  }, {
+    "dataset_id" : "WxCREuCkBSAlQr9xrde",
+    "task_id" : "iYZx7gScPUozOXner9k",
+    "task_name" : "task-e63f",
+    "status" : 1,
+    "create_time" : 1606184400278,
+    "update_time" : 1606184400278,
+    "repetition" : 1,
+    "workforces_config" : {
+      "workforces" : [ {
+        "workforce_id" : "q3ZFSwORu1ztKljDLYQ",
+        "workforce_name" : "modelarts-team",
+        "workers" : [ {
+          "email" : "xxx@xxx.com",
+          "worker_id" : "afdda13895bc66322ffbf36ae833bcf0",
+          "role" : 0
+        } ]
+      } ]
+    },
+    "synchronize_data" : false,
+    "synchronize_auto_labeling_data" : false,
+    "workforce_stats" : {
+      "workforce_count" : 1,
+      "labeler_count" : 1,
+      "reviewer_count" : 0
+    },
+    "sample_stats" : {
+      "total_sample_count" : 317,
+      "unannotated_sample_count" : 310,
+      "unreviewed_sample_count" : 0,
+      "uncheck_sample_count" : 0,
+      "sampled_sample_count" : 0,
+      "rejected_sample_count" : 0,
+      "accepted_sample_count" : 7,
+      "auto_annotation_sample_count" : 0
+    },
+    "checking_task_desc" : {
+      "checking_task_id" : "onSbri2oqYOmDjDyW17",
+      "action" : 0,
+      "overwrite_last_result" : false
+    },
+    "auto_check_samples" : true,
+    "auto_sync_dataset" : true,
+    "project_id" : "04f924739300d3272fc3c013e36bb4b8",
+    "task_type" : 1,
+    "dataset_name" : "dataset-95a6",
+    "total_sample_count" : 317,
+    "annotated_sample_count" : 0,
+    "feature_supports" : [ "0" ],
+    "label_task_status" : 1,
+    "sync_labels" : true,
+    "workforce_task" : true
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkforces.html b/modelarts/api-ref/ListWorkforces.html new file mode 100644 index 00000000..a824c0a7 --- /dev/null +++ b/modelarts/api-ref/ListWorkforces.html @@ -0,0 +1,259 @@ + + +

Querying the Labeling Team List

+

Function

This API is used to query the labeling team list.

+
+

URI

GET /v2/{project_id}/workforces

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The value ranges from 1 to 100. The default value is 10.

+

offset

+

No

+

Integer

+

Start page of the paging list. The default value is 0.

+

order

+

No

+

String

+

Sorting sequence of the query. The options are as follows:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

search_content

+

No

+

String

+

Fuzzy search keyword. By default, this parameter is left blank.

+

sort_by

+

No

+

String

+

Sorting mode of the query. The options are as follows:

+
  • create_time: Sort by creation time. (Default value)

    +
  • workforce_name: Sort by labeling team name.

    +
+

workforce_task_id

+

No

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

total_number

+

Integer

+

Total number of labeling teams.

+

workforces

+

Array of Workforce objects

+

Labeling team list queried by page.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Workforce

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Time when a labeling team is created.

+

description

+

String

+

Description of a labeling team.

+

update_time

+

Long

+

Time when a labeling team is updated.

+

worker_count

+

Integer

+

Total number of labeling team members.

+

workforce_id

+

String

+

ID of a labeling team.

+

workforce_name

+

String

+

Name of a labeling team.

+

workspace_id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+
+

Example Requests

Querying the Labeling Team List

+
GET https://{endpoint}/v2/{project_id}/workforces
+
+
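The query parameters from Table 2 can be passed as shown in this illustrative Python sketch; the requests library, endpoint, project ID, and token are placeholders supplied by the caller.

import requests

# Placeholders: supply your own endpoint, project ID, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id = "{project_id}"

url = f"{endpoint}/v2/{project_id}/workforces"
resp = requests.get(url, headers={"X-Auth-Token": token},
                    params={"limit": 50, "sort_by": "create_time", "order": "desc"})
resp.raise_for_status()
body = resp.json()
print("total labeling teams:", body["total_number"])
for team in body.get("workforces", []):
    print(team["workforce_id"], team["workforce_name"], "members:", team["worker_count"])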

Example Responses

Status code: 200

+

OK

+
{
+  "total_number" : 2,
+  "workforces" : [ {
+    "workforce_id" : "ZUH8gqkjuaib8pxkDdz",
+    "workforce_name" : "team-123",
+    "description" : "my team",
+    "worker_count" : 0,
+    "create_time" : 1606354772548,
+    "update_time" : 1606354772548
+  }, {
+    "workforce_id" : "3Ry04NsqvEybuWYLDvC",
+    "workforce_name" : "team-170a",
+    "description" : "",
+    "worker_count" : 1,
+    "create_time" : 1604644946891,
+    "update_time" : 1606238678626
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkspaceQuotas.html b/modelarts/api-ref/ListWorkspaceQuotas.html new file mode 100644 index 00000000..fb50fcdd --- /dev/null +++ b/modelarts/api-ref/ListWorkspaceQuotas.html @@ -0,0 +1,204 @@ + + +

Querying a Workspace Quota

+

Function

This API is used to obtain workspace quotas.

+
+

URI

GET /v1/{project_id}/workspaces/{workspace_id}/quotas

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workspace_id

+

Yes

+

String

+

Workspace ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

quotas

+

Array of quotas objects

+

List of quotas

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 quotas

Parameter

+

Type

+

Description

+

name_en

+

String

+

Name of a quota, in English

+

name_cn

+

String

+

Name of a quota, in Chinese

+

resource

+

String

+

Unique resource ID

+

quota

+

Integer

+

Existing quota. Value -1 indicates that the quota is not limited.

+

min_quota

+

Integer

+

Minimum quota

+

max_quota

+

Integer

+

Maximum quota

+

unit_en

+

String

+

Quota unit, in English

+

unit_cn

+

String

+

Quota unit, in Chinese

+

update_time

+

Number

+

Last modification time, in UTC format. If the resource quota has not been modified, the default value is the time when a workspace was created.

+

used_quota

+

Number

+

Used quota. If the value of quota is -1 (indicating that the quota is not limited), the used_quota value is null.

+
+
+
+

Example Requests

Querying Workspace Quotas

+
GET https://{endpoint}/v1/{project_id}/workspaces/{workspace_id}/quotas
+
+
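A short Python sketch of reading the quota list follows; the handling of -1 (unlimited) mirrors the field descriptions in Table 3. The endpoint, IDs, and token are placeholders.

import requests

# Placeholders: supply your own endpoint, project/workspace IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, workspace_id = "{project_id}", "{workspace_id}"

url = f"{endpoint}/v1/{project_id}/workspaces/{workspace_id}/quotas"
resp = requests.get(url, headers={"X-Auth-Token": token})
resp.raise_for_status()
for q in resp.json().get("quotas", []):
    # quota == -1 means the quota is not limited, and used_quota is null in that case.
    limit = "unlimited" if q["quota"] == -1 else f'{q["quota"]} {q.get("unit_en", "")}'
    print(q["resource"], "->", limit, "| used:", q.get("used_quota"))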

Example Responses

Status code: 200

+

OK

+
{
+  "quotas" : [ {
+    "name_en" : "ExeMLtraining duration (image classification, object detection, and soundclassification)",
+    "name_cn" : "Chinese name of the quota",
+    "resource" : "exemlProject.gpu_duration",
+    "quota" : 10,
+    "min_quota" : -1,
+    "max_quota" : 60000,
+    "unit_en" : "minute",
+    "unit_cn" : "Chinese name of the minute",
+    "update_time" : 1470000020000,
+    "used_quota" : 5
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ListWorkspaces.html b/modelarts/api-ref/ListWorkspaces.html new file mode 100644 index 00000000..7e8357cb --- /dev/null +++ b/modelarts/api-ref/ListWorkspaces.html @@ -0,0 +1,314 @@ + + +

Querying a Workspace List

+

Function

This API is used to obtain a workspace list with detailed information contained in the response body.

+
+

URI

GET /v1/{project_id}/workspaces

+ +
+ + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

offset

+

No

+

Integer

+

Start page for pagination display. The default value is 0.

+

Minimum: 0

+

Default: 0

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. Default value: 1000

+

Minimum: 1

+

Default: 1000

+

sort_by

+

No

+

String

+

Sorting mode. The value can be name, update_time, or status. Default value: name

+

Default: name

+

order

+

No

+

String

+

Query sequence. Options:

+
  • asc: ascending order

    +
  • desc: descending order (default value)

    +
+

enterprise_project_id

+

No

+

String

+

Enterprise project ID. If this parameter is specified, only the workspaces of the enterprise project are returned. By default, all workspaces are displayed.

+

name

+

No

+

String

+

Workspace name. If this parameter is specified, the fuzzy-match workspaces are obtained. By default, all workspaces are displayed.

+

filter_accessible

+

No

+

Boolean

+

Filter accessible workspaces. If this parameter is set to true, the workspaces you are not allowed to access are filtered out. The default value is false, indicating all workspaces are displayed.

+

Default: false

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

total_count

+

Integer

+

Total number of workspaces that match the query

+

count

+

Integer

+

Number of workspaces returned for the current request

+

workspaces

+

Array of workspacePropertiesWithoutGrants objects

+

Workspace details

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 workspacePropertiesWithoutGrants

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID, which is a 32-character UUID generated by the system, without hyphens (-). The ID of the default workspace is 0.

+

name

+

String

+

Workspace name

+

description

+

String

+

Workspace description. Enter 0 to 256 characters.

+

owner

+

String

+

Creator name. Enter 0 to 64 characters.

+

create_time

+

Number

+

Time when a workspace was created, in UTC format

+

update_time

+

Number

+

Last modification time, in UTC format

+

auth_type

+

String

+

Authorization type. Options:

+
  • PUBLIC: public access of tenants (default value)

    +
  • PRIVATE: accessible only to the creator and primary account

    +
  • INTERNAL: accessible to the creator, primary account, and specified IAM users. This parameter must be used together with grants.

    +
+

enterprise_project_id

+

String

+

Enterprise project ID

+

enterprise_project_name

+

String

+

Name of an enterprise project

+

status

+

String

+

Workspace status. Options:

+
  • CREATE_FAILED: Creating the workspace failed.

    +
  • NORMAL: The workspace is running properly.

    +
  • DELETING: The workspace is being deleted.

    +
  • DELETE_FAILED: Deleting the workspace failed.

    +
+

status_info

+

String

+

Status description. By default, this parameter is left blank. This parameter is used to show detailed information about a status. If a deletion failed, you can use this parameter to obtain the failure cause.

+
+
+
+

Example Requests

Querying a Workspace List

+
GET https://{endpoint}/v1/{project_id}/workspaces?limit=2&offset=3&sort_by=name&order=desc&enterprise_project_id=0
+
+
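Because the list is paginated, a caller typically loops over pages. The sketch below is an assumption-laden illustration: it uses the Python requests library, treats offset as a page index (as described in Table 2), and uses placeholder endpoint, project ID, and token values.

import requests

# Placeholders: supply your own endpoint, project ID, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id = "{project_id}"
url = f"{endpoint}/v1/{project_id}/workspaces"
headers = {"X-Auth-Token": token}

offset, limit = 0, 100
while True:
    resp = requests.get(url, headers=headers,
                        params={"offset": offset, "limit": limit,
                                "sort_by": "name", "order": "desc"})
    resp.raise_for_status()
    body = resp.json()
    for ws in body.get("workspaces", []):
        print(ws["id"], ws["name"], ws["status"])
    offset += 1                                  # offset is a page index, not a record offset
    if offset * limit >= body.get("total_count", 0):
        break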

Example Responses

Status code: 200

+

OK

+
{
+  "total_count" : 10,
+  "count" : 1,
+  "workspaces" : [ {
+    "id" : 0,
+    "name" : "default",
+    "description" : "",
+    "owner" : "testUser",
+    "enterprise_project_id" : "***b0091-887f-4839-9929-cbc884f1e***",
+    "enterprise_project_name" : "default",
+    "auth_type" : "public",
+    "create_time" : 1460000010000,
+    "update_time" : 1460000010000,
+    "status" : "NORMAL",
+    "status_info" : ""
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/PARAMETERS.txt b/modelarts/api-ref/PARAMETERS.txt new file mode 100644 index 00000000..6da8d5f0 --- /dev/null +++ b/modelarts/api-ref/PARAMETERS.txt @@ -0,0 +1,3 @@ +version="" +language="en-us" +type="" \ No newline at end of file diff --git a/modelarts/api-ref/ReviewSamples.html b/modelarts/api-ref/ReviewSamples.html new file mode 100644 index 00000000..8aa30880 --- /dev/null +++ b/modelarts/api-ref/ReviewSamples.html @@ -0,0 +1,191 @@ + + +

Reviewing Team Labeling Results

+

Function

This API is used to review team labeling results.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/review

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

comments

+

No

+

Array of SampleComment objects

+

Review comment list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SampleComment

Parameter

+

Mandatory

+

Type

+

Description

+

accept

+

Yes

+

Boolean

+

Whether the reviewed sample is approved. The options are as follows:

+
  • true: passed

    +
  • false: not passed

    +
+

comment

+

No

+

String

+

Review comment, which contains 0 to 256 characters, excluding special characters (!<>=&"').

+

sample_id

+

No

+

String

+

Sample ID.

+

score

+

No

+

String

+

Review score, whose value can be A, B, C, or D, in descending order.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Reviewing Team Labeling Results

+
{
+  "comments" : [ {
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "sample_id" : "0d43f9811d3808a3146c673257d4a1dbhh",
+    "accept" : true,
+    "comment" : "",
+    "score" : "A"
+  } ]
+}
+
+
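The request body above maps directly onto a JSON payload. A hedged Python sketch follows; the worker and sample IDs are illustrative placeholders, as are the endpoint and token.

import requests

# Placeholders: supply your own endpoint, IDs, IAM token, worker ID, and sample ID.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/data-annotations/review")
body = {
    "comments": [{
        "worker_id": "<labeling team member ID>",
        "sample_id": "<sample ID>",
        "accept": True,      # approve the sample
        "comment": "",
        "score": "A"         # A, B, C, or D, in descending order
    }]
}
resp = requests.post(url, headers={"X-Auth-Token": token}, json=body)
resp.raise_for_status()      # an empty JSON object is returned on success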

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/SendEmails.html b/modelarts/api-ref/SendEmails.html new file mode 100644 index 00000000..bde04987 --- /dev/null +++ b/modelarts/api-ref/SendEmails.html @@ -0,0 +1,204 @@ + + +

Sending an Email to a Labeling Team Member

+

Function

This API is used to send an email to a labeling team member.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/notify

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

emails

+

Yes

+

Array of strings

+

Email list of a labeling team member.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Result of sending an email to a labeling team member.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Sending an Email to a Labeling Team Member

+
{
+  "emails" : [ "xxx@xxx.com", "xxx@xxx.com" ]
+}
+
+
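An illustrative Python sketch of this notification call follows; the addresses mirror the placeholders in the example above, and the endpoint, IDs, and token must be supplied by the caller.

import requests

# Placeholders: supply your own endpoint, IDs, IAM token, and member email addresses.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/notify")
resp = requests.post(url, headers={"X-Auth-Token": token},
                     json={"emails": ["xxx@xxx.com", "xxx@xxx.com"]})
resp.raise_for_status()
print("success:", resp.json().get("success"))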

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/ShowWorkspaceInfo.html b/modelarts/api-ref/ShowWorkspaceInfo.html new file mode 100644 index 00000000..a89472b5 --- /dev/null +++ b/modelarts/api-ref/ShowWorkspaceInfo.html @@ -0,0 +1,237 @@ + + +

Querying Details About a Workspace

+

Function

This API is used to obtain details about a workspace.

+
+

URI

GET /v1/{project_id}/workspaces/{workspace_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workspace_id

+

Yes

+

String

+

Workspace ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID, which is a 32-character UUID generated by the system, without hyphens (-). The ID of the default workspace is 0.

+

name

+

String

+

Workspace name

+

description

+

String

+

Workspace description. Enter 0 to 256 characters.

+

owner

+

String

+

Creator name. Enter 0 to 64 characters.

+

create_time

+

Number

+

Time when a workspace was created, in UTC format

+

update_time

+

Number

+

Last modification time, in UTC format

+

auth_type

+

String

+

Authorization type. Options:

+
  • PUBLIC: public access of tenants (default value)

    +
  • PRIVATE: accessible only to the creator and primary account

    +
  • INTERNAL: accessible to the creator, primary account, and specified IAM users. This parameter must be used together with grants.

    +
+

enterprise_project_id

+

String

+

Enterprise project ID

+

enterprise_project_name

+

String

+

Name of an enterprise project

+

status

+

String

+

Workspace status. Options:

+
  • CREATE_FAILED: Creating the workspace failed.

    +
  • NORMAL: The workspace is running properly.

    +
  • DELETING: The workspace is being deleted.

    +
  • DELETE_FAILED: Deleting the workspace failed.

    +
+

status_info

+

String

+

Status description. By default, this parameter is left blank. This parameter is used to show detailed information about a status. If a deletion failed, you can use this parameter to obtain the failure cause.

+

grants

+

Array of grants objects

+

List of authorized users, which is left blank by default. This parameter must be used together with auth_type and takes effect only when auth_type is set to INTERNAL.

+
+
+ +
+ + + + + + + + + + + + + +
Table 3 grants

Parameter

+

Type

+

Description

+

user_id

+

String

+

User ID. Either this parameter or user_name must be set. If both of them are set, user_id is used preferentially.

+

user_name

+

String

+

IAM username. Either this parameter or user_id must be set.

+
+
+
+

Example Requests

Querying Details About a Workspace

+
GET https://{endpoint}/v1/{project_id}/workspaces/{workspace_id}
+
+
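A minimal Python sketch of this lookup (placeholder endpoint, IDs, and token; requests library assumed):

import requests

# Placeholders: supply your own endpoint, project/workspace IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, workspace_id = "{project_id}", "{workspace_id}"

url = f"{endpoint}/v1/{project_id}/workspaces/{workspace_id}"
resp = requests.get(url, headers={"X-Auth-Token": token})
resp.raise_for_status()
ws = resp.json()
# grants is only populated when auth_type is INTERNAL.
print(ws["name"], ws["status"], ws["auth_type"], ws.get("grants", []))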

Example Responses

Status code: 200

+

OK

+
{
+  "id" : "**d05d1a553b4e188ea878e7dcb85e**",
+  "name" : "test-workspace",
+  "status" : "NORMAL",
+  "status_info" : "",
+  "description" : "",
+  "owner" : "testUser",
+  "create_time" : 1470000020000,
+  "update_time" : 1470000030000,
+  "enterprise_project_id" : "***b0091-887f-4839-9929-cbc884f1e***",
+  "enterprise_project_name" : "test-eps",
+  "auth_type" : "INTERNAL",
+  "grants" : [ {
+    "user_id" : "***55d2cd53b4x458ea878e7dcb85***",
+    "user_name" : "test-iam-user"
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/StartWorkforceSamplingTask.html b/modelarts/api-ref/StartWorkforceSamplingTask.html new file mode 100644 index 00000000..1096cf5b --- /dev/null +++ b/modelarts/api-ref/StartWorkforceSamplingTask.html @@ -0,0 +1,153 @@ + + +

Creating a Team Labeling Acceptance Task

+

Function

This API is used to create a team labeling acceptance task.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

sampling_num

+

No

+

Integer

+

Number of samples for the acceptance task. Specify either this parameter or sampling_rate.

+

sampling_rate

+

No

+

Double

+

Sampling ratio of the acceptance task. The value range is (0,1]. Specify either this parameter or sampling_num.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 3 Response body parameters

Parameter

+

Type

+

Description

+

task_id

+

String

+

ID of an asynchronous acceptance task.

+
+
+
+

Example Requests

Creating a Team Labeling Acceptance Task and Setting the Sampling Percentage to 20%

+
{
+  "sampling_rate" : 0.2
+}
+
+
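The same request expressed as a hedged Python sketch (placeholders throughout; either sampling_rate or sampling_num would be supplied, not both):

import requests

# Placeholders: supply your own endpoint, IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/acceptance")
# Sample 20% of the task; pass "sampling_num" instead to give an absolute number of samples.
resp = requests.post(url, headers={"X-Auth-Token": token}, json={"sampling_rate": 0.2})
resp.raise_for_status()
print("acceptance task id:", resp.json()["task_id"])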

Example Responses

Status code: 200

+

OK

+
{
+  "task_id" : "nv6BbozxCJmZcHAE9hV"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/StartWorkforceTask.html b/modelarts/api-ref/StartWorkforceTask.html new file mode 100644 index 00000000..dd549492 --- /dev/null +++ b/modelarts/api-ref/StartWorkforceTask.html @@ -0,0 +1,319 @@ + + +

Starting a Team Labeling Task

+

Function

This API is used to start a team labeling task.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

No

+

String

+

Dataset ID.

+

task_id

+

No

+

String

+

ID of a team labeling task.

+

workforces_config

+

No

+

WorkforcesConfig object

+

Team labeling task information: Tasks can be assigned by the team administrator or a specified team.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 WorkforcesConfig

Parameter

+

Mandatory

+

Type

+

Description

+

agency

+

No

+

String

+

Administrator.

+

workforces

+

No

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 WorkforceConfig

Parameter

+

Mandatory

+

Type

+

Description

+

workers

+

No

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+

workforce_name

+

No

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Worker

Parameter

+

Mandatory

+

Type

+

Description

+

create_time

+

No

+

Long

+

Creation time.

+

description

+

No

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

No

+

String

+

Email address of a labeling team member.

+

role

+

No

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

No

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

No

+

Long

+

Update time.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Starting a Team Labeling Task

+
{
+  "workforces_config" : {
+    "workforces" : [ {
+      "workforce_id" : "l4u9Hpz2JJ67DNN1Hg9",
+      "workers" : [ {
+        "email" : "xiaozhang@163.com"
+      }, {
+        "email" : "xiaoli@163.com"
+      } ]
+    } ]
+  }
+}
+
+
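As an illustration, the request above can be sent as follows; the team ID and member address are placeholders, and the requests library and IAM token are assumptions.

import requests

# Placeholders: supply your own endpoint, IDs, IAM token, team ID, and member emails.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, workforce_task_id = "{project_id}", "{dataset_id}", "{workforce_task_id}"

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}")
body = {
    "workforces_config": {
        "workforces": [{
            "workforce_id": "<labeling team ID>",
            "workers": [{"email": "xxx@xxx.com"}]
        }]
    }
}
resp = requests.post(url, headers={"X-Auth-Token": token}, json=body)
resp.raise_for_status()   # an empty JSON object is returned on success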

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/StopAutoAnnotation.html b/modelarts/api-ref/StopAutoAnnotation.html new file mode 100644 index 00000000..7514644e --- /dev/null +++ b/modelarts/api-ref/StopAutoAnnotation.html @@ -0,0 +1,99 @@ + + +

Stopping an Intelligent Task

+

Function

This API is used to stop intelligent tasks, including auto labeling, one-click model deployment, and auto grouping tasks. You can specify the task_id parameter to stop a specific task.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}/stop

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

Task ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Stopping Auto Labeling, One-Click Model Deployment, or Auto Grouping Tasks

+
POST https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}/stop
+
+
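A short Python sketch of stopping a task follows; note that success is signalled by 204 No Content rather than a JSON body. The endpoint, IDs, and token are placeholders.

import requests

# Placeholders: supply your own endpoint, project/dataset/task IDs, and IAM token.
endpoint, token = "https://{endpoint}", "<IAM token>"
project_id, dataset_id, task_id = "{project_id}", "{dataset_id}", "{task_id}"

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/tasks/{task_id}/stop"
resp = requests.post(url, headers={"X-Auth-Token": token})
print("stopped" if resp.status_code == 204 else f"unexpected status: {resp.status_code}")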

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/StopProcessorTaskVersion.html b/modelarts/api-ref/StopProcessorTaskVersion.html new file mode 100644 index 00000000..d62f349a --- /dev/null +++ b/modelarts/api-ref/StopProcessorTaskVersion.html @@ -0,0 +1,99 @@ + + +

Stopping the Version of a Data Processing Task

+

Function

This API is used to stop the version of a data processing task.

+
+

URI

POST /v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/stop

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+

version_id

+

Yes

+

String

+

Version ID of a data processing task.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Stopping the Version of a Data Processing Task

+
POST https://{endpoint}/v2/{project_id}/processor-tasks/{task_id}/versions/{version_id}/stop
+
+

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/SyncDataSource.html b/modelarts/api-ref/SyncDataSource.html new file mode 100644 index 00000000..0bbb14df --- /dev/null +++ b/modelarts/api-ref/SyncDataSource.html @@ -0,0 +1,88 @@ + + +

Synchronizing a Dataset

+

Function

This API is used to synchronize samples and labeling information from the input dataset path to the dataset.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/sync-data

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

None

+
+

Example Requests

Synchronizing a Dataset

+
POST https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/sync-data
+
+
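The same pattern applies to triggering a synchronization. The sketch below is illustrative only; all values are placeholders and X-Auth-Token authentication is assumed.

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

# Start synchronizing samples and labeling information from the input path.
url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/sync-data"
resp = requests.post(url, headers=headers)
print(resp.status_code)  # 200 on success; the response carries no body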

Example Responses

None

+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/SyncDataSourceState.html b/modelarts/api-ref/SyncDataSourceState.html new file mode 100644 index 00000000..d49a0b67 --- /dev/null +++ b/modelarts/api-ref/SyncDataSourceState.html @@ -0,0 +1,182 @@ + + +

Querying the Status of a Dataset Synchronization Task

+

Function

This API is used to query the status of a dataset synchronization task.

+
+

URI

GET /v2/{project_id}/datasets/{dataset_id}/sync-data/status

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

None

+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Response body parameters

Parameter

+

Type

+

Description

+

add_sample_count

+

Long

+

Number of added samples.

+

create_time

+

Long

+

Task creation time.

+

dataset_id

+

String

+

Dataset ID.

+

deleted_sample_count

+

Long

+

Number of deleted samples.

+

duration_time

+

Long

+

Task running time.

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

status

+

String

+

Status of a task. The options are as follows:

+
  • QUEUING: queuing

    +
  • STARTING: execution started

    +
  • RUNNING: running

    +
  • COMPLETED: completed

    +
  • FAILED: failed

    +
  • NOT_EXIST: not found

    +
+

task_id

+

String

+

Synchronization task ID.

+

total_sample_count

+

Long

+

Total number of samples.

+
+
+
+

Example Requests

Obtaining the Status of a Dataset Synchronization Task

+
GET https://{endpoint}/v2/{project_id}/datasets/{dataset_id}/sync-data/status
+
+
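The following Python sketch polls this status API until the synchronization task reaches a terminal state. It is not an official example; the endpoint, IDs, token, and polling interval are placeholders or assumptions.

import time
import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/sync-data/status"

# Poll until the task leaves the QUEUING/STARTING/RUNNING states.
while True:
    body = requests.get(url, headers=headers).json()
    status = body.get("status")
    if status in ("COMPLETED", "FAILED", "NOT_EXIST"):
        break
    time.sleep(5)  # arbitrary polling interval

print(status, body.get("total_sample_count"))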

Example Responses

Status code: 200

+

OK

+
{
+  "status" : "COMPLETED",
+  "dataset_id" : "gfghHSokody6AJigS5A"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateDataset.html b/modelarts/api-ref/UpdateDataset.html new file mode 100644 index 00000000..5ce955dc --- /dev/null +++ b/modelarts/api-ref/UpdateDataset.html @@ -0,0 +1,419 @@ + + +

Modifying a Dataset

+

Function

This API is used to modify basic information about a dataset, such as the dataset name, description, current version, and labels.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

add_labels

+

No

+

Array of Label objects

+

List of added labels.

+

current_version_id

+

No

+

String

+

ID of the current dataset version.


+

dataset_name

+

No

+

String

+

Dataset name.

+

delete_labels

+

No

+

Array of Label objects

+

List of deleted labels.

+

description

+

No

+

String

+

Dataset description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

update_labels

+

No

+

Array of Label objects

+

List of updated labels.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelAttribute

Parameter

+

Mandatory

+

Type

+

Description

+

default_value

+

No

+

String

+

Default value of a label attribute.

+

id

+

No

+

String

+

Label attribute ID.

+

name

+

No

+

String

+

Label attribute name.

+

type

+

No

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

No

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 LabelAttributeValue

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Label attribute value ID.

+

value

+

No

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

dataset_id

+

String

+

Dataset ID.

+
+
+
+

Example Requests

Modifying a Dataset

+
{
+  "dataset_id" : "gfghHSokody6AJigS5A",
+  "description" : "just a test",
+  "add_tags" : [ {
+    "name" : "Pig",
+    "type" : 0,
+    "property" : {
+      "@modelarts:color" : "#3399ff"
+    }
+  } ]
+}
+
+
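A hedged Python sketch of this request follows. It uses the add_labels field as documented in Table 2; the endpoint, IDs, and token are placeholders.

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

# Body fields follow Table 2: update the description and add one label.
body = {
    "description": "just a test",
    "add_labels": [{
        "name": "Pig",
        "type": 0,  # 0: image classification
        "property": {"@modelarts:color": "#3399ff"}
    }]
}

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
resp = requests.put(url, headers=headers, json=body)
print(resp.json())  # e.g. {"dataset_id": "..."} on success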

Example Responses

Status code: 200

+

OK

+
{
+  "dataset_id" : "gfghHSokody6AJigS5A"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateLabel.html b/modelarts/api-ref/UpdateLabel.html new file mode 100644 index 00000000..52885bfb --- /dev/null +++ b/modelarts/api-ref/UpdateLabel.html @@ -0,0 +1,246 @@ + + +

Updating a Label by Label Names

+

Function

This API is used to update a label by label names.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels/{label_name}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

label_name

+

Yes

+

String

+

Label name.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label_type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Response Parameters

Status code: 204

+ +
+ + + + + + + + + + + + + + + + + +
Table 4 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+
+

Example Requests

Updating a Label by Label Names

+
{
+  "@modelarts:color" : "#93c47d"
+}
+
+
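A Python sketch of the same call, with the label name URL-encoded in the path and the optional label_type query parameter set; all values are placeholders and this is not an official example.

import requests
from urllib.parse import quote

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
label_name = "Cat"                          # placeholder: label to update
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/data-annotations/labels/{quote(label_name)}")
# label_type is optional; 1 = object detection (see Table 2).
resp = requests.put(url, headers=headers, params={"label_type": 1},
                    json={"@modelarts:color": "#93c47d"})
print(resp.status_code)  # expected: 204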

Example Responses

Status code: 204

+

No Content

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

204

+

No Content

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateLabels.html b/modelarts/api-ref/UpdateLabels.html new file mode 100644 index 00000000..8998bbe7 --- /dev/null +++ b/modelarts/api-ref/UpdateLabels.html @@ -0,0 +1,432 @@ + + +

Modifying Labels in Batches

+

Function

This API is used to modify labels in batches.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/labels

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of Label objects

+

List of labels to be updated.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Label

Parameter

+

Mandatory

+

Type

+

Description

+

attributes

+

No

+

Array of LabelAttribute objects

+

Multi-dimensional attribute of a label. For example, if the label is music, attributes such as style and artist may be included.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

LabelProperty object

+

Basic attribute key-value pair of a label, such as color and shortcut keys.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 LabelAttribute

Parameter

+

Mandatory

+

Type

+

Description

+

default_value

+

No

+

String

+

Default value of a label attribute.

+

id

+

No

+

String

+

Label attribute ID.

+

name

+

No

+

String

+

Label attribute name.

+

type

+

No

+

String

+

Label attribute type. The options are as follows:

+
  • text: text

    +
  • select: single-choice drop-down list

    +
+

values

+

No

+

Array of LabelAttributeValue objects

+

List of label attribute values.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 LabelAttributeValue

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Label attribute value ID.

+

value

+

No

+

String

+

Label attribute value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 LabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:color

+

No

+

String

+

Default attribute: Label color, which is a hexadecimal code of the color. By default, this parameter is left blank. Example: #FFFFF0.

+

@modelarts:default_shape

+

No

+

String

+

Default attribute: Default shape of an object detection label (dedicated attribute). By default, this parameter is left blank. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:from_type

+

No

+

String

+

Default attribute: Type of the head entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+

@modelarts:rename_to

+

No

+

String

+

Default attribute: The new name of the label.

+

@modelarts:shortcut

+

No

+

String

+

Default attribute: Label shortcut key. By default, this parameter is left blank. For example: D.

+

@modelarts:to_type

+

No

+

String

+

Default attribute: Type of the tail entity in the triplet relationship label. This attribute must be specified when a relationship label is created. This parameter is used only for the text triplet dataset.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response body for updating labels.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Modifying Labels in Batches

+
{
+  "labels" : [ {
+    "name" : "Cat",
+    "property" : {
+      "@modelarts:color" : "#8a1524",
+      "@modelarts:rename_to" : "pussycat"
+    }
+  } ]
+}
+
+
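The equivalent call in Python might look as follows; it mirrors the sample request above (renaming the label "Cat" and changing its color) and uses placeholder IDs and token.

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

body = {
    "labels": [{
        "name": "Cat",
        "property": {
            "@modelarts:color": "#8a1524",
            "@modelarts:rename_to": "pussycat"
        }
    }]
}

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/labels"
result = requests.put(url, headers=headers, json=body).json()
print(result.get("success"), result.get("results"))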

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateProcessorTask.html b/modelarts/api-ref/UpdateProcessorTask.html new file mode 100644 index 00000000..ccdc2f98 --- /dev/null +++ b/modelarts/api-ref/UpdateProcessorTask.html @@ -0,0 +1,114 @@ + + +

Updating a Processing Task

+

Function

This API is used to update a processing task, including feature analysis tasks and data processing tasks. Only the task description can be updated. You can specify the task_id path parameter to update a specific task.

+
+

URI

PUT /v2/{project_id}/processor-tasks/{task_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

task_id

+

Yes

+

String

+

ID of a data processing task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Description of a data processing task. The description contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+
+
+
+

Response Parameters

None

+
+

Example Requests

Updating a Data Processing Task

+
{
+  "description" : "test"
+}
+
+
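A minimal Python sketch of this update, with placeholder values (not an official example):

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
task_id = "<task_id>"                       # placeholder: data processing task ID
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

# Only the task description can be updated (see Table 2).
url = f"{endpoint}/v2/{project_id}/processor-tasks/{task_id}"
resp = requests.put(url, headers=headers, json={"description": "test"})
print(resp.status_code)  # expected: 200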

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateSamples.html b/modelarts/api-ref/UpdateSamples.html new file mode 100644 index 00000000..4fe924ec --- /dev/null +++ b/modelarts/api-ref/UpdateSamples.html @@ -0,0 +1,655 @@ + + +

Updating Sample Labels in Batches

+

Function

This API is used to update sample labels in batches, including adding, modifying, and deleting sample labels. If the labels parameter of a sample in the request body is not specified, all labels of the sample are deleted.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

email

+

No

+

String

+

Email address of a labeling team member.

+

samples

+

No

+

Array of SampleLabels objects

+

Updated sample list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SampleLabels

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of SampleLabel objects

+

Sample label list. If this parameter is left blank, all sample labels are deleted.

+

metadata

+

No

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

sample_id

+

No

+

String

+

Sample ID.

+

sample_type

+

No

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

sample_usage

+

No

+

String

+

Sample usage. The options are as follows:

+
  • TRAIN: training

    +
  • EVAL: evaluation

    +
  • TEST: test

    +
  • INFERENCE: inference

    +
+

source

+

No

+

String

+

Source address of sample data.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 SampleLabel

Parameter

+

Mandatory

+

Type

+

Description

+

annotated_by

+

No

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

No

+

String

+

Label ID.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

No

+

Float

+

Confidence.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SampleLabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:content

+

No

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

No

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

No

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

No

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, both the X and Y coordinates of the first point must be smaller than those of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

No

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

No

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

No

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

No

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

No

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

No

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

No

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

No

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SampleMetadata

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:hard

+

No

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

No

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

Array of integers

+

ID of a hard sample reason, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

No

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response list for updating sample labels in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Updating Sample Labels in Batches

+
{
+  "samples" : [ {
+    "sample_id" : "8b583c44bf249f8ba43ea42c92920221",
+    "labels" : [ {
+      "name" : "yunbao"
+    } ]
+  }, {
+    "sample_id" : "b5fe3039879660a2e6bf18166e247f68",
+    "labels" : [ {
+      "name" : "yunbao"
+    } ]
+  } ]
+}
+
+
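The following Python sketch illustrates both cases described above: updating a sample's labels and deleting all labels of a sample by omitting labels. The sample IDs, other IDs, and the token are placeholders.

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
dataset_id = "<dataset_id>"                 # placeholder
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

body = {
    "samples": [
        # Set (or replace) the label of a sample.
        {"sample_id": "<sample_id_1>", "labels": [{"name": "yunbao"}]},
        # labels omitted: per Table 3, all labels of this sample are deleted.
        {"sample_id": "<sample_id_2>"}
    ]
}

url = f"{endpoint}/v2/{project_id}/datasets/{dataset_id}/data-annotations/samples"
result = requests.put(url, headers=headers, json=body).json()
print(result.get("success"))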

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorker.html b/modelarts/api-ref/UpdateWorker.html new file mode 100644 index 00000000..93ab8082 --- /dev/null +++ b/modelarts/api-ref/UpdateWorker.html @@ -0,0 +1,138 @@ + + +

Updating a Labeling Team Member

+

Function

This API is used to update a labeling team member.

+
+

URI

PUT /v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

worker_id

+

Yes

+

String

+

ID of a labeling team member.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

role

+

No

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+
+
+
+

Response Parameters

None

+
+

Example Requests

Updating a Labeling Team Member

+
{
+  "description" : "My name is Tom",
+  "role" : 2
+}
+
+
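A hedged Python sketch of this request (placeholder IDs and token; not an official example):

import requests

endpoint = "https://<modelarts-endpoint>"    # placeholder
project_id = "<project_id>"                  # placeholder
workforce_id = "<workforce_id>"              # placeholder: labeling team ID
worker_id = "<worker_id>"                    # placeholder: team member ID
headers = {"X-Auth-Token": "<IAM token>"}    # placeholder token

url = f"{endpoint}/v2/{project_id}/workforces/{workforce_id}/workers/{worker_id}"
# role 2 makes the member a team administrator (see Table 2).
resp = requests.put(url, headers=headers,
                    json={"description": "My name is Tom", "role": 2})
print(resp.status_code)  # expected: 200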

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkforce.html b/modelarts/api-ref/UpdateWorkforce.html new file mode 100644 index 00000000..cbe8dea2 --- /dev/null +++ b/modelarts/api-ref/UpdateWorkforce.html @@ -0,0 +1,123 @@ + + +

Updating a Labeling Team

+

Function

This API is used to update a labeling team.

+
+

URI

PUT /v2/{project_id}/workforces/{workforce_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_id

+

Yes

+

String

+

ID of a labeling team.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Labeling team description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

workforce_name

+

No

+

String

+

Name of a labeling team. The value contains 1 to 64 characters and only letters, digits, hyphens (-), and underscores (_) are allowed.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Updating a Labeling Team

+
{
+  "description" : "my team"
+}
+
+
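A similar sketch for updating a team; the workforce_name value here is only illustrative, and all IDs and the token are placeholders.

import requests

endpoint = "https://<modelarts-endpoint>"   # placeholder
project_id = "<project_id>"                 # placeholder
workforce_id = "<workforce_id>"             # placeholder: labeling team ID
headers = {"X-Auth-Token": "<IAM token>"}   # placeholder token

url = f"{endpoint}/v2/{project_id}/workforces/{workforce_id}"
resp = requests.put(url, headers=headers,
                    json={"description": "my team", "workforce_name": "team-01"})
print(resp.status_code)  # expected: 200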

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkforceSamplingTask.html b/modelarts/api-ref/UpdateWorkforceSamplingTask.html new file mode 100644 index 00000000..c28e62b1 --- /dev/null +++ b/modelarts/api-ref/UpdateWorkforceSamplingTask.html @@ -0,0 +1,824 @@ + + +

Updating the Status of a Team Labeling Acceptance Task

+

Function

This API is used to update the sample status before the acceptance of a team labeling task is complete, by confirming the acceptance scope and specifying whether the labeled data is overwritten.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/acceptance/status

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

locale

+

Yes

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

action

+

No

+

Integer

+

Acceptance action. The options are as follows:

+
  • 0: Pass all samples.

    +
  • 1: Reject all samples.

    +
  • 2: Cancel acceptance.

    +
  • 3: View the list of conflicted samples.

    +
  • 4: Pass single-accepted samples and unaccepted samples.

    +
  • 5: Pass only single-accepted samples.

    +
+

locale

+

No

+

String

+

Language. The options are as follows:

+
  • en-us: English (default value)

    +
  • zh-cn: Chinese

    +
+

overwrite_last_result

+

No

+

Boolean

+

Whether to overwrite labeled data. The options are as follows:

+
  • true: Overwrite labeled data.

    +
  • false: Do not overwrite labeled data. (Default value)

    +
+
+
+
+
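For reference before the response description, the request above might be issued as follows in Python. The action and overwrite_last_result values come from Table 3; the endpoint, IDs, and token are placeholders, and this is not an official example.

import requests

endpoint = "https://<modelarts-endpoint>"       # placeholder
project_id = "<project_id>"                     # placeholder
dataset_id = "<dataset_id>"                     # placeholder
workforce_task_id = "<workforce_task_id>"       # placeholder: team labeling task ID
headers = {"X-Auth-Token": "<IAM token>"}       # placeholder token

url = (f"{endpoint}/v2/{project_id}/datasets/{dataset_id}"
       f"/workforce-tasks/{workforce_task_id}/acceptance/status")
body = {
    "action": 0,                    # 0: pass all samples (see Table 3)
    "overwrite_last_result": True   # overwrite the labeled data
}
resp = requests.put(url, headers=headers, params={"locale": "en-us"}, json=body)
print(resp.json().get("sample_count"))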

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + +
Table 4 Response body parameters

Parameter

+

Type

+

Description

+

sample_count

+

Integer

+

Total number of accepted samples.

+

samples

+

Array of DescribeSampleResp objects

+

List of accepted samples.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 DescribeSampleResp

Parameter

+

Type

+

Description

+

check_accept

+

Boolean

+

Whether the acceptance is passed, which is used for team labeling. The options are as follows:

+
  • true: The acceptance is passed.

    +
  • false: The acceptance is not passed.

    +
+

check_comment

+

String

+

Acceptance comment, which is used for team labeling.

+

check_score

+

String

+

Acceptance score, which is used for team labeling.

+

deletion_reasons

+

Array of strings

+

Reason for deleting a sample, which is used for healthcare.

+

hard_details

+

Map<String,Object>

+

Details about hard samples, including the problem description, causes, and handling suggestions.

+

labelers

+

Array of Worker objects

+

List of labeling personnel to whom the sample is assigned. For team labeling, labelers records the team members to which the sample is allocated.

+

labels

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

review_accept

+

Boolean

+

Whether to accept the review, which is used for team labeling. The options are as follows:

+
  • true: accepted

    +
  • false: rejected

    +
+

review_comment

+

String

+

Review comment, which is used for team labeling.

+

review_score

+

String

+

Review score, which is used for team labeling.

+

sample_data

+

Array of strings

+

Sample data list.

+

sample_dir

+

String

+

Sample path.

+

sample_id

+

String

+

Sample ID.

+

sample_name

+

String

+

Sample name.

+

sample_size

+

Long

+

Sample size or text length, in bytes.

+

sample_status

+

String

+

Sample status. The options are as follows:

+
  • ALL: labeled

    +
  • NONE: unlabeled

    +
  • UNCHECK: pending acceptance

    +
  • ACCEPTED: accepted

    +
  • REJECTED: rejected

    +
  • UNREVIEWED: pending review

    +
  • REVIEWED: reviewed

    +
  • WORKFORCE_SAMPLED: sampled

    +
  • WORKFORCE_SAMPLED_UNCHECK: sampling unchecked

    +
  • WORKFORCE_SAMPLED_CHECKED: sampling checked

    +
  • WORKFORCE_SAMPLED_ACCEPTED: sampling accepted

    +
  • WORKFORCE_SAMPLED_REJECTED: sampling rejected

    +
  • AUTO_ANNOTATION: to be confirmed

    +
+

sample_time

+

Long

+

Sample time, which is the time when the sample file in OBS was last modified.

+

sample_type

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

score

+

String

+

Comprehensive score, which is used for team labeling.

+

source

+

String

+

Source address of sample data.

+

sub_sample_url

+

String

+

Subsample URL, which is used for healthcare.

+

worker_id

+

String

+

ID of a labeling team member, which is used for team labeling.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 HardDetail

Parameter

+

Type

+

Description

+

alo_name

+

String

+

Alias.

+

id

+

Integer

+

Reason ID.

+

reason

+

String

+

Reason description.

+

suggestion

+

String

+

Handling suggestion.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 Worker

Parameter

+

Type

+

Description

+

create_time

+

Long

+

Creation time.

+

description

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

String

+

Email address of a labeling team member.

+

role

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

Long

+

Update time.

+

worker_id

+

String

+

ID of a labeling team member.

+

workforce_id

+

String

+

ID of a labeling team.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleLabel

Parameter

+

Type

+

Description

+

annotated_by

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

String

+

Label ID.

+

name

+

String

+

Label name.

+

property

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

Float

+

Confidence.

+

type

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 SampleLabelProperty

Parameter

+

Type

+

Description

+

@modelarts:content

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, both the X and Y coordinates of the first point must be smaller than those of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

String

+

Reasons that the sample is a hard sample, which is a default attribute. Use a hyphen (-) to separate every two hard sample reason IDs, for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 10 SampleMetadata

Parameter

+

Type

+

Description

+

@modelarts:hard

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

Array of integers

+

IDs of hard sample reasons, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Example Requests

All Tasks Are Accepted.

+
{
+  "action" : 0
+}
+
+

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkforceTask.html b/modelarts/api-ref/UpdateWorkforceTask.html new file mode 100644 index 00000000..93b0f381 --- /dev/null +++ b/modelarts/api-ref/UpdateWorkforceTask.html @@ -0,0 +1,337 @@ + + +

Updating a Team Labeling Task

+

Function

This API is used to update a team labeling task.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a team labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

No

+

String

+

Dataset ID.

+

description

+

No

+

String

+

Team labeling task description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

task_id

+

No

+

String

+

ID of a team labeling task.

+

task_name

+

No

+

String

+

Team labeling task name. The value contains 1 to 64 characters and only letters, digits, underscores (_), and hyphens (-) are allowed.

+

workforces_config

+

No

+

WorkforcesConfig object

+

Team labeling task information: Tasks can be assigned by the team administrator or a specified team.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 WorkforcesConfig

Parameter

+

Mandatory

+

Type

+

Description

+

agency

+

No

+

String

+

Administrator.

+

workforces

+

No

+

Array of WorkforceConfig objects

+

List of teams that execute labeling tasks.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 WorkforceConfig

Parameter

+

Mandatory

+

Type

+

Description

+

workers

+

No

+

Array of Worker objects

+

List of labeling team members.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+

workforce_name

+

No

+

String

+

Name of a labeling team. The value contains 0 to 1024 characters and does not support the following special characters: !<>=&"'

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Worker

Parameter

+

Mandatory

+

Type

+

Description

+

create_time

+

No

+

Long

+

Creation time.

+

description

+

No

+

String

+

Labeling team member description. The value contains 0 to 256 characters and does not support the following special characters: ^!<>=&"'

+

email

+

No

+

String

+

Email address of a labeling team member.

+

role

+

No

+

Integer

+

Role. The options are as follows:

+
  • 0: labeling personnel

    +
  • 1: reviewer

    +
  • 2: team administrator

    +
  • 3: dataset owner

    +
+

status

+

No

+

Integer

+

Current login status of a labeling team member. The options are as follows:

+
  • 0: The invitation email has not been sent.

    +
  • 1: The invitation email has been sent but the user has not logged in.

    +
  • 2: The user has logged in.

    +
  • 3: The labeling team member has been deleted.

    +
+

update_time

+

No

+

Long

+

Update time.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+

workforce_id

+

No

+

String

+

ID of a labeling team.

+
+
+
+

Response Parameters

None

+
+

Example Requests

Updating a Team Labeling Task

+
{
+  "workforces_config" : {
+    "workforces" : [ {
+      "workforce_id" : "3frbQ90hb8ZKksAhyR8",
+      "workers" : [ {
+        "email" : "xxx@xxx.com"
+      }, {
+        "email" : "xxx@xxx.com"
+      } ]
+    } ]
+  }
+}
+
+
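For illustration, the following is a minimal Python sketch of this call using the third-party requests library. It assumes token-based authentication via the X-Auth-Token header, as described in Authentication; the endpoint host, resource IDs, and token below are placeholders, not values defined by this API.

import requests

# Placeholders (assumptions): replace with your region endpoint, resource IDs, and a valid IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
DATASET_ID = "your-dataset-id"
WORKFORCE_TASK_ID = "your-workforce-task-id"
TOKEN = "your-iam-token"

url = (f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}"
       f"/workforce-tasks/{WORKFORCE_TASK_ID}")

# Same body as the example request above: reassign the task to one labeling team.
body = {
    "workforces_config": {
        "workforces": [{
            "workforce_id": "3frbQ90hb8ZKksAhyR8",
            "workers": [{"email": "xxx@xxx.com"}, {"email": "xxx@xxx.com"}]
        }]
    }
}

resp = requests.put(url, json=body, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()   # status code 200 with an empty JSON body indicates success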

Example Responses

Status code: 200

+

OK

+
{ }
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkforceTaskSamples.html b/modelarts/api-ref/UpdateWorkforceTaskSamples.html new file mode 100644 index 00000000..495537bc --- /dev/null +++ b/modelarts/api-ref/UpdateWorkforceTaskSamples.html @@ -0,0 +1,666 @@ + + +

Updating Labels of Team Labeling Samples in Batches

+

Function

This API is used to update labels of team labeling samples in batches.

+
+

URI

PUT /v2/{project_id}/datasets/{dataset_id}/workforce-tasks/{workforce_task_id}/data-annotations/samples

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workforce_task_id

+

Yes

+

String

+

ID of a labeling task.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

email

+

No

+

String

+

Email address of a labeling team member.

+

samples

+

No

+

Array of SampleLabels objects

+

Updated sample list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 SampleLabels

Parameter

+

Mandatory

+

Type

+

Description

+

labels

+

No

+

Array of SampleLabel objects

+

Sample label list. If this parameter is left blank, all sample labels are deleted.

+

metadata

+

No

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

sample_id

+

No

+

String

+

Sample ID.

+

sample_type

+

No

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+

sample_usage

+

No

+

String

+

Sample usage. The options are as follows:

+
  • TRAIN: training

    +
  • EVAL: evaluation

    +
  • TEST: test

    +
  • INFERENCE: inference

    +
+

source

+

No

+

String

+

Source address of sample data.

+

worker_id

+

No

+

String

+

ID of a labeling team member.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 SampleLabel

Parameter

+

Mandatory

+

Type

+

Description

+

annotated_by

+

No

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

No

+

String

+

Label ID.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

No

+

Float

+

Confidence.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 SampleLabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:content

+

No

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

No

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

No

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

No

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

No

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

No

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

No

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Separate hard sample reason IDs with hyphens (-), for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

No

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

No

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

No

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

No

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

No

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 SampleMetadata

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:hard

+

No

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

No

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

Array of integers

+

IDs of hard sample reasons, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

No

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+
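The start_index/end_index convention for named entity labels in Table 5 matches half-open slicing: the character at start_index is included and the character at end_index is excluded. A minimal Python illustration using the first example sentence from the parameter description:

text = "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician."

start_index, end_index = 0, 23
entity = text[start_index:end_index]   # the character at end_index is excluded
print(entity)                          # Barack Hussein Obama II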

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of BatchResponse objects

+

Response list for updating sample labels in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 BatchResponse

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: The operation is successful.

    +
  • false: The operation failed.

    +
+
+
+
+

Example Requests

Updating Labels of Team Labeling Samples in Batches

+
{
+  "samples" : [ {
+    "sample_id" : "0a0939d6d3c48a3d2a2619245943ac21",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "labels" : [ {
+      "name" : "tulips"
+    } ]
+  }, {
+    "sample_id" : "0e1b5a16a5a577ee53aeb34278a4b3e7",
+    "worker_id" : "8c15ad080d3eabad14037b4eb00d6a6f",
+    "labels" : [ {
+      "name" : "tulips"
+    } ]
+  } ]
+}
+
+
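For illustration, a minimal Python sketch that builds the same request body from (sample_id, label name) pairs and submits it. The endpoint host, resource IDs, worker ID, and token are placeholders (assumptions); token-based authentication via the X-Auth-Token header is described in Authentication.

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint (assumption)
PROJECT_ID = "your-project-id"
DATASET_ID = "your-dataset-id"
WORKFORCE_TASK_ID = "your-workforce-task-id"
WORKER_ID = "your-worker-id"
TOKEN = "your-iam-token"

# (sample_id, label name) pairs to update in one batch.
updates = [
    ("0a0939d6d3c48a3d2a2619245943ac21", "tulips"),
    ("0e1b5a16a5a577ee53aeb34278a4b3e7", "tulips"),
]

body = {
    "samples": [
        {"sample_id": sid, "worker_id": WORKER_ID, "labels": [{"name": name}]}
        for sid, name in updates
    ]
}

url = (f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}"
       f"/workforce-tasks/{WORKFORCE_TASK_ID}/data-annotations/samples")

resp = requests.put(url, json=body, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()
result = resp.json()
# "success" is true only if every per-sample update succeeded; individual
# failures, if any, are reported in the "results" list.
print(result.get("success"), result.get("results"))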

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkspace.html b/modelarts/api-ref/UpdateWorkspace.html new file mode 100644 index 00000000..4cf58c82 --- /dev/null +++ b/modelarts/api-ref/UpdateWorkspace.html @@ -0,0 +1,206 @@ + + +

Modifying a Workspace

+

Function

This API is used to modify a workspace.

+
+

URI

PUT /v1/{project_id}/workspaces/{workspace_id}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workspace_id

+

Yes

+

String

+

Workspace ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

No

+

String

+

Workspace name, encoded using UTF-8.

+

Enter 4 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed. The name default is reserved by the system for the default workspace: an existing workspace cannot be renamed to default, and the name of the default workspace cannot be changed.

+

description

+

No

+

String

+

Workspace description. By default, this parameter is left blank. Enter 0 to 256 characters.

+

auth_type

+

No

+

String

+

Authorization type. Options:

+
  • PUBLIC: public access of tenants (default value)

    +
  • PRIVATE: accessible only to the creator and primary account

    +
  • INTERNAL: accessible to the creator, primary account, and specified IAM users. This parameter must be used together with grants.

    +
+

grants

+

No

+

Array of grants objects

+

List of authorized users, which is left blank by default. This parameter must be used together with auth_type and takes effect only when auth_type is set to INTERNAL.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 grants

Parameter

+

Mandatory

+

Type

+

Description

+

user_id

+

No

+

String

+

User ID. Either this parameter or user_name must be set. If both of them are set, user_id is used preferentially.

+

user_name

+

No

+

String

+

IAM username. Either this parameter or user_id must be set.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 4 Response body parameters

Parameter

+

Type

+

Description

+

workspace_id

+

String

+

Workspace ID, which is a 32-character UUID generated by the system, without hyphens (-). The ID of the default workspace is 0.

+
+
+
+

Example Requests

Modifying a Workspace

+
PUT  https://{endpoint}/v1/{project_id}/workspaces/{workspace_id}
+
+{
+  "name" : "my_workspace",
+  "description" : "It is my workspace",
+  "auth_type" : "INTERNAL",
+  "grants" : [ {
+    "user_name" : "my_iam_user"
+  } ]
+}
+
+

Example Responses

Status code: 200

+

OK

+
{
+  "workspace_id" : "***05d1a553b4e188ea878e7dcb85***"
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UpdateWorkspaceQuotas.html b/modelarts/api-ref/UpdateWorkspaceQuotas.html new file mode 100644 index 00000000..c6475e1b --- /dev/null +++ b/modelarts/api-ref/UpdateWorkspaceQuotas.html @@ -0,0 +1,258 @@ + + +

Modifying a Workspace Quota

+

Function

This API is used to modify a workspace quota.

+
+

URI

PUT /v1/{project_id}/workspaces/{workspace_id}/quotas

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+

workspace_id

+

Yes

+

String

+

Workspace ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

quotas

+

Yes

+

Array of quotas objects

+

List of quotas

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 3 quotas

Parameter

+

Mandatory

+

Type

+

Description

+

resource

+

Yes

+

String

+

Unique resource ID

+

quota

+

Yes

+

Integer

+

Current quota. Value -1 indicates that the quota is not limited.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + +
Table 4 Response body parameters

Parameter

+

Type

+

Description

+

quotas

+

Array of quotas objects

+

List of quotas

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 quotas

Parameter

+

Type

+

Description

+

name_en

+

String

+

Name of a quota, in English

+

name_cn

+

String

+

Name of a quota, in Chinese

+

resource

+

String

+

Unique resource ID

+

quota

+

Integer

+

Existing quota. Value -1 indicates that the quota is not limited.

+

min_quota

+

Integer

+

Minimum quota

+

max_quota

+

Integer

+

Maximum quota

+

unit_en

+

String

+

Quota unit, in English

+

unit_cn

+

String

+

Quota unit, in Chinese

+

update_time

+

Number

+

Last modification time, in UTC format. If the resource quota has not been modified, the default value is the time when the workspace was created.

+
+
+
+

Example Requests

Modifying Workspace Quotas

+
PUT  https://{endpoint}/v1/{project_id}/workspaces/{workspace_id}/quotas
+
+{
+  "quotas" : [ {
+    "workspace_id" : "***9cd9ea8a5432cbcd6496e57839***",
+    "resource" : "exemlProject.gpu_duration",
+    "quota" : 10
+  } ]
+}
+
+
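For illustration, a minimal Python sketch that submits the quota change and prints the updated quotas from the response; a quota value of -1 means the quota is not limited. The endpoint host, resource IDs, and token are placeholders (assumptions).

import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint (assumption)
PROJECT_ID = "your-project-id"
WORKSPACE_ID = "your-workspace-id"
TOKEN = "your-iam-token"

body = {"quotas": [{"resource": "exemlProject.gpu_duration", "quota": 10}]}

url = f"{ENDPOINT}/v1/{PROJECT_ID}/workspaces/{WORKSPACE_ID}/quotas"
resp = requests.put(url, json=body, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()

for q in resp.json()["quotas"]:
    limit = "unlimited" if q["quota"] == -1 else f'{q["quota"]} {q["unit_en"]}'
    print(f'{q["resource"]}: {limit}')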

Example Responses

Status code: 200

+

OK

+
{
+  "quotas" : [ {
+    "name_en" : "ExeMLtraining duration (image classification, object detection, and soundclassification)",
+    "name_cn" : "Chinese name of the quota",
+    "resource" : "exemlProject.gpu_duration",
+    "quota" : 10,
+    "min_quota" : -1,
+    "max_quota" : 60000,
+    "unit_en" : "minute",
+    "unit_cn" : "Chinese name of the minute",
+    "update_time" : 1470000020000
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

400

+

BadRequest

+

403

+

Forbidden

+

500

+

InternalServerError

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/UploadSamplesJson.html b/modelarts/api-ref/UploadSamplesJson.html new file mode 100644 index 00000000..5d6e4109 --- /dev/null +++ b/modelarts/api-ref/UploadSamplesJson.html @@ -0,0 +1,954 @@ + + +

Adding Samples in Batches

+

Function

This API is used to add samples in batches.

+
+

URI

POST /v2/{project_id}/datasets/{dataset_id}/data-annotations/samples

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID.

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID.

+
+
+
+

Request Parameters

+
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Request body parameters

Parameter

+

Mandatory

+

Type

+

Description

+

final_annotation

+

No

+

Boolean

+

Whether to import labels directly as the final result. The options are as follows:

+
  • true: Import labels to the labeled dataset (default value).

    +
  • false: Import labels to the to-be-confirmed dataset. Currently, only image classification and object detection datasets support the to-be-confirmed state.

    +
+

label_format

+

No

+

LabelFormat object

+

Label format. This parameter is used only for text datasets.

+

samples

+

No

+

Array of Sample objects

+

Sample list.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 LabelFormat

Parameter

+

Mandatory

+

Type

+

Description

+

label_type

+

No

+

String

+

Label type of text classification. The options are as follows:

+
  • 0: The label is separated from the text, and they are distinguished by the fixed suffix _result. For example, the text file is abc.txt, and the label file is abc_result.txt.

    +
  • 1: Default value. Labels and texts are stored in the same file and separated by separators. You can use text_sample_separator to specify the separator between the text and label and text_label_separator to specify the separator between labels.

    +
+

text_label_separator

+

No

+

String

+

Separator between labels. By default, the comma (,) is used as the separator. The separator must be escaped and can contain only one character, which can be a letter, a digit, or one of the following special characters: !@#$%^&*_=|?/':.;,

+

text_sample_separator

+

No

+

String

+

Separator between the text and label. By default, the Tab key is used as the separator. The separator must be escaped and can contain only one character, which can be a letter, a digit, or one of the following special characters: !@#$%^&*_=|?/':.;,

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Sample

Parameter

+

Mandatory

+

Type

+

Description

+

data

+

No

+

Object

+

Byte data of the sample file. The type is java.nio.ByteBuffer. When calling this API, upload the byte data converted to a string.

+

data_source

+

No

+

DataSource object

+

Data source.

+

encoding

+

No

+

String

+

Encoding type of sample files, which is used to upload .txt or .csv files. The value can be UTF-8, GBK, or GB2312. The default value is UTF-8.

+

labels

+

No

+

Array of SampleLabel objects

+

Sample label list.

+

metadata

+

No

+

SampleMetadata object

+

Key-value pair of the sample metadata attribute.

+

name

+

No

+

String

+

Name of sample files. The value contains 0 to 1,024 characters and cannot contain special characters (!<>=&"').

+

sample_type

+

No

+

Integer

+

Sample type. The options are as follows:

+
  • 0: image

    +
  • 1: text

    +
  • 2: speech

    +
  • 4: table

    +
  • 6: video

    +
  • 9: custom format

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 DataSource

Parameter

+

Mandatory

+

Type

+

Description

+

data_path

+

No

+

String

+

Data source path.

+

data_type

+

No

+

Integer

+

Data type. The options are as follows:

+
  • 0: OBS bucket (default value)

    +
  • 1: GaussDB(DWS)

    +
  • 2: DLI

    +
  • 3: RDS

    +
  • 4: MRS

    +
  • 5: AI Gallery

    +
  • 6: Inference service

    +
+

schema_maps

+

No

+

Array of SchemaMap objects

+

Schema mapping information corresponding to the table data.

+

source_info

+

No

+

SourceInfo object

+

Information required for importing a table data source.

+

with_column_header

+

No

+

Boolean

+

Whether the first row in the file is a column name. This field is valid for the table dataset. The options are as follows:

+
  • true: The first row in the file is the column name.

    +
  • false: The first row in the file is not the column name.

    +
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 6 SchemaMap

Parameter

+

Mandatory

+

Type

+

Description

+

dest_name

+

No

+

String

+

Name of the destination column.

+

src_name

+

No

+

String

+

Name of the source column.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 SourceInfo

Parameter

+

Mandatory

+

Type

+

Description

+

cluster_id

+

No

+

String

+

ID of an MRS cluster.

+

cluster_mode

+

No

+

String

+

Running mode of an MRS cluster. The options are as follows:

+
  • 0: normal cluster

    +
  • 1: security cluster

    +
+

cluster_name

+

No

+

String

+

Name of an MRS cluster.

+

database_name

+

No

+

String

+

Name of the database to which the table dataset is imported.

+

input

+

No

+

String

+

HDFS path of a table dataset.

+

ip

+

No

+

String

+

IP address of your GaussDB(DWS) cluster.

+

port

+

No

+

String

+

Port number of your GaussDB(DWS) cluster.

+

queue_name

+

No

+

String

+

DLI queue name of a table dataset.

+

subnet_id

+

No

+

String

+

Subnet ID of an MRS cluster.

+

table_name

+

No

+

String

+

Name of the table to which a table dataset is imported.

+

user_name

+

No

+

String

+

Username, which is mandatory for GaussDB(DWS) data.

+

user_password

+

No

+

String

+

User password, which is mandatory for GaussDB(DWS) data.

+

vpc_id

+

No

+

String

+

ID of the VPC where an MRS cluster resides.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 SampleLabel

Parameter

+

Mandatory

+

Type

+

Description

+

annotated_by

+

No

+

String

+

Video labeling method, which is used to distinguish whether a video is labeled manually or automatically. The options are as follows:

+
  • human: manual labeling

    +
  • auto: automatic labeling

    +
+

id

+

No

+

String

+

Label ID.

+

name

+

No

+

String

+

Label name.

+

property

+

No

+

SampleLabelProperty object

+

Attribute key-value pair of the sample label, such as the object shape and shape feature.

+

score

+

No

+

Float

+

Confidence.

+

type

+

No

+

Integer

+

Label type. The options are as follows:

+
  • 0: image classification

    +
  • 1: object detection

    +
  • 100: text classification

    +
  • 101: named entity recognition

    +
  • 102: text triplet relationship

    +
  • 103: text triplet entity

    +
  • 200: speech classification

    +
  • 201: speech content

    +
  • 202: speech paragraph labeling

    +
  • 600: video classification

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 SampleLabelProperty

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:content

+

No

+

String

+

Speech text content, which is a default attribute dedicated to the speech label (including the speech content and speech start and end points).

+

@modelarts:end_index

+

No

+

Integer

+

End position of the text, which is a default attribute dedicated to the named entity label. The end position does not include the character corresponding to the value of end_index. Examples are as follows.

+
  • If the text content is "Barack Hussein Obama II (born August 4, 1961) is an American attorney and politician.", the start_index and end_index values of "Barack Hussein Obama II" are 0 and 23, respectively.

    +
  • If the text content is "By the end of 2018, the company has more than 100 employees.", the start_index and end_index values of "By the end of 2018" are 0 and 18, respectively.

    +
+

@modelarts:end_time

+

No

+

String

+

Speech end time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:feature

+

No

+

Object

+

Shape feature, which is a default attribute dedicated to the object detection label, with type of List. The upper left corner of an image is used as the coordinate origin [0,0]. Each coordinate point is represented by [x, y]. x indicates the horizontal coordinate, and y indicates the vertical coordinate (both x and y are greater than or equal to 0). The format of each shape is as follows:

+
  • bndbox: consists of two points, for example, [[0,10],[50,95]]. The first point is located at the upper left corner of the rectangle and the second point is located at the lower right corner of the rectangle. That is, the X coordinate of the first point must be smaller than that of the second point, and the Y coordinate of the first point must be smaller than that of the second point.

    +
  • polygon: consists of multiple points that are connected in sequence to form a polygon, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
  • circle: consists of the center point and radius, for example, [[100,100],[50]].

    +
  • line: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • dashed: consists of two points, for example, [[0,100],[50,95]]. The first point is the start point, and the second point is the end point.

    +
  • point: consists of one point, for example, [[0,100]].

    +
  • polyline: consists of multiple points, for example, [[0,100],[50,95],[10,60],[500,400]].

    +
+

@modelarts:from

+

No

+

String

+

ID of the head entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+

@modelarts:hard

+

No

+

String

+

Sample labeled as a hard sample or not, which is a default attribute. Options:

+
  • 0/false: not a hard example

    +
  • 1/true: hard example

    +
+

@modelarts:hard_coefficient

+

No

+

String

+

Coefficient of difficulty of each label level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

String

+

Reasons why the sample is a hard sample, which is a default attribute. Separate hard sample reason IDs with hyphens (-), for example, 3-20-21-19. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:shape

+

No

+

String

+

Object shape, which is a default attribute dedicated to the object detection label and is left empty by default. The options are as follows:

+
  • bndbox: rectangle

    +
  • polygon: polygon

    +
  • circle: circle

    +
  • line: straight line

    +
  • dashed: dotted line

    +
  • point: point

    +
  • polyline: polyline

    +
+

@modelarts:source

+

No

+

String

+

Speech source, which is a default attribute dedicated to the speech start/end point label and can be set to a speaker or narrator.

+

@modelarts:start_index

+

No

+

Integer

+

Start position of the text, which is a default attribute dedicated to the named entity label. The start value begins from 0, including the character corresponding to the value of start_index.

+

@modelarts:start_time

+

No

+

String

+

Speech start time, which is a default attribute dedicated to the speech start/end point label, in the format of hh:mm:ss.SSS. (hh indicates hour; mm indicates minute; ss indicates second; and SSS indicates millisecond.)

+

@modelarts:to

+

No

+

String

+

ID of the tail entity in the triplet relationship label, which is a default attribute dedicated to the triplet relationship label.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 SampleMetadata

Parameter

+

Mandatory

+

Type

+

Description

+

@modelarts:hard

+

No

+

Double

+

Whether the sample is labeled as a hard sample, which is a default attribute. The options are as follows:

+
  • 0: non-hard sample

    +
  • 1: hard sample

    +
+

@modelarts:hard_coefficient

+

No

+

Double

+

Coefficient of difficulty of each sample level, which is a default attribute. The value range is [0,1].

+

@modelarts:hard_reasons

+

No

+

Array of integers

+

IDs of hard sample reasons, which is a default attribute. The options are as follows:

+
  • 0: No target objects are identified.

    +
  • 1: The confidence is low.

    +
  • 2: The clustering result based on the training dataset is inconsistent with the prediction result.

    +
  • 3: The prediction result is greatly different from the data of the same type in the training dataset.

    +
  • 4: The prediction results of multiple consecutive similar images are inconsistent.

    +
  • 5: There is a large offset between the image resolution and the feature distribution of the training dataset.

    +
  • 6: There is a large offset between the aspect ratio of the image and the feature distribution of the training dataset.

    +
  • 7: There is a large offset between the brightness of the image and the feature distribution of the training dataset.

    +
  • 8: There is a large offset between the saturation of the image and the feature distribution of the training dataset.

    +
  • 9: There is a large offset between the color richness of the image and the feature distribution of the training dataset.

    +
  • 10: There is a large offset between the definition of the image and the feature distribution of the training dataset.

    +
  • 11: There is a large offset between the number of frames of the image and the feature distribution of the training dataset.

    +
  • 12: There is a large offset between the standard deviation of area of image frames and the feature distribution of the training dataset.

    +
  • 13: There is a large offset between the aspect ratio of image frames and the feature distribution of the training dataset.

    +
  • 14: There is a large offset between the area portion of image frames and the feature distribution of the training dataset.

    +
  • 15: There is a large offset between the edge of image frames and the feature distribution of the training dataset.

    +
  • 16: There is a large offset between the brightness of image frames and the feature distribution of the training dataset.

    +
  • 17: There is a large offset between the definition of image frames and the feature distribution of the training dataset.

    +
  • 18: There is a large offset between the stack of image frames and the feature distribution of the training dataset.

    +
  • 19: The data enhancement result based on GaussianBlur is inconsistent with the prediction result of the original image.

    +
  • 20: The data enhancement result based on fliplr is inconsistent with the prediction result of the original image.

    +
  • 21: The data enhancement result based on Crop is inconsistent with the prediction result of the original image.

    +
  • 22: The data enhancement result based on flipud is inconsistent with the prediction result of the original image.

    +
  • 23: The data enhancement result based on scale is inconsistent with the prediction result of the original image.

    +
  • 24: The data enhancement result based on translate is inconsistent with the prediction result of the original image.

    +
  • 25: The data enhancement result based on shear is inconsistent with the prediction result of the original image.

    +
  • 26: The data enhancement result based on superpixels is inconsistent with the prediction result of the original image.

    +
  • 27: The data enhancement result based on sharpen is inconsistent with the prediction result of the original image.

    +
  • 28: The data enhancement result based on add is inconsistent with the prediction result of the original image.

    +
  • 29: The data enhancement result based on invert is inconsistent with the prediction result of the original image.

    +
  • 30: The data is predicted to be abnormal.

    +
+

@modelarts:size

+

No

+

Array of objects

+

Image size (width, height, and depth of the image), which is a default attribute, with type of List. In the list, the first number indicates the width (pixels), the second number indicates the height (pixels), and the third number indicates the depth (the depth can be left blank and the default value is 3). For example, [100,200,3] and [100,200] are both valid. Note: This parameter is mandatory only when the sample label list contains the object detection label.

+
+
+
+

Response Parameters

Status code: 200

+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 11 Response body parameters

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

results

+

Array of UploadSampleResp objects

+

Response list for adding samples in batches.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 UploadSampleResp

Parameter

+

Type

+

Description

+

error_code

+

String

+

Error code.

+

error_msg

+

String

+

Error message.

+

info

+

String

+

Description.

+

name

+

String

+

Name of a sample file.

+

success

+

Boolean

+

Whether the operation is successful. The options are as follows:

+
  • true: successful

    +
  • false: failed

    +
+
+
+
+

Example Requests

Adding Samples in Batches

+
{
+  "samples" : [ {
+    "name" : "2.jpg",
+    "data" : "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAA1AJUDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL"
+  } ]
+}
+
+
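For illustration, a minimal Python sketch of this call. The data string in the example above is consistent with Base64-encoded JPEG bytes, so the sketch assumes Base64 encoding for the data field; it also attaches an object detection label using the @modelarts:shape/@modelarts:feature conventions from Table 9 and the @modelarts:size metadata from Table 10 (mandatory when object detection labels are present). The endpoint host, resource IDs, token, file name, label name, and coordinates are placeholders (assumptions).

import base64
import requests

ENDPOINT = "https://modelarts.example.com"   # placeholder endpoint (assumption)
PROJECT_ID = "your-project-id"
DATASET_ID = "your-dataset-id"
TOKEN = "your-iam-token"

# Assumption: the sample bytes are uploaded as a Base64 string, matching the example payload above.
with open("2.jpg", "rb") as f:
    data_str = base64.b64encode(f.read()).decode("ascii")

sample = {
    "name": "2.jpg",
    "sample_type": 0,                          # 0: image
    "data": data_str,
    "labels": [{
        "name": "cat",                         # hypothetical label name
        "type": 1,                             # 1: object detection
        "property": {
            "@modelarts:shape": "bndbox",
            # bndbox: upper left point first, lower right point second
            "@modelarts:feature": [[10, 20], [200, 240]],
        },
    }],
    # [width, height, depth]; required because the label list contains an object detection label.
    "metadata": {"@modelarts:size": [640, 480, 3]},
}

url = f"{ENDPOINT}/v2/{PROJECT_ID}/datasets/{DATASET_ID}/data-annotations/samples"
resp = requests.post(url, json={"samples": [sample]}, headers={"X-Auth-Token": TOKEN})
resp.raise_for_status()
print(resp.json())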

Example Responses

Status code: 200

+

OK

+
{
+  "success" : true,
+  "results" : [ {
+    "success" : true,
+    "name" : "/test-obs/classify/input/cat-dog/2.jpg",
+    "info" : "960585877c92d63911ba555ab3129d36"
+  } ]
+}
+
+

Status Codes

+
+ + + + + + + + + + + + + + + + +

Status Code

+

Description

+

200

+

OK

+

401

+

Unauthorized

+

403

+

Forbidden

+

404

+

Not Found

+
+
+
+

Error Codes

See Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/authorization.html b/modelarts/api-ref/authorization.html new file mode 100644 index 00000000..0ca790fd --- /dev/null +++ b/modelarts/api-ref/authorization.html @@ -0,0 +1,18 @@ + + +

Authorization Management

+

+
+
+ +
+ diff --git a/modelarts/api-ref/auto_task.html b/modelarts/api-ref/auto_task.html new file mode 100644 index 00000000..83d416da --- /dev/null +++ b/modelarts/api-ref/auto_task.html @@ -0,0 +1,26 @@ + + +

Intelligent Task

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/data_export.html b/modelarts/api-ref/data_export.html new file mode 100644 index 00000000..78f81658 --- /dev/null +++ b/modelarts/api-ref/data_export.html @@ -0,0 +1,20 @@ + + +

Data Export Task

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/data_import.html b/modelarts/api-ref/data_import.html new file mode 100644 index 00000000..23102fba --- /dev/null +++ b/modelarts/api-ref/data_import.html @@ -0,0 +1,20 @@ + + +

Data Import Task

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/data_sync.html b/modelarts/api-ref/data_sync.html new file mode 100644 index 00000000..0f5ec64d --- /dev/null +++ b/modelarts/api-ref/data_sync.html @@ -0,0 +1,18 @@ + + +

Data Synchronization Task

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/dataset_management.html b/modelarts/api-ref/dataset_management.html new file mode 100644 index 00000000..c2909099 --- /dev/null +++ b/modelarts/api-ref/dataset_management.html @@ -0,0 +1,32 @@ + + +

Dataset Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/dataset_version_management.html b/modelarts/api-ref/dataset_version_management.html new file mode 100644 index 00000000..3df63999 --- /dev/null +++ b/modelarts/api-ref/dataset_version_management.html @@ -0,0 +1,22 @@ + + +

Dataset Version Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/en-us_image_0000001126894914.png b/modelarts/api-ref/en-us_image_0000001126894914.png new file mode 100644 index 00000000..371f8e2e Binary files /dev/null and b/modelarts/api-ref/en-us_image_0000001126894914.png differ diff --git a/modelarts/api-ref/en-us_image_0000001126895190.png b/modelarts/api-ref/en-us_image_0000001126895190.png new file mode 100644 index 00000000..923e3725 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0000001126895190.png differ diff --git a/modelarts/api-ref/en-us_image_0150916848.gif b/modelarts/api-ref/en-us_image_0150916848.gif new file mode 100644 index 00000000..d5d6a5eb Binary files /dev/null and b/modelarts/api-ref/en-us_image_0150916848.gif differ diff --git a/modelarts/api-ref/en-us_image_0150917346.gif b/modelarts/api-ref/en-us_image_0150917346.gif new file mode 100644 index 00000000..ad0cefd8 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0150917346.gif differ diff --git a/modelarts/api-ref/en-us_image_0150917350.gif b/modelarts/api-ref/en-us_image_0150917350.gif new file mode 100644 index 00000000..b3e80344 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0150917350.gif differ diff --git a/modelarts/api-ref/en-us_image_0150917353.gif b/modelarts/api-ref/en-us_image_0150917353.gif new file mode 100644 index 00000000..942e1451 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0150917353.gif differ diff --git a/modelarts/api-ref/en-us_image_0171113090.png b/modelarts/api-ref/en-us_image_0171113090.png new file mode 100644 index 00000000..d4a37a97 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0171113090.png differ diff --git a/modelarts/api-ref/en-us_image_0171392261.gif b/modelarts/api-ref/en-us_image_0171392261.gif new file mode 100644 index 00000000..1f8bc2f5 Binary files /dev/null and b/modelarts/api-ref/en-us_image_0171392261.gif differ diff --git a/modelarts/api-ref/en-us_topic_0000001147936839.html b/modelarts/api-ref/en-us_topic_0000001147936839.html new file mode 100644 index 00000000..20f914b4 --- /dev/null +++ b/modelarts/api-ref/en-us_topic_0000001147936839.html @@ -0,0 +1,300 @@ + + +

Querying the Statuses and GPU Quantity of All Job Versions

+

Function

You can use this API to query the overview information about all job versions created by a user based on specified conditions, including the statuses and GPU quantity of all job versions.

+
+

URI

GET /v1/{project_id}/training-jobs/versions

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID.

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

offset

+

No

+

Integer

+

Offset.

+

limit

+

No

+

Integer

+

Number of displayed records. The default value is 20. The value 0 indicates that all records are displayed.

+

status

+

No

+

String

+

Job status. By default, all job statuses are returned. For example, to query failed jobs, set the value of status to 3, 5, 6, or 13. For details about the job statuses, see Job Statuses.

+

sort_by

+

No

+

String

+

Sorting mode of the query. By default, the records are sorted by create_time. The records can be sorted by job_desc, status, duration, or job_name.

+

order

+

No

+

String

+

Sorting order. Options:

+
  • asc: ascending order
  • desc: descending order. The default value is desc.
+

pool_id

+

No

+

String

+

ID of the resource pool to be queried.

+

workspace_id

+

No

+

String

+

Workspace where a job resides. Default value: 0

+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

total_count

+

Integer

+

Total number of created job versions.

+

versions

+

JSON Array

+

Attributes of a training job. For details, see Table 4.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 jobs parameters

Parameter

+

Type

+

Description

+

version_id

+

Long

+

ID of a training job

+

version_name

+

String

+

Name of a training job

+

status

+

Byte

+

Status of a training job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Timestamp when a training job is created

+

start_time

+

Long

+

Timestamp when a training job starts.

+

end_time

+

Long

+

Timestamp when a training job ends.

+

duration

+

Long

+

Running duration of a training job.

+

version_desc

+

String

+

Description of a training job

+

pool_id

+

String

+

ID of the resource pool to which a training job belongs.

+

flavor_code

+

String

+

Resource specifications selected for a training job

+

npu_num

+

Integer

+

Number of NPUs used by a training job.

+

gpu_num

+

Integer

+

Number of GPUs used by a training job.

+
+
+
+
+

Samples

Query training jobs in batches.

+ +
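For illustration only (the endpoint, project ID, and all returned values below are placeholders; a real response contains one entry per job version with the fields listed in Table 4), such a query and an abbreviated response might look as follows:

GET https://endpoint/v1/{project_id}/training-jobs/versions?limit=10&offset=0&workspace_id=0

{
    "is_success": true,
    "total_count": 1,
    "versions": [
        {
            "version_id": 10,
            "version_name": "V0001",
            "status": 10,
            "create_time": 1524189990635,
            "start_time": 1524189995000,
            "end_time": 1524190159000,
            "duration": 164000,
            "version_desc": "This is a ModelArts job",
            "pool_id": "pool7cb3a92b",
            "flavor_code": "modelarts.vm.gpu.v100",
            "npu_num": 0,
            "gpu_num": 1
        }
    ]
}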
+ +

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/label_management.html b/modelarts/api-ref/label_management.html new file mode 100644 index 00000000..99389886 --- /dev/null +++ b/modelarts/api-ref/label_management.html @@ -0,0 +1,26 @@ + + +

Label Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/label_task_management.html b/modelarts/api-ref/label_task_management.html new file mode 100644 index 00000000..2bd5b566 --- /dev/null +++ b/modelarts/api-ref/label_task_management.html @@ -0,0 +1,34 @@ + + +

Labeling Task Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/manual_annotation_management.html b/modelarts/api-ref/manual_annotation_management.html new file mode 100644 index 00000000..441e834e --- /dev/null +++ b/modelarts/api-ref/manual_annotation_management.html @@ -0,0 +1,16 @@ + + +

Manual Labeling

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0001.html b/modelarts/api-ref/modelarts_03_0001.html new file mode 100644 index 00000000..daaf71b0 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0001.html @@ -0,0 +1,11 @@ + + +

Overview

+

ModelArts is a one-stop AI development platform geared toward developers and data scientists of all skill levels. It enables you to rapidly build, train, and deploy models anywhere (from the cloud to the edge), and manage full-lifecycle AI workflows. ModelArts accelerates AI development and fosters AI innovation with key capabilities, including data preprocessing and auto labeling, distributed training, automated model building, and one-click workflow execution. You can use ModelArts through open APIs. For details about all ModelArts APIs, see API Overview.

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0002.html b/modelarts/api-ref/modelarts_03_0002.html new file mode 100644 index 00000000..26b75dc1 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0002.html @@ -0,0 +1,326 @@ + + +

API Overview

+

All ModelArts APIs are proprietary.

+

You can use these APIs to manage datasets, training jobs, models, and services.

+

Data Management APIs

Data management APIs include the APIs for managing datasets, dataset versions, samples, and labels. Use these APIs to create datasets and label data. For details, see Data Management APIs.

+
+

Development Environment APIs

+
+ + + + + + + + + + + + + + + + + + + + + + +
Table 1 Development environment APIs

API

+

Description

+

Creating a Development Environment Instance

+

Create a development environment instance for code development.

+

Querying a List of Development Environment Instances

+

Query the development environment instances that meet the search criteria.

+

Querying the Details About a Development Environment Instance

+

Query the details about a development environment instance.

+

Modifying the Description of a Development Environment Instance

+

Modify the description of a development environment instance.

+

Deleting a Development Environment Instance

+

Delete a development environment instance.

+

Managing a Development Environment Instance

+

Start or stop a development environment instance.

+
+
+
+

Training Management APIs

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Training management APIs

Type

+

API

+

Description

+

Training Jobs

+

Creating a Training Job

+

Create a training job.

+

Querying a Training Job List

+

Query the created training jobs that meet the search criteria.

+

Querying the Details About a Training Job Version

+

Query the details about a specified training job based on the job ID.

+

Deleting a Version of a Training Job

+

Delete a version of a training job.

+

Querying a List of Training Job Versions

+

Query the version of a specified training job based on the job ID.

+

Creating a Version of a Training Job

+

Create a version of a training job.

+

Stopping a Training Job

+

Stop a training job.

+

Modifying the Description of a Training Job

+

Modify the description of a training job.

+

Deleting a Training Job

+

Delete a training job.

+

Obtaining the Name of a Training Job Log File

+

Obtain the name of a training job log file.

+

Querying a Built-in Algorithm

+

Query the details about a built-in model.

+

Querying Training Job Logs

+

Query detailed information about training job logs by row.

+

Training Job Parameter Configuration

+

Creating a Training Job Configuration

+

Create a training job configuration.

+

Querying a List of Training Job Configurations

+

Query the created training job configurations that meet the search criteria.

+

Modifying a Training Job Configuration

+

Modify a training job configuration.

+

Deleting a Training Job Configuration

+

Delete a training job configuration.

+

Querying the Details About a Training Job Configuration

+

Query the details about a specified training job configuration.

+

Visualization Job Management

+

Creating a Visualization Job

+

Create a visualization job.

+

Querying a Visualization Job List

+

Query the visualization jobs that meet the search criteria.

+

Querying the Details About a Visualization Job

+

Query the details about a specified visualization job based on the job name.

+

Modifying the Description of a Visualization Job

+

Modify the description of a visualization job.

+

Deleting a Visualization Job

+

Delete a visualization job.

+

Stopping a Visualization Job

+

Stop a visualization job.

+

Restarting a Visualization Job

+

Restart a visualization job.

+

Resource and Engine Specifications

+

Querying Job Resource Specifications

+

Query the resource specifications of a specified job.

+

Querying Job Engine Specifications

+

Query the engine type and version of a specified job.

+

Job Statuses

+

Job Statuses

+

View job statuses and status description.

+
+
+
+

Model Management APIs

+
+ + + + + + + + + + + + + + + + +
Table 3 Model management APIs

API

+

Description

+

Importing a Model

+

Import a model.

+

Querying a Model List

+

Query the models that meet the search criteria.

+

Querying the Details About a Model

+

Query details about a model based on the model ID.

+

Deleting a Model

+

Delete a specified model based on the model ID. All versions of the model can be deleted in cascading mode.

+
+
+
+

Service Management APIs

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 Service management APIs

API

+

Description

+

Deploying a Model as a Service

+

Deploy a model service.

+

Querying a Service List

+

Query model services.

+

Querying the Details About a Service

+

Query the details about a model service based on the service ID.

+

Updating Service Configurations

+

Update a model service.

+

Querying Service Monitoring Information

+

Query service monitoring information.

+

Querying Service Update Logs

+

Query the update logs of a real-time service.

+

Querying Service Event Logs

+

Query service event logs, including service operation records, key actions during deployment, and deployment failure causes.

+

Deleting a Service

+

Delete a model service.

+

Querying Supported Service Deployment Specifications

+

Query supported service deployment specifications.

+
+
+
+

Authorization Management APIs

+
+ + + + + + + + + + + + + + + + +
Table 5 Workspace management APIs

API

+

Description

+

Viewing an Authorization List

+

View an authorization list.

+

Configuring Authorization

+

Configure ModelArts authorization. ModelArts functions such as training management, development environment, data management, and real-time services can be properly used only after required permissions are assigned.

+

Deleting Authorization

+

Delete the authorization of a specified user or all users.

+

Creating a ModelArts Agency

+

Create a ModelArts agency for dependent services such as OBS, SWR, and IEF.

+
+
+
+
+ diff --git a/modelarts/api-ref/modelarts_03_0003.html b/modelarts/api-ref/modelarts_03_0003.html new file mode 100644 index 00000000..924cc348 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0003.html @@ -0,0 +1,97 @@ + + +

Response

+

After sending a request, you will receive a response, including the status code, response header, and response body.

+

Status Code

A status code is a group of digits, ranging from 1xx to 5xx. It indicates the status of a request. For more information, see Status Code.

+

For example, if status code 201 is returned for calling the API used to obtain a user token, the request is successful.

+
+

Response Header

Similar to a request, a response also has a header, for example, Content-type.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Common response header fields

Header

+

Description

+

Mandatory

+

Content-Type

+

Media type of the message body sent to a receiver

+

Type: string

+

Default value: application/json; charset=UTF-8

+

Yes

+

X-request-id

+

This field carries the request ID for task tracing.

+

Type: string request_id-timestamp-hostname (request_id is the UUID generated on the server, timestamp indicates the current timestamp, and hostname is the name of the server that processes the current API.)

+

Default value: none

+

No

+

X-ratelimit

+

This field carries the total number of flow control requests.

+

Type: integer

+

Default value: none

+

No

+

X-ratelimit-used

+

This field carries the number of remaining requests.

+

Type: integer

+

Default value: none

+

No

+

X-ratelimit-window

+

This field carries the flow control unit.

+

Type: string The unit is minute, hour, or day.

+

Default value: hour

+

No

+
+
+

Figure 1 shows the response header fields for the API used to obtain a user token.

+

x-subject-token is the desired user token. This token can then be used to authenticate the calling of other APIs.

+
Figure 1 Header fields of the response to the request for obtaining a user token
+
+

Response Body

The body of a response is often returned in structured format as specified in the Content-Type header field. The response body transfers content except the response header.

+

The following is part of the response body for the API used to obtain a user token.

+
{
+    "token": {
+        "expires_at": "2019-02-13T06:52:13.855000Z",
+        "methods": [
+            "password"
+        ],
+        "catalog": [
+            {
+                "endpoints": [
+                    {
+                        "region_id": "aaa",
+......
+

If an error occurs during API calling, an error code and a message will be displayed. The following shows an error response body.

+
{
+    "error_message": "The format of message is error",
+    "error_code": "AS.0001"
+}
+

In the error response body, error_code is an error code, and error_message provides information about the error. For more details, see Error Codes.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0004.html b/modelarts/api-ref/modelarts_03_0004.html new file mode 100644 index 00000000..13c56b0f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0004.html @@ -0,0 +1,113 @@ + + +

Authentication

+
Requests for calling an API can be authenticated using either of the following methods:
  • Token-based authentication: Requests are authenticated using a token.
  • AK/SK-based authentication: Requests are authenticated by encrypting the request body using an AK/SK pair. +
+

Token-based Authentication

The validity period of a token is 24 hours. When using a token for authentication, cache it to prevent frequently calling the IAM API used to obtain a user token.

+
+

A token specifies temporary permissions in a computer system. During API authentication using a token, the token is added to requests to get permissions for calling the API.

+

In Making an API Request, the process of calling the API used to obtain a user token is described.

+
{
+	"auth": {
+		"identity": {
+			"methods": [
+				"password"
+			],
+			"password": {
+				"user": {
+					"name": "user_name",
+					"password": "user_password",
+					"domain": {
+						"name": "domain_name"
+					}
+				}
+			}
+		},
+		"scope": {
+			"project": {
+				"name": "project_name"
+			}
+		}
+	}
+}
+

After a token is obtained, the X-Auth-Token header field must be added to requests to specify the token when calling other APIs. For example, if the token is ABCDEFJ...., X-Auth-Token: ABCDEFJ.... can be added to a request as follows:

+
+POST https://{endpoint}/v1/{project_id}/services
+Content-Type: application/json
+X-Auth-Token: ABCDEFJ....
+
+

AK/SK-based Authentication

An AK/SK is used to verify the identity of a request sender. In AK/SK-based authentication, a signature needs to be obtained and then added to requests.

+

AK: access key ID, which is a unique identifier used in conjunction with a secret access key to sign requests cryptographically.

+

SK: secret access key used in conjunction with an AK to sign requests cryptographically. It identifies a request sender and prevents the request from being modified.

+
+

The following uses a demo project to show how to sign a request and use an HTTP client to send an HTTPS request.

+

Download the demo project at https://github.com/api-gate-way/SdkDemo.

+

If you do not need the demo project, only the API Gateway signing SDK is required:

+

Obtain the API Gateway signing SDK from the enterprise administrator.

+

Decompress the downloaded package and reference the obtained JAR files as dependencies in your project.

+

+
  1. Generate an AK/SK. (If an AK/SK file has already been obtained, skip this step and locate the downloaded AK/SK file. Generally, the file name will be credentials.csv.)

    1. Log in to ManageOne Operation Portal.
    2. In the upper right corner of the page, click your account avatar and choose My Settings from the drop-down list.
    3. On the My Settings page, the Project List tab page is displayed by default. Click the Manage Access Key tab page.
    4. Click Add Access Key to create an AK/SK pair.
    5. Click OK. The certificate is automatically downloaded.
    6. After the certificate is downloaded, obtain the AK and SK information from the credentials file.
      • Only two access keys can be added for each user.
      • To ensure access key security, access keys are automatically downloaded only when they are generated for the first time and cannot be obtained from the management console later. Keep them properly.
      +
      +
    +

  2. Download and decompress the demo project.
  3. Import the demo project to Eclipse.

    Figure 1 Selecting Existing Projects into Workspace
    +
    Figure 2 Selecting the demo project
    +
    Figure 3 Structure of the demo project
    +

  4. Sign the request.

    The request signing method is integrated in the JAR files imported in 3. The request needs to be signed before it is sent. The signature will then be added as part of the HTTP header to the request.

    +

    The demo code is classified into the following classes to demonstrate signing and sending the HTTP request:

    +
    • AccessService: abstract class that merges the GET, POST, PUT, and DELETE methods into the access method
    • Demo: execution entry used to simulate the sending of GET, POST, PUT, and DELETE requests
    • AccessServiceImpl: implementation of the access method, which contains the code required for communication with API Gateway
    +

    The following describes how to call a POST method to sign the request.

    +
    1. (Optional) Add request header fields.

      Uncomment the following code snippet in the AccessServiceImpl.java file, and specify the project ID and account ID.

      +
      //TODO: Add special headers.
      +//request.addHeader("X-Project-Id", "xxxxx");
      +//request.addHeader("X-Domain-Id", "xxxxx");
      +
    2. Edit the main() method in the Demo.java file, and replace the bold text with actual values.

      As shown in the following code, if you use other methods such as POST, PUT, and DELETE, see the corresponding comment. Replace the values of region, serviceName, ak, sk, and url. The URL for obtaining the VPC is used in the sample project. Replace it with the actual URL. Contact the administrator to obtain the endpoint.

      +
      //TODO: Replace the value of region with the actual region where the service to be accessed is located.
      +private static final String region = "";
      +
      +//TODO: Replace vpc with the name of the service you want to access. For example, ecs, vpc, iam, and elb.
      +private static final String serviceName = "";
      +
      +public static void main(String[] args) throws UnsupportedEncodingException
      +{
      +//TODO: Replace the values of ak and sk with the AK/SK obtained on the My Credentials page.
      +String ak = "ZIRRKMTWP******1WKNKB";
      +String sk = "Us0mdMNHk******YrRCnW0ecfzl";
      +
      +//TODO: To specify a project ID (multi-project scenarios), add the X-Project-Id header.
      +//TODO: To access a global service, such as IAM, DNS, CDN, and TMS, add the X-Domain-Id header to specify an account ID.
      +//TODO: To add a header, find "Add special headers" in the AccessServiceImple.java file.
      +
      +//TODO: Test the API.
      +String url = "https://{Endpoint}/v1/{project_id}/vpcs";
      +get(ak, sk, url);
      +
      +//TODO: When creating a VPC, replace {project_id} in postUrl with the actual value.
      +//String postUrl = "https://serviceEndpoint/v1/{project_id}/cloudservers";
      +//String postbody ="{\"vpc\": {\"name\": \"vpc\",\"cidr\": \"192.168.0.0/16\"}}";
      +//post(ak, sk, postUrl, postbody);
      +
      +//TODO: When querying a VPC, replace {project_id} in url with the actual value.
      +//String url = "https://serviceEndpoint/v1/{project_id}/vpcs/{vpc_id}";
      +//get(ak, sk, url);
      +
      +//TODO: When updating a VPC, replace {project_id} and {vpc_id} in putUrl with the actual values.
      +//String putUrl = "https://serviceEndpoint/v1/{project_id}/vpcs/{vpc_id}";
      +//String putbody ="{\"vpc\":{\"name\": \"vpc1\",\"cidr\": \"192.168.0.0/16\"}}";
      +//put(ak, sk, putUrl, putbody);
      +
      +//TODO: When deleting a VPC, replace {project_id} and {vpc_id} in deleteUrl with the actual values.
      +//String deleteUrl = "https://serviceEndpoint/v1/{project_id}/vpcs/{vpc_id}";
      +//delete(ak, sk, deleteUrl);
      +}
      +
    3. Compile the code and call the API.

In the Package Explorer area on the left, right-click Demo.java and choose Run As > Java Application from the shortcut menu to run the demo code.

      +

      You can view the API call logs on the console.

      +
    +

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0005.html b/modelarts/api-ref/modelarts_03_0005.html new file mode 100644 index 00000000..2f201f5f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0005.html @@ -0,0 +1,218 @@ + + +

Making an API Request

+

This section describes the structure of a REST API request, and uses the IAM API for obtaining a user token as an example to demonstrate how to call an API. The obtained token can then be used to authenticate the calling of other APIs.

+

Request URI

The format of a request URI is as follows:

+

{URI-scheme} :// {Endpoint} / {resource-path} ? {query-string}

+ +
+ + + + + + + + + + + + + + + + +
Table 1 Request URI

Parameter

+

Description

+

URI-scheme

+

Protocol used to transmit requests. All APIs use HTTPS.

+

Endpoint

+

Domain name or IP address of the server for the REST service endpoint. The endpoint varies depending on services in different regions. It can be obtained in Endpoints.

+

resource-path

+

Access path of an API for performing a specified operation. Obtain the path from the URI of an API. For example, the resource-path of the API used to obtain a user token is /v3/auth/tokens.

+

query-string

+

Query string, which is optional. It starts with a question mark (?), and each query parameter is in the format of "Parameter name=Parameter value". For example, ?limit=10 indicates that a maximum of 10 data records will be displayed.

+
+
+

For example, to obtain an IAM token in a region, obtain the endpoint of IAM for this region and the resource-path (/v3/auth/tokens) in the URI of the API used to obtain a user token. Then, construct the URI as follows:

+
+https://{iam-endpoint}/v3/auth/tokens
+

To simplify the URI display in this document, each API is provided only with a resource-path and a request method. The URI-scheme of all APIs is HTTPS, and the endpoints of all APIs in the same region are identical.

+
+
+
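For instance, combining a region's ModelArts endpoint (a placeholder below) with the resource-path of the API for creating a training job, which is described later in this document, yields the following request line:

POST https://{modelarts-endpoint}/v1/{project_id}/training-jobs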

Request Methods

The HTTP protocol defines the following request methods that can be used to send a request to the server:

+ +
+ + + + + + + + + + + + + + + + + + + + + + +
Table 2 HTTP methods

Method

+

Description

+

GET

+

Requests the server to return specified resources.

+

PUT

+

Requests the server to update specified resources.

+

POST

+

Requests the server to add resources or perform special operations.

+

DELETE

+

Requests the server to delete specified resources, for example, an object.

+

HEAD

+

Same as GET except that the server must return only the response header.

+

PATCH

+

Requests the server to update partial content of a specified resource.

+

If the resource does not exist, a new resource will be created.

+
+
+
For example, in the case of the API used to obtain a user token, the request method is POST. The request is as follows:
POST https://{iam-endpoint}/v3/auth/tokens
+
+
+

Request Header

You can also add additional header fields to a request, such as the fields required by a specified URI or HTTP method. For example, to request for the authentication information, add Content-Type, which specifies the request body type.

+

Table 3 describes the common request header fields to be added to the request.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Common request header fields

Header

+

Description

+

Mandatory

+

Example

+

Content-type

+

Request body type or format. The default value is application/json.

+

Yes

+

application/json

+

Content-Length

+

Length of the request body. The unit is byte.

+

Mandatory for POST and PUT requests but must be left blank for GET requests

+

3495

+

X-Project-Id

+

Project ID. This parameter is used to obtain the token for each project.

+

No

+

e9993fc787d94b6c886cbaa340f9c0f4

+

X-Auth-Token

+

User token. It is obtained by calling the API used to obtain a user token, which is the only API that does not require authentication.

+

Mandatory for token-based authentication

+

None

+

X-Sdk-Date

+

Time when the request is sent. The time is in YYYYMMDD'T'HHMMSS'Z' format.

+

The value is the current Greenwich Mean Time (GMT) time of the system.

+

Mandatory for AK/SK-based authentication, optional for PKI token-based authentication

+

20190307T101459Z

+

Authorization

+

Authentication information.

+

The value is obtained from the request signature result and is required when the AK/SK are used to encrypt the signature.

+

Type: string

+

Default value: none

+

Mandatory for AK/SK-based authentication

+

SDK-HMAC-SHA256 Credential=ZIRRKMTWPTQFQI1WKNKB/20150907//ec2/sdk_request, SignedHeaders=content-type;host;x-sdk-date, Signature=55741b610f3c9fa3ae40b5a8021ebf7ebc2a28a603fc62d25cb3bfe6608e1994

+

Host

+

Information about the requested server. The value can be obtained from the URL of the service API.

+

This value is host name[:port number].

+

If the port number is not specified, the default port is used. The default port number for https is 443.

+

Mandatory for AK/SK-based authentication

+

code.test.com

+

or

+

code.test.com:443

+
+
+

In addition to supporting authentication using tokens, APIs support authentication using AK/SK, which uses SDK to sign a request. During the signature, the Authorization (signature authentication) and X-Sdk-Date (time when a request is sent) headers are automatically added to the request.

+
+
The API for obtaining a user token does not require authentication. Therefore, this API only requires adding the Content-Type field. The request with the added Content-Type header is as follows:
POST https://{iam-endpoint}/v3/auth/tokens
+Content-Type: application/json
+
+
+

Request Body

The body of a request is often sent in a structured format as specified in the Content-Type header field. The request body transfers content except the request header. If the request body contains Chinese characters, these characters must be encoded in UTF-8.

+

The request body varies between APIs. Some APIs do not require the request body, such as the APIs requested using the GET and DELETE methods.

+

If an API is used to obtain a user token, the request parameters and parameter description can be obtained from the API request. The following provides an example request with a body included. Replace user_name, domain_name, and user_password with the actual username, account name, and login password, respectively. project_name is the project name. For details, see Obtaining a Username, Obtaining an Account Name and ID, and Obtaining a Project Name.

+

The scope parameter specifies where a token takes effect. You can set scope to an account or to a project under an account. Because ModelArts calls this API using a region-specific endpoint, set scope to project. In the following example, the token takes effect only for the resources in a specified project. For more information about this API, see "Obtaining a User Token".

+
+
+POST https://{iam-endpoint}/v3/auth/tokens 
+Content-Type:application/json
+{
+  "auth": {
+    "identity": {
+      "methods": ["password"],
+      "password": {
+        "user": {
+          "name": "Username",
+          "password": "User password",
+          "domain": {
+            "name": "Domain name"
+          }
+        }
+      }
+    },
+    "scope": {
+      "project": {
+        "name": "Project name"
+      }
+    }
+  }
+}
+

If all data required for the API request is available, you can send the request to call the API through curl, Postman, or coding. In the response to the API used to obtain a user token, x-subject-token is the desired user token. This token can then be used to authenticate the calling of other APIs.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0006.html b/modelarts/api-ref/modelarts_03_0006.html new file mode 100644 index 00000000..422b747b --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0006.html @@ -0,0 +1,14 @@ + + +

Obtaining a Username and ID

+

When you call APIs, certain requests require the username and ID. To obtain a username and ID, do as follows:

+
  1. Log in to the management console after registration.
  2. In the upper right corner, click your account avatar icon and choose My Settings from the drop-down list.

    On the My Settings page, view the username and ID.

    +
    Figure 1 Viewing the username and ID
    +
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0043.html b/modelarts/api-ref/modelarts_03_0043.html new file mode 100644 index 00000000..2e1d823f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0043.html @@ -0,0 +1,19 @@ + + +

Training Management

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0044.html b/modelarts/api-ref/modelarts_03_0044.html new file mode 100644 index 00000000..a50405d3 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0044.html @@ -0,0 +1,43 @@ + + +

Training Jobs

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0045.html b/modelarts/api-ref/modelarts_03_0045.html new file mode 100644 index 00000000..08cb25f7 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0045.html @@ -0,0 +1,660 @@ + + +

Creating a Training Job

+

Function

This API is used to create a training job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training Job List and Querying the Details About a Training Job Version.

+
+

URI

POST /v1/{project_id}/training-jobs

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_name

+

Yes

+

String

+

Training job name. The value is a string of 1 to 64 characters consisting of only digits, letters, underscores (_), and hyphens (-).

+

job_desc

+

No

+

String

+

Description of a training job. The value is a string of 0 to 256 characters. By default, this parameter is left blank.

+

config

+

Yes

+

JSON

+

Parameters for creating a training job

+

workspace_id

+

No

+

String

+

Workspace where a job resides. Default value: 0

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 config parameters

Parameter

+

Mandatory

+

Type

+

Description

+

worker_server_num

+

Yes

+

Integer

+

Number of workers in a training job. Obtain the maximum value from the max_num value returned by the API in Querying Job Resource Specifications.

+

app_url

+

Yes

+

String

+

Code directory of a training job, for example, /usr/app/. This parameter must be used together with boot_file_url. If model_id is set, app_url, boot_file_url, and engine_id do not need to be set.

+

boot_file_url

+

Yes

+

String

+

Boot file of a training job, which needs to be stored in the code directory. Example value: /usr/app/boot.py. This parameter must be used together with app_url. If model_id is set, app_url, boot_file_url, and engine_id do not need to be set.

+

parameter

+

No

+

Array<Object>

+

Running parameters of a training job. It is a collection of label-value pairs. Values can be customized. label is a parameter name and value is the parameter value. For details, see the sample request. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 8.

+

data_url

+

Yes

+

String

+

OBS URL of the dataset required by a training job. By default, this parameter is left blank. For example, /usr/data/. This parameter cannot be used together with data_source or dataset_id and dataset_version_id. However, one of the parameters must exist.

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url or data_source.

+

dataset_version_id

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url or data_source.

+

data_source

+

Yes

+

Array<Object>

+

Dataset of a training job. This parameter cannot be used together with data_url or dataset_id and dataset_version_id. For details, see Table 4.

+

spec_id

+

Yes

+

Long

+

ID of the resource specifications selected for a training job. Obtain the ID by calling the API described in Querying Job Resource Specifications.

+

engine_id

+

Yes

+

Long

+

ID of the engine selected for a training job. The default value is 1. If model_id is set, app_url, boot_file_url, and engine_id do not need to be set. Obtain the ID by calling the API described in Querying Job Engine Specifications.

+

train_url

+

No

+

String

+

OBS URL of the output file of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

log_url

+

No

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/log/

+

user_image_url

+

No

+

String

+

SWR URL of a custom image used by a training job. Example value: 100.125.5.235:20202/jobmng/custom-cpu-base:1.0

+

user_command

+

No

+

String

+

Boot command used to start the container of a custom image of a training job. The format is bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}.

+

create_version

+

No

+

Boolean

+

Whether a version is created when a training job is created

+
  • true: Default value. A version is created when a training job is created.
  • false: A version is not created when a training job is created.
+

volumes

+

No

+

JSON Array

+

Storage volume that can be used by a training job. For details, see Table 5.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 data_source parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url.

+

dataset_version

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url.

+

type

+

Yes

+

String

+

Dataset type. The value can be obs or dataset. obs and dataset cannot be used at the same time.

+

data_url

+

Yes

+

String

+

OBS bucket path. This parameter cannot be used together with dataset_id or dataset_version.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 volumes parameters

Parameter

+

Mandatory

+

Type

+

Description

+

nfs

+

No

+

JSON

+

Storage volume of the shared file system type. Only the training jobs running in the resource pool with the shared file system network connected support such storage volume. For details, see Table 6.

+

host_path

+

No

+

JSON

+

Storage volume of the host file system type. Only training jobs running in the dedicated resource pool support such storage volume. For details, see Table 7.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 nfs parameters

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

Yes

+

String

+

ID of an SFS Turbo file system

+

src_path

+

Yes

+

String

+

Address of an SFS Turbo file system

+

dest_path

+

Yes

+

String

+

Local path of a training job

+

read_only

+

No

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 7 host_path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

src_path

+

Yes

+

String

+

Local path of a host

+

dest_path

+

Yes

+

String

+

Local path of a training job

+

read_only

+

No

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 8 parameter parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label

+

No

+

String

+

Parameter name

+

value

+

No

+

String

+

Parameter value

+
+
+
+

Response Body

Table 9 describes the response parameters.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

job_id

+

Long

+

ID of a training job

+

job_name

+

String

+

Name of a training job

+

status

+

Int

+

Status of a training job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Timestamp when a training job is created

+

version_id

+

Long

+

Version ID of a training job

+

resource_id

+

String

+

Charged resource ID of a training job

+

version_name

+

String

+

Version name of a training job

+
+
+

Samples

  1. The following shows how to create training job TestModelArtsJob with This is a ModelArts job as its description.
    • Sample request
      POST    https://endpoint/v1/{project_id}/training-jobs
      +{
      +    "job_name": "TestModelArtsJob",
      +    "job_desc": "This is a ModelArts job",
      +    "workspace_id": "af261af2218841ec960b01ab3cf1a5fa",
      +    "config": {
      +        "worker_server_num": 1,
      +        "app_url": "/usr/app/",
      +        "boot_file_url": "/usr/app/boot.py",
      +        "parameter": [
      +            {
      +                "label": "learning_rate",
      +                "value": "0.01"
      +            },
      +            {
      +                "label": "batch_size",
      +                "value": "32"
      +            }
      +        ],
      +        "dataset_id": "38277e62-9e59-48f4-8d89-c8cf41622c24",
      +        "dataset_version_id": "2ff0d6ba-c480-45ae-be41-09a8369bfc90",
      +        "spec_id": 1,
      +        "engine_id": 1,
      +        "train_url": "/usr/train/",
      +        "log_url": "/usr/log/"
      +    }
      +}
      +
    +
  2. The following shows how to create training job TestModelArtsJob2 with a custom image.
+
+ +
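For illustration only (the image URL, boot command, and OBS paths below are placeholder values reused from the parameter descriptions in Table 3), such a request might look as follows:

POST https://endpoint/v1/{project_id}/training-jobs
{
    "job_name": "TestModelArtsJob2",
    "job_desc": "This is a ModelArts job using a custom image",
    "config": {
        "worker_server_num": 1,
        "user_image_url": "100.125.5.235:20202/jobmng/custom-cpu-base:1.0",
        "user_command": "bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}",
        "data_url": "/usr/data/",
        "spec_id": 1,
        "train_url": "/usr/train/",
        "log_url": "/usr/log/"
    }
}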

3. The following shows how to create training job TestModelArtsJob3 using a storage volume.

+ + +
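Again for illustration only (the SFS Turbo file system ID, shared address, and mount path below are placeholders; see Table 5 and Table 6 for the volumes schema), such a request might look as follows:

POST https://endpoint/v1/{project_id}/training-jobs
{
    "job_name": "TestModelArtsJob3",
    "job_desc": "This is a ModelArts job using a storage volume",
    "config": {
        "worker_server_num": 1,
        "app_url": "/usr/app/",
        "boot_file_url": "/usr/app/boot.py",
        "data_url": "/usr/data/",
        "spec_id": 1,
        "engine_id": 1,
        "train_url": "/usr/train/",
        "log_url": "/usr/log/",
        "volumes": [
            {
                "nfs": {
                    "id": "43b37236-9afa-4855-8174-32254b9562e7",
                    "src_path": "192.168.8.150:/",
                    "dest_path": "/home/work/nas",
                    "read_only": false
                }
            }
        ]
    }
}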

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0046.html b/modelarts/api-ref/modelarts_03_0046.html new file mode 100644 index 00000000..4591ef30 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0046.html @@ -0,0 +1,282 @@ + + +

Querying a Training Job List

+

Function

This API is used to query the created training jobs that meet the search criteria.

+
+

URI

GET /v1/{project_id}/training-jobs

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

status

+

No

+

String

+

Job status. By default, all job statuses are returned. For example, to query failed jobs, set the value of status to 3, 5, 6, or 13. For details about the job statuses, see Job Statuses.

+

per_page

+

No

+

Integer

+

Number of jobs displayed on each page. The value range is [1, 1000]. Default value: 10

+

page

+

No

+

Integer

+

Index of the page to be queried. Default value: 1 The value range is [1, 65535].

+

sortBy

+

No

+

String

+

Sorting mode of the query. The value can be job_name, job_desc, status, duration, version_count, or create_time. Default value: job_name

+

order

+

No

+

String

+

Sorting order. The options are as follows:

+
  • asc: ascending order. It is the default value.
  • desc: descending order
+

search_content

+

No

+

String

+

Search content, for example, a training job name. The value is a string of 0 to 64 characters. By default, this parameter is left blank.

+

workspace_id

+

No

+

String

+

Workspace where a job resides. Default value: 0

+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

job_total_count

+

Integer

+

Total number of created jobs

+

job_count_limit

+

Integer

+

Number of training jobs that can be created

+

jobs

+

jobs array

+

Attributes of a training job. For details, see Table 4.

+

quotas

+

Integer

+

Maximum number of training jobs

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 jobs parameters

Parameter

+

Type

+

Description

+

job_id

+

Long

+

ID of a training job

+

job_name

+

String

+

Name of a training job

+

version_id

+

Long

+

Version ID of a training job

+

status

+

Int

+

Status of a training job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Timestamp when a training job is created

+

duration

+

Long

+

Training job running duration, in milliseconds

+

job_desc

+

String

+

Description of a training job

+

version_count

+

Long

+

Number of versions of a training job

+
+
+
+
+

Samples

The following shows how to query training jobs whose status is 7 and whose name contains job on the first page. Each page displays 10 records sorted by job_name in ascending order.

+ +
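For illustration only (the endpoint, project ID, and returned values are placeholders; the response fields follow Table 3 and Table 4), the query and an abbreviated response might look as follows:

GET https://endpoint/v1/{project_id}/training-jobs?status=7&per_page=10&page=1&sortBy=job_name&order=asc&search_content=job

{
    "is_success": true,
    "job_total_count": 1,
    "job_count_limit": 999,
    "quotas": 1000,
    "jobs": [
        {
            "job_id": 10,
            "job_name": "job_test",
            "job_desc": "This is a ModelArts job",
            "version_id": 10,
            "version_count": 1,
            "status": 7,
            "create_time": 1524189990635,
            "duration": 262130
        }
    ]
}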
+ +

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0047.html b/modelarts/api-ref/modelarts_03_0047.html new file mode 100644 index 00000000..b5c706ff --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0047.html @@ -0,0 +1,936 @@ + + +

Querying the Details About a Training Job Version

+

Function

This API is used to query the details about a specified training job based on the job ID.

+
+

URI

GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

job_id

+

Long

+

ID of a training job

+

job_name

+

String

+

Name of a training job

+

job_desc

+

String

+

Description of a training job

+

version_id

+

Long

+

Version ID of a training job

+

version_name

+

String

+

Version name of a training job

+

pre_version_id

+

Long

+

ID of the previous version of a training job

+

engine_type

+

Integer

+

Engine type of a training job. The mapping between engine_type and engine_name is as follows: engine_type: 13, engine_name: Ascend-Powered-Engine

+

engine_name

+

String

+

Name of the engine selected for a training job. Currently, the following engines are supported:

+
  • Ascend-Powered-Engine
  • PyTorch
  • Spark_MLlib
  • Scikit_Learn
  • TensorFlow
  • XGBoost-Sklearn
  • MindSpore-GPU
+

engine_id

+

Long

+

ID of the engine selected for a training job

+

engine_version

+

String

+

Version of the engine selected for a training job

+

status

+

Integer

+

Status of a training job. For details about the job statuses, see Job Statuses.

+

app_url

+

String

+

Code directory of a training job

+

boot_file_url

+

String

+

Boot file of a training job

+

create_time

+

Long

+

Time when a training job is created

+

parameter

+

Array<Object>

+

Running parameters of a training job. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 3.

+

duration

+

Long

+

Training job running duration, in milliseconds

+

spec_id

+

Long

+

ID of the resource specifications selected for a training job

+

core

+

String

+

Number of cores of the resource specifications

+

cpu

+

String

+

CPU memory of the resource specifications

+

gpu_num

+

Integer

+

Number of GPUs of the resource specifications

+

gpu_type

+

String

+

GPU type of the resource specifications

+

worker_server_num

+

Integer

+

Number of workers in a training job

+

data_url

+

String

+

Dataset of a training job

+

train_url

+

String

+

OBS path of the training job output file

+

log_url

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/log/

+

dataset_version_id

+

String

+

Dataset version ID of a training job

+

dataset_id

+

String

+

Dataset ID of a training job

+

data_source

+

Array<Object>

+

Datasets of a training job. For details, see Table 4.

+

model_id

+

Long

+

Model ID of a training job

+

model_metric_list

+

String

+

Model metrics of a training job. For details, see Table 5.

+

system_metric_list

+

JSON

+

System monitoring metrics of a training job. For details, see Table 6.

+

user_image_url

+

String

+

SWR URL of a custom image used by a training job

+

user_command

+

String

+

Boot command used to start the container of a custom image of a training job

+

resource_id

+

String

+

Charged resource ID of a training job

+

dataset_name

+

String

+

Dataset of a training job

+

spec_code

+

String

+

Resource specifications selected for a training job

+

start_time

+

Long

+

Training start time

+

volumes

+

Array<Object>

+

Storage volume that can be used by a training job. For details, see Table 11.

+

dataset_version_name

+

String

+

Dataset of a training job

+

pool_name

+

String

+

Name of a resource pool

+

pool_id

+

String

+

ID of a resource pool

+

nas_mount_path

+

String

+

Local mount path of SFS Turbo (NAS). Example value: /home/work/nas

+

nas_share_addr

+

String

+

Shared path of SFS Turbo (NAS). Example value: 192.168.8.150:/

+

nas_type

+

String

+

Only NFS is supported. Example value: nfs

+
+
+ +
+ + + + + + + + + + + + + +
Table 3 parameter parameters

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name

+

value

+

String

+

Parameter value

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 data_source parameters

Parameter

+

Type

+

Description

+

dataset_id

+

String

+

Dataset ID of a training job

+

dataset_version

+

String

+

Dataset version ID of a training job

+

type

+

String

+

Dataset type

+
  • obs: Data from OBS is used.
  • dataset: Data from a specified dataset is used.
+

data_url

+

String

+

OBS bucket path

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 model_metric_list parameters

Parameter

+

Type

+

Description

+

metric

+

JSON Array

+

Validation metrics of a classification of a training job. For details, see Table 7.

+

total_metric

+

JSON

+

Overall validation parameters of a training job. For details, see Table 9.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 6 system_metric_list parameters

Parameter

+

Type

+

Description

+

cpuUsage

+

Array

+

CPU usage of a training job

+

memUsage

+

Array

+

Memory usage of a training job

+

gpuUtil

+

Array

+

GPU usage of a training job

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 7 metric parameters

Parameter

+

Type

+

Description

+

metric_values

+

JSON

+

Validation metrics of a classification of a training job. For details, see Table 8.

+

reserved_data

+

JSON

+

Reserved parameter

+

metric_meta

+

JSON

+

Classification of a training job, including the classification ID and name

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 metric_values parameters

Parameter

+

Type

+

Description

+

recall

+

Float

+

Recall of a classification of a training job

+

precision

+

Float

+

Precision of a classification of a training job

+

accuracy

+

Float

+

Accuracy of a classification of a training job

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 9 total_metric parameters

Parameter

+

Type

+

Description

+

total_metric_meta

+

JSON

+

Reserved parameter

+

total_reserved_data

+

JSON

+

Reserved parameter

+

total_metric_values

+

JSON

+

Overall validation metrics of a training job. For details, see Table 10.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 10 total_metric_values parameters

Parameter

+

Type

+

Description

+

f1_score

+

Float

+

F1 score of a training job

+

recall

+

Float

+

Total recall of a training job

+

precision

+

Float

+

Total precision of a training job

+

accuracy

+

Float

+

Total accuracy of a training job

+
+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 11 volumes parameters

Parameter

+

Mandatory

+

Type

+

Description

+

nfs

+

No

+

JSON

+

Storage volume of the shared file system type. Only training jobs running in a resource pool connected to the shared file system network support this type of storage volume. For details, see Table 12.

+

host_path

+

No

+

JSON

+

Storage volume of the host file system type. Only training jobs running in a dedicated resource pool support this type of storage volume. For details, see Table 13.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 nfs parameters

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

Yes

+

String

+

ID of an SFS Turbo file system

+

src_path

+

Yes

+

String

+

Address of an SFS Turbo file system

+

dest_path

+

Yes

+

String

+

Local path of a training job

+

read_only

+

No

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 13 host_path parameters

Parameter

+

Mandatory

+

Type

+

Description

+

src_path

+

Yes

+

String

+

Local path of a host

+

dest_path

+

Yes

+

String

+

Local path of a training job

+

read_only

+

No

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+
+

Samples

The following shows how to query the details about the job whose job_id is 10 and version_id is 10.

+ +
+ +
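The sample body is not carried on this page, so the sketch below is illustrative only. It assumes the GET URI for this API shown earlier in this topic, and the response is abbreviated to a few of the fields listed in the tables above; all values are hypothetical.

  • Sample request
    GET    https://endpoint/v1/{project_id}/training-jobs/10/versions/10
  • Abbreviated sample response (illustrative)
    {
        "start_time": 1524189990635,
        "pool_name": "pool-example",
        "nas_type": "nfs"
    }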

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0048.html b/modelarts/api-ref/modelarts_03_0048.html new file mode 100644 index 00000000..8cb1ba34 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0048.html @@ -0,0 +1,109 @@ + + +

Deleting a Version of a Training Job

+

Function

This API is used to delete a version of a training job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training Job List and Querying the Details About a Training Job Version.

+
+

URI

DELETE /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

+
+ +
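The sample body is not carried on this page; the following is an illustrative sketch for a hypothetical job whose job_id and version_id are both 10, mirroring the success/failure response pattern used elsewhere in this document.

  • Sample request
    DELETE    https://endpoint/v1/{project_id}/training-jobs/10/versions/10
  • Successful sample response
    {
        "is_success": true
    }
  • Failed sample response
    {
        "is_success": false,
        "error_message": "Error string",
        "error_code": "ModelArts.0105"
    }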

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0049.html b/modelarts/api-ref/modelarts_03_0049.html new file mode 100644 index 00000000..c75cc5ac --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0049.html @@ -0,0 +1,902 @@ + + +

Querying a List of Training Job Versions

+

Function

This API is used to query the list of versions of a specified training job based on the job ID.

+
+

URI

GET /v1/{project_id}/training-jobs/{job_id}/versions

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

per_page

+

No

+

Integer

+

Number of job versions displayed on each page. The value range is [1, 1000]. Default value: 10

+

page

+

No

+

Integer

+

Index of the page to be queried

+
  • If paging is required, set page to 1.
  • The default value of page is 0, indicating that paging is not supported.
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

job_id

+

Long

+

ID of a training job

+

job_name

+

String

+

Name of a training job

+

job_desc

+

String

+

Description of a training job

+

version_count

+

Long

+

Number of versions of a training job

+

versions

+

JSON Array

+

Version parameters of a training job. For details, see Table 4.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 versions parameters

Parameter

+

Type

+

Description

+

version_id

+

Long

+

Version ID of a training job

+

version_name

+

String

+

Version name of a training job

+

pre_version_id

+

Long

+

ID of the previous version of a training job

+

engine_type

+

Long

+

Engine type of a training job

+

engine_name

+

String

+

Name of the engine selected for a training job

+

engine_id

+

Long

+

ID of the engine selected for a training job

+

engine_version

+

String

+

Version of the engine selected for a training job

+

status

+

Int

+

Status of a training job

+

app_url

+

String

+

Code directory of a training job

+

boot_file_url

+

String

+

Boot file of a training job

+

create_time

+

Long

+

Time when a training job is created

+

parameter

+

JSON Array

+

Running parameters of a training job. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 5.

+

duration

+

Long

+

Training job running duration, in milliseconds

+

spec_id

+

Long

+

ID of the resource specifications selected for a training job

+

core

+

String

+

Number of cores of the resource specifications

+

cpu

+

String

+

CPU memory of the resource specifications

+

gpu_num

+

Integer

+

Number of GPUs of the resource specifications

+

gpu_type

+

String

+

GPU type of the resource specifications

+

worker_server_num

+

Integer

+

Number of workers in a training job

+

data_url

+

String

+

Dataset of a training job

+

train_url

+

String

+

OBS path of the training job output file

+

log_url

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/log/

+

dataset_version_id

+

String

+

Dataset version ID of a training job

+

dataset_id

+

String

+

Dataset ID of a training job

+

data_source

+

JSON Array

+

Datasets of a training job. For details, see Table 6.

+

model_id

+

Long

+

Model ID of a training job

+

model_metric_list

+

String

+

Model metrics of a training job. For details, see Table 7.

+

system_metric_list

+

String

+

System monitoring metrics of a training job. For details, see Table 8.

+

user_image_url

+

String

+

SWR URL of a custom image used by a training job

+

user_command

+

String

+

Boot command used to start the container of a custom image of a training job

+

resource_id

+

String

+

Charged resource ID of a training job

+

dataset_name

+

String

+

Dataset of a training job

+

start_time

+

Long

+

Training start time

+

volumes

+

JSON Array

+

Storage volume that can be used by a training job. For details, see Table 13.

+

dataset_version_name

+

String

+

Dataset version name of a training job

+

pool_name

+

String

+

Name of a resource pool

+

pool_id

+

String

+

ID of a resource pool

+

nas_mount_path

+

String

+

Local mount path of SFS Turbo (NAS). Example value: /home/work/nas

+

nas_share_addr

+

String

+

Shared path of SFS Turbo (NAS). Example value: 192.168.8.150:/

+

nas_type

+

String

+

Only NFS is supported. Example value: nfs

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 parameter parameters

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name

+

value

+

String

+

Parameter value

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 data_source parameters

Parameter

+

Type

+

Description

+

dataset_id

+

String

+

Dataset ID of a training job

+

dataset_version

+

String

+

Dataset version ID of a training job

+

type

+

String

+

Dataset type

+
  • obs: Data from OBS is used.
  • dataset: Data from a specified dataset is used.
+

data_url

+

String

+

OBS bucket path

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 model_metric_list parameters

Parameter

+

Type

+

Description

+

metric

+

JSON Array

+

Validation metrics of a classification of a training job. For details, see Table 9.

+

total_metric

+

JSON

+

Overall validation parameters of a training job. For details, see Table 11.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 8 system_metric_list parameters

Parameter

+

Type

+

Description

+

cpuUsage

+

Array

+

CPU usage of a training job

+

memUsage

+

Array

+

Memory usage of a training job

+

gpuUtil

+

Array

+

GPU usage of a training job

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 9 metric parameters

Parameter

+

Type

+

Description

+

metric_values

+

JSON

+

Validation metrics of a classification of a training job. For details, see Table 10.

+

reserved_data

+

JSON

+

Reserved parameter

+

metric_meta

+

JSON

+

Classification of a training job, including the classification ID and name

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 10 metric_values parameters

Parameter

+

Type

+

Description

+

recall

+

Float

+

Recall of a classification of a training job

+

precision

+

Float

+

Precision of a classification of a training job

+

accuracy

+

Float

+

Accuracy of a classification of a training job

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 11 total_metric parameters

Parameter

+

Type

+

Description

+

total_metric_meta

+

JSON Array

+

Reserved parameter

+

total_reserved_data

+

JSON Array

+

Reserved parameter

+

total_metric_values

+

JSON Array

+

Overall validation metrics of a training job. For details, see Table 12.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 12 total_metric_values parameters

Parameter

+

Type

+

Description

+

f1_score

+

Float

+

F1 score of a training job. This parameter is used only by some preset algorithms and is automatically generated. It is for reference only.

+

recall

+

Float

+

Total recall of a training job

+

precision

+

Float

+

Total precision of a training job

+

accuracy

+

Float

+

Total accuracy of a training job

+
+
+ +
+ + + + + + + + + + + + + +
Table 13 volumes parameters

Parameter

+

Type

+

Description

+

nfs

+

JSON

+

Storage volume of the shared file system type. Only training jobs running in a resource pool connected to a shared file system network support this type of storage volume. For details, see Table 14.

+

host_path

+

JSON

+

Storage volume of the host file system type. Only training jobs running in a dedicated resource pool support this type of storage volume. For details, see Table 15.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 14 nfs parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of an SFS Turbo file system

+

src_path

+

String

+

Address of an SFS Turbo file system

+

dest_path

+

String

+

Local path of a training job

+

read_only

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 15 host_path parameters

Parameter

+

Type

+

Description

+

src_path

+

String

+

Local path of a host

+

dest_path

+

String

+

Local path of a training job

+

read_only

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+
+

Samples

The following shows how to query the job version details on the first page when job_id is set to 10 and five records are displayed on each page.

+ +
+ +
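The sample body is not carried on this page, so the sketch below is illustrative only. It assumes the per_page and page parameters from Table 2 are passed as query parameters, and the response is abbreviated to a few of the fields in Table 3 and Table 4; all values are hypothetical.

  • Sample request
    GET    https://endpoint/v1/{project_id}/training-jobs/10/versions?page=1&per_page=5
  • Abbreviated sample response (illustrative)
    {
        "is_success": true,
        "job_id": 10,
        "job_name": "TestJob",
        "version_count": 2,
        "versions": [
            {
                "version_id": 10,
                "version_name": "V0001",
                "status": 10
            }
        ]
    }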

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0050.html b/modelarts/api-ref/modelarts_03_0050.html new file mode 100644 index 00000000..dc99169f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0050.html @@ -0,0 +1,446 @@ + + +

Creating a Version of a Training Job

+

Function

This API is used to create a version of a training job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training Job List and Querying the Details About a Training Job Version.

+
+

URI

POST /v1/{project_id}/training-jobs/{job_id}/versions

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + +
Table 2 Request parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_desc

+

No

+

String

+

Description of a training job. The value is a string of 0 to 256 characters. By default, this parameter is left blank.

+

config

+

Yes

+

JSON

+

Parameters for creating a training job

+
+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 config parameters

Parameter

+

Mandatory

+

Type

+

Description

+

worker_server_num

+

Yes

+

Integer

+

Number of workers in a training job. Obtain the maximum value from Querying Job Resource Specifications.

+

app_url

+

Yes

+

String

+

Code directory of a training job, for example, /usr/app/. This parameter must be used together with boot_file_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

boot_file_url

+

Yes

+

String

+

Boot file of a training job, which must be stored in the code directory. Example value: /usr/app/boot.py. This parameter must be used together with app_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

parameter

+

No

+

Array<Object>

+

Running parameters of a training job. It is a collection of label-value pairs. For details, see the sample request. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 5.

+

data_url

+

Yes

+

String

+

OBS URL of the dataset required by a training job, for example, /usr/data/. By default, this parameter is left blank. This parameter cannot be used together with data_source, or with dataset_id and dataset_version_id. However, one of these parameters must be provided.

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url or data_source.

+

dataset_version_id

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url or data_source.

+

data_source

+

No

+

JSON Array

+

Dataset of a training job. This parameter cannot be used with data_url, dataset_id, or dataset_version_id. For details, see Table 4.

+

spec_id

+

Yes

+

Long

+

ID of the resource specifications selected for a training job. Obtain the ID by calling the API described in Querying Job Resource Specifications.

+

engine_id

+

Yes

+

Long

+

ID of the engine selected for a training job. The default value is 1. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id. Obtain the ID by calling the API described in Querying Job Engine Specifications.

+

model_id

+

Yes

+

Long

+

ID of the built-in model of a training job. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

train_url

+

Yes

+

String

+

OBS URL of the output file of a training job. By default, this parameter is left blank. Example value: /bucket/trainUrl/

+

log_url

+

No

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

pre_version_id

+

Yes

+

Long

+

ID of the previous version of a training job. You can obtain the value of version_id by calling the API described in Querying a List of Training Job Versions.

+

user_image_url

+

No

+

String

+

SWR URL of a custom image used by a training job. Example value: 100.125.5.235:20202/jobmng/custom-cpu-base:1.0

+

user_command

+

No

+

String

+

Boot command used to start the container of a custom image of a training job. The format is bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 data_source parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url.

+

dataset_version

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url.

+

type

+

Yes

+

String

+

Dataset type. The value can be obs or dataset. obs and dataset cannot be used at the same time.

+

data_url

+

Yes

+

String

+

OBS bucket path. This parameter cannot be used together with dataset_id or dataset_version.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 5 parameter parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label

+

No

+

String

+

Parameter name

+

value

+

No

+

String

+

Parameter value

+
+
+

Response Body

Table 6 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+

job_id

+

Long

+

ID of a training job

+

job_name

+

String

+

Name of a training job

+

status

+

Int

+

Status of a training job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Timestamp when a training job is created

+

version_id

+

Long

+

Version ID of a training job

+

version_name

+

String

+

Version name of a training job

+
+
+
+
+

Samples

  1. The following shows how to create a new version of the job whose job_id is 10, with pre_version_id set to 20.
    • Sample request
      POST    https://endpoint/v1/{project_id}/training-jobs/10/versions/
      +{
      +    "job_desc": "This is a ModelArts job",
      +    "config": {
      +        "worker_server_num": 1,
      +        "app_url": "/usr/app/",
      +        "boot_file_url": "/usr/app/boot.py",
      +        "parameter": [
      +            {
      +                "label": "learning_rate",
      +                "value": "0.01"
      +            },
      +            {
      +                "label": "batch_size",
      +                "value": "32"
      +            }
      +        ],
      +        "dataset_id": "38277e62-9e59-48f4-8d89-c8cf41622c24",
      +        "dataset_version_id": "2ff0d6ba-c480-45ae-be41-09a8369bfc90",
      +        "spec_id": 1,
      +        "engine_id": 1,
      +        "train_url": "/usr/train/",
      +        "log_url": "/usr/log/",
      +        "pre_version_id": 20
      +    }
      +}
      +
    +
+
+ +
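The original sample shows only the request; per the response fields in Table 6, a minimal illustrative response could look as follows (all values are hypothetical).

    • Sample response (illustrative)
      {
          "job_id": 10,
          "job_name": "TestJob",
          "status": 1,
          "create_time": 1524189990635,
          "version_id": 21,
          "version_name": "V0002"
      }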

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0051.html b/modelarts/api-ref/modelarts_03_0051.html new file mode 100644 index 00000000..3d390839 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0051.html @@ -0,0 +1,111 @@ + + +

Stopping a Training Job

+

Function

This API is used to stop a training job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training Job List and Querying the Details About a Training Job Version.

+
+

URI

POST /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/stop

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to stop a version of the job whose job_id is 10 and version_id is 10.

+ +
+ +
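The sample body is not carried on this page; the sketch below is illustrative for the job described above (job_id 10, version_id 10), reusing the success/failure response pattern shown in other topics of this document.

  • Sample request
    POST    https://endpoint/v1/{project_id}/training-jobs/10/versions/10/stop
  • Successful sample response
    {
        "is_success": true
    }
  • Failed sample response
    {
        "is_success": false,
        "error_message": "Error string",
        "error_code": "ModelArts.0105"
    }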

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0052.html b/modelarts/api-ref/modelarts_03_0052.html new file mode 100644 index 00000000..2ce6e655 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0052.html @@ -0,0 +1,126 @@ + + +

Modifying the Description of a Training Job

+

Function

This API is used to modify the description of a training job.

+
+

URI

PUT /v1/{project_id}/training-jobs/{job_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_desc

+

Yes

+

String

+

Description of a training job. The value is a string of 0 to 256 characters.

+
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to modify the description of the job whose job_id is 10.

+ +
+ +
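The sample body is not carried on this page; the sketch below is illustrative for the job described above (job_id 10), with a hypothetical description value.

  • Sample request
    PUT    https://endpoint/v1/{project_id}/training-jobs/10
    {
        "job_desc": "This is a ModelArts job"
    }
  • Successful sample response
    {
        "is_success": true
    }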

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0053.html b/modelarts/api-ref/modelarts_03_0053.html new file mode 100644 index 00000000..a1830412 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0053.html @@ -0,0 +1,102 @@ + + +

Deleting a Training Job

+

Function

This API is used to delete a training job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Training Job List and Querying the Details About a Training Job Version.

+
+

URI

DELETE /v1/{project_id}/training-jobs/{job_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to delete the job whose job_id is 10.

+ +
+ +
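The sample body is not carried on this page; the sketch below is illustrative for the job described above (job_id 10), reusing the success/failure response pattern shown in other topics of this document.

  • Sample request
    DELETE    https://endpoint/v1/{project_id}/training-jobs/10
  • Successful sample response
    {
        "is_success": true
    }
  • Failed sample response
    {
        "is_success": false,
        "error_message": "Error string",
        "error_code": "ModelArts.0105"
    }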

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0054.html b/modelarts/api-ref/modelarts_03_0054.html new file mode 100644 index 00000000..fe655a31 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0054.html @@ -0,0 +1,119 @@ + + +

Obtaining the Name of a Training Job Log File

+

Function

This API is used to obtain the name of a training job log file.

+
+

URI

GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/log/file-names

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+

log_file_list

+

String

+

Log file name of a training job. A single-node job has only one log file, and a distributed job has multiple log files.

+
+
+
+
+

Samples

The following shows how to obtain the log files of the job whose job_id is 10 and version_id is 10.

+ +
+ +
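The sample body is not carried on this page; the sketch below is illustrative for the job described above (job_id 10, version_id 10). The log file name is hypothetical, and log_file_list is shown as a string to match the type declared in Table 2.

  • Sample request
    GET    https://endpoint/v1/{project_id}/training-jobs/10/versions/10/log/file-names
  • Successful sample response (illustrative)
    {
        "is_success": true,
        "log_file_list": "trainjob-10-v10.log"
    }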

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0056.html b/modelarts/api-ref/modelarts_03_0056.html new file mode 100644 index 00000000..e2fb4abf --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0056.html @@ -0,0 +1,335 @@ + + +

Querying a Built-in Algorithm

+

Function

This API is used to query the details about the built-in algorithms (models).

+
+

URI

GET /v1/{project_id}/built-in-algorithms

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

per_page

+

No

+

Integer

+

Number of records displayed on each page. The value range is [1, 100]. Default value: 10

+

page

+

No

+

Integer

+

Index of the page to be queried. Default value: 1

+

sortBy

+

No

+

String

+

Sorting mode of the query. The value can be engine, model_name, model_precision, model_usage, model_size, create_time, or parameter. Default value: engine

+

order

+

No

+

String

+

Sorting order. The options are as follows:

+
  • asc: ascending order
  • desc: descending order. The default value is desc.
+

search_content

+

No

+

String

+

Search content, for example, a parameter name. By default, this parameter is left blank.

+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+

model_total_count

+

Integer

+

Number of models

+

models

+

Array<Object>

+

Model parameter list. For details, see Table 4.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 models structure data

Parameter

+

Type

+

Description

+

model_id

+

Integer

+

Model ID

+

model_name

+

String

+

Model name

+

model_usage

+

Integer

+

Model usage. The options are as follows:

+
  • 1: image classification
  • 2: object class and location
  • 3: image semantic segmentation
  • 4: natural language processing
  • 5: image embedding
+

model_precision

+

String

+

Model precision

+

model_size

+

Long

+

Model size, in bytes

+

model_train_dataset

+

String

+

Model training dataset

+

model_dataset_format

+

String

+

Dataset format required by a model

+

model_description_url

+

String

+

URL of the model description

+

parameter

+

String

+

Running parameters of a model. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 5.

+

create_time

+

Long

+

Time when a model is created

+

engine_id

+

Long

+

Engine ID of a model

+

engine_name

+

String

+

Engine name of a model

+

engine_version

+

String

+

Engine version of a model

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 5 parameter parameters

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name

+

value

+

String

+

Parameter value

+

required

+

Boolean

+

Whether a parameter is mandatory

+
+
+
+

Samples

The following shows how to query the algorithm whose name contains configname.

+ +
+ +
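The sample body is not carried on this page, so the sketch below is illustrative only. It assumes the query parameters from Table 2 are passed on the URL, and the response is abbreviated to a few of the fields in Table 3 and Table 4; all values are hypothetical.

  • Sample request
    GET    https://endpoint/v1/{project_id}/built-in-algorithms?page=1&per_page=10&search_content=configname
  • Abbreviated sample response (illustrative)
    {
        "is_success": true,
        "model_total_count": 1,
        "models": [
            {
                "model_id": 4,
                "model_name": "ResNet_v1_50",
                "model_usage": 1
            }
        ]
    }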

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0057.html b/modelarts/api-ref/modelarts_03_0057.html new file mode 100644 index 00000000..7a493ac1 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0057.html @@ -0,0 +1,23 @@ + + +

Training Job Parameter Configuration

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0058.html b/modelarts/api-ref/modelarts_03_0058.html new file mode 100644 index 00000000..2f5346a9 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0058.html @@ -0,0 +1,365 @@ + + +

Creating a Training Job Configuration

+

Function

This API is used to create a training job configuration.

+
+

URI

POST /v1/{project_id}/training-job-configs

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

config_name

+

Yes

+

String

+

Name of a training job configuration. The value is a string of 1 to 64 characters consisting of only digits, letters, underscores (_), and hyphens (-).

+

config_desc

+

No

+

String

+

Description of a training job configuration. The value is a string of 0 to 256 characters. By default, this parameter is left blank.

+

worker_server_num

+

Yes

+

Integer

+

Number of workers in a training job. Obtain the maximum value from Querying Job Resource Specifications.

+

app_url

+

Yes

+

String

+

Code directory of a training job, for example, /usr/app/. This parameter must be used together with boot_file_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

boot_file_url

+

Yes

+

String

+

Boot file of a training job, which must be stored in the code directory, for example, /usr/app/boot.py. This parameter must be used together with app_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

model_id

+

Yes

+

Long

+

Model ID of a training job. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

parameter

+

No

+

Array<Object>

+

Running parameters of a training job. It is a collection of label-value pairs. For details, see the sample request. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 4.

+

spec_id

+

Yes

+

Long

+

ID of the resource specifications selected for a training job. Obtain the ID by calling the API described in Querying Job Resource Specifications.

+

data_url

+

Yes

+

String

+

OBS URL of the dataset required by a training job, for example, /usr/data/.

+

This parameter cannot be used together with data_source, or with dataset_id and dataset_version_id. However, one of these parameters must be provided.

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url or data_source.

+

dataset_version_id

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url or data_source.

+

data_source

+

Yes

+

JSON Array

+

Dataset of a training job. This parameter cannot be used together with data_url, or with dataset_id and dataset_version_id. For details, see Table 3.

+

engine_id

+

Yes

+

Long

+

ID of the engine selected for a training job. The default value is 1. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id. Obtain the ID by calling the API described in Querying Job Engine Specifications.

+

train_url

+

No

+

String

+

OBS URL of the output file of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

log_url

+

No

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

user_image_url

+

No

+

String

+

SWR URL of a custom image used by a training job. Example value: 100.125.5.235:20202/jobmng/custom-cpu-base:1.0

+

user_command

+

No

+

String

+

Boot command used to start the container of a custom image of a training job. The format is bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}. The run_train.sh script must be invoked first to initialize variables such as the AK/SK; python runs after it so that the Python files execute in the initialized environment.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 data_source parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url.

+

dataset_version

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url.

+

type

+

Yes

+

String

+

Dataset type. The value can be obs or dataset. obs and dataset cannot be used at the same time.

+

data_url

+

Yes

+

String

+

OBS bucket path. This parameter cannot be used together with dataset_id or dataset_version.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 parameter parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label

+

No

+

String

+

Parameter name.

+

value

+

No

+

String

+

Parameter value.

+
+
+
+

Response Body

Table 5 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 5 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

  1. The following shows how to create a training job configuration whose name is testConfig and description is This is config.
    • Sample request
      POST    https://endpoint/v1/{project_id}/training-job-configs
      +{
      +    "config_name": "testConfig",
      +    "config_desc": "This is config",
      +    "worker_server_num": 1,
      +    "app_url": "/usr/app/",
      +    "boot_file_url": "/usr/app/boot.py",
      +    "parameter": [
      +        {
      +            "label": "learning_rate",
      +            "value": "0.01"
      +        },
      +        {
      +            "label": "batch_size",
      +            "value": "32"
      +        }
      +    ],
      +    "spec_id": 1,
      +    "dataset_id": "38277e62-9e59-48f4-8d89-c8cf41622c24",
      +    "dataset_version_id": "2ff0d6ba-c480-45ae-be41-09a8369bfc90",
      +    "engine_id": 1,
      +    "train_url": "/usr/train/",
      +    "log_url": "/usr/log/"
      +}
      +
    +
+
+ +
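The original sample shows only the request; per the response fields in Table 5, a minimal illustrative response pair (following the success/failure pattern used elsewhere in this document) would look as follows.

  • Successful sample response
    {
        "is_success": true
    }
  • Failed sample response
    {
        "is_success": false,
        "error_message": "Error string",
        "error_code": "ModelArts.0105"
    }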

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0059.html b/modelarts/api-ref/modelarts_03_0059.html new file mode 100644 index 00000000..3c282083 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0059.html @@ -0,0 +1,263 @@ + + +

Querying a List of Training Job Configurations

+

Function

This API is used to query the created training job configurations that meet the search criteria.

+
+

URI

GET /v1/{project_id}/training-job-configs

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

per_page

+

No

+

Integer

+

Number of job parameters displayed on each page. The value range is [1, 1000]. Default value: 10

+

page

+

No

+

Integer

+

Index of the page to be queried. Default value: 1

+

sortBy

+

No

+

String

+

Sorting mode of the query. The value can be config_name, config_desc, or create_time. The default value is config_name. create_time is not supported for sample sorting.

+

order

+

No

+

String

+

Sorting order. Options:

+
  • asc: ascending order
  • desc: descending order. The default value is desc.
+

search_content

+

No

+

String

+

Search content, for example, a parameter name. By default, this parameter is left blank.

+

config_type

+

No

+

String

+

Configuration type to be queried. Options:

+
  • custom: Query the custom configurations.
  • sample: Query the sample configurations. The default value is custom.
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call. This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+

config_total_count

+

Integer

+

Total number of the queried training job configurations

+

configs

+

JSON Array

+

Training job configurations. For details, see Table 4.

+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 configs parameters

Parameter

+

Type

+

Description

+

config_name

+

String

+

Name of a training job configuration

+

config_desc

+

String

+

Description of a training job configuration

+

create_time

+

Long

+

Time when a training job is created

+

engine_type

+

Integer

+

Engine type of a training job

+

engine_name

+

String

+

Name of the engine selected for a training job

+

engine_id

+

Long

+

ID of the engine selected for a training job

+

engine_version

+

String

+

Version of the engine selected for a training job

+

user_image_url

+

String

+

SWR URL of a custom image used by a training job. Example value: 100.125.5.235:20202/jobmng/custom-cpu-base:1.0

+

user_command

+

String

+

Boot command used to start the container of a custom image of a training job. The format is bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}.

+
+
+
+
+

Samples

+
+ +
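No sample body is carried on this page, so the sketch below is illustrative only. It assumes the query parameters from Table 2 are passed on the URL, and the response is abbreviated to a few of the fields in Table 3 and Table 4; the values reuse the configuration from the creation sample and are otherwise hypothetical.

  • Sample request
    GET    https://endpoint/v1/{project_id}/training-job-configs?per_page=10&page=1&config_type=custom
  • Abbreviated sample response (illustrative)
    {
        "is_success": true,
        "config_total_count": 1,
        "configs": [
            {
                "config_name": "testConfig",
                "config_desc": "This is config",
                "create_time": 1524189990635
            }
        ]
    }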

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0060.html b/modelarts/api-ref/modelarts_03_0060.html new file mode 100644 index 00000000..b0442b9b --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0060.html @@ -0,0 +1,365 @@ + + +

Modifying a Training Job Configuration

+

Function

This API is used to modify a training job configuration.

+
+

URI

PUT /v1/{project_id}/training-job-configs/{config_name}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

config_name

+

Yes

+

String

+

Name of a training job configuration

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

config_desc

+

No

+

String

+

Description of a training job configuration. The value is a string of 0 to 256 characters. By default, this parameter is left blank.

+

worker_server_num

+

Yes

+

Integer

+

Number of workers in a training job. Obtain the maximum value from Querying Job Resource Specifications.

+

app_url

+

Yes

+

String

+

Code directory of a training job, for example, /usr/app/. This parameter must be used together with boot_file_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

boot_file_url

+

Yes

+

String

+

Boot file of a training job, which must be stored in the code directory, for example, /usr/app/boot.py. This parameter must be used together with app_url. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

model_id

+

Yes

+

Long

+

Model ID of a training job. If model_id is set, you do not need to set app_url, boot_file_url, or engine_id.

+

parameter

+

No

+

Array<Object>

+

Running parameters of a training job. It is a collection of label-value pairs. This parameter is a container environment variable when a training job uses a custom image.

+

spec_id

+

Yes

+

Long

+

ID of the resource specifications selected for a training job. Obtain the ID by calling the API described in Querying Job Resource Specifications.

+

data_url

+

Yes

+

String

+

OBS URL of the dataset required by a training job, for example, /usr/data/.

+

This parameter cannot be used together with data_source, or with dataset_id and dataset_version_id. However, one of these parameters must be provided.

+

dataset_id

+

Yes

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url or data_source.

+

dataset_version_id

+

Yes

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url or data_source.

+

data_source

+

Yes

+

JSON Array

+

Dataset of a training job. This parameter cannot be used together with data_url, or with dataset_id and dataset_version_id. For details, see Table 3.

+

engine_id

+

Yes

+

Long

+

ID of the engine selected for a training job. The default value is 1. Obtain the ID by calling the API described in Querying Job Engine Specifications.

+

train_url

+

No

+

String

+

OBS URL of the output file of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

log_url

+

No

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

user_image_url

+

No

+

String

+

SWR URL of a custom image used by a training job. Example value: 100.125.5.235:20202/jobmng/custom-cpu-base:1.0

+

user_command

+

No

+

String

+

Boot command used to start the container of a custom image of a training job. The format is bash /home/work/run_train.sh python /home/work/user-job-dir/app/train.py {python_file_parameter}.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 data_source parameters

Parameter

+

Mandatory

+

Type

+

Description

+

dataset_id

+

No

+

String

+

Dataset ID of a training job. This parameter must be used together with dataset_version_id, but cannot be used together with data_url.

+

dataset_version

+

No

+

String

+

Dataset version ID of a training job. This parameter must be used together with dataset_id, but cannot be used together with data_url.

+

type

+

No

+

String

+

Dataset type. The value can be obs or dataset. obs and dataset cannot be used at the same time.

+

data_url

+

No

+

String

+

OBS bucket path. This parameter cannot be used together with dataset_id or dataset_version.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 parameter parameters

Parameter

+

Mandatory

+

Type

+

Description

+

label

+

No

+

String

+

Parameter name.

+

value

+

No

+

String

+

Parameter value.

+
+
+
+

Response Body

Table 5 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 5 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

  1. The following shows how to modify the job parameter configuration named config.
    • Sample request
      PUT    https://endpoint/v1/{project_id}/training-job-configs/config
      +
      +{
      +    "config_desc": "This is config",
      +    "worker_server_num": 1,
      +    "app_url": "/usr/app/",
      +    "boot_file_url": "/usr/app/boot.py",
      +    "parameter": [
      +        {
      +            "label": "learning_rate",
      +            "value": "0.01"
      +        },
      +        {
      +            "label": "batch_size",
      +            "value": "32"
      +        }
      +    ],
      +    "spec_id": 1,
      +    "dataset_id": "38277e62-9e59-48f4-8d89-c8cf41622c24",
      +    "dataset_version_id": "2ff0d6ba-c480-45ae-be41-09a8369bfc90",
      +    "engine_id": 1,
      +    "train_url": "/usr/train/",
      +    "log_url": "/usr/log/"
      +}
      +
    +
    • Successful sample response
      {
      +    "is_success": true
      +}
      +
    • Failed sample response
      {
      +    "is_success": false,
      +    "error_message": "Error string",
      +    "error_code": "ModelArts.0105"
      +}
      +
    +
    +
+
+

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0061.html b/modelarts/api-ref/modelarts_03_0061.html new file mode 100644 index 00000000..98972add --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0061.html @@ -0,0 +1,100 @@ + + +

Deleting a Training Job Configuration

+

Function

This API is used to delete a training job configuration.

+
+

URI

DELETE /v1/{project_id}/training-job-configs/{config_name}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

config_name

+

Yes

+

String

+

Name of a training job configuration

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters.

+ +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+
+
+
+

Samples

The following shows how to delete the job configuration named test-trainconfig.

+ +
+ +
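The sample body is not carried on this page; the sketch below is illustrative for the configuration named test-trainconfig, reusing the success/failure response pattern shown in other topics of this document.

  • Sample request
    DELETE    https://endpoint/v1/{project_id}/training-job-configs/test-trainconfig
  • Successful sample response
    {
        "is_success": true
    }
  • Failed sample response
    {
        "is_success": false,
        "error_message": "Error string",
        "error_code": "ModelArts.0105"
    }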

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0062.html b/modelarts/api-ref/modelarts_03_0062.html new file mode 100644 index 00000000..ffc0349e --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0062.html @@ -0,0 +1,591 @@ + + +

Querying the Details About a Training Job Configuration

+

Function

This API is used to query the details about a specified training job configuration.

+
+

URI

GET /v1/{project_id}/training-job-configs/{config_name}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

config_name

+

Yes

+

String

+

Name of a training job configuration

+
+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

config_type

+

No

+

String

+

Configuration type to be queried. Options:

+
  • custom: Query the custom configurations.
  • sample: Query the sample configurations. The default value is custom.
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes. This parameter is not included when the API call succeeds.

+

config_name

+

String

+

Name of a training job configuration

+

config_desc

+

String

+

Description of a training job configuration

+

worker_server_num

+

Integer

+

Number of workers in a training job

+

app_url

+

String

+

Code directory of a training job

+

boot_file_url

+

String

+

Boot file of a training job

+

model_id

+

Long

+

Model ID of a training job

+

parameter

+

JSON Array

+

Running parameters of a training job. It is a collection of label-value pairs. This parameter is a container environment variable when a training job uses a custom image. For details, see Table 8.

+

spec_id

+

Long

+

ID of the resource specifications selected for a training job

+

data_url

+

String

+

Dataset of a training job

+

dataset_id

+

String

+

Dataset ID of a training job

+

dataset_version_id

+

String

+

Dataset version ID of a training job

+

data_source

+

JSON Array

+

Datasets of a training job. For details, see Table 4.

+

engine_type

+

Integer

+

Engine type of a training job

+

engine_name

+

String

+

Name of the engine selected for a training job

+

engine_id

+

Long

+

ID of the engine selected for a training job

+

engine_version

+

String

+

Version of the engine selected for a training job

+

train_url

+

String

+

OBS URL of the output file of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

log_url

+

String

+

OBS URL of the logs of a training job. By default, this parameter is left blank. Example value: /usr/train/

+

user_image_url

+

String

+

SWR URL of a custom image used by a training job

+

user_command

+

String

+

Boot command used to start the container of a custom image of a training job

+

spec_code

+

String

+

Resource specifications selected for a training job

+

gpu_type

+

String

+

GPU type of the resource specifications

+

create_time

+

Long

+

Time when a training job parameter configuration is created

+

cpu

+

String

+

CPU memory of the resource specifications

+

gpu_num

+

Integer

+

Number of GPUs of the resource specifications

+

core

+

String

+

Number of cores of the resource specifications

+

dataset_name

+

String

+

Dataset of a training job

+

dataset_version_name

+

String

+

Dataset version name of a training job

+

pool_id

+

String

+

ID of a resource pool

+

pool_name

+

String

+

Name of a resource pool

+

volumes

+

JSON Array

+

Storage volume that can be used by a training job. For details, see Table 5.

+

nas_mount_path

+

String

+

Local mount path of SFS Turbo (NAS). Example value: /home/work/nas

+

nas_share_addr

+

String

+

Shared path of SFS Turbo (NAS). Example value: 192.168.8.150:/

+

nas_type

+

String

+

Only NFS is supported. Example value: nfs

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 data_source parameters

Parameter

+

Type

+

Description

+

dataset_id

+

String

+

Dataset ID of a training job

+

dataset_version

+

String

+

Dataset version ID of a training job

+

type

+

String

+

Dataset type. Options:

+
  • obs: Data from OBS is used.
+
  • dataset: Data from a specified dataset is used.
+

data_url

+

String

+

OBS bucket path

+
+
+
+ +
+ + + + + + + + + + + + + +
Table 5 volumes parameters

Parameter

+

Type

+

Description

+

nfs

+

JSON

+

Storage volume of the shared file system type. Only training jobs running in a resource pool connected to a shared file system network support this type of storage volume. For details, see Table 6.

+

host_path

+

JSON

+

Storage volume of the host file system type. Only training jobs running in the dedicated resource pool support such storage volume. For details, see Table 7.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 nfs parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of an SFS Turbo file system

+

src_path

+

String

+

Address of an SFS Turbo file system

+

dest_path

+

String

+

Local path of a training job

+

read_only

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 7 host_path parameters

Parameter

+

Type

+

Description

+

src_path

+

String

+

Local path of a host

+

dest_path

+

String

+

Local path of a training job

+

read_only

+

Boolean

+

Whether dest_path is read-only. The default value is false.

+
  • true: read-only permission
  • false: read/write permission. This is the default value.
+
+
+ +
+ + + + + + + + + + + + + +
Table 8 parameter parameters

Parameter

+

Type

+

Description

+

label

+

String

+

Parameter name

+

value

+

String

+

Parameter value

+
+
+

Samples

The following shows how to query the details about the job configuration named config123.
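As a minimal illustration, the query could be issued with Python's requests library. The endpoint, project ID, and token below are placeholders, and the URI form /v1/{project_id}/training-job-configs/{config_name} is assumed for this topic rather than taken from the text above:

import requests

# Placeholder values for illustration only; use your region endpoint, project ID,
# and a valid IAM token (see "Authentication").
endpoint = "https://modelarts.example.com"
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

# Query the training job configuration named config123 (URI form assumed).
url = f"{endpoint}/v1/{project_id}/training-job-configs/config123"
resp = requests.get(url, headers=headers)
print(resp.status_code, resp.json())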

+ +
+ +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0063.html b/modelarts/api-ref/modelarts_03_0063.html new file mode 100644 index 00000000..1664cfe0 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0063.html @@ -0,0 +1,27 @@ + + +

Visualization Jobs

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0064.html b/modelarts/api-ref/modelarts_03_0064.html new file mode 100644 index 00000000..6b3dc641 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0064.html @@ -0,0 +1,270 @@ + + +

Creating a Visualization Job

+

Function

This API is used to create a visualization job.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Visualization Job List and Querying the Details About a Visualization Job.

+
+

URI

POST /v1/{project_id}/visualization-jobs

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_name

+

Yes

+

String

+

Name of a visualization job. The value is a string of 1 to 20 characters consisting of only digits, letters, underscores (_), and hyphens (-).

+

job_desc

+

No

+

String

+

Description of a visualization job. The value is a string of 0 to 256 characters. By default, this parameter is left blank.

+

train_url

+

Yes

+

String

+

OBS path

+

job_type

+

No

+

String

+

Type of a visualization job. You can create visualization jobs of TensorBoard and MindInsight types. The default type is TensorBoard.

+

flavor

+

No

+

JSON

+

Specifications when a visualization job is created. You do not need to set this parameter.

+

schedule

+

No

+

JSON

+

Automatic stop setting

+
+
+ +
+ + + + + + + + + + + +
Table 3 flavor parameters

Parameter

+

Mandatory

+

Type

+

Description

+

code

+

Yes

+

String

+

Resource specification code of a visualization job. You can obtain the code through the flavor parameter.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 schedule parameters

Parameter

+

Mandatory

+

Type

+

Description

+

type

+

Yes

+

String

+

Set this parameter to stop.

+

time_unit

+

Yes

+

String

+

Unit of auto stop duration. The value is HOURS.

+

duration

+

Yes

+

Int

+

Auto stop duration. The value ranges from 0 to 24.

+
+
+
+

Response Body

Table 5 describes the response parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Parameters

Parameter

+

Type

+

Description

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

job_id

+

Long

+

ID of a visualization job

+

job_name

+

String

+

Name of a visualization job

+

status

+

Integer

+

Status of a visualization job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Time when a visualization job is created, in timestamp format

+

service_url

+

String

+

Endpoint of a visualization job

+
+
+
+

Samples

The following shows how to create a visualization job whose name is visualization-job, description is this is a visualization job, and OBS path is /obs/name/.
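A minimal Python sketch of this request, using the requests library; the endpoint, project ID, and token are placeholders, and the request body mirrors the sample values described above:

import requests

# Placeholder endpoint, project ID, and IAM token for illustration only.
endpoint = "https://modelarts.example.com"
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token", "Content-Type": "application/json"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs"
body = {
    "job_name": "visualization-job",
    "job_desc": "this is a visualization job",
    "train_url": "/obs/name/",
}
resp = requests.post(url, headers=headers, json=body)
print(resp.status_code, resp.json())  # returns job_id, job_name, status, and so on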

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0065.html b/modelarts/api-ref/modelarts_03_0065.html new file mode 100644 index 00000000..08b5ac9a --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0065.html @@ -0,0 +1,297 @@ + + +

Querying a Visualization Job List

+

Function

This API is used to query the visualization jobs that meet the search criteria.

+
+

URI

GET /v1/{project_id}/visualization-jobs

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Query parameters

Parameter

+

Mandatory

+

Type

+

Description

+

status

+

No

+

String

+

Job status. By default, all job statuses are returned. For example, to query failed jobs, set the value of status to 3, 5, 6, or 13. For details about the job statuses, see Job Statuses.

+

per_page

+

No

+

Integer

+

Number of jobs displayed on each page. The value range is [1, 1000]. Default value: 10

+

page

+

No

+

Integer

+

Index of the page to be queried. Default value: 1

+

sortBy

+

No

+

String

+

Sorting mode of the query. The value can be job_id, job_name, job_desc, create_time, or status. Default value: job_id

+

order

+

No

+

String

+

Sorting order. Options:

+
  • asc: ascending order. It is the default value.
  • desc: descending order
+

search_content

+

No

+

String

+

Search content, for example, a visualization job name. The value is a string of 0 to 64 characters. By default, this parameter is left blank.

+

workspace_id

+

No

+

String

+

Workspace where a job resides. Default value: 0

+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

job_total_count

+

Integer

+

Total number of the queried visualization jobs

+

job_count_limit

+

Integer

+

Number of visualization jobs that can be created

+

jobs

+

jobs array

+

Visualization job attributes. For details, see Table 4.

+

quotas

+

Integer

+

Maximum number of visualization jobs

+
+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 jobs parameters

Parameter

+

Type

+

Description

+

job_name

+

String

+

Name of a visualization job

+

status

+

Integer

+

Status of a visualization job. For details about the job statuses, see Job Statuses.

+

create_time

+

Long

+

Time when a visualization job is created

+

duration

+

Long

+

Visualization job running duration, in milliseconds

+

job_desc

+

String

+

Description of a visualization job

+

service_url

+

String

+

Endpoint of a visualization job

+

train_url

+

String

+

Path for storing visualization job logs

+

job_id

+

Long

+

ID of a visualization job

+

resource_id

+

String

+

Charged resource ID of a visualization job

+
+
+

Samples

The following shows how to query the first page of visualization jobs that are being deployed, with 10 jobs per page, sorted in ascending order.
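A minimal sketch of the query in Python, assuming the requests library and placeholder endpoint, project ID, and token; a status filter could be added to the params dictionary if needed:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs"
params = {"per_page": 10, "page": 1, "order": "asc"}
resp = requests.get(url, headers=headers, params=params)
result = resp.json()
print(result.get("job_total_count"), result.get("jobs"))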

+ +
+ +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0066.html b/modelarts/api-ref/modelarts_03_0066.html new file mode 100644 index 00000000..f7af0804 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0066.html @@ -0,0 +1,167 @@ + + +

Querying the Details About a Visualization Job

+

Function

This API is used to query the details about a specified visualization job based on the job ID.

+
+

URI

GET /v1/{project_id}/visualization-jobs/{job_id}

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

String

+

ID of a visualization job

+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Type

+

Description

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

job_name

+

String

+

Name of a visualization job

+

service_url

+

String

+

Endpoint of a visualization job

+

resource_id

+

String

+

Charged resource ID of a visualization job

+

job_id

+

Long

+

ID of a visualization job

+

job_desc

+

String

+

Description of a visualization job

+

duration

+

Long

+

Visualization job running duration, in milliseconds

+

create_time

+

Long

+

Time when a visualization job is created, in timestamp format

+

train_url

+

String

+

OBS path of the visualization job output file

+

status

+

Int

+

Status of a visualization job. For details about the job statuses, see Job Statuses.

+
+
+
+

Samples

The following shows how to query the visualization job whose ID is 10.
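A minimal Python sketch of this query, with placeholder endpoint, project ID, and token:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs/10"
resp = requests.get(url, headers=headers)
print(resp.json())   # job_name, status, create_time, service_url, and so on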

+ +
+ +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0067.html b/modelarts/api-ref/modelarts_03_0067.html new file mode 100644 index 00000000..42e17a67 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0067.html @@ -0,0 +1,127 @@ + + +

Modifying the Description of a Visualization Job

+

Function

This API is used to modify the description of a visualization job.

+
+

URI

PUT /v1/{project_id}/visualization-jobs/{job_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

String

+

ID of a visualization job

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_desc

+

Yes

+

String

+

Description of a visualization job. The value is a string of 0 to 256 characters.

+
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to modify the description of the visualization job whose ID is 10 to This is a ModelArts job.
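A minimal Python sketch of this request, with placeholder endpoint, project ID, and token; the request body carries only job_desc, as required by Table 2:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token", "Content-Type": "application/json"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs/10"
body = {"job_desc": "This is a ModelArts job"}
resp = requests.put(url, headers=headers, json=body)
print(resp.json().get("is_success"))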

+ + +
+

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0068.html b/modelarts/api-ref/modelarts_03_0068.html new file mode 100644 index 00000000..b6734af7 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0068.html @@ -0,0 +1,101 @@ + + +

Deleting a Visualization Job

+

Function

This API is used to delete a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Visualization Job List and Querying the Details About a Visualization Job.

+
+

URI

DELETE /v1/{project_id}/visualization-jobs/{job_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

String

+

ID of a visualization job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to delete the visualization job whose ID is 10.
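A minimal Python sketch of this request, with placeholder endpoint, project ID, and token; the call is asynchronous, so the job status should be checked afterwards as noted above:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs/10"
resp = requests.delete(url, headers=headers)
print(resp.json().get("is_success"))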

+ + +
+

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0069.html b/modelarts/api-ref/modelarts_03_0069.html new file mode 100644 index 00000000..7e132e34 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0069.html @@ -0,0 +1,101 @@ + + +

Stopping a Visualization Job

+

Function

This API is used to stop a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Visualization Job List and Querying the Details About a Visualization Job.

+
+

URI

POST /v1/{project_id}/visualization-jobs/{job_id}/stop

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

String

+

ID of a visualization job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to stop the visualization job whose ID is 10.
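A minimal Python sketch of this request, with placeholder endpoint, project ID, and token:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs/10/stop"
resp = requests.post(url, headers=headers)
print(resp.json().get("is_success"))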

+ + +
+

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0070.html b/modelarts/api-ref/modelarts_03_0070.html new file mode 100644 index 00000000..8ee6f83f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0070.html @@ -0,0 +1,101 @@ + + +

Restarting a Visualization Job

+

Function

This API is used to restart a visualization job. Calling this API is an asynchronous operation. The job status can be obtained by calling the APIs described in Querying a Visualization Job List and Querying the Details About a Visualization Job.

+
+

URI

POST /v1/{project_id}/visualization-jobs/{job_id}/restart

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

String

+

ID of a visualization job

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+
+
+
+
+

Samples

The following shows how to restart the visualization job whose ID is 10.
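A minimal Python sketch of this request, with placeholder endpoint, project ID, and token:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/visualization-jobs/10/restart"
resp = requests.post(url, headers=headers)
print(resp.json().get("is_success"))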

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0071.html b/modelarts/api-ref/modelarts_03_0071.html new file mode 100644 index 00000000..f295340f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0071.html @@ -0,0 +1,17 @@ + + +

Resource and Engine Specifications

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0072.html b/modelarts/api-ref/modelarts_03_0072.html new file mode 100644 index 00000000..70d03e2c --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0072.html @@ -0,0 +1,267 @@ + + +

Querying Job Resource Specifications

+

Function

This API is used to query the resource specifications of a specified job.

+

You must specify the resource specifications when creating a training job or an inference job.

+
+

URI

GET /v1/{project_id}/job/resource-specs

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_type

+

No

+

String

+

Job type. The value can be train or inference.

+

engine_id

+

No

+

Long

+

Engine ID of a job. Default value: 0

+

project_type

+

No

+

Integer

+

Project type. Default value: 0

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

spec_total_count

+

Integer

+

Total number of job resource specifications

+

specs

+

specs array

+

List of resource specifications attributes. For details, see Table 4.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 specs parameters

Parameter

+

Type

+

Description

+

spec_id

+

Long

+

ID of the resource specifications

+

core

+

String

+

Number of cores of the resource specifications

+

cpu

+

String

+

CPU memory of the resource specifications

+

gpu_num

+

Integer

+

Number of GPUs of the resource specifications

+

gpu_type

+

String

+

GPU type of the resource specifications

+

spec_code

+

String

+

Type of the resource specifications

+

max_num

+

Integer

+

Maximum number of nodes that can be selected

+

unit_num

+

Integer

+

Number of pricing units

+

storage

+

String

+

SSD size of a resource flavor

+

interface_type

+

Integer

+

Interface type

+

no_resource

+

Boolean

+

Whether resources of the selected specifications are insufficient. The value true indicates that no resources are available.

+
+
+
+

Samples

The following shows how to query the resource specifications of a training job.
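A minimal Python sketch of this query, with placeholder endpoint, project ID, and token; job_type is set to train as described in Table 2:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/job/resource-specs"
resp = requests.get(url, headers=headers, params={"job_type": "train"})
for spec in resp.json().get("specs", []):
    print(spec["spec_code"], spec["core"], spec["cpu"])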

+ + +
+

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0073.html b/modelarts/api-ref/modelarts_03_0073.html new file mode 100644 index 00000000..d9671380 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0073.html @@ -0,0 +1,172 @@ + + +

Querying Job Engine Specifications

+

Function

This API is used to query the engine type and version of a specified job.

+

You must specify the engine specifications when creating a training job or an inference job.

+
+

URI

GET /v1/{project_id}/job/ai-engines

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+ +
+ + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

job_type

+

No

+

String

+

Job type. The value can be train or inference.

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

is_success

+

Boolean

+

Whether the request is successful

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

engines

+

engines array

+

List of engine specifications attributes. For details, see Table 4.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 engines parameters

Parameter

+

Type

+

Description

+

engine_type

+

Integer

+

Engine type of a training job

+
  • 1: TensorFlow
  • 5: Spark_MLlib
  • 6: Scikit Learn
  • 9: XGBoost-Sklearn
  • 10: PyTorch
  • 13: Ascend-Powered-Engine
  • 17: MindSpore-GPU
+

engine_id

+

Long

+

ID of the engine selected for a training job

+

engine_name

+

String

+

Name of the engine selected for a training job

+

engine_version

+

String

+

Version of the engine selected for a training job

+
+
+
+

Samples

The following shows how to query the engine specifications of a training job.
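A minimal Python sketch of this query, with placeholder endpoint, project ID, and token; job_type is set to train as described in Table 2:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/job/ai-engines"
resp = requests.get(url, headers=headers, params={"job_type": "train"})
for engine in resp.json().get("engines", []):
    print(engine["engine_id"], engine["engine_name"], engine["engine_version"])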

+ +
+ +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0074.html b/modelarts/api-ref/modelarts_03_0074.html new file mode 100644 index 00000000..81680c41 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0074.html @@ -0,0 +1,131 @@ + + +

Job Statuses

+

Table 1 describes the job statuses.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Job statuses

Status Value

+

Description

+

0

+

JOBSTAT_UNKNOWN: Unknown status.

+

1

+

JOBSTAT_INIT: The job is being initialized.

+

2

+

JOBSTAT_IMAGE_CREATING: The job image is being created.

+

3

+

JOBSTAT_IMAGE_FAILED: Failed to create the job image.

+

4

+

JOBSTAT_SUBMIT_TRYING: The job is being submitted.

+

5

+

JOBSTAT_SUBMIT_FAILED: Failed to submit the job.

+

6

+

JOBSTAT_DELETE_FAILED: Failed to delete the job.

+

7

+

JOBSTAT_WAITING: The job is queuing.

+

8

+

JOBSTAT_RUNNING: The job is running.

+

9

+

JOBSTAT_KILLING: The job is being canceled.

+

10

+

JOBSTAT_COMPLETED: The job has been completed.

+

11

+

JOBSTAT_FAILED: Failed to run the job.

+

12

+

JOBSTAT_KILLED: Job canceled successfully.

+

13

+

JOBSTAT_CANCELED: Job canceled.

+

14

+

JOBSTAT_LOST: Job lost.

+

15

+

JOBSTAT_SCALING: The job is being scaled.

+

16

+

JOBSTAT_SUBMIT_MODEL_FAILED: Failed to submit the model.

+

17

+

JOBSTAT_DEPLOY_SERVICE_FAILED: Failed to deploy the service.

+

18

+

JOBSTAT_CHECK_INIT: The job review is being initialized.

+

19

+

JOBSTAT_CHECK_RUNNING: The job is being reviewed.

+

20

+

JOBSTAT_CHECK_RUNNING_COMPLETED: The job review is completed.

+

21

+

JOBSTAT_CHECK_FAILED: Failed to review the job.

+
+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0075.html b/modelarts/api-ref/modelarts_03_0075.html new file mode 100644 index 00000000..23be36a0 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0075.html @@ -0,0 +1,11 @@ + + +

Model Management

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0076.html b/modelarts/api-ref/modelarts_03_0076.html new file mode 100644 index 00000000..f9e26188 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0076.html @@ -0,0 +1,782 @@ + + +

Importing a Model

+

Function

This API is used to import a model.

+

Ensure that the execution code and model have been uploaded to OBS. By default, the models generated by a training job are stored in OBS.

+
+

URI

POST /v1/{project_id}/models

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

model_name

+

Yes

+

String

+

Model name. Enter 1 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.

+

model_version

+

Yes

+

String

+

Model version in the format of Digit.Digit.Digit. The value range of the digits is [1, 99]. Note that no part of the version number can start with 0. For example, 01.01.01 is not allowed.

+

source_location

+

Yes

+

String

+

OBS path where the model is located or the SWR image location

+

source_job_id

+

No

+

String

+

ID of the source training job. If the model is generated from a training job, input this parameter for source tracing. If the model is imported from a third-party meta model, leave this parameter blank. By default, this parameter is left blank.

+

source_job_version

+

No

+

String

+

Version of the source training job. If the model is generated from a training job, input this parameter for source tracing. If the model is imported from a third-party meta model, leave this parameter blank. By default, this parameter is left blank.

+

source_type

+

No

+

String

+

Model source type. The value can only be auto, which indicates ExeML models (model download is not supported). If a model is deployed through a training job, this parameter is left blank by default.

+

model_type

+

Yes

+

String

+

Model type. The value can be TensorFlow, MXNet, Caffe, Spark_MLlib, Scikit_Learn, XGBoost, Image, PyTorch, or Template, which is read from the configuration file.

+

runtime

+

No

+

String

+

For details about runtime options, see Model Management > Importing a Model > Importing a Meta Model from OBS in ModelArts User Guide.

+

description

+

No

+

String

+

Model remarks. The value contains a maximum of 100 characters and cannot contain special characters such as &!'\"<>=.

+

model_algorithm

+

No

+

String

+

Model algorithm. If the algorithm is read from the configuration file, this parameter can be left blank. For example, the value can be predict_analysis, object_detection, or image_classification. The value must start with a letter and contain no more than 36 characters. Chinese characters and special characters (&!'\"<>=) are not allowed.

+

execution_code

+

No

+

String

+

OBS path for storing the execution code. By default, this parameter is left blank. The name of the execution code file is fixed to customize_service.py. The inference code file must be stored in the model directory. If this parameter is left blank, the system can automatically identify the inference code in the model directory.

+

input_params

+

No

+

params array

+

Collection of input parameters of a model. By default, this parameter is left blank. For details, see Table 3. If the collection of input parameters is read from apis in the configuration file, you only need to provide the initial_config field and do not need to set input_params. input_params is optional when the initial_config field exists.

+

output_params

+

No

+

params array

+

Collection of output parameters of a model. By default, this parameter is left blank. For details, see Table 3. If the collection of output parameters is read from apis in the configuration file, you only need to provide the initial_config field and do not need to set output_params.

+

dependencies

+

No

+

dependency array

+

Package required for inference code and model. By default, this parameter is left blank. If the package is read from the configuration file, this parameter can be left blank. Table 4 shows the dependency structure.

+

model_metrics

+

No

+

String

+

Model precision, which is read from the configuration file

+

apis

+

No

+

apis array

+

All apis input and output parameters of the model. If the parameters are read from the configuration file, this parameter can be left blank. For details, see Table 10.

+

initial_config

+

No

+

String

+

Character string converted from the final model configuration file. It is recommended that the initial_config file be used to provide information about the fields such as apis, dependencies, input_params, and output_params.

+

workspace_id

+

No

+

String

+

ID of the workspace to which a service belongs. The default value is 0, indicating the default workspace.

+

model_docs

+

No

+

doc array

+

List of model description documents. A maximum of three documents are supported. For details, see Table 7.

+

install_type

+

No

+

String array

+

Deployment type. Only lowercase letters are supported. The value can be real-time or batch. Default value: [real-time, batch]

+

template

+

No

+

Template object

+

Template configuration items. This parameter is mandatory when model_type is set to Template. For details, see Table 8.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 params parameters

Parameter

+

Mandatory

+

Type

+

Description

+

url

+

Yes

+

String

+

API URL

+

method

+

Yes

+

String

+

Request method. Possible values are post and get.

+

protocol

+

Yes

+

String

+

Request protocol

+

param_name

+

Yes

+

String

+

Parameter name. It is recommended that the parameter name contain a maximum of 64 characters.

+

param_type

+

Yes

+

String

+

Parameter type. The value can be int, string, float, timestamp, date, or file.

+

min

+

No

+

Number

+

This parameter is optional when param_type is set to int or float. By default, this parameter is left blank.

+

max

+

No

+

Number

+

This parameter is optional when param_type is set to int or float. By default, this parameter is left blank.

+

param_desc

+

No

+

String

+

Parameter description. It is recommended that the parameter description contain a maximum of 100 characters. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 dependency parameters

Parameter

+

Mandatory

+

Type

+

Description

+

installer

+

Yes

+

String

+

Installation mode. Only pip is supported.

+

packages

+

Yes

+

package array

+

Collection of dependency packages

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 package parameters

Parameter

+

Mandatory

+

Type

+

Description

+

package_name

+

Yes

+

String

+

Name of a dependency package. Ensure that the package name is correct and exists. Chinese characters and special characters (&!'"<>=) are not allowed.

+

package_version

+

No

+

String

+

Version of a dependency package. If this parameter is left blank, the latest version is installed by default. Chinese characters and special characters (&!'"<>=) are not allowed.

+

restraint

+

No

+

String

+

Version restriction. The value can be EXACT, ATLEAST, or ATMOST. This parameter is mandatory only when package_version exists.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 metric parameters

Parameter

+

Mandatory

+

Type

+

Description

+

f1

+

No

+

Double

+

F1 score

+

recall

+

No

+

Double

+

Recall

+

precision

+

No

+

Double

+

Precision

+

accuracy

+

No

+

Double

+

Accuracy

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 7 doc parameters

Parameter

+

Mandatory

+

Type

+

Description

+

doc_name

+

Yes

+

String

+

Document name, which must start with a letter. Enter 1 to 48 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.

+

doc_url

+

Yes

+

String

+

HTTP(S) link of the document

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 Template parameters

Parameter

+

Mandatory

+

Type

+

Description

+

infer_format

+

No

+

String

+

Input and output mode. When this parameter is used, the input and output mode built in the template does not take effect.

+

template_id

+

Yes

+

String

+

ID of the used template. The template has a built-in input and output mode.

+

template_inputs

+

Yes

+

Array of TemplateInputs objects

+

Template input configuration, specifying the source path for configuring a model. For details, see Table 9.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 9 template_inputs parameters

Parameter

+

Mandatory

+

Type

+

Description

+

input

+

Yes

+

String

+

Template input path, which can be an OBS file path or OBS directory path. When you use a template with multiple input items to create a model, if the target paths input_properties specified in the template are the same, the OBS directory or OBS file name entered here must be unique to prevent files from being overwritten.

+

input_id

+

Yes

+

String

+

Input item ID, which is obtained from the template details.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 apis parameters

Parameter

+

Mandatory

+

Type

+

Description

+

input_params

+

No

+

InputParams structure

+

Input parameters in apis, described in JSON Schema format. For details, see Table 11.

+

method

+

No

+

String

+

Request method. The options are POST and GET.

+

output_params

+

No

+

OutputParams structure

+

Output parameters in apis, described in JSON Schema format. For details, see Table 12.

+

protocol

+

No

+

String

+

Request protocol.

+

url

+

No

+

String

+

Inference request URL.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 11 InputParams parameters

Parameter

+

Mandatory

+

Type

+

Description

+

properties

+

No

+

Map<String,Object>

+

Properties of an object element in JSON Schema. You can set parameters, including the parameter name and type, in properties.

+

type

+

No

+

String

+

Type in JSON Schema, which can be object.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 12 OutputParams parameters

Parameter

+

Mandatory

+

Type

+

Description

+

properties

+

No

+

Map<String,Object>

+

Properties of an object element in JSON Schema. You can set parameters, including the parameter name and type, in properties.

+

type

+

No

+

String

+

Type in JSON Schema, which can be object.

+
+
+
+

Response Body

Table 13 describes the response parameters. +
+ + + + + + + + + +
Table 13 Parameters

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+
+
+
+
+

Samples

The following shows how to import a model whose name is mnist, version is 1.0.0, and type is TensorFlow. The model file comes from an OBS bucket.
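A minimal Python sketch of this request, with placeholder endpoint, project ID, and token; the OBS path in source_location is hypothetical and should point to the directory holding the exported model files:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token", "Content-Type": "application/json"}

url = f"{endpoint}/v1/{project_id}/models"
body = {
    "model_name": "mnist",
    "model_version": "1.0.0",
    "model_type": "TensorFlow",
    # Hypothetical OBS location; replace with the path to your model directory.
    "source_location": "https://your-bucket.obs.example.com/mnist/model/",
}
resp = requests.post(url, headers=headers, json=body)
print(resp.json().get("model_id"))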

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0077.html b/modelarts/api-ref/modelarts_03_0077.html new file mode 100644 index 00000000..a91e59ec --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0077.html @@ -0,0 +1,421 @@ + + +

Querying a Model List

+

Function

This API is used to query the models that meet the search criteria.

+
+

URI

GET /v1/{project_id}/models

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Search parameters

Parameter

+

Mandatory

+

Type

+

Description

+

model_name

+

No

+

String

+

Model name. Fuzzy match is supported.

+
NOTE:

If a model name contains an underscore (_), add the exact_match parameter to the request and set the parameter value to true because the underscore needs to be escaped. This ensures that the query operation can be performed properly.

+
+

model_version

+

No

+

String

+

Model version

+

model_status

+

No

+

String

+

Model status. You can query models based on the model status. Options:

+
  • Publishing
  • Published
  • Failed
  • Building
  • Building_failed
+

model_type

+

No

+

String

+

Model type. The models of this type are queried. model_type and not_model_type are mutually exclusive and cannot co-exist.

+

not_model_type

+

No

+

String

+

Model type. A list of models of types except for this type are queried.

+

description

+

No

+

String

+

Description. Fuzzy match is supported.

+

offset

+

No

+

Integer

+

Index of the page to be queried. Default value: 0

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The default value is 100. The recommended value ranges from 10 to 50.

+

sort_by

+

No

+

String

+

Sorting mode. The value can be create_at, model_version, or model_size. Default value: create_at

+

order

+

No

+

String

+

Sorting order. The value can be asc or desc, indicating ascending or descending order. Default value: desc

+

workspace_id

+

No

+

String

+

ID of the workspace to which a service belongs. The default value is 0, indicating the default workspace.

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

total_count

+

Integer

+

Total number of models that meet the search criteria when no paging is implemented

+

count

+

Integer

+

Number of models

+

models

+

model array

+

Model metadata. For details, see Table 4.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 model parameters

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

model_status

+

String

+

Model status

+

model_type

+

String

+

Model type. The value can be TensorFlow, MXNet, Caffe, Spark_MLlib, Scikit_Learn, XGBoost, MindSpore, Image, or PyTorch.

+

model_size

+

Long

+

Model size, in bytes

+

tenant

+

String

+

Tenant to which a model belongs

+

project

+

String

+

Project to which a model belongs

+

owner

+

String

+

User to which a model belongs

+

create_at

+

Long

+

Time when a model is created, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

description

+

String

+

Model description

+

source_type

+

String

+

Model source type. This parameter is valid only when a model is deployed through ExeML. The value is auto. You do not need to set this parameter for a model deployed through a training job. By default, this parameter is left blank.

+

workspace_id

+

String

+

Workspace ID

+

model_source

+

String

+

Model source. Options:

+
  • algos: built-in algorithm
  • custom: custom model
+

tunable

+

Boolean

+

Whether a model can be tuned. Options:

+
  • true: yes
  • false: no
+

market_flag

+

Boolean

+

Whether a model is subscribed from the marketplace. Options:

+
  • true: yes
  • false: no
+

publishable_flag

+

Boolean

+

Whether a model can be published to the marketplace. Options:

+
  • true: yes
  • false: no
+

install_type

+

String array

+

Model deployment type, determining which service a model can be deployed as. The example value is real-time (real-time service).

+

subscription_id

+

String

+

Model subscription ID.

+

extra

+

String

+

Extended field.

+

specification

+

Specification structure

+

Minimum deployment specifications. For details, see Table 5.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 5 Specification parameters

Parameter

+

Type

+

Description

+

min_cpu

+

String

+

Minimal CPU specifications

+

min_gpu

+

String

+

Minimal GPU specifications

+

min_memory

+

String

+

Minimum memory capacity

+

min_ascend

+

String

+

Minimal Ascend specifications

+
+
+
+

Samples

The following shows how to query models.
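A minimal Python sketch of this query, with placeholder endpoint, project ID, and token; limit and offset are set explicitly for paging:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

url = f"{endpoint}/v1/{project_id}/models"
resp = requests.get(url, headers=headers, params={"limit": 10, "offset": 0})
for model in resp.json().get("models", []):
    print(model["model_id"], model["model_name"], model["model_version"])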

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0078.html b/modelarts/api-ref/modelarts_03_0078.html new file mode 100644 index 00000000..b750b232 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0078.html @@ -0,0 +1,651 @@ + + +

Querying the Details About a Model

+

Function

This API is used to query details about a model based on the model ID.

+
+

URI

GET /v1/{project_id}/models/{model_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

model_id

+

Yes

+

String

+

Model ID

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

create_at

+

Long

+

Time when a model is created, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

tenant

+

String

+

Tenant to which a model belongs

+

project

+

String

+

Project to which a model belongs

+

owner

+

String

+

User to which a model belongs

+

source_location

+

String

+

OBS path where the model is located or the SWR image location

+

source_job_id

+

String

+

ID of the source training job

+

source_job_version

+

String

+

Version of the source training job

+

source_type

+

String

+

Model source type. If a model is deployed through ExeML, the value is auto. If a model is deployed through a training job or an OBS model file, this parameter is left blank.

+

model_type

+

String

+

Model type. The value can be TensorFlow, MXNet, Spark_MLlib, Scikit_Learn, XGBoost, MindSpore, Image, or PyTorch.

+

model_size

+

Long

+

Model size, in bytes

+

model_status

+

String

+

Model status

+

runtime

+

String

+

Model runtime environment

+

description

+

String

+

Model description

+

execution_code

+

String

+

OBS path for storing the execution code. The name of the execution code file is fixed to customize_service.py.

+

schema_doc

+

String

+

Download address of the model schema file

+

image_address

+

String

+

Image path generated after model packaging

+

input_params

+

params array

+

Collection of input parameters of a model. For details, see Table 3.

+

output_params

+

params array

+

Collection of output parameters of a model. For details, see Table 3.

+

dependencies

+

dependency array

+

Package required for running the inference code and model. For details, see Table 4.

+

model_metrics

+

String

+

Model precision

+

apis

+

String

+

All input and output apis parameter information of a model, which is obtained from the model preview

+

model_source

+

String

+

Model source. Options:

+
  • algos: built-in algorithm
  • custom: custom model
+

tunable

+

Boolean

+

Whether a model can be tuned. Options:

+
  • true: yes
  • false: no
+

market_flag

+

Boolean

+

Whether a model is subscribed from the marketplace. Options:

+
  • true: yes
  • false: no
+

publishable_flag

+

Boolean

+

Whether a model can be published to the marketplace. Options:

+
  • true: yes
  • false: no
+

model_docs

+

GuideDoc array

+

List of template documents. For details, see Table 7.

+

health

+

Health structure

+

Model health check interface information. For details, see Table 8.

+

model_algorithm

+

String

+

Model algorithm type. The value can be predict_analysis, object_detection, or image_classification.

+

model_labels

+

String array

+

Model label array.

+

labels_map

+

Map

+

Model label map. The key is fixed to labels, and the value is the model label array.

+

workspace_id

+

String

+

ID of the workspace to which a service belongs. The default value is 0, indicating the default workspace.

+

install_type

+

String array

+

Supported service type for deployment.

+

specification

+

Specification structure

+

Minimum model deployment specifications. For details, see Table 9.

+

config

+

String

+

Model configurations.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 params parameters

Parameter

+

Type

+

Description

+

url

+

String

+

API URL

+

method

+

String

+

Request method, for example, post

+

protocol

+

String

+

Request protocol, for example, HTTP

+

param_name

+

String

+

Parameter name, which contains a maximum of 64 characters

+

param_type

+

String

+

Parameter type. The value can be int, string, float, timestamp, date, or file.

+

min

+

Number

+

This parameter is optional when param_type is set to int or float. By default, this parameter is left blank.

+

max

+

Number

+

This parameter is optional when param_type is set to int or float. By default, this parameter is left blank.

+

param_desc

+

String

+

Parameter description, which contains a maximum of 100 characters. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + +
Table 4 dependency parameters

Parameter

+

Type

+

Description

+

installer

+

String

+

Installer

+

packages

+

package array

+

Collection of dependency packages. For details, see Table 5.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 5 package parameters

Parameter

+

Type

+

Description

+

package_name

+

String

+

Name of a dependency package

+

package_version

+

String

+

Version of a dependency package

+

restraint

+

String

+

Version restriction. The value can be EXACT, ATLEAST, or ATMOST.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 metric parameters

Parameter

+

Type

+

Description

+

f1

+

Double

+

F1 score

+

recall

+

Double

+

Recall

+

precision

+

Double

+

Precision

+

accuracy

+

Double

+

Accuracy

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 GuideDoc parameters

Parameter

+

Type

+

Description

+

doc_name

+

String

+

Document name

+

doc_url

+

String

+

HTTP(S) link of the document

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 Health parameters

Parameter

+

Type

+

Description

+

url

+

String

+

URL of the health check interface

+

protocol

+

String

+

Request protocol of the health check interface. Only HTTP is supported.

+

initial_delay_seconds

+

String

+

After an instance is started, a health check starts after the number of seconds configured in initial_delay_seconds.

+

timeout_seconds

+

String

+

Health check timeout

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 Specification parameters

Parameter

+

Type

+

Description

+

min_cpu

+

String

+

Minimum CPU

+

min_gpu

+

String

+

Minimum GPU

+

min_memory

+

String

+

Minimum memory capacity

+

min_ascend

+

String

+

Minimum Ascend

+
+
+
+

Samples

The following shows how to query details about a model based on the model ID.
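A minimal Python sketch of this query, with placeholder endpoint, project ID, token, and a hypothetical model ID:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

model_id = "your-model-id"   # hypothetical ID returned when the model was imported
url = f"{endpoint}/v1/{project_id}/models/{model_id}"
resp = requests.get(url, headers=headers)
result = resp.json()
print(result.get("model_name"), result.get("model_version"), result.get("model_status"))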

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0079.html b/modelarts/api-ref/modelarts_03_0079.html new file mode 100644 index 00000000..0cb95f66 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0079.html @@ -0,0 +1,149 @@ + + +

Deleting a Model

+

Function

This API is used to delete a model based on the model ID. When cascade is set to true, the model specified by the model ID and models of different versions with the same name as the specified model are deleted. By default, only the model with the specified model ID is deleted.

+
+

URI

DELETE /v1/{project_id}/models/{model_id}

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

model_id

+

Yes

+

String

+

ID of the model to be deleted

+
+
+ +
+ + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

cascade

+

No

+

Boolean

+

The default value is false, indicating that only the model with the specified model ID is deleted. The value true indicates that not only the model with the specified model ID but also all models with the same name but different versions as the specified model will be deleted. A maximum of 20 models can be deleted at a time.

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + +
Table 3 Parameter description

Parameter

+

Type

+

Description

+

delete_success_list

+

String array

+

ID list of models successfully deleted

+

delete_failed_list

+

DeleteModelFailResult array

+

List of models that fail to be deleted. For details, see Table 4.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 4 DeleteModelFailResult parameters

Parameter

+

Type

+

Description

+

model_id

+

String

+

ID of a model that fails to be deleted

+

error_code

+

String

+

Error code of the deletion failure

+

error_message

+

String

+

Error message of the deletion failure

+
+
+
+
+

Samples

The following shows how to delete the model whose ID is 023e90be-7e2a-4169-bab4-1bc34ff0ca45 and all models of the same name but different versions.
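A minimal Python sketch of this request, with placeholder endpoint, project ID, and token; cascade is passed as a query parameter so that all versions of the model with the same name are deleted:

import requests

endpoint = "https://modelarts.example.com"   # placeholder values for illustration
project_id = "your-project-id"
headers = {"X-Auth-Token": "your-iam-token"}

model_id = "023e90be-7e2a-4169-bab4-1bc34ff0ca45"
url = f"{endpoint}/v1/{project_id}/models/{model_id}"
resp = requests.delete(url, headers=headers, params={"cascade": "true"})
result = resp.json()
print(result.get("delete_success_list"), result.get("delete_failed_list"))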

+
+ + +

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0081.html b/modelarts/api-ref/modelarts_03_0081.html new file mode 100644 index 00000000..c8172a11 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0081.html @@ -0,0 +1,27 @@ + + +

Service Management

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0082.html b/modelarts/api-ref/modelarts_03_0082.html new file mode 100644 index 00000000..f8047c2d --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0082.html @@ -0,0 +1,552 @@ + + +

Deploying a Model as a Service

+

Function

This API is used to deploy a model as a service.

+
+

URI

POST /v1/{project_id}/services

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

service_name

+

Yes

+

String

+

Service name. Enter 1 to 64 characters. Only letters, digits, hyphens (-), and underscores (_) are allowed.

+

description

+

No

+

String

+

Service description, which contains a maximum of 100 characters. By default, this parameter is left blank.

+

infer_type

+

Yes

+

String

+

Inference mode. The value can be real-time or batch.

+
  • real-time: real-time service, which can be stopped as scheduled.
  • batch: batch service, which can be configured as tasks to run in batches. When the tasks are completed, the service stops automatically.
+

workspace_id

+

No

+

String

+

ID of the workspace to which a service belongs. The default value is 0, indicating the default workspace.

+

vpc_id

+

No

+

String

+

ID of the VPC to which a real-time service instance is deployed. By default, this parameter is left blank.

+
  • In this case, ModelArts allocates a dedicated VPC to each user so that users are isolated from each other. If you need to access other service components in a VPC of a service instance, set this parameter to the ID of the corresponding VPC.
  • Once a VPC is configured, it cannot be modified. If both vpc_id and cluster_id are configured, only the dedicated resource pool takes effect.
+

subnet_network_id

+

No

+

String

+

ID of a subnet. By default, this parameter is left blank.

+

This parameter is mandatory when vpc_id is configured. Enter the network ID displayed in the subnet details on the VPC console. A subnet provides dedicated network resources that are isolated from other networks.

+

security_group_id

+

No

+

String

+

Security group. By default, this parameter is left blank. This parameter is mandatory when vpc_id is configured.

+

A security group is a virtual firewall that provides secure network access control policies for service instances. A security group must contain at least one inbound rule to permit the requests whose protocol is TCP, source address is 0.0.0.0/0, and port number is 8080.

+

cluster_id

+

No

+

String

+

ID of a dedicated resource pool. This parameter is left blank by default, indicating that no dedicated resource pool is used. When using a dedicated resource pool to deploy services, ensure that the resource pool is running properly. After this parameter is set, the network configuration of the cluster is used, and the vpc_id parameter does not take effect. If this parameter is configured together with cluster_id in real-time config, cluster_id in real-time config is used preferentially.

+

config

+

Yes

+

config array corresponding to infer_type

+

Model running configuration. If infer_type is batch, you can configure only one model. If infer_type is real-time, you can configure multiple models and assign weights based on service requirements. However, the versions of these models cannot be the same.

+

schedule

+

No

+

schedule array

+

Service scheduling configuration, which can be configured only for real-time services. By default, this parameter is not used. Services run for a long time. For details, see Table 5.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 config parameters of real-time

Parameter

+

Mandatory

+

Type

+

Description

+

model_id

+

Yes

+

String

+

Model ID

+

weight

+

Yes

+

Integer

+

Traffic weight allocated to a model. This parameter is mandatory only when infer_type is set to real-time. The sum of the weights must be 100.

+

specification

+

Yes

+

String

+

Resource specifications. Select specifications based on service requirements.

+

custom_spec

+

No

+

custom_spec structure

+

Custom specifications. Set this parameter when you use a dedicated resource pool. For details, see Table 6.

+

instance_count

+

Yes

+

Integer

+

Number of instances deployed in a model. The value must be greater than 0.

+

envs

+

No

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank.

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

cluster_id

+

No

+

string

+

ID of a dedicated resource pool. By default, this parameter is left blank, indicating that no dedicated resource pool is used.

+
+
+ +
Table 4 config parameters of batch

Parameter

+

Mandatory

+

Type

+

Description

+

model_id

+

Yes

+

String

+

Model ID

+

specification

+

Yes

+

String

+

Resource flavor. Available options: modelarts.vm.cpu.2u and modelarts.vm.gpu.p4

+

instance_count

+

Yes

+

Integer

+

Number of instances deployed in a model.

+

envs

+

No

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank.

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

src_type

+

No

+

String

+

Data source type. This parameter can be set to ManifestFile. By default, this parameter is left blank, indicating that only files in the src_path directory are read. If this parameter is set to ManifestFile, src_path must be a specific Manifest file path. You can specify multiple data paths in the Manifest file.

+

src_path

+

Yes

+

String

+

OBS path of the input data of a batch job

+

dest_path

+

Yes

+

String

+

OBS path of the output data of a batch job

+

req_uri

+

Yes

+

String

+

Inference API called in a batch task, which is a REST API in the model image. Select an API URI from the model config.json file for inference. If a ModelArts built-in inference image is used, the value of this parameter is /.

+

mapping_type

+

Yes

+

String

+

Mapping type of the input data. The value can be file or csv.

+
  • If you select file, each inference request corresponds to a file in the input data path. When this mode is used, req_uri of this model can have only one input parameter and the type of this parameter is file.
  • If you select csv, each inference request corresponds to a row of data in the CSV file. When this mode is used, the files in the input data path can only be in CSV format and mapping_rule needs to be configured to map the index of each parameter in the inference request body to the CSV file.
+

mapping_rule

+

No

+

Map

+

Mapping between input parameters and CSV data. This parameter is mandatory only when mapping_type is set to csv.

+

Mapping rule: The mapping rule comes from the input parameter (input_params) in the model configuration file config.json. When type is set to string, number, integer, or boolean, you need to configure the index parameter. For details, see .

+

The index must be an integer starting from 0. If the value of index does not comply with this rule, the parameter is ignored in the request. After the mapping rule is configured, the corresponding CSV data must be separated by commas (,).

+
+
+
+ +
Table 5 schedule parameters

Parameter

+

Mandatory

+

Type

+

Description

+

type

+

Yes

+

String

+

Scheduling type. Only the value stop is supported.

+

time_unit

+

Yes

+

String

+

Scheduling time unit. Options:

+
  • DAYS
  • HOURS
  • MINUTES
+

duration

+

Yes

+

Integer

+

Value that maps to the time unit. For example, if the task stops after two hours, set time_unit to HOURS and duration to 2.

+
+
+ +
Table 6 custom_spec parameters

Parameter

+

Mandatory

+

Type

+

Description

+

cpu

+

Yes

+

Float

+

Number of required CPUs

+

memory

+

Yes

+

Integer

+

Required memory capacity, in MB

+

gpu_p4

+

No

+

Float

+

Number of GPUs, which can be a decimal. This parameter is optional and is not used by default.

+

ascend_a310

+

No

+

Integer

+

Number of NPUs. This parameter is optional and is not used by default.

+
+
+
+

Response Body

Table 7 describes the response parameters. +
Table 7 Parameters

Parameter

+

Type

+

Description

+

service_id

+

String

+

Service ID

+

resource_ids

+

Array of strings

+

Array of resource IDs generated by the target model

+
+
+
+
+

Samples

The following shows how to deploy different types of services.

+
+ + +
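The sample body itself is not reproduced here. As a minimal sketch, a deployment request can be assembled from the parameter tables above using Python and the requests library. The snippet assumes a POST to /v1/{project_id}/services with an X-Auth-Token header for IAM authentication; the endpoint, project ID, model ID, and token are placeholder assumptions and must be replaced with real values.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, IAM token, and model ID.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
TOKEN = "your-iam-token"

# Deploy one model as a real-time service with 100% of the traffic and an
# auto-stop schedule of 2 hours (see the schedule parameter table above).
payload = {
    "service_name": "mnist-demo",
    "description": "demo real-time service",
    "infer_type": "real-time",
    "config": [
        {
            "model_id": "your-model-id",
            "weight": 100,
            "specification": "modelarts.vm.cpu.2u",
            "instance_count": 1,
        }
    ],
    "schedule": [{"type": "stop", "time_unit": "HOURS", "duration": 2}],
}

resp = requests.post(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services",
    json=payload,
    headers={"X-Auth-Token": TOKEN},
)
print(resp.status_code, resp.json())  # on success the body carries service_id and resource_ids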

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0083.html b/modelarts/api-ref/modelarts_03_0083.html new file mode 100644 index 00000000..eee9a0b6 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0083.html @@ -0,0 +1,482 @@ + + +

Querying a Service List

+

Function

This API is used to query the list of model services.

+
+

URI

GET /v1/{project_id}/services

+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+ +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

service_id

+

No

+

String

+

Service ID

+

service_name

+

No

+

String

+

Service name

+

model_id

+

No

+

String

+

Model ID

+

cluster_id

+

No

+

String

+

Dedicated resource pool ID, which is left blank by default

+

workspace_id

+

No

+

String

+

Workspace ID. Default value: 0

+

infer_type

+

No

+

String

+

Inference mode. The value can be real-time or batch. By default, this parameter is left blank.

+

status

+

No

+

String

+

Service status. By default, the service status is not filtered. You can query information by service status. The possible values are running, deploying, concerning, failed, stopped, and finished.

+

offset

+

No

+

Integer

+

Start page of the paging list. Default value: 0

+

limit

+

No

+

Integer

+

Maximum number of records returned on each page. The default value is 1000. The recommended value ranges from 10 to 50.

+

sort_by

+

No

+

String

+

Sorting mode. The value can be publish_at or service_name. Default value: publish_at

+

order

+

No

+

String

+

Sorting order. The value can be asc or desc, indicating ascending or descending order. Default value: desc

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
Table 3 Parameters

Parameter

+

Type

+

Description

+

total_count

+

Integer

+

Total number of services that meet the search criteria when no paging is implemented

+

count

+

Integer

+

Number of services in the query result. If offset and limit are not set, the values of count and total_count are the same.

+

services

+

service array

+

Collection of the queried services. For details, see Table 4.

+
+
+ +
Table 4 service parameters

Parameter

+

Type

+

Description

+

service_id

+

String

+

Service ID

+

service_name

+

String

+

Service name

+

description

+

String

+

Service description

+

tenant

+

String

+

Tenant to which a service belongs

+

project

+

String

+

Project to which a service belongs

+

owner

+

String

+

User to which a service belongs

+

publish_at

+

Long

+

Latest service release time, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

infer_type

+

String

+

Inference mode. The value can be real-time or batch.

+

workspace_id

+

String

+

Workspace ID. Default value: 0

+

status

+

String

+

Service status. The value can be running, deploying, concerning, failed, stopped, or finished.

+
  • running: The service is running properly.
  • deploying: The service is being deployed or scheduling resources are being deployed.
  • concerning: The backend instances are abnormal. For example, if there are multiple instances and some instances are abnormal, the normal instances will consume resources. In this case, the service status is concerning.
  • failed: The service fails to be deployed. For details about the failure cause, see the event and log tab pages.
  • stopped: The service has been stopped.
  • finished: This state is displayed only for the batch service, indicating that the service running is complete.
+

start_time

+

Number

+

Batch service start time, in milliseconds calculated from 1970.1.1 0:0:0 UTC. This parameter is returned only when the service is a batch service.

+

finished_time

+

Number

+

Batch service end time, in milliseconds calculated from 1970.1.1 0:0:0 UTC. This parameter is returned only when the service is a batch service.

+

progress

+

integer

+

Deployment progress. This parameter is returned when the status is deploying.

+

invocation_times

+

Long

+

Total number of service calls

+

failed_times

+

Long

+

Number of failed service calls

+

is_shared

+

Boolean

+

Whether a service is subscribed

+

shared_count

+

Number

+

Number of subscribed services

+

schedule

+

schedule array

+

Service scheduling. For details, see Table 5.

+

due_time

+

number

+

Time when a real-time service automatically stops, in milliseconds calculated from 1970.1.1 0:0:0 UTC.

+

operation_time

+

number

+

Operation time of a request

+

is_opened_sample_collection

+

String

+

Whether to enable data collection. The default value is false.

+

transition_at

+

number

+

Time when the service status changes

+

is_free

+

Boolean

+

Whether a service uses the free-of-charge flavor

+

additional_properties

+

Map<String, Object>

+

Additional service attribute. If this parameter is not set, no value is returned.

+
+
+
+ +
Table 5 schedule parameters

Parameter

+

Mandatory

+

Type

+

Description

+

type

+

Yes

+

String

+

Scheduling type. Only the value stop is supported.

+

time_unit

+

Yes

+

String

+

Scheduling time unit. Options:

+
  • DAYS
  • HOURS
  • MINUTES
+

duration

+

Yes

+

Integer

+

Value that maps to the time unit. For example, if the task stops after two hours, set time_unit to HOURS and duration to 2.

+
+
+
+

Samples

The following shows how to query model services.

+
+ + +
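As a minimal sketch only, the query parameters in Table 2 can be passed as URL parameters. The endpoint, project ID, and token in the following Python snippet are placeholder assumptions, not values from this document.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, and IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
TOKEN = "your-iam-token"

# List running real-time services, 10 per page, newest first.
params = {
    "infer_type": "real-time",
    "status": "running",
    "offset": 0,
    "limit": 10,
    "sort_by": "publish_at",
    "order": "desc",
}

resp = requests.get(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services",
    params=params,
    headers={"X-Auth-Token": TOKEN},
)
body = resp.json()
print(body.get("total_count"), [s.get("service_name") for s in body.get("services", [])])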

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0084.html b/modelarts/api-ref/modelarts_03_0084.html new file mode 100644 index 00000000..ad544d93 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0084.html @@ -0,0 +1,781 @@ + + +

Querying the Details About a Service

+

Function

This API is used to query the details about a model service based on the service ID.

+
+

URI

GET /v1/{project_id}/services/{service_id}

+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

service_id

+

Yes

+

String

+

Service ID

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
Table 2 Parameters

Parameter

+

Type

+

Description

+

service_id

+

String

+

Service ID

+

service_name

+

String

+

Service name

+

description

+

String

+

Service description

+

tenant

+

String

+

Tenant to which a service belongs

+

project

+

String

+

Project to which a service belongs

+

owner

+

String

+

User to which a service belongs

+

publish_at

+

Number

+

Latest service release time, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

infer_type

+

String

+

Inference mode, which can be real-time, edge, or batch

+

workspace_id

+

String

+

Workspace ID

+

cluster_id

+

String

+

ID of the dedicated resource pool used by the real-time or batch service. This parameter is available only when a dedicated resource pool is used.

+

vpc_id

+

String

+

ID of the VPC to which the real-time service instance belongs. This parameter is available when the network configuration is customized.

+

subnet_network_id

+

String

+

ID of the subnet to which the real-time service instance belongs. This parameter is available when the network configuration is customized.

+

security_group_id

+

String

+

Security group to which the real-time service instance belongs. This parameter is available when the network configuration is customized.

+

status

+

String

+

Service status, which can be running, deploying, concerning, failed, stopped, or finished

+

progress

+

Integer

+

Deployment progress. This parameter is available when the status is deploying.

+

error_msg

+

String

+

Error message. When status is failed, an error message carrying the failure cause is returned.

+

config

+

config array corresponding to infer_type

+

Service configuration (If a service is shared, only model_id, model_name, and model_version are returned.)

+

access_address

+

String

+

Access address of an inference request. This parameter is available when infer_type is set to real-time.

+

bind_access_address

+

String

+

Request address of a custom domain name. This parameter is available after a domain name is bound.

+

invocation_times

+

Number

+

Total number of service calls

+

failed_times

+

Number

+

Number of failed service calls

+

is_shared

+

Boolean

+

Whether a service is subscribed

+

shared_count

+

Number

+

Number of subscribed services

+

schedule

+

schedule array

+

Service scheduling configuration. If this parameter is not set, no value will be returned. For details, see Table 6.

+

update_time

+

Number

+

Time when the configuration used by a service is updated, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

debug_url

+

String

+

Online debugging address of a real-time service. This parameter is available only when the model supports online debugging and there is only one instance.

+

due_time

+

number

+

Time when a real-time service automatically stops, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

operation_time

+

number

+

Operation time of a request

+

transition_at

+

number

+

Time when the service status changes

+

is_free

+

Boolean

+

Whether a free-of-charge flavor is used

+

additional_properties

+

Map<String, Object>

+

Additional service attribute

+
+
+ +
Table 3 config parameters of real-time

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

source_type

+

String

+

Model source. This parameter is returned when a model is created through ExeML. The value is auto.

+

status

+

String

+

Model status. Options:

+
  • ready: ready (All instances have been started.)
  • concerning: partially ready (Some instances are started and some are not.)
  • notReady: not ready (No instances have been started.)
+

weight

+

Integer

+

Traffic weight allocated to a model

+

specification

+

String

+

Resource flavor. Options: modelarts.vm.cpu.2u, modelarts.vm.gpu.p4, and modelarts.vm.ai1.a310

+

custom_spec

+

custom_spec structure

+

Custom specifications

+

envs

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

instance_count

+

Integer

+

Number of instances deployed in a model

+

scaling

+

Boolean

+

Whether auto scaling is enabled

+

cluster_id

+

String

+

ID of a dedicated resource pool used by a service instance. This parameter is returned only when a dedicated resource pool is configured.

+

support_debug

+

Boolean

+

Whether a model supports online debugging

+

additional_properties

+

Map<String, Object>

+

Additional model deployment attribute

+
+
+ +
Table 4 config parameters of batch

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

specification

+

String

+

Resource flavor. Options: modelarts.vm.cpu.2u, modelarts.vm.gpu.p4, and modelarts.vm.ai1.a310

+

custom_spec

+

custom_spec structure

+

Custom specifications

+

envs

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

instance_count

+

Integer

+

Number of instances deployed in a model

+

src_type

+

String

+

Data source type. This parameter is returned only when ManifestFile is specified.

+

src_path

+

String

+

OBS path of the input data of a batch job

+

dest_path

+

String

+

OBS path of the output data of a batch job

+

req_uri

+

String

+

Inference path of a batch job

+

mapping_type

+

String

+

Mapping type of the input data. The value can be file or csv.

+

mapping_rule

+

Map

+

Mapping between input parameters and CSV data. This parameter is mandatory only when mapping_type is set to csv.

+

start_time

+

Number

+

Task start time, in milliseconds calculated from 1970.1.1 0:0:0 UTC. This parameter is not returned until the task starts.

+

finished_time

+

Number

+

Task finished time, in milliseconds calculated from 1970.1.1 0:0:0 UTC. This parameter is not returned until the task ends.

+
+
+ +
Table 5 node parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Node ID

+

name

+

String

+

Node name

+

instance_status

+

String

+

Status of a model instance on a node. The value can be running, stopped, notReady, or failed.

+

predict_url

+

String

+

Inference URL of a node

+

node_label

+

String

+

Node label

+

description

+

String

+

Description

+

host_name

+

String

+

Host name of a node

+

os_name

+

String

+

OS name

+

os_type

+

String

+

OS type

+

os_version

+

String

+

OS version

+

arch

+

String

+

Node architecture

+

cpu

+

Integer

+

Number of CPUs

+

memory

+

Integer

+

Memory size, in MB

+

gpu_num

+

Integer

+

Number of GPUs

+

enable_gpu

+

String

+

Whether to enable the GPU

+

host_ips

+

String array

+

Host IP of a node

+

deployment_num

+

Integer

+

Number of application instances deployed on a node

+

state

+

String

+

Host status. Options:

+
  • RUNNING: running
  • FAIL: faulty
  • UNCONNECTED: disconnected
+

created_at

+

String

+

Creation time, in the format of YYYY-MM-DDThh:mm:ss (UTC)

+

updated_at

+

String

+

Update time, in the format of YYYY-MM-DDThh:mm:ss (UTC)

+
+
+
+ +
Table 6 schedule parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Scheduling type. Only the value stop is supported.

+

time_unit

+

String

+

Scheduling time unit. Options:

+
  • DAYS
  • HOURS
  • MINUTES
+

duration

+

Integer

+

Value that maps to the time unit. For example, if the task stops after two hours, set time_unit to HOURS and duration to 2.

+
+
+ +
Table 7 custom_spec parameters

Parameter

+

Type

+

Description

+

cpu

+

Float

+

Number of CPUs

+

memory

+

Integer

+

Memory capacity in MB

+

gpu_p4

+

Float

+

Number of GPUs

+

ascend_a310

+

Integer

+

Number of Ascend chips

+
+
+
+

Samples

The following shows how to query the details about a real-time service based on the service ID.

+
+ + +
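A minimal Python sketch of this query is shown below. The endpoint, project ID, service ID, and token are placeholder assumptions; the printed fields follow Table 2.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, service ID, and IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
SERVICE_ID = "your-service-id"
TOKEN = "your-iam-token"

resp = requests.get(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services/{SERVICE_ID}",
    headers={"X-Auth-Token": TOKEN},
)
detail = resp.json()
# Print a few of the fields described in Table 2.
print(detail.get("service_name"), detail.get("status"), detail.get("access_address"))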

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0086.html b/modelarts/api-ref/modelarts_03_0086.html new file mode 100644 index 00000000..b7a1d800 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0086.html @@ -0,0 +1,355 @@ + + +

Updating Service Configurations

+

Function

This API is used to update configurations of a model service. It can also be used to start or stop a service.

+
+

URI

PUT /v1/{project_id}/services/{service_id}

+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

service_id

+

Yes

+

String

+

Service ID

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Service description, which contains a maximum of 100 characters. If this parameter is not set, the service description is not updated.

+

status

+

No

+

String

+

Service status. The value can be running or stopped. If this parameter is not set, the service status is not changed. status and config cannot be modified at the same time. If both parameters are specified, only status is modified.

+

config

+

No

+

config array corresponding to infer_type

+

Service configuration. If this parameter is not set, the service configuration is not updated. The model service is modified and the update_time parameter is returned only for requests in which config is changed.

+

schedule

+

No

+

schedule array

+

Service scheduling configuration, which can be configured only for real-time services. By default, this parameter is not used and services run continuously. For details, see Table 5.

+

additional_properties

+

No

+

Map<String, Object>

+

Additional service attribute, which facilitates service management

+
+
+ +
Table 3 config parameters of real-time

Parameter

+

Mandatory

+

Type

+

Description

+

model_id

+

Yes

+

String

+

Model ID

+

weight

+

Yes

+

Integer

+

Traffic weight allocated to a model. This parameter is mandatory only when infer_type is set to real-time. The sum of the weights must be 100.

+

specification

+

Yes

+

String

+

Resource specifications. Select specifications based on service requirements.

+

custom_spec

+

No

+

custom_spec structure

+

Custom specifications. Set this parameter when you use a dedicated resource pool. For details, see Table 6.

+

instance_count

+

Yes

+

Integer

+

Number of instances deployed in a model. The value must be greater than 0.

+

envs

+

No

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank.

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

cluster_id

+

No

+

string

+

ID of a dedicated resource pool. By default, this parameter is left blank, indicating that no dedicated resource pool is used.

+
+
+ +
Table 4 config parameters of batch

Parameter

+

Mandatory

+

Type

+

Description

+

model_id

+

Yes

+

String

+

Model ID

+

specification

+

Yes

+

String

+

Resource flavor. Available options: modelarts.vm.cpu.2u and modelarts.vm.gpu.p4

+

instance_count

+

Yes

+

Integer

+

Number of instances deployed in a model.

+

envs

+

No

+

Map<String, String>

+

(Optional) Environment variable key-value pair required for running a model. By default, this parameter is left blank.

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

src_type

+

No

+

String

+

Data source type. This parameter can be set to ManifestFile. By default, this parameter is left blank, indicating that only files in the src_path directory are read. If this parameter is set to ManifestFile, src_path must be a specific Manifest file path. You can specify multiple data paths in the Manifest file.

+

src_path

+

Yes

+

String

+

OBS path of the input data of a batch job

+

dest_path

+

Yes

+

String

+

OBS path of the output data of a batch job

+

req_uri

+

Yes

+

String

+

Inference API called in a batch task, which is a REST API in the model image. Select an API URI from the model config.json file for inference. If a ModelArts built-in inference image is used, the value of this parameter is /.

+

mapping_type

+

Yes

+

String

+

Mapping type of the input data. The value can be file or csv.

+
  • If you select file, each inference request corresponds to a file in the input data path. When this mode is used, req_uri of this model can have only one input parameter and the type of this parameter is file.
  • If you select csv, each inference request corresponds to a row of data in the CSV file. When this mode is used, the files in the input data path can only be in CSV format and mapping_rule needs to be configured to map the index of each parameter in the inference request body to the CSV file.
+

mapping_rule

+

No

+

Map

+

Mapping between input parameters and CSV data. This parameter is mandatory only when mapping_type is set to csv.

+

Mapping rule: The mapping rule comes from the input parameter (input_params) in the model configuration file config.json. When type is set to string, number, integer, or boolean, you need to configure the index parameter. For details, see .

+

The index must be an integer starting from 0. If the value of index does not comply with this rule, the parameter is ignored in the request. After the mapping rule is configured, the corresponding CSV data must be separated by commas (,).

+
+
+
+ +
Table 5 schedule parameters

Parameter

+

Mandatory

+

Type

+

Description

+

type

+

Yes

+

String

+

Scheduling type. Only the value stop is supported.

+

time_unit

+

Yes

+

String

+

Scheduling time unit. Possible values are DAYS, HOURS, and MINUTES.

+

duration

+

Yes

+

Integer

+

Value that maps to the time unit. For example, if the task stops after two hours, set time_unit to HOURS and duration to 2.

+
+
+
+

Response Body

None

+
+

Samples

The following shows how to update a real-time service.

+
+ + +
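A minimal Python sketch of updating a real-time service is shown below. It follows the request parameters in Table 2 and Table 3 and sends the config update and the status change as separate requests, since status and config cannot be modified together. The endpoint, project ID, service ID, model ID, and token are placeholder assumptions.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, service ID,
# IAM token, and model ID.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
SERVICE_ID = "your-service-id"
TOKEN = "your-iam-token"
url = f"{ENDPOINT}/v1/{PROJECT_ID}/services/{SERVICE_ID}"
headers = {"X-Auth-Token": TOKEN}

# Update the running configuration of a real-time service.
config_update = {
    "description": "scaled to two instances",
    "config": [
        {
            "model_id": "your-model-id",
            "weight": 100,
            "specification": "modelarts.vm.cpu.2u",
            "instance_count": 2,
        }
    ],
}
print(requests.put(url, json=config_update, headers=headers).status_code)

# Stop the service in a separate request.
print(requests.put(url, json={"status": "stopped"}, headers=headers).status_code)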

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0087.html b/modelarts/api-ref/modelarts_03_0087.html new file mode 100644 index 00000000..dd32da77 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0087.html @@ -0,0 +1,221 @@ + + +

Querying Service Monitoring Information

+

Function

This API is used to query service monitoring information.

+
+

URI

GET /v1/{project_id}/services/{service_id}/monitor

+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

service_id

+

Yes

+

String

+

Service ID

+
+
+ +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

node_id

+

No

+

String

+

ID of the node to be queried. By default, all nodes are queried.

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 3 describes the response parameters. +
Table 3 Parameters

Parameter

+

Type

+

Description

+

service_id

+

String

+

Service ID

+

service_name

+

String

+

Service name

+

monitors

+

monitor array corresponding to infer_type of a service

+

Monitoring details

+
+
+ +
Table 4 monitor parameters of real-time

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

invocation_times

+

Long

+

Total number of model instance calls

+

failed_times

+

Long

+

Number of failed model instance calls

+

cpu_core_usage

+

Float

+

Number of used CPUs

+

cpu_core_total

+

Float

+

Total number of CPUs

+

cpu_memory_usage

+

Integer

+

Used memory, in MB

+

cpu_memory_total

+

Integer

+

Total memory, in MB

+

gpu_usage

+

Float

+

Number of used GPUs

+

gpu_total

+

Float

+

Total number of GPUs

+
+
+
+
+

Samples

The following shows how to query the monitoring information about a real-time service.

+
+ + +
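A minimal Python sketch of this query is shown below. The printed fields follow Table 4 (monitor parameters of real-time); the endpoint, project ID, service ID, and token are placeholder assumptions.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, service ID, and IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
SERVICE_ID = "your-service-id"
TOKEN = "your-iam-token"

resp = requests.get(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services/{SERVICE_ID}/monitor",
    headers={"X-Auth-Token": TOKEN},
)
for monitor in resp.json().get("monitors", []):
    # Fields follow Table 4 (monitor parameters of real-time).
    print(monitor.get("model_name"),
          monitor.get("invocation_times"),
          monitor.get("failed_times"),
          monitor.get("cpu_core_usage"))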

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0088.html b/modelarts/api-ref/modelarts_03_0088.html new file mode 100644 index 00000000..e298c73e --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0088.html @@ -0,0 +1,346 @@ + + +

Querying Service Update Logs

+

Function

This API is used to query the update logs of a real-time service.

+
+

URI

GET /v1/{project_id}/services/{service_id}/logs

+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

service_id

+

Yes

+

String

+

Service ID

+

update_time

+

No

+

Number

+

Update time for filtering. This parameter can be used to obtain the update logs of a real-time service. By default, the filtering by update time is disabled.

+
+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters.

+ +
Table 2 Parameters

Parameter

+

Type

+

Description

+

service_id

+

String

+

Service ID

+

service_name

+

String

+

Service name

+

logs

+

log array

+

Service update logs. For details, see Table 3.

+
+
+ +
Table 3 log parameters

Parameter

+

Type

+

Description

+

update_time

+

Number

+

Time when a service is updated, in milliseconds calculated from 1970.1.1 0:0:0 UTC

+

result

+

String

+

Update result. The value can be SUCCESS, FAIL, or RUNNING.

+

config

+

config array

+

Updated service configurations. For details, see Table 4.

+

success_num

+

Number

+

Number of nodes that are successfully operated. This parameter is returned when infer_type is set to edge.

+

failed_num

+

Number

+

Number of nodes that fail to be operated. This parameter is returned when infer_type is set to edge.

+

result_detail

+

result array

+

Operation result details. This parameter is returned when infer_type is set to edge. For details, see Table 5.

+

cluster_id

+

String

+

ID of a dedicated resource pool

+

extend_config

+

List

+

Personalized configuration

+
+
+ +
Table 4 config parameters

Parameter

+

Type

+

Description

+

model_id

+

String

+

Model ID

+

model_name

+

String

+

Model name

+

model_version

+

String

+

Model version

+

weight

+

Integer

+

Traffic weight allocated to a model. This parameter is returned when infer_type is set to real-time.

+

specification

+

String

+

Resource flavor.

+

custom_spec

+

Float

+

Custom specifications.

+

instance_count

+

Integer

+

Number of instances deployed in a model.

+

envs

+

Map<String, String>

+

Environment variable key-value pair required for running a model

+

To ensure data security, do not enter sensitive information, such as plaintext passwords, in environment variables.

+

cluster_id

+

String

+

ID of a dedicated resource pool

+
+
+ +
Table 5 result parameters

Parameter

+

Type

+

Description

+

node_id

+

String

+

Node ID

+

node_name

+

String

+

Node name

+

operation

+

String

+

Operation type. The value can be deploy or delete.

+

result

+

Boolean

+

Operation result. true indicates operation success, and false indicates operation failure.

+
+
+ +
Table 6 custom_spec parameters

Parameter

+

Type

+

Description

+

cpu

+

Float

+

Number of CPUs

+

memory

+

Integer

+

Memory capacity in MB

+

gpu_p4

+

Float

+

Number of GPUs

+

ascend_a310

+

Integer

+

Number of Ascend chips

+
+
+
+

Samples

The following shows how to query the update logs of the real-time service whose ID is xxxxxx and name is mnist.

+
+ + +
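A minimal Python sketch of this query is shown below. The printed fields follow Table 3 (log parameters); the endpoint, project ID, service ID, and token are placeholder assumptions.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, service ID, and IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
SERVICE_ID = "your-service-id"
TOKEN = "your-iam-token"

resp = requests.get(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services/{SERVICE_ID}/logs",
    headers={"X-Auth-Token": TOKEN},
)
for log in resp.json().get("logs", []):
    # Fields follow Table 3 (log parameters).
    print(log.get("update_time"), log.get("result"))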

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0089.html b/modelarts/api-ref/modelarts_03_0089.html new file mode 100644 index 00000000..c4bfd372 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0089.html @@ -0,0 +1,59 @@ + + +

Deleting a Service

+

Function

This API is used to delete a model service. You can delete your own services only.

+
+

URI

DELETE /v1/{project_id}/services/{service_id}
+
Table 1 describes the required parameters. +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

service_id

+

Yes

+

String

+

Service ID. If you want to delete multiple services in batches, use commas (,) to separate multiple service_id values.

+
+
+
+
+

Request Body

None

+
+

Response Body

None

+
+

Samples

The following shows how to delete the model service whose ID is xxxxxx.

+
+ + +
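A minimal Python sketch of the delete request is shown below. The endpoint, project ID, service ID, and token are placeholder assumptions.

import requests

# Placeholder assumptions: replace with your real endpoint, project ID, service ID(s), and IAM token.
ENDPOINT = "https://modelarts.example.com"
PROJECT_ID = "your-project-id"
SERVICE_ID = "your-service-id"  # use comma-separated IDs to delete several services in one request
TOKEN = "your-iam-token"

resp = requests.delete(
    f"{ENDPOINT}/v1/{PROJECT_ID}/services/{SERVICE_ID}",
    headers={"X-Auth-Token": TOKEN},
)
print(resp.status_code)  # the response body is empty on success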

Status Code

For details about the status code, see Table 1.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0093.html b/modelarts/api-ref/modelarts_03_0093.html new file mode 100644 index 00000000..2f8223a6 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0093.html @@ -0,0 +1,19 @@ + + +

Common Parameters

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0094.html b/modelarts/api-ref/modelarts_03_0094.html new file mode 100644 index 00000000..801ee75f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0094.html @@ -0,0 +1,329 @@ + + +

Status Code

+

Table 1 describes the status codes.

+ +
Table 1 Status codes

Status Code

+

Code

+

Status Code Description

+

100

+

Continue

+

The client continues sending the request.

+

This provisional response informs the client that part of the request has been received and has not yet been rejected by the server.

+

101

+

Switching Protocols

+

Switching protocols. The target protocol must be more advanced than the source protocol.

+

For example, the current HTTP protocol is switched to a later version of HTTP.

+

200

+

OK

+

The request has been fulfilled.

+

201

+

Created

+

The request for creating a resource has been fulfilled.

+

202

+

Accepted

+

The request has been accepted, but the processing has not been completed.

+

203

+

Non-Authoritative Information

+

Non-authoritative information. The request is successful.

+

204

+

NoContent

+

The request has been fulfilled, but the HTTP response does not contain a response body.

+

The status code is returned in response to an HTTP OPTIONS request.

+

205

+

Reset Content

+

The server has fulfilled the request, but the requester is required to reset the content.

+

206

+

Partial Content

+

The server has successfully processed a part of the GET request.

+

300

+

Multiple Choices

+

There are multiple options for the location of the requested resource. The response contains a list of resource characteristics and addresses from which the user or user agent (such as a browser) can choose the most appropriate one.

+

301

+

Moved Permanently

+

The requested resource has been assigned a new permanent URI, and the new URI is contained in the response.

+

302

+

Found

+

The requested resource resides temporarily under a different URI.

+

303

+

See Other

+

The response to the request can be found under a different URI,

+

and should be retrieved using a GET or POST method.

+

304

+

Not Modified

+

The requested resource has not been modified. When the server returns this status code, it does not return any resources.

+

305

+

Use Proxy

+

The requested resource must be accessed through a proxy.

+

306

+

Unused

+

The HTTP status code is no longer used.

+

400

+

BadRequest

+

The request is invalid.

+

Do not retry the request before modification.

+

401

+

Unauthorized

+

The status code is returned after the client provides the authentication information, indicating that the authentication information is incorrect or invalid.

+

402

+

Payment Required

+

This status code is reserved for future use.

+

403

+

Forbidden

+

The request has been rejected.

+

The server has received and understood the request; yet it refused to respond, because the request is set to deny access. Do not retry the request before modification.

+

404

+

NotFound

+

The requested resource cannot be found.

+

Do not retry the request before modification.

+

405

+

MethodNotAllowed

+

The request contains one or more methods not supported for the resource.

+

Do not retry the request before modification.

+

406

+

Not Acceptable

+

The server cannot fulfill the request according to the content characteristics of the request.

+

407

+

Proxy Authentication Required

+

This status code is similar to 401, but the client must first authenticate itself with the proxy.

+

408

+

Request Time-out

+

The request timed out.

+

The client may repeat the request without modifications at any time later.

+

409

+

Conflict

+

The request could not be processed due to a conflict with the current state of the resource.

+

This status code indicates that the resource that the client attempts to create already exists, or the requested update failed due to a conflict.

+

410

+

Gone

+

The requested resource is no longer available.

+

The status code indicates that the requested resource has been deleted permanently.

+

411

+

Length Required

+

The server refuses to process the request without a defined Content-Length.

+

412

+

Precondition Failed

+

The server does not meet one of the preconditions that the requester puts on the request.

+

413

+

Request Entity Too Large

+

The request is larger than the server is able to process. The server may close the connection to prevent the client from continuing the request. If the server cannot process the request temporarily, the response will contain a Retry-After header field.

+

414

+

Request-URI Too Large

+

The URI provided was too long for the server to process.

+

415

+

Unsupported Media Type

+

The server is unable to process the media format in the request.

+

416

+

Requested range not satisfiable

+

The requested range is invalid.

+

417

+

Expectation Failed

+

The server fails to meet the requirements of the Expect request-header field.

+

422

+

UnprocessableEntity

+

The request is well-formed but is unable to be processed due to semantic errors.

+

429

+

TooManyRequests

+

The client has sent an excessive number of requests to the server within a given time (exceeding the limit on the access frequency of the client), or the server has received an excessive number of requests within a given time (beyond its processing capability). In this case, the client should resend the request after the time specified in the Retry-After header of the response has elapsed.

+

500

+

InternalServerError

+

The server is able to receive the request but unable to understand it.

+

501

+

Not Implemented

+

The server does not support the requested function.

+

502

+

Bad Gateway

+

The server acting as a gateway or proxy has received an invalid request from a remote server.

+

503

+

ServiceUnavailable

+

The requested service is unavailable.

+

Do not retry the request before modification.

+

504

+

ServerTimeout

+

The request cannot be fulfilled within a given time. This status code is returned to the client only when the Timeout parameter is specified in the request.

+

505

+

HTTP Version not supported

+

The server does not support the HTTP protocol version used in the request.

+
+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0095.html b/modelarts/api-ref/modelarts_03_0095.html new file mode 100644 index 00000000..df05515b --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0095.html @@ -0,0 +1,6230 @@ + + +

Error Codes

+
+

Status Code

+

Error Codes

+

Error Message

+

Description

+

Solution

+

400

+

ModelArts.0104

+

Parameter error.

+

Parameter error.

+

Check the parameter settings.

+

400

+

ModelArts.0113

+

Create Notebook failed. Quota: {} is exhausted.

+

Failed to create the notebook instance due to the exhausted quota.

+

Contact the administrator for a higher quota.

+

400

+

ModelArts.6301

+

The instance already exists.

+

The instance already exists.

+

Enter another instance name.

+

400

+

ModelArts.6302

+

The instance count already reaches the maximum value.

+

The maximum number of instances has been reached.

+

Delete unnecessary instances.

+

400

+

ModelArts.6303

+

The field does not support sorting.

+

This field does not support sorting.

+

Delete the field from the sorting parameters.

+

400

+

ModelArts.6304

+

Please stop the instance before deleting.

+

Failed to delete the instance in the running state.

+

Refresh the page. Stop the instance and then delete it.

+

400

+

ModelArts.6305

+

The instance is already running.

+

The instance is in the running state.

+

Do not start it again.

+

400

+

ModelArts.6306

+

The instance is starting.

+

Other operations cannot be performed because the instance is being started.

+

Perform other operations later.

+

400

+

ModelArts.6307

+

The instance has already been stopped.

+

The instance has been stopped.

+

Refresh the page and view the instance state.

+

400

+

ModelArts.6308

+

The instance is stopping.

+

The instance is being stopped.

+

Refresh the page and view the instance state.

+

400

+

ModelArts.6309

+

The instance does not exist.

+

The instance does not exist.

+

Ensure that the instance has been deleted.

+

400

+

ModelArts.6316

+

The param 'storage' is needed for creating notebook instance.

+

Incorrect parameters during instance creation.

+

Add the storage parameter.

+

400

+

ModelArts.6317

+

The 'path' parameter is required when the storage type is OBS.

+

Incorrect parameters during instance creation.

+

Add the path parameter.

+

400

+

ModelArts.6318

+

The param 'path' needs to end with /.

+

Incorrect parameters during instance creation.

+

Ensure that the value of the path parameter ends with a slash (/).

+

400

+

ModelArts.6319

+

There is no ak/sk in the global settings. Please add it by access ModelArts console.

+

The AK/SK have not been configured in the global settings of ModelArts.

+

Configure the AK/SK in the global settings of ModelArts.

+

400

+

ModelArts.6320

+

Access obs error. Reason is {}.

+

OBS access error.

+

Ensure that the OBS path is correct.

+

400

+

ModelArts.6323

+

The flavor is not supported, please check it.

+

The flavor is not supported.

+

Ensure that the flavor is correctly configured.

+

400

+

ModelArts.6324

+

This location type is not supported. Please check it.

+

The storage type is not supported.

+

Ensure that the storage type is correctly configured.

+

400

+

ModelArts.6326

+

The instance is frozen. Check whether your account is in arrears in Billing Center.

+

The instance has been frozen.

+

Check the account balance and top up the account.

+

400

+

ModelArts.6327

+

The instance is not in running. Please refresh the page and start it.

+

The instance is not in the running state.

+

Refresh the page and try again.

+

400

+

ModelArts.6328

+

Sorry, you currently do not have permission for this flavor. Please apply it firstly.

+

You do not have permission to use the flavor.

+

Contact technical support.

+

400

+

ModelArts.6329

+

Sorry, this flavor specification is sold out. Please try others.

+

The flavor is sold out.

+

Use another flavor or try again later.

+

400

+

ModelArts.6330

+

The flavor type of profile does not match the type of flavor. Please check it.

+

The types are not matched.

+

Ensure that the flavor type matches the profile type.

+

400

+

ModelArts.6331

+

The evs volume size ranges from {} GB to {} GB.

+

The EVS disk size exceeds the upper limit.

+

Check the EVS disk size.

+

400

+

ModelArts.6332

+

Incorrect parameter type. The '{}' parameter must be of the '{}' type.

+

Parameter type error.

+

Ensure that the parameter type meets the requirements.

+

400

+

ModelArts.6333

+

The notebook is being restored. Refresh the page and wait for several minutes. The fault may be caused by instance overload.

+

Notebook instance fault.

+

The instance is recovering. Try again later.

+

400

+

ModelArts.6334

+

Invalid value for name or description. The character `{}` is not allowed.

+

The description parameter is incorrect.

+

Enter a valid description value.

+

400

+

ModelArts.6335

+

'{}' is a required property.

+

Parameter missing.

+

Ensure that the corresponding parameter is available.

+

400

+

ModelArts.6336

+

'{}' property value is incorrect.

+

Incorrect parameter value.

+

Ensure that the corresponding parameter is correct.

+

400

+

ModelArts.6341

+

The repository does not exist or has been deleted.

+

The Git repository does not exist or has been deleted.

+

Check the corresponding parameter.

+

400

+

ModelArts.6343

+

The repository name already exists.

+

The Git repository already exists.

+

Check the corresponding parameter.

+

400

+

ModelArts.6344

+

Delete failed. Please delete the associated development environment instance first.

+

Failed to delete the repository.

+

Delete the corresponding notebook instance first.

+

400

+

ModelArts.6345

+

Currently the OBS type instance does not support git capability.

+

The Git repository cannot be used for OBS instances.

+

Use notebook instances of the EVS type.

+

400

+

ModelArts.6346

+

The timestamp of auto stop must be later than the current timestamp.

+

This parameter cannot be modified.

+

Check the corresponding parameter and ensure that the automatic end time is later than the current time.

+

400

+

ModelArts.6347

+

When the instance is in the '{}' status, the '{}' parameter cannot be modified.

+

Failed to modify the parameter.

+

Check the corresponding parameter.

+

400

+

ModelArts.6348

+

The 'duration' parameter is mandatory for enabling auto stop.

+

The description parameter is missing.

+

Configure the duration parameter.

+

400

+

ModelArts.6349

+

The 'duration' parameter is incorrect. The value ranges from {} to {}.

+

The value of duration parameter exceeds the threshold.

+

Configure the duration parameter.

+

400

+

ModelArts.6350

+

Failed to access OBS because of incorrect AK/SK or insufficient permissions.

+

Access to OBS is rejected.

+

Ensure that the AK/SK is correct and you have the permission to access OBS.

+

400

+

ModelArts.6353

+

Failed to verify the AK/SK. Please check and try again.

+

Incorrect AK/SK.

+

Ensure that the AK/SK in the global settings of ModelArts is correct.

+

400

+

ModelArts.6354

+

The AK/SK do not belong to the user. Please check and enter the correct ones.

+

Incorrect AK/SK.

+

Ensure that the AK/SK in the global settings of ModelArts is correct.

+

400

+

ModelArts.6355

+

The resource is initializing. Please wait one minute and try again.

+

Resources in the dedicated resource pool are being initialized.

+

Try again later.

+

400

+

ModelArts.6357

+

The operation is not allowed because another operation is being performed on the instance or the instance is in the target state.

+

The operation is not allowed.

+

Try again later.

+

400

+

ModelArts.6358

+

The path parameter is incorrect. It cannot be the root directory of an OBS bucket, but must be a specific directory in the OBS bucket.

+

The OBS path parameter is incorrect.

+

Ensure that the path parameter is correct.

+

400

+

ModelArts.6361

+

ModelArts internal service or configuration error. Submit a service ticket to get professional technical support.

+

Internal error.

+

System error. Contact technical support.

+

400

+

ModelArts.6371

+

Currently, notebooks support only OBS buckets whose Storage Class is Standard. Change the OBS bucket and try again.

+

Notebook instances support only standard OBS buckets.

+

Use standard OBS buckets.

+

400

+

ModelArts.6374

+

The credential can be added only when type is set to AK/SK or agency.

+

The authentication type can only be AK/SK or agency.

+

Ensure that the authentication type is AK/SK or agency.

+

400

+

ModelArts.6376

+

Please make sure agency exists.

+

The IAM agency does not exist.

+

Ensure that the agency exists on IAM.

+

400

+

ModelArts.6377

+

The user id cannot be left blank.

+

The user ID cannot be left blank.

+

Ensure that the user ID parameter is available.

+

400

+

ModelArts.6378

+

The agency name cannot be left blank.

+

The agency name cannot be left blank.

+

Ensure that the agency name parameter is available.

+

400

+

ModelArts.6379

+

No IAM agency created in Settings. Create an IAM agency on the ModelArts management console.

+

No agency is created in the global settings.

+

Create an agency in the global settings.

+

400

+

ModelArts.6528

+

No EVS available. Please try again later.

+

The EVS disk has been sold out.

+

Try again later or contact technical support.

+

401

+

ModelArts.6201

+

The user's account has been suspended.

+

The user account is frozen.

+

Check the account balance and top up the account.

+

401

+

ModelArts.6203

+

The user's account has been restricted.

+

The user account is restricted.

+

Check the account balance and top up the account.

+

403

+

ModelArts.0112

+

Policy doesn't allow {} {} to be performed.

+

The permission on the workspace is restricted.

+

Ensure that you have the permission on the workspace.

+

408

+

ModelArts.6100

+

The request timed out. Please try again.

+

Timeout.

+

System error. Contact technical support.

+

429

+

ModelArts.6101

+

The system is busy now. Please try again later.

+

The system is busy.

+

The system is busy. Please try again. If the retry still fails, contact technical support.

+

500

+

ModelArts.6102

+

The server has encountered an error and fails to process the request. Please try again later or submit a service ticket.

+

Internal error.

+

System error. Contact technical support.

+

500

+

ModelArts.6200

+

No resources available. Please try again later or submit a service ticket.

+

Internal error.

+

System error. Contact technical support.

+

400

+

ModelArts.6600

+

Check whether the parameter is valid.

+

Failed to verify the parameters.

+

Ensure that the parameter is valid.

+

400

+

ModelArts.6651

+

Unsupport entity.

+

Unsupported project objects.

+

Ensure that the project object is supported.

+

400

+

ModelArts.6652

+

Please delete resources from the project first.

+

Failed to delete the project resources.

+

Delete the datasets and notebook instances under the project and ensure that all training jobs have been completed.

+

400

+

ModelArts.6690

+

The image size cannot exceed 8 MB.

+

The image size exceeds 8 MB.

+

Ensure that the image size does not exceed 8 MB.

+

401

+

ModelArts.6608

+

Please refresh user info.

+

Failed to obtain the user information.

+

Refresh the user information.

+

401

+

ModelArts.6620

+

Please refresh the user token.

+

Invalid user token.

+

Refresh the user token.

+

403

+

ModelArts.6653

+

Contact the project owner.

+

You are not authorized to operate the project.

+

Contact the project owner to obtain the authorization.

+

404

+

ModelArts.6623

+

Ensure that the dataset already exists.

+

Failed to find the corresponding dataset.

+

Ensure that the dataset already exists.

+

404

+

ModelArts.6624

+

Ensure that the storage path already exists.

+

Failed to find the corresponding storage path.

+

Ensure that the storage path already exists.

+

404

+

ModelArts.6650

+

Ensure that the project already exists.

+

Failed to find the corresponding project.

+

Ensure that the project already exists.

+

404

+

ModelArts.6655

+

Can't find ai project template.

+

Failed to find the corresponding project template.

+

Ensure that the corresponding project template already exists.

+

417

+

ModelArts.6654

+

Project with the same name already exists.

+

A project with the same name already exists.

+

Use another project name.

+

500

+

ModelArts.6691

+

System error. Check the service status.

+

The algorithm service is abnormal.

+

System error. Check the service state.

+

500

+

ModelArts.6692

+

System error. Check the service status.

+

The dataset service is abnormal.

+

System error. Check the service state.

+

500

+

ModelArts.6699

+

Internal error.

+

Network error.

+

Check network connectivity.

+

400

+

ModelArts.4205

+

A subdirectory must be specified as the dataset input or output path.

+

A subdirectory must be specified as the dataset input or output path.

+

A subdirectory must be specified as the dataset input or output path.

+

400

+

ModelArts.4311

+

OBS bucket does not exist

+

The OBS bucket does not exist.

+

Ensure that the bucket name is correct and that the bucket name exists in OBS.

+

400

+

ModelArts.4312

+

OBS path does not exist

+

Incorrect or invalid bucket name.

+

Ensure that the bucket name is correct and that the bucket name exists in OBS.

+

400

+

ModelArts.4313

+

OBS path is invalid

+

Invalid characters in the OBS path.

+

Ensure that the OBS path consists of valid characters, including digits, letters, hyphens (-), underscores (_), slashes (/), and Chinese characters.

+

400

+

ModelArts.4314

+

Obs error

+

OBS access error.

+

Ensure that you have the permission to access OBS and that the OBS path is valid.

+

400

+

ModelArts.4338

+

The resource not exists

+

The dataset ID or labeling task ID does not exist.

+

Ensure that the input dataset ID or labeling task ID is correct.

+

400

+

ModelArts.4340

+

Import path does not contain valid file

+

No valid file exists in the import path.

+

Ensure that the file in the OBS path is valid.

+

400

+

ModelArts.4342

+

Dataset publish with splitting annotated samples error

+

Incorrect splitting for labeled samples.

+

Ensure that the labeled samples and labels meet splitting criteria.

+

400

+

ModelArts.4343

+

Dataset is publishing, the operation is forbidden

+

A dataset version cannot be switched, imported, synchronized, or published while a publish task is in progress.

+

Perform the operations after the ongoing publish task is complete.

+

400

+

ModelArts.4344

+

Dataset is being deleted, annotation is forbidden

+

No labeling task is allowed because the dataset is being deleted.

+

Ensure that the dataset ID is correct.

+

400

+

ModelArts.4345

+

File not found

+

The HDFS file does not exist.

+

Ensure that the OBS path is correct and that the file is available in the OBS path.

+

400

+

ModelArts.4347

+

List files failed

+

Failed to obtain HDFS files.

+

Ensure that the OBS path is correct and that the files are available in the OBS path.

+

400

+

ModelArts.4349

+

Dataset is switching version, the operation is forbidden

+

A dataset version cannot be switched, imported, synchronized, or published while a version switching task is in progress.

+

Perform the operations after the ongoing version switching task is complete.

+

400

+

ModelArts.4350

+

The work_path is too long, please select shorter folder

+

The value of work_path exceeded the limit.

+

Change the value of work_path to a valid one.

+

400

+

ModelArts.4351

+

Dataset already exists

+

A dataset whose name is the value of dataset_name already exists.

+

Change the value of dataset_name.

+

400

+

ModelArts.4352

+

Dataset does not exist

+

The dataset ID does not exist.

+

Ensure that the imported dataset ID is correct.

+

400

+

ModelArts.4353

+

Dataset version does not exist

+

The dataset version ID does not exist.

+

Check dataset version parameters.

+

400

+

ModelArts.4355

+

Sync data source task exist

+

A data synchronization task is being executed for the dataset.

+

Perform the operations after the ongoing data synchronization task is complete.

+

400

+

ModelArts.4356

+

Dataset already has running import task

+

A data import task is being executed for the dataset.

+

Perform the operations after the ongoing data import task is complete.

+

400

+

ModelArts.4357

+

Parse AI annotation result file name error

+

Failed to parse the labeling file name.

+

Ensure that the file name in the OBS auto labeling result path is correct.

+

400

+

ModelArts.4358

+

Invalid export path

+

Invalid OBS export path.

+

Ensure that the export path is correct.

+

400

+

ModelArts.4359

+

Export task does not exist

+

The export task ID does not exist.

+

Ensure that the export task ID is correct.

+

400

+

ModelArts.4361

+

Import AI annotation error

+

Failed to synchronize the auto labeling task result.

+

Ensure that the auto labeling task result is correct.

+

400

+

ModelArts.4362

+

Import data error

+

Failed to import data.

+

Ensure that the authentication information and the request parameters for creating an import task are correct.

+

400

+

ModelArts.4364

+

Dataset workPath subdir already exists

+

The work_path subdirectory already exists in the dataset directory.

+

Ensure that the subdirectory in the dataset directory is correct.

+

400

+

ModelArts.4365

+

Dataset labels not set

+

The label set of the dataset is empty.

+

Create labels for the dataset.

+

400

+

ModelArts.4368

+

Parse pc bin file error

+

Failed to parse the point cloud binary file.

+

Ensure that the point cloud binary file is not damaged.

+

400

+

ModelArts.4369

+

Parse pc prelabel json file error

+

Failed to parse the point cloud pre-labeling file.

+

Ensure that the point cloud pre-labeling file is correct.

+

400

+

ModelArts.4370

+

One dataset version can be released in a minute, please try again later

+

Frequent dataset version creation.

+

Do not frequently create dataset versions.

+

400

+

ModelArts.4371

+

Dataset version already exists

+

The value of version_name is the same as an existing version.

+

Change the value of version_name.

+

400

+

ModelArts.4372

+

Valid image not found

+

No point cloud data image found.

+

Ensure that the point cloud data image is correct.

+

400

+

ModelArts.4374

+

Invalid path

+

Invalid OBS path.

+

Ensure that the OBS path is correct and that the file is available in the OBS path.

+

400

+

ModelArts.4375

+

Parse pc obs image error

+

Failed to parse the point cloud data image in OBS.

+

Ensure that the point cloud data image is correct.

+

400

+

ModelArts.4376

+

Unsupported pc pcd format error

+

The PCD attribute is not supported.

+

Check the point cloud data in PCD format.

+

400

+

ModelArts.4377

+

Pc pcd format error

+

Failed to parse the point cloud data in PCD format.

+

Ensure that the PCD file is valid.

+

400

+

ModelArts.4378

+

Parse pc pcd file error

+

Failed to parse the point cloud data in PCD format.

+

Ensure that the PCD file is correct.

+

400

+

ModelArts.4379

+

Unsupported pc file format error

+

The point cloud data format is not supported.

+

Ensure that the imported point cloud data is in BIN or PCD format.

+

400

+

ModelArts.4380

+

Parse kitti calibration file error

+

Failed to parse the Kitti data.

+

Ensure that the data is correct.

+

400

+

ModelArts.4381

+

Dataset is publishing, annotation is forbidden

+

Sample labeling is not allowed because a publish task is being executed in the dataset.

+

Wait until the publish task is complete.

+

400

+

ModelArts.4382

+

Generate 2d image error

+

Failed to generate a 2D image.

+

Ensure that the point cloud data is correct.

+

400

+

ModelArts.4384

+

Invalid export parameter

+

Invalid parameter.

+

Ensure that all mandatory parameters are included and valid.

+

400

+

ModelArts.4391

+

Task name is invalid

+

An auto deploy task whose name is the task_name value already exists in the dataset.

+

Ensure that the task name is correct.

+

400

+

ModelArts.4392

+

Task failed

+

Failed to create or run the dataset.

+

Ensure that the parameters and task resources are correct.

+

400

+

ModelArts.4393

+

Task stopped

+

The dataset task has been stopped.

+

Check the status of the dataset task with the specified ID.

+

400

+

ModelArts.4396

+

Parameter is invalid

+

Invalid parameter.

+

Ensure that the import task parameters are correct.

+

400

+

ModelArts.4399

+

Type match error

+

The dataset type does not match the algorithm model.

+

Ensure that the algorithm model is correct.

+

400

+

ModelArts.4400

+

Can not get table schema

+

Failed to obtain the schema information from the OBS directory.

+

Ensure that the CSV file in the OBS directory is correct.

+

400

+

ModelArts.4404

+

Can not get table schema from DLI

+

Failed to obtain the schema information from DLI.

+

Ensure that the request parameters are correct.

+

400

+

ModelArts.4405

+

Dataset must contains labels to start automation labeling

+

Labeled samples must be available for enabling auto labeling.

+

Add new labeled samples to the dataset.

+

400

+

ModelArts.4406

+

Dataset must contains labels with at least five images to start automation labeling

+

The number of samples to be labeled for each label cannot be less than 5.

+

Ensure that each label has at least five samples.

+

400

+

ModelArts.4407

+

Dataset must contains unlabeled files

+

The dataset must contain samples that have not been labeled.

+

Ensure that there are unlabeled samples in the dataset.

+

400

+

ModelArts.4408

+

Dataset contains labels shape not bndbox

+

The label shape is not a bounding box.

+

Ensure that the shape attribute is correct.

+

400

+

ModelArts.4409

+

Running tasks has exceed the max threshold

+

The number of tasks that are being executed in the dataset has reached the maximum allowed limit.

+

Try again later.

+

400

+

ModelArts.4410

+

Label not found

+

The label does not exist.

+

Ensure that the label name is correct.

+

400

+

ModelArts.4411

+

Label already exists

+

The new label list contains existing label names.

+

Ensure that the label names are correct.

+

400

+

ModelArts.4412

+

Label shortcut already exists

+

The label shortcut already exists.

+

Change the label shortcut.

+

400

+

ModelArts.4413

+

Label is incompatible with annotation rules

+

The label attribute does not comply with specifications.

+

Ensure that the label attribute complies with specifications.

+

400

+

ModelArts.4414

+

Triplet label's from_entity or to_entity does not exist

+

The entity label does not exist in the triplet label.

+

Ensure that the entity label in the triplet label is available.

+

400

+

ModelArts.4415

+

Entity label can not be deleted because it is used by triplet label

+

Failed to delete the entity label because it is being used in the triplet label.

+

Wait until the label is not used in the triplet label and try again.

+

400

+

ModelArts.4416

+

Sync tags error

+

Failed to synchronize labels in the team labeling task.

+

Ensure that the synchronization task parameters are correct.

+

400

+

ModelArts.4417

+

Update sample labels failed when upload sample and labels

+

Failed to update the sample labels.

+

Ensure that the labels are correct.

+

400

+

ModelArts.4418

+

Label property mask_gray_value already exists

+

Duplicate tag mask_gray_value.

+

Ensure that the tag mask_gray_value is correct.

+

400

+

ModelArts.4420

+

Sample not found

+

The sample with the specified ID does not exist.

+

Ensure that the sample ID is correct.

+

400

+

ModelArts.4421

+

Upload sample failed

+

Failed to upload the sample to the dataset.

+

Ensure that the uploaded sample type and data are correct.

+

400

+

ModelArts.4422

+

Sample already exists

+

The sample that is being imported already exists.

+

Ensure that the imported sample is correct.

+

400

+

ModelArts.4423

+

Get sample size failed, please input size manually

+

Incorrect size of the image obtained from the object detection dataset.

+

Manually set the image size.

+

400

+

ModelArts.4425

+

Label property mask_gray_value is incompatible with defined value

+

The mask_gray_value value cannot be changed.

+

Ensure that the mask_gray_value value is not changed.

+

400

+

ModelArts.4426

+

Dataset must contains tags to start auto deploy

+

The label set of the dataset cannot be empty.

+

Create labels for the dataset.

+

400

+

ModelArts.4427

+

Dataset must contains tags with at least five images to start auto deploy

+

The number of samples to be labeled for each label cannot be less than 5.

+

Ensure that each label has at least five samples.

+

400

+

ModelArts.4502

+

The IAM agency name already exists, please delete the agency in IAM first and retry

+

The default IAM agency already exists.

+

Delete the IAM agency and create a new one.

+

400

+

ModelArts.4601

+

The workforce does not exist

+

The team ID does not exist.

+

Ensure that the team ID is correct.

+

400

+

ModelArts.4602

+

The workforce already exists

+

A team whose name is the workforce_name value already exists.

+

Ensure that the workforce_name value is correct.

+

400

+

ModelArts.4603

+

Update workforce state failed

+

Failed to delete the team.

+

Check whether the team has been deleted.

+

400

+

ModelArts.4604

+

The worker does not exist

+

The team member specified by work_id does not exist.

+

Ensure that the workforce_id and work_id values are correct.

+

400

+

ModelArts.4605

+

The worker already exists

+

The team member whose value is the email value already exists.

+

Ensure that the email value is correct.

+

400

+

ModelArts.4609

+

Change password failed

+

Failed to change the account password.

+

Ensure that the new password complies with specifications.

+

400

+

ModelArts.4612

+

Task not found

+

The task ID does not exist.

+

Ensure that the task ID is correct.

+

400

+

ModelArts.4615

+

Workforce task is unfinished

+

The version cannot be switched because the team labeling task has not been completed.

+

Wait until the team labeling task is complete and try again.

+

400

+

ModelArts.4617

+

The number of manager should be one

+

The team manager already exists.

+

Ensure that the roles of the team members are correct.

+

400

+

ModelArts.4618

+

Can not delete yourself

+

You cannot delete yourself from the team.

+

Ensure that the request parameters are correct.

+

400

+

ModelArts.4619

+

Workforce task does not exist

+

No team labeling task whose ID is the workforce_task_id value exists.

+

Ensure that the workforce_task_id value is correct.

+

400

+

ModelArts.4620

+

The workforce task already exists

+

A team labeling task whose name is the task_name value already exists.

+

Ensure that the task_name value is correct.

+

400

+

ModelArts.4622

+

Invalid n_clusters, should less than the total number of samples

+

The number of groups has reached the maximum allowed limit.

+

Check whether the number of groups is less than the total number of samples.

+

400

+

ModelArts.4623

+

Workforce task is checking

+

Only one review task is allowed at a time.

+

Wait until all existing review tasks are complete and try again.

+

400

+

ModelArts.4627

+

Request notify too frequently

+

Frequent notification request submission.

+

Try again later.

+

400

+

ModelArts.4628

+

Can not delete worker who has task

+

The team member cannot be deleted because the labeling task has not been completed.

+

Notify the member to complete the task.

+

400

+

ModelArts.4650

+

Interactive operations not found

+

Incorrect interactive_operations value.

+

Ensure that the interactive_operations value is correct.

+

400

+

ModelArts.4651

+

Get obs sample error

+

Failed to read the sample from OBS.

+

Ensure that the sample in OBS is correct.

+

400

+

ModelArts.4700

+

Task does not exist

+

The task ID does not exist.

+

Ensure that the task ID is correct.

+

400

+

ModelArts.4701

+

Can not start data analysis task, dataset version annotation type must be 'Image Classification' or 'Object Detection'

+

The dataset type does not support feature analysis.

+

Ensure that the dataset type is correct.

+

400

+

ModelArts.4702

+

Can not start data analysis task, dataset version must be 'Default' format

+

The dataset format does not support feature analysis.

+

Use the default dataset format.

+

400

+

ModelArts.4703

+

Can not start data analysis task, dataset version must contains annotated samples

+

Feature analysis cannot be performed because there is no labeled sample in the dataset.

+

Ensure that there are labeled samples in the dataset.

+

400

+

ModelArts.4704

+

Currently unable to start data analysis task, Please try again later

+

Feature analysis cannot be performed because a dataset version is being published.

+

Try again later.

+

400

+

ModelArts.4706

+

Can not start data analysis task, dataset version status must be normal

+

Feature analysis cannot be performed because the dataset version is not in normal state.

+

Ensure that the current dataset version is in normal state.

+

400

+

ModelArts.4709

+

Data preprocessing task already exists

+

A training task whose name is the name value already exists.

+

Ensure that the name value is correct in the request for creating the task.

+

400

+

ModelArts.4710

+

Dataset export file error

+

Failed to publish or export the dataset.

+

Ensure that the task export or publish parameters are correct.

+

400

+

ModelArts.4711

+

Publishing requires splitting but annotated samples do not satisfied for splitting rules

+

Failed to publish because the dataset label samples do not meet splitting requirements.

+

Ensure that the number of dataset labels and the number of labeled samples meet the splitting requirements.

+

400

+

ModelArts.4712

+

Dataset publish version failed

+

Failed to publish the dataset version.

+

Ensure that the task export or publish parameters are correct.

+

400

+

ModelArts.4800

+

The label task already exists

+

A labeling task whose name is the task_name value already exists.

+

Ensure that the task_name value is correct.

+

400

+

ModelArts.4801

+

The label task not exists

+

The labeling task specified by the ID does not exist.

+

Ensure that the imported task ID is correct.

+

400

+

ModelArts.4820

+

Unsupported operation, label task type is %s

+

The labeling task type does not support the operation.

+

Ensure that the labeling task type is correct.

+

400

+

ModelArts.4822

+

The number of labelers must exceed 1

+

At least two annotators must be available in the team.

+

Check the number of annotators in the team.

+

400

+

ModelArts.4823

+

Import to dataset version error

+

No data can be imported to a table dataset version.

+

Ensure that the dataset type is correct.

+

400

+

ModelArts.4824

+

Dataset version status must be normal

+

Abnormal dataset version state.

+

Check the version publish state of the dataset.

+

400

+

ModelArts.4825

+

Dataset version is empty

+

No sample is available in the dataset version.

+

Check the version publish result of the dataset.

+

400

+

ModelArts.4827

+

Dataset version delete failed

+

Failed to delete the dataset version because there is a labeling task based on the dataset version.

+

Check whether there is any labeling task that is created using the dataset version.

+

400

+

ModelArts.4851

+

Task version not found

+

The version specified by version_id does not exist.

+

Ensure that the version_id value is correct.

+

401

+

ModelArts.4504

+

Iam error

+

IAM error.

+

Ensure that the AK/SK or token is correct.

+

403

+

ModelArts.4310

+

OBS action is forbidden, please check iam agency or OBS and so on

+

You do not have permission to access OBS.

+

Grant the permission to access OBS.

+

403

+

ModelArts.4335

+

Iam agency is invalid

+

Invalid IAM agency.

+

Check the IAM permission.

+

403

+

ModelArts.4336

+

The user hasn't permission

+

Restricted user permission.

+

Check the user permission.

+

403

+

ModelArts.4348

+

Check dli agency failed

+

Failed to create the agency for checking DLI.

+

Check the agency permission for checking DLI.

+

403

+

ModelArts.4419

+

File is too large

+

The size of the file imported from OBS or to be parsed has reached the maximum allowed limit.

+

Ensure that the file is correct.

+

403

+

ModelArts.4500

+

The number of iam agencies has reached the maximum

+

The number of IAM agencies exceeded the upper limit.

+

Delete unused IAM agencies.

+

403

+

ModelArts.4501

+

The iam agency create action is forbidden

+

Failed to create the IAM agency.

+

Ensure that you have permission to create an IAM agency.

+

403

+

ModelArts.4600

+

The worker action is forbidden

+

The team member cannot perform the operation.

+

Check whether the team member has permission to perform the operation.

+

403

+

ModelArts.4613

+

Task not finish

+

Failed to obtain the task result because the task execution has not been completed.

+

Try again later.

+

403

+

ModelArts.4803

+

Dataset has unfinished label tasks, the operation is forbidden

+

Failed to delete data because the labeling task in the dataset has not been completed.

+

Ensure that all labeling tasks in the dataset are complete.

+

403

+

ModelArts.4821

+

Task is publishing, the operation is forbidden

+

The auto labeling task cannot be started because a labeling task is being published.

+

Try again after the labeling task is published.

+

403

+

ModelArts.4850

+

Process task is initializing, the operation is forbidden

+

Failed to delete the task because the processor task is being initialized.

+

Check the task status.

+

429

+

ModelArts.4395

+

Too many dataset requests

+

Frequent dataset or labeling task creation in a unit time.

+

Try again later.

+

500

+

ModelArts.4354

+

Insufficient quota

+

The number of created datasets has reached the maximum allowed limit.

+

Delete unused datasets or apply for a higher quota.

+

500

+

ModelArts.4360

+

Sync data source error

+

Failed to synchronize the data source.

+

Check the synchronization task status and result.

+

500

+

ModelArts.4367

+

Update dataset state failed

+

Failed to delete the dataset.

+

Check the dataset status.

+

500

+

ModelArts.4373

+

No migratory job

+

Failed to create the dataset migration task.

+

Check whether the source dataset of the migration task is valid.

+

500

+

ModelArts.4424

+

Delete sample failed

+

Failed to delete the dataset sample.

+

Ensure that the sample to be deleted is correct and that there is no unfinished import or synchronization task.

+

500

+

ModelArts.4614

+

Task failed

+

Failed to run the task.

+

Check the execution result of the processor task.

+

501

+

ModelArts.4383

+

Unsupported operation, dataset %s type is %s

+

The operation cannot be performed in the dataset.

+

Check whether the dataset supports the current operation.

+

501

+

ModelArts.4385

+

Auto deploy task exist

+

Failed to start the deploy task because an auto deploy task is being executed in the dataset.

+

Start a new deploy task after the current task is complete.

+

501

+

ModelArts.4386

+

Auto labeling task exist

+

Failed to start the auto labeling task because an auto labeling task is being executed in the dataset.

+

Start a new auto labeling task after the current task is complete.

+

501

+

ModelArts.4387

+

Pre-label task exist

+

Failed to start the pre-labeling task because a pre-labeling task is being executed in the dataset.

+

Start a new pre-labeling task after the current task is complete.

+

501

+

ModelArts.4394

+

Task is running

+

Failed to obtain the migration result because a migration task is being executed in the dataset.

+

Wait until the migration task is complete.

+

400

+

ModelArts.0116

+

Free train job reached the limit

+

The number of free jobs exceeds the upper limit.

+

Create the job again after the running job or the queuing job is complete.

+

400

+

ModelArts.0806

+

download log failed

+

No log is found.

+

Ensure that the time elapsed since the job ran does not exceed the configured log storage duration. View the job running parameters and ensure that the job started successfully. If the fault persists, contact technical support.

+

400

+

ModelArts.0901

+

Get dataset url failed

+

An error occurred when obtaining the dataset URL.

+

Ensure that the dataset URL is correct.

+

400

+

ModelArts.2605

+

The number of available nodes in the resource pool is illegal: ${nodeCount}

+

No compute node is available in the selected resource pool.

+

Select another resource pool.

+

400

+

ModelArts.2606

+

${obsUrl} should not be bucket

+

The selected dataset path is invalid.

+

Select a valid path or select another dataset.

+

400

+

ModelArts.2607

+

parameter length reaches the max limit ${BATCH_CMD_LENGTH_LIMIT}

+

The number of characters in the running parameter exceeds the upper limit.

+

Check the running parameters and try again.

+

400

+

ModelArts.2608

+

${url} should not contains

+

Invalid OBS path.

+

Ensure that the OBS path does not contain spaces or any of the following characters: ' { } [ ]. Then try again.

+

403

+

ModelArts.2701

+

There is already a peer connection

+

The VPC peering connection already exists in the resource pool.

+

Use the existing connection or create another one.

+

403

+

ModelArts.2702

+

Invalid vpc or subnet

+

The specified VPC CIDR block overlaps with the dedicated resource pool CIDR block, and no VPC peering connection can be created.

+

Change the VPC CIDR block.

+

403

+

ModelArts.2703

+

network of pool is not ready

+

There are multiple peering connections in the dedicated resource pool. These peering connections may have been manually operated.

+

System error. Contact technical support.

+

403

+

ModelArts.3000

+

volumes of config are used in dedicated pools

+

You do not have the permission to access the volume in the dedicated resource pool.

+

Access a common volume.

+

404

+

ModelArts.0015

+

The resource you requested is not exist

+

The queried resource does not exist.

+

Ensure that the URL is correct.

+

500

+

ModelArts.2700

+

${operator} failed. Response status is ${status}.Detail error msg: $agentErrorMsg

+

Failed to call the VPC interface.

+

System error. Contact technical support.

+

500

+

ModelArts.2704

+

Update sfs turbo info of pool failed. Error msg: {ex.getMessage}

+

Failed to create the VPC peering connection and call the VPC interface.

+

Try again later.

+

500

+

ModelArts.2800

+

${operator} failed. Response status is $status. Detail error msg: ${agentErrorMsg}

+

Failed to access the SFS Turbo interface.

+

System error. Contact technical support.

+

400

+

ModelArts.5011

+

Invalid parameter

+

Invalid parameter.

+

Modify the parameter.

+

400

+

ModelArts.5012

+

Invalid product attribute

+

Invalid product attribute.

+

Modify the product attribute.

+

400

+

ModelArts.5016

+

The original model ({0}) does not exist during model release

+

The product source model does not exist.

+

Select another source model.

+

400

+

ModelArts.5022

+

Unsupported model type

+

The model type is not supported.

+

Select another source model.

+

400

+

ModelArts.5025

+

Incorrect verification code

+

Incorrect verification code.

+

Enter the correct verification code.

+

400

+

ModelArts.5026

+

Verification code expired

+

Verification code expired.

+

Obtain a new verification code.

+

400

+

ModelArts.5027

+

The verification code does not match the email address.

+

The email address and verification code do not match.

+

Obtain a new verification code.

+

400

+

ModelArts.5038

+

You must first get a verification code

+

You have not obtained the verification code.

+

Specify your email address and obtain the verification code by email.

+

400

+

ModelArts.5041

+

Duplicate version. Refresh the page and try again

+

Duplicate version.

+

Refresh the page and try again.

+

400

+

ModelArts.5047

+

Hilens product name duplicated.

+

Duplicate HiLens name.

+

Enter another product name.

+

400

+

ModelArts.5048

+

Hilens skill version duplicated.

+

Duplicate HiLens version.

+

Select another version.

+

400

+

ModelArts.5049

+

Incorrect Hilens skill. Select the skill corresponding to the offering.

+

Incorrect HiLens skill.

+

Select the correct HiLens skill.

+

400

+

ModelArts.5051

+

The HiLens skill type is not supported. Release the skill type in HiLens Studio.

+

The HiLens skill is currently unavailable. Publish the skill in HiLens Studio.

+

Select the correct HiLens skill.

+

400

+

ModelArts.5052

+

The user does not have the agency permission

+

You do not have the agency permission.

+

Set an agency and try again.

+

400

+

ModelArts.5303

+

The obs source data is empty

+

Empty source data.

+

Select other source data.

+

400

+

ModelArts.5304

+

The maximum number of files has been exceeded

+

The number of files has reached the maximum allowed limit.

+

Select other source data or reduce the data volume.

+

400

+

ModelArts.5305

+

The maximum size of files has been exceeded

+

The file size has reached the maximum allowed limit.

+

Select other source data or reduce the data volume.

+

400

+

ModelArts.5306

+

The maximum depth of files has been exceeded

+

The file depth has reached the maximum allowed limit.

+

Select other source data or reduce the data depth.

+

400

+

ModelArts.5309

+

Only allow edge service published to AIHub.

+

Only edge VAS can be published.

+

Select the edge VAS.

+

400

+

ModelArts.5312

+

Only allow market category service published to AIHub.

+

Only market VAS can be published.

+

Select the market VAS.

+

400

+

ModelArts.5313

+

Only allow released service published to AIHub.

+

Only published VAS is allowed.

+

Select the published VAS.

+

400

+

ModelArts.5314

+

Only allow 'closed Beta Test' stage service published to AIHub.

+

Only Closed Beta Test VAS can be published.

+

Select the Closed Beta Test VAS.

+

401

+

ModelArts.5001

+

Token is blank

+

The token does not exist.

+

Add the token and try again.

+

401

+

ModelArts.5002

+

Token is invalid

+

Invalid token.

+

Obtain a token and try again.

+

401

+

ModelArts.5056

+

You do not have the permission to access the requested resource.

+

You have not subscribed to HiLens.

+

Log in to the HiLens management console and subscribe to the service.

+

403

+

ModelArts.5013

+

The user is not registered. Please release a product first to complete the registration

+

You have not registered with AI Gallery.

+

Publish a product in AI Gallery for registration.

+

403

+

ModelArts.5014

+

Permission deny

+

You do not have permission to access the resource.

+

Access other resources.

+

403

+

ModelArts.5017

+

Account suspended.

+

Suspended account.

+

Ensure that the account is not restricted and try again.

+

403

+

ModelArts.5040

+

Your usage has exceeded the maximum quota

+

The resource usage has reached the maximum allowed limit.

+

Wait until the system is idle and try again.

+

403

+

ModelArts.5043

+

Challenge applications submit failed

+

Failed to submit the contest.

+

System error. Contact technical support.

+

403

+

ModelArts.5053

+

Product has been recycled.

+

The product has been reclaimed.

+

Select another product.

+

403

+

ModelArts.5054

+

At least one available version must exist.

+

At least one available version is required for the product.

+

Perform other operations.

+

403

+

ModelArts.5055

+

The subscription has expired.

+

The subscription has expired.

+

Initiate a new subscription.

+

403

+

ModelArts.5057

+

The free package is sold out.

+

The free package has been sold out.

+

Select another package.

+

404

+

ModelArts.5058

+

Object ({0}) does not exit.

+

The object does not exist.

+

Select another object.

+

409

+

ModelArts.5015

+

The product is being reviewed

+

The product is being reviewed.

+

Perform operations after the review is complete.

+

409

+

ModelArts.5044

+

Operations conflict, please try again later

+

Failed to perform the operation.

+

Try again later.

+

500

+

ModelArts.5000

+

Please try again later or contact customer service to solve the problem

+

Internal error.

+

System error. Contact technical support.

+

500

+

ModelArts.5003

+

Internal Error: access IAM service failed

+

Failed to access IAM.

+

System error. Contact technical support.

+

500

+

ModelArts.5004

+

Internal Error: access moderation service failed.

+

Failed to access Text Moderation.

+

System error. Contact technical support.

+

500

+

ModelArts.5005

+

Internal Error: access ModelArts service failed

+

Failed to access ModelArts.

+

System error. Contact technical support.

+

500

+

ModelArts.5006

+

Internal Error: access AI service failed

+

Failed to access the managed services in AI Gallery.

+

System error. Contact technical support.

+

500

+

ModelArts.5007

+

Internal Error: access AI manager failed

+

Failed to access the manager services in AI Gallery.

+

System error. Contact technical support.

+

500

+

ModelArts.5008

+

Internal Error: access SMN service failed

+

Failed to access SMN.

+

System error. Contact technical support.

+

500

+

ModelArts.5009

+

Database operation failed

+

Database operation error.

+

System error. Contact technical support.

+

500

+

ModelArts.5010

+

OBS operation failed

+

Failed to access OBS.

+

System error. Contact technical support.

+

500

+

ModelArts.5019

+

Internal Error: access CBC service failed

+

Failed to access CBC.

+

System error. Contact technical support.

+

500

+

ModelArts.5021

+

Internal Error: access LCS service failed

+

Failed to access LCS.

+

System error. Contact technical support.

+

500

+

ModelArts.5039

+

Dear User,the traffic is a little busy. Please try again later

+

Excessive requests.

+

Try again later.

+

500

+

ModelArts.5050

+

Internal Error: access Hilens service failed

+

Failed to access HiLens.

+

System error. Contact technical support.

+

500

+

ModelArts.5300

+

Failed to explore the user source address.

+

Failed to traverse user source addresses.

+

System error. Contact technical support.

+

500

+

ModelArts.5301

+

Failed to download the source file from OBS.

+

Failed to download the source file from OBS.

+

System error. Contact technical support.

+

500

+

ModelArts.5302

+

Failed to upload the source file to the destination address.

+

Failed to upload the source file to the destination address.

+

System error. Contact technical support.

+

500

+

ModelArts.5308

+

Internal Error: access VAS service failed

+

Failed to access VAS.

+

System error. Contact technical support.

+

500

+

ModelArts.5311

+

Failed to get market category id.

+

No market category ID found.

+

System error. Contact technical support.

+

400

+

ModelArts.0101

+

Invalid Argument.

+

Invalid parameter.

+

Enter the correct parameter as prompted.

+

400

+

ModelArts.0107

+

The values of the request parameters ({0},{1}) are invalid.

+

Invalid parameter values ({0}, {1}).

+

Check whether the parameter values are valid.

+

400

+

ModelArts.0204

+

Token must contain projectId info.

+

No project ID included in the token.

+

Ensure that the project token is used.

+

400

+

ModelArts.3015

+

Config json file does not conform to the specification.

+

The model configuration file does not comply with file specifications.

+

Check whether config.json or initial_config complies with the specifications.

+

400

+

ModelArts.3016

+

Failed to parse config json file because of unsupported fields, types, or formats({0}, {1}).

+

Failed to parse the configuration file because the field, type, or format is not supported.

+

Check whether the field is correct as prompted.

+

400

+

ModelArts.3017

+

Failed to publish model because source location noncompliance.

+

Failed to publish the model because the model source path does not meet specifications.

+

Check whether the source_location value complies with the specifications.

+

400

+

ModelArts.3023

+

Invalid labels({0}) of model.

+

Invalid model label {0}.

+

Ensure that a model label starts with a letter or a Chinese character and contains a maximum of 64 characters, including letters, digits, underscores (_), and Chinese characters.

+

400

+

ModelArts.3024

+

The templateInputs field is incorrectly configured, check if the input is consistent with the template requirements.

+

Incorrect configuration. Check whether the configuration complies with the template requirements.

+

The parameter configuration does not comply with the template requirements. Modify the configuration.

+

400

+

ModelArts.3025

+

User ({0}) has only a single share permission and cannot be shared with all users.

+

User {0} does not have permission to share data with all users.

+

Only the administrator or accounts in the whitelist have permission to share data with all users.

+

400

+

ModelArts.3026

+

Failed to delete infer format, infer format ({0}) already be used by template.

+

Failed to delete input/output mode {0} because it is being used by a template.

+

Find the template that is using the input/output mode, disassociate the mode from the template, and delete the mode again.

+

400

+

ModelArts.3027

+

The model used by the template ({0}) is private and belongs to other users and cannot be shared.

+

Input/Output mode {0} used by the template cannot be shared.

+

Select another mode.

+

400

+

ModelArts.3028

+

Failed to publish model, please select input and output mode.

+

Failed to publish the model. Select an input/output mode.

+

Select another input/output mode and publish the model again.

+

400

+

ModelArts.3029

+

Model publishing failed because the Template model must provide the template ID used.

+

Failed to publish the model because the template ID is unavailable.

+

Enter the template ID.

+

400

+

ModelArts.3030

+

Model publishing failed, template ({0}) does not support modifying its built-in input and output mode.

+

Failed to publish the model because the built-in input/output mode of template {0} cannot be changed.

+

Do not change the input/output mode of a template when publishing the template model.

+

400

+

ModelArts.3031

+

Cancel sharing failed, ({0}) has not been shared with other tenants.

+

Failed to cancel the sharing because image {0} is not shared with other users.

+

Ensure that image {0} is not shared with other users.

+

400

+

ModelArts.3032

+

Image ({0}) cannot be shared to admin ({1}), please check the image permissions.

+

Failed to share image {0} with administrator {1}. Check the image permission.

+

Perform operations according to the error message.

+

400

+

ModelArts.3033

+

Template publishing failed, please provide template label.

+

Failed to publish the template model because its label is left blank.

+

Set the model label.

+

400

+

ModelArts.3034

+

Template model failed to be published, template field is required.

+

Failed to publish the template model because the template field is left blank.

+

Set template.

+

400

+

ModelArts.3035

+

Profile preview failed, sourceLocation and previewConfig fields need to provide at least one.

+

Failed to preview the configuration file.

+

Either source_location or preview_config must be specified.

+

400

+

ModelArts.3036

+

Parameter verification failed, parameter ({0}) is required.

+

Failed to check parameter {0} because it is left blank.

+

Set parameters according to the error message.

+

400

+

ModelArts.3037

+

Parameter ({0}:{1}) is invalid.

+

Invalid parameter {0}:{1}.

+

Modify the parameters as prompted.

+

400

+

ModelArts.3038

+

When isSpecific is true, domainId is required.

+

Failed to perform the operation because domain_id is left blank.

+

Set domain_id as prompted.

+

400

+

ModelArts.3039

+

Template input input_id cannot be repeated.

+

Failed to perform the operation because input_id is repetitive.

+

Ensure that the input_id value is unique.

+

400

+

ModelArts.3040

+

When the installer installer is yum or apt-get, and the version packageVersion exists, the version constraint restraint can only be EXACT.

+

Failed to perform the operation because restraint can only be EXACT.

+

Change the restraint value to EXACT.

+

400

+

ModelArts.3041

+

When the installer installer is conda or pip, and the version packageVersion exists, the version constraint restraint can only be EXACT, ATLEAST, ATMOST.

+

Failed to perform the operation because restraint can only be EXACT, ATLEAST, or ATMOST.

+

Change the restraint value to EXACT, ATLEAST, or ATMOST.

+

400

+

ModelArts.3063

+

Mode unshared failed, the template using this mode has been shared, you need to cancel the sharing of the template first.

+

Failed to cancel mode sharing because the template using the mode is being shared.

+

Perform operations according to the error message.

+

400

+

ModelArts.3070

+

There is a model name that does not belong to this tenant: ({0}).

+

The model does not belong to user {0}.

+

The domain ID of the subscribed model is different from the entered domain ID. Contact the model subscription service and check whether the entered domain ID is correct.

+

400

+

ModelArts.3072

+

The domain id is the same and does not support subscribing to your own model.

+

You cannot subscribe to your own model.

+

Use another account to subscribe to the model.

+

400

+

ModelArts.3074

+

Unable to delete subscribed model.

+

Failed to delete the subscribed model.

+

Unsubscribe from the model and try again.

+

400

+

ModelArts.3076

+

Model does not support deploying edge services and cannot broadcast.

+

Failed to perform the operation because the model cannot be deployed as an edge service.

+

The install_type of the model does not support edge service.

+

400

+

ModelArts.3077

+

Model is a subscription model that cannot be broadcast.

+

Failed to perform the operation because the model has been subscribed.

+

The model_source of the model has been set to subscribe.

+

400

+

ModelArts.3078

+

The model status is not normal and cannot be broadcast.

+

Failed to perform the operation because the model status is abnormal.

+

Check the model status.

+

400

+

ModelArts.3079

+

Purchased license, can't cancel subscription.

+

Failed to unsubscribe from the model because a license has been configured for the model.

+

Delete the license and unsubscribe from the model.

+

403

+

ModelArts.0108

+

You are not authorized to perform the ({0}) operation.

+

You do not have permission to perform {0}.

+

Check whether you are authorized to perform operations on OBS or APIs.

+

403

+

ModelArts.0203

+

Invalid token.

+

Invalid token.

+

Obtain a new token and try again.

+

403

+

ModelArts.0206

+

Invalid AK/SK.

+

Invalid AK/SK.

+

Check whether the token is valid.

+

403

+

ModelArts.3010

+

Failed to copy model, you don't have permission to copy the model ({0}).

+

You do not have the permission to copy model {0}.

+

Check whether the entered token or model ID is correct.

+

403

+

ModelArts.3075

+

Model ({0}) is a subscription model and cannot create a new version.

+

Failed to create a new version because model {0} has been subscribed.

+

No new version can be created for a subscribed model.

+

404

+

ModelArts.3001

+

Model ({0}) does not exist.

+

Model {0} does not exist.

+

Check whether the entered model ID is correct.

+

404

+

ModelArts.3003

+

Failed to get model names list.

+

Failed to obtain the model name list.

+

Check whether the account is correct.

+

404

+

ModelArts.3019

+

Infer format ({0}) does not exist.

+

Input/Output mode {0} does not exist.

+

Change the input/output mode.

+

404

+

ModelArts.3021

+

Template ({0}) does not exists.

+

Template {0} does not exist.

+

Select another template.

+

404

+

ModelArts.3071

+

The subscription ({0}) does not exist.

+

Subscription {0} does not exist.

+

Subscribe to the model and try again.

+

404

+

ModelArts.3080

+

Model optimization job ({0}) does not exist.

+

Model optimization task {0} does not exist.

+

The task is unavailable.

+

409

+

ModelArts.3002

+

Model ({0}, {1}) already exists.

+

Model ({0}, {1}) already exists.

+

Change the model name or version and try again.

+

409

+

ModelArts.3020

+

Infer format ({0}) already exists.

+

Input/Output mode {0} already exists.

+

Change the input/output mode.

+

409

+

ModelArts.3022

+

Template ({0}) already exists.

+

Template {0} already exists.

+

Select another template.

+

409

+

ModelArts.3073

+

model ({0}) already exists, no subscription required.

+

Model {0} already exists.

+

Do not subscribe to another model with the same name.

+

500

+

ModelArts.0010

+

Internal error.

+

Internal error.

+

Contact R&D and O&M personnel.

+

500

+

ModelArts.0109

+

Unauthorized account.

+

Unauthorized account.

+

Only the administrator can call alarm APIs.

+

500

+

ModelArts.3006

+

Failed to publish model.

+

Failed to publish the model.

+

The model metadata failed to be stored in the database. Contact R&D and O&M personnel.

+

500

+

ModelArts.3009

+

Failed to delete model, model ({0}) already deploy service.

+

Failed to delete model {0} because it has been deployed as a service.

+

Delete the deployed service and try again.

+

500

+

ModelArts.3043

+

User ({0}) does not have obs: object: PutObjectAcl permission.

+

User {0} does not have the obs:object:PutObjectAcl permission.

+

Add the permission and try again.

+

500

+

ModelArts.3044

+

Model file ({0}) is larger than 5G and cannot be imported.

+

Failed to import model file {0}.

+

Ensure that the file is at most 5 GB in size.

+

500

+

ModelArts.3045

+

Parameter ({0}) is null.

+

Parameter {0} is left blank.

+

Check the parameter setting as prompted.

+

500

+

ModelArts.3047

+

ExeML model({0}) cannot be converted.

+

Failed to convert model {0}.

+

ExeML models cannot be converted.

+

500

+

ModelArts.3048

+

Model({0}) is being imported and cannot be converted.

+

Failed to convert model {0} because the model status is abnormal.

+

Convert the model after the model status changes to normal.

+

500

+

ModelArts.3049

+

Cannot convert models belonging to other users.

+

Models of other users cannot be converted.

+

You can only convert your models.

+

500

+

ModelArts.3050

+

Get user temporary credential failed.

+

Failed to obtain the temporary user certificate.

+

Check whether the token is valid.

+

500

+

ModelArts.3052

+

Chip Type ({0}) not support.

+

Chip type {0} is not supported.

+

Check whether the chip is of Ascend, GPU, Arm, or general type.

+

500

+

ModelArts.3053

+

Model Type ({0}) not support.

+

Model type {0} is not supported.

+

This type of model is not supported.

+

500

+

ModelArts.3054

+

Task ({0}) does not exists.

+

Task {0} does not exist.

+

The task is unavailable.

+

500

+

ModelArts.3055

+

Task ({0}) is running cannot be deleted.

+

Failed to delete task {0} because it is running.

+

Delete the task after it is complete.

+

500

+

ModelArts.3056

+

Task name ({0}) not meeting the specification.

+

Task name {0} does not comply with specifications.

+

Ensure that a task name starts with a lowercase letter, ends with a lowercase letter or digit, and contains 2 to 24 characters, including lowercase letters, digits, and hyphens (-).

+

500

+

ModelArts.3057

+

Task description ({0}) not meeting the specification.

+

Task description {0} does not comply with specifications.

+

Ensure that task description contains 1 to 100 characters and cannot contain the following characters: &,!\"<>=.

+

500

+

ModelArts.3058

+

Task input ({0}) not meeting the specification.

+

Task input {0} does not comply with specifications.

+

Perform operations according to the error message.

+

500

+

ModelArts.3059

+

Task output ({0}) not meeting the specification.

+

Task output {0} does not comply with specifications.

+

Ensure that the task output is a valid HTTP or HTTPS address.

+

500

+

ModelArts.3060

+

Task spec ({0}) not meeting the specification.

+

Advanced task option {0} does not comply with specifications.

+

Ensure that the advanced option does not contain Chinese characters or the following characters: (|)&$?<>. Ensure that the value contains fewer than 4000 characters.

+

500

+

ModelArts.3061

+

get Task ({0}) log parameter not meeting the specification.

+

Log parameters of task {0} do not comply with specifications.

+

Ensure that the offset or lines value is greater than or equal to 0.

+

500

+

ModelArts.3062

+

Task ({0}) exists.

+

Task {0} already exists.

+

Change the name and try again.

+

500

+

ModelArts.3064

+

DL Framework Type ({0}) not support.

+

Deep learning framework {0} is not supported.

+

This type of framework is not supported.

+

500

+

ModelArts.3065

+

The image of the model is being built so the runtime cannot be updated.

+

Failed to update runtime because the model image is being created.

+

Wait until the image is created.

+

500

+

ModelArts.3301

+

Failed to build image.

+

Failed to create the image.

+

Check the image, or contact R&D and O&M personnel to rectify the fault.

+

500

+

ModelArts.3801

+

invalid token, can not get template ak/sk.

+

Failed to obtain the template AK/SK due to an invalid token.

+

Obtain a new token and try again.

+

500

+

ModelArts.3802

+

create encrypt path or file failed.

+

Failed to create the encryption path or file.

+

Contact R&D and O&M personnel.

+

500

+

ModelArts.3803

+

download file from obs failed.

+

Failed to download the file from OBS.

+

Check whether the OBS path is available.

+

500

+

ModelArts.3804

+

upload file to obs failed.

+

Failed to upload the file to OBS.

+

Contact R&D and O&M personnel.

+

500

+

ModelArts.3805

+

file can not be empty.

+

The file cannot be left blank.

+

Set file.

+

500

+

ModelArts.3901

+

license id ({0}) is not exist.

+

License ID {0} does not exist.

+

Check whether the license ID is valid.

+

500

+

ModelArts.3902

+

Failed to get license info because ({0}).

+

Failed to obtain the license because {Reason}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3903

+

Renew license failed because ({0}).

+

Failed to renew the license because {Reason}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3904

+

Upload data failed because ({0}).

+

Failed to report the used quota because {Reason}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3905

+

Failed to get app key.

+

Failed to obtain the app key.

+

Contact R&D and O&M personnel.

+

500

+

ModelArts.3906

+

Failed to create license because ({0}).

+

Failed to create the license because {Reason}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3907

+

Failed to delete license because ({0}).

+

Failed to delete the license because {Reason}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3908

+

Failed to get CBC resources.

+

Failed to obtain CBC resources.

+

Check the license ID or the associated license configuration.

+

500

+

ModelArts.3909

+

access CBC failed.

+

Failed to access CBC.

+

Contact R&D and O&M personnel.

+

500

+

ModelArts.3910

+

Check CBC auth failed because ({0}).

+

Failed to authorize CBC operations because {Reason}.

+

Locate the fault based on the failure cause. For example, renewal authentication fails if cloudservicetype is not provided, and unsubscription authentication fails if an instance is being deployed.

+

500

+

ModelArts.7001

+

Internal error occurred in workspace manager.

+

An internal error occurred in the workspace manager.

+

Contact R&D and O&M personnel.

+

400

+

ModelArts.3514

+

Field [input_params] must be set for the model that is used to deploy the batch service.

+

The input_params parameter for the batch service model is left blank.

+

Set input_params.

+

400

+

ModelArts.3515

+

The model {id} cannot be used for deploying services because it is still being published.

+

Failed to deploy the service because model {ID} is not ready.

+

Try again after the model is ready.

+

400

+

ModelArts.3516

+

Failed to deploy services because the version of the model that is used for deploying services is the same.

+

Failed to deploy the service due to duplicate model versions.

+

Correct the model version list to ensure that no duplicate model versions exist.

+

400

+

ModelArts.3518

+

The {service_type} service has not been enabled or authorized.

+

Service {type} has not been enabled or authorized.

+

Enable or authorize the service.

+

400

+

ModelArts.3519

+

Services only in one of states in [deploying, running, stopping, deleting] can be updated.

+

Services in the deploying, running, stopping, or deleting state cannot be updated.

+

Do not update services in the deploying, running, stopping, or deleting state.

+

400

+

ModelArts.3520

+

A maximum of {number} {service_type} services are allowed.

+

The total number of {type} services has reached the maximum allowed limit {quantity}.

+

Delete the services that are no longer used or contact service O&M personnel to increase the quantity quota.

+

400

+

ModelArts.3521

+

A maximum of {number} {service_type} services in running status are allowed.

+

The total number of {type} services in the running state has reached the maximum allowed limit {quantity}.

+

Delete the services that are no longer used or contact service O&M personnel to increase the quantity quota.

+

400

+

ModelArts.3522

+

A maximum of {number} {service_type} service instances are allowed.

+

The total number of {type} service instances has reached the maximum allowed limit {quantity}.

+

Delete the service instances that are no longer used or contact service O&M personnel to increase the quantity quota.

+

400

+

ModelArts.3523

+

Service {name} has been subscribed.

+

Service {name} has been subscribed.

+

Directly use the subscribed service.

+

400

+

ModelArts.3524

+

Failed to subscribe to the service because {reason}.

+

Failed to subscribe to the service because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3525

+

Forbidden to update service {name} because it is a shared service.

+

Failed to update service {name} because it is a shared service.

+

Do not update a shared service.

+

400

+

ModelArts.3527

+

Forbidden to subscribe to service {id} because it is a {service_type} service.

+

Failed to subscribe to service {ID} because it is a {type} service.

+

The service of this type cannot be subscribed to.

+

400

+

ModelArts.3528

+

Forbidden to subscribe to service {id} because it is released by yourself.

+

Failed to subscribe to service {ID} because it is your service.

+

You cannot subscribe to services published by yourself.

+

400

+

ModelArts.3529

+

Model {id} under service {id} does not exist.

+

Model {ID} of service {ID} does not exist.

+

Ensure that the model ID is correctly set and that the model belongs to the target service.

+

400

+

ModelArts.3531

+

Service {id} has created the QR code.

+

A QR code has been created for service {ID}.

+

The QR code has been created for the service and does not need to be created again.

+

400

+

ModelArts.3532

+

No QR code found for service {id}.

+

A QR code has not been created for service {ID}.

+

Create a QR code for the service and try again.

+

400

+

ModelArts.3533

+

The selected edge node does not support {GPU/NPU}.

+

The selected edge node flavor does not support {GPU/NPU}.

+

Add the target hardware support to the selected edge node, or use another edge node that supports the target hardware.

+

400

+

ModelArts.3534

+

The security group must contain at least one inbound rule to allow the TCP request from source address 0.0.0.0/0 and port 8080.

+

The security group must contain at least one inbound rule to permit the requests whose protocol is TCP, source address is 0.0.0.0/0, and port number is 8080.

+

Add the inbound rule to the security group.

+

400

+

ModelArts.3535

+

Subservice {name} of service {name} does not exist.

+

Subservice {name} of service {name} does not exist.

+

Ensure that the subservice name is correctly set and that the subservice belongs to the target service.

+

400

+

ModelArts.3536

+

Subservice {name} of service {name} has been subscribed.

+

You have subscribed to subservice {name} of service {name}.

+

Directly use the subscribed subservice.

+

400

+

ModelArts.3538

+

The requirement notification of service {name} should not exceed one.

+

At most one requirement notification is allowed for service {name}.

+

Check the configuration and ensure that no more than one requirement notification is configured for service {name}.

+

400

+

ModelArts.3540

+

Operation failed because you are not the owner of service {name}.

+

You are not the owner of service {name}.

+

This operation can be performed only by the service owner.

+

400

+

ModelArts.3541

+

Error code {error_code} of service {name} does not exist.

+

Error code {error code} of service {name} does not exist.

+

Ensure that the error code is correctly set and that the error code belongs to the target service.

+

400

+

ModelArts.3542

+

Error code {error_code} of service {name} already exists.

+

Error code {error code} of service {name} already exists.

+

Change the error code and try again.

+

400

+

ModelArts.3544

+

URL domain CNAME resolution failed.

+

Failed to parse the CNAME domain.

+

Check whether the URL is correct.

+

400

+

ModelArts.3545

+

Invalid certificate or private key.

+

Invalid certificate or private key.

+

Check whether the certificate or private key is correctly configured.

+

400

+

ModelArts.3547

+

Cluster {id} does not exist.

+

Cluster {ID} does not exist.

+

Check whether the cluster ID is correct.

+

400

+

ModelArts.3548

+

Path {path} is not a valid regex.

+

Path {path} is not a valid regular expression.

+

Check whether the path is correct.

+

400

+

ModelArts.3549

+

Error code {code} is duplicated.

+

Duplicate error code {error code} in the request body.

+

Modify the error code to ensure that the error code is unique.

+

400

+

ModelArts.3550

+

Domain {name} has been used.

+

Domain name {name} has been used.

+

Change the domain name and try again.

+

400

+

ModelArts.3551

+

OBS path {path} does not exist.

+

OBS path {path} does not exist.

+

Check whether the OBS path is correct.

+

400

+

ModelArts.3552

+

The length of [resource_id] must be in the range of [0, 64].

+

The length of the resource_id value is not in the range 0 to 64.

+

Check whether RESOURCE_SPEC_CODE is too long.

+

400

+

ModelArts.3553

+

Service {name} not subscribed yet.

+

Service {name} has not been subscribed to.

+

Subscribe to the service and try again.

+

400

+

ModelArts.3554

+

Cluster name {name} has been used.

+

Cluster name {name} has been used.

+

Change the cluster name and try again.

+

400

+

ModelArts.3556

+

Forbidden to share ai-service {id} because ai-service is unsharable.

+

AI service {ID} cannot be shared.

+

AI services cannot be shared.

+

400

+

ModelArts.3559

+

Source error code {error_code} of service {name} already exists.

+

Source error code {error code} of service {name} already exists.

+

Change the source error code and try again.

+

400

+

ModelArts.3560

+

Project {project_id} has not subscribed to subservice {id}.

+

Subservice {ID} has not been subscribed for project {ID}.

+

Subscribe to the subservice and try again.

+

400

+

ModelArts.3561

+

Insufficient node quota.

+

Insufficient node quota.

+

Contact service O&M personnel to increase the quota or delete unnecessary nodes to release resources.

+

400

+

ModelArts.3562

+

The value of field {due_time} must be a future time.

+

The due_time value must be later than the current time.

+

Set the due_time parameter to a time later than the current time.

+

400

+

ModelArts.3564

+

The sample collection task has been created for service {id}.

+

Sample collection for service {ID} has been created.

+

Do not create the task again.

+

400

+

ModelArts.3565

+

No sample collection task is created for service {id}.

+

Sample collection for service {ID} has not been created.

+

Create a service sample collection task and try again.

+

400

+

ModelArts.3566

+

Forbidden to modify the default workspace.

+

The default workspace cannot be modified.

+

Do not modify the default workspace.

+

400

+

ModelArts.3567

+

OBS error occurs because {reason}.

+

An OBS error occurred because {cause}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3568

+

OBS client error occurs.

+

The OBS client is abnormal.

+

Contact service O&M personnel.

+

400

+

ModelArts.3572

+

Invalid OBS URL {url}.

+

Invalid OBS path {path}.

+

Check whether the OBS path is correct.

+

400

+

ModelArts.3573

+

Failed to obtain the bucket name from OBS URL {url}.

+

Failed to obtain the bucket name from OBS path {path}.

+

Check whether the OBS path is correct.

+

400

+

ModelArts.3574

+

Failed to validate the correctness of OBS URL {url}.

+

Failed to check OBS path {path}.

+

Check whether the OBS path is correct.

+

400

+

ModelArts.3576

+

Failed to query the model quota.

+

Failed to obtain the model quota.

+

Contact service O&M personnel.

+

400

+

ModelArts.3577

+

Failed to update the model quota.

+

Failed to update the model quota.

+

Contact service O&M personnel.

+

400

+

ModelArts.3578

+

Insufficient model quota. Increase the quota or delete any unnecessary services.

+

Insufficient model quota.

+

Increase the quota or delete the models that are no longer used, and try again.

+

400

+

ModelArts.3580

+

Model not purchased.

+

The model has not been purchased.

+

Purchase the model and try again.

+

400

+

ModelArts.3582

+

Failed to create edge configurations.

+

Failed to create edge configurations.

+

Contact service O&M personnel.

+

400

+

ModelArts.3583

+

Failed to create the edge application due to IEF error.

+

Failed to create the edge application due to an IEF platform error.

+

Contact service O&M personnel.

+

400

+

ModelArts.3584

+

Failed to update the edge application.

+

Failed to update the edge application.

+

Contact service O&M personnel.

+

400

+

ModelArts.3585

+

The host port has been used by another edge application.

+

The host port has been used by another edge application.

+

Use another port or contact service O&M personnel.

+

400

+

ModelArts.3586

+

Failed to create the edge application.

+

Failed to create the edge application.

+

Contact service O&M personnel.

+

400

+

ModelArts.3587

+

Failed to delete the edge application.

+

Failed to delete the edge application.

+

Contact service O&M personnel.

+

400

+

ModelArts.3588

+

Failed to create edge volumes and environments.

+

Failed to create edge parameters.

+

Contact service O&M personnel.

+

400

+

ModelArts.3589

+

Model {id} cannot be used to deploy {service_type} service.

+

Model {ID} cannot be used to deploy the {type} service.

+

Select another deployment type.

+

400

+

ModelArts.3590

+

Invalid expression rule. Create a rule using Spring Expression Language and the desired result type is Boolean.

+

Invalid expression rule.

+

Ensure that the rule complies with the Spring Expression Language syntax and that the expected result is of the Boolean type.

+

400

+

ModelArts.3591

+

There are duplicate rules in the extend-configs.

+

Duplicate rules in extend-configs.

+

Modify the expression specifications to prevent duplicate expressions.

+

400

+

ModelArts.3592

+

Incorrect model health configuration.

+

Incorrect model health check parameters.

+

Check whether the health check parameters of the model are correct.

+

400

+

ModelArts.3593

+

The model has been expired.

+

The model has expired.

+

Purchase or subscribe to the model again.

+

400

+

ModelArts.3594

+

Failed to query IEF instances.

+

Failed to obtain the edge instance.

+

Contact service O&M personnel.

+

400

+

ModelArts.3595

+

Failed to create the IEF application mesh.

+

Failed to create the application mesh.

+

Contact service O&M personnel.

+

400

+

ModelArts.3596

+

Failed to update the IEF application mesh.

+

Failed to update the application mesh.

+

Contact service O&M personnel.

+

400

+

ModelArts.3597

+

Please authorize the development environment to use and store your AK and SK.

+

Failed to use the service because the development environment has not been authorized to store and use your AK and SK.

+

Authorize the development environment to store and use your AK and SK.

+

400

+

ModelArts.3598

+

Only nodes in the [Running] state can be selected.

+

Only running nodes can be selected.

+

Select a running node and try again.

+

400

+

ModelArts.3610

+

Parameter {name} cannot be empty.

+

Parameter {name} cannot be left blank.

+

Set the parameter and try again.

+

400

+

ModelArts.3611

+

Failed to query the batch task run log.

+

Failed to obtain batch processing task logs.

+

Contact service O&M personnel.

+

400

+

ModelArts.3612

+

Failed to {create/update} the service payload due to error code {code}. Please try later or submit a service ticket for professional technical support.

+

Failed to {create/update} service workload. The error code is {error code}. Try again later or submit a service ticket.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3613

+

Failed to register the API. Please try later or submit a service ticket for professional technical support.

+

Failed to register the service API. Try again later or submit a service ticket.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3620

+

Failed to get the number of associated services.

+

Failed to obtain the number of associated services.

+

Contact service O&M personnel.

+

400

+

ModelArts.3811

+

Failed to query the agency because {reason}.

+

Failed to obtain the agency because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3812

+

Failed to create AI resource specification code {code} because {reason}.

+

Failed to create resource flavor code {code} because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3813

+

Failed to update AI resource specification code {code} because {reason}.

+

Failed to update resource flavor code {code} because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3814

+

Failed to delete AI resource specification code {code} because {reason}.

+

Failed to delete resource flavor code {code} because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3815

+

Failed to delete AI subservice {id} because {reason}.

+

Failed to delete subservice {ID} because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3816

+

Failed to create the AI proxy mapping because {reason}.

+

Failed to create the proxy mapping because {reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3817

+

Failed to update the AI proxy mapping because {reason}.

+

Failed to update the proxy mapping path because {reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3818

+

The agency of service {name} has been defined.

+

The agency of service {name} has been configured.

+

Do not repeat the operation.

+

400

+

ModelArts.3822

+

The batch service task does not exist.

+

The batch service does not exist.

+

Enter the correct batch service ID.

+

400

+

ModelArts.3825

+

Failed to generate the SDR data because {reason}.

+

Failed to generate CDR data because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3826

+

Failed to create the API because {reason}.

+

Failed to create the API because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3827

+

Failed to subscribe to the API because {reason}.

+

Failed to subscribe to the API because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3828

+

Failed to unsubscribe from the API because {reason}.

+

Failed to unsubscribe from the API because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3829

+

Failed to modify the API subscription status because {reason}.

+

Failed to update the API subscription because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3830

+

Operation not allowed. Change the charging mode from postpaid to prepaid in CBC.

+

You are not allowed to change the billing mode from postpaid to prepaid.

+

Change the billing mode in CBC.

+

400

+

ModelArts.3831

+

Operation not allowed. Change the charging mode from prepaid to postpaid in CBC.

+

You are not allowed to change the billing mode from prepaid to postpaid.

+

Change the billing mode in CBC.

+

400

+

ModelArts.3832

+

The subscription configuration is not modified and does not need to be updated. Please check.

+

Failed to perform the operation because the subscription configuration has not been modified.

+

Change the subscription configuration and try again.

+

400

+

ModelArts.3833

+

Failed to delete the API because {reason}.

+

Failed to delete the API because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3834

+

Failed to update metedata in CBC because {reason}.

+

Failed to update the CBC metadata because {Reason}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3835

+

Resource specification code {code} of service {name} does not exist.

+

Resource flavor code {code} of service {name} does not exist.

+

Ensure that the resource flavor code is correctly set and that the code belongs to the target service.

+

400

+

ModelArts.3836

+

Resource specification code {code} of project {id} does not exist.

+

Resource flavor code {code} of project {ID} does not exist.

+

Ensure that the resource flavor code is correctly set and that the code belongs to the target project.

+

400

+

ModelArts.3837

+

Resource ID {id} of project {id} does not exist.

+

Resource {ID} of project {ID} does not exist.

+

Ensure that the resource ID is correctly set and that the resource belongs to the target project.

+

400

+

ModelArts.3838

+

The hard-select has not been enabled for service {id}.

+

Hard example filtering of service {0} has not been enabled.

+

Enable hard example filtering for the service and try again.

+

400

+

ModelArts.3839

+

The hard-select has been enabled for service {id}.

+

Hard example filtering of service {0} has been enabled.

+

You do not need to enable it again.

+

400

+

ModelArts.3840

+

Only one algorithm can be set for the service model.

+

Service {0} has more than one labeling type.

+

Modify the configuration to ensure that there is only one labeling type.

+

400

+

ModelArts.3841

+

The value of [dataset_type] must be one of values in [image_classification, object_detection].

+

The labeling type is not object detection or image classification.

+

Change the labeling type to object detection or image classification.

+

400

+

ModelArts.3842

+

The labeling type of service algorithm {name} does not match that of dataset {type}.

+

The labeling type of service algorithm {0} does not match that of dataset {1}.

+

Modify the labeling type or contact service O&M personnel.

+

400

+

ModelArts.3843

+

Failed to query the information about dataset {id}.

+

Failed to obtain dataset {0}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3844

+

Failed to import samples to dataset {id}.

+

Failed to import the sample to dataset {0}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3845

+

No sample is available for import under the OBS path {path} of service {id}.

+

No sample is available to be imported to OBS path {path} of service {ID}.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3846

+

Failed to add the OBS bucket lifecycle rule for service {id}.

+

Failed to add the lifecycle rule for the OBS bucket of service {ID}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3847

+

Failed to delete the OBS bucket lifecycle rule for service {id}.

+

Failed to delete the lifecycle rule for the OBS bucket of service {ID}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3848

+

Service {id} in the information of dataset {import_type} already exists.

+

The service {ID} of the {import type} dataset already exists.

+

Check the request and select the correct service ID.

+

400

+

ModelArts.3849

+

Service {id} in the information of dataset {import_type} does not exist.

+

The service {ID} of the {import type} dataset does not exist.

+

Check whether the service ID is correct.

+

400

+

ModelArts.3850

+

The hard-sample-select task {id} of service {id} does not exist.

+

Hard example task {ID} of service {ID} does not exist.

+

Ensure that the hard example task ID is correctly set and that the task belongs to the target service.

+

400

+

ModelArts.3851

+

The status of hard-sample-select task {name} cannot be changed from {status} to {status}.

+

The status {status value} of the hard example task {name} cannot be updated to {status value}.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3852

+

Failed to start hard-sample-select task {id} because related OBS files have been deleted.

+

Failed to start hard example task {ID} because key OBS files have been deleted.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3853

+

The hard-sample-select quota of project {id} does not exist.

+

The quota for selecting hard samples for project {ID} does not exist.

+

Create the quota and try again.

+

400

+

ModelArts.3854

+

The hard-sample-select quota of project {id} already exists.

+

The quota for selecting hard samples for project {ID} already exists.

+

You do not need to create it again.

+

400

+

ModelArts.3856

+

A maximum of {number} free services are allowed.

+

The number of free services has reached the maximum allowed limit {Number}.

+

Delete unnecessary free services to release resources.

+

400

+

ModelArts.3857

+

A maximum of {number} free service instances are allowed.

+

The number of free service instances has reached the maximum allowed limit {Number}.

+

Delete unnecessary free service instances to release resources.

+

400

+

ModelArts.3862

+

Subservice {id} has been added to whitelist URL {url}.

+

The URL {URL} has been added to the whitelist of the sub-service {ID}.

+

Do not repeat the operation.

+

400

+

ModelArts.3863

+

Whitelist URL {id} of service {name} does not exist.

+

The whitelist URL {ID} of the service {name} does not exist.

+

Ensure that the URL ID in the whitelist is correct and that the URL belongs to the corresponding service.

+

400

+

ModelArts.3864

+

Subservice {id} of service {name} does not exist.

+

The sub-service {ID} of service {name} does not exist.

+

Ensure that the sub-service ID is correct and that the sub-service belongs to the corresponding service.

+

400

+

ModelArts.3865

+

The size of the download whitelist of service {name} exceeds the maximum number {number} allowed.

+

The number of the whitelist records downloaded by service {name} exceeds the upper limit {value}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3866

+

Domain ID {id} and user ID {id} already exist.

+

The domain ID {ID} and user ID {ID} already exist.

+

Do not repeat the operation.

+

400

+

ModelArts.3867

+

Domain ID {id} does not exist.

+

The domain ID {ID} does not exist.

+

Check the request and enter the correct domain ID.

+

400

+

ModelArts.3868

+

Domain ID {id} and user ID {id} do not exist.

+

The domain ID {ID} and user ID {ID} do not exist.

+

Check the request and enter the correct domain ID and user ID.

+

400

+

ModelArts.3869

+

Failed to delete the task type because {reason}.

+

Failed to delete the task type. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3870

+

Failed to subscribe to the API because {reason}.

+

Failed to subscribe to the API. Fault cause: {fault cause}.

+

Contact service O&M personnel.

+

400

+

ModelArts.3871

+

Forbidden to subscribe to postpaid QPS for the task type.

+

You are not allowed to subscribe to postpaid QPS for the task type.

+

Check the request parameter or contact service O&M personnel.

+

400

+

ModelArts.3872

+

Forbidden to change the charging mode for the task type from prepaid to postpaid QPS.

+

You are not allowed to change the billing mode of the task type from prepaid mode to postpaid QPS mode.

+

Check the request parameter or contact service O&M personnel.

+

400

+

ModelArts.3873

+

No corresponding subservice is found in cluster {name}.

+

The cluster {0} does not have the corresponding sub-service.

+

Check the request parameter or contact service O&M personnel.

+

400

+

ModelArts.3880

+

Failed to create the AS policy because {reason}.

+

Failed to create the AS policy. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

400

+

ModelArts.3890

+

The total number of AS policies exceeds the maximum number {number} allowed.

+

The number of AS policies exceeds the upper limit {0}.

+

Delete unnecessary AS policies.

+

400

+

ModelArts.3892

+

Calling failed due to invalid CBS request.

+

Failed to call the API due to the invalid CBC request.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3894

+

The specification configuration information of runtime environment {name} does not exist.

+

The specification configurations of the running environment {0} do not exist.

+

Set the flavor configurations for the running environment and try again.

+

400

+

ModelArts.3895

+

The specification configuration information of runtime environment {name} already exists.

+

The specification configurations of the running environment {0} already exist.

+

Do not repeat the operation.

+

400

+

ModelArts.3896

+

Resource specification code {0} for user {1} already exists.

+

The records of the resource specification code {0} in project {1} already exist.

+

Do not repeat the operation.

+

400

+

ModelArts.3897

+

Failed to {create/obtain/remove} the AS policy due to error code {code}.

+

Failed to {create/obtain/delete} the AS policy. Error code: {0}.

+

Try again or contact service O&M personnel.

+

400

+

ModelArts.3950

+

Failed to obtain the AI service list.

+

Failed to obtain the AI service list.

+

Contact service O&M personnel.

+

400

+

ModelArts.3951

+

Failed to parse file {name}.

+

Failed to parse the file {name}.

+

Ensure that the file name is correct.

+

400

+

ModelArts.3952

+

Insufficient resources. Please try later.

+

Insufficient resources.

+

Try again later or contact service O&M personnel.

+

400

+

ModelArts.3953

+

Failed to delete online service {id} because it has been associated with AI service {name}. Please disassociate it and try again.

+

Failed to delete real-time service {ID} because it has been associated with AI service {name}.

+

Cancel the association between the real-time service and all AI services and try again.

+

400

+

ModelArts.3954

+

AI resource specification {id} does not exist.

+

The cloud resource specification {ID} does not exist.

+

Ensure that the cloud resource specification ID is correct.

+

400

+

ModelArts.3955

+

Field [resource_id_components] of AI resource specification {id} cannot be empty.

+

The ID components in the cloud resource specification {ID} cannot be left blank.

+

Set the resource_id_components parameter and try again.

+

400

+

ModelArts.7002

+

You are not authorized to operate dataset {id}.

+

You do not have the permission to operate the dataset {ID}.

+

Check the permission or contact service O&M personnel.

+

401

+

ModelArts.3801

+

User credential (AK and SK) does not exist.

+

The user credential (AK/SK) does not exist.

+

Add the AK/SK again.

+

403

+

ModelArts.0210

+

The project id in the request URL does not match the token.

+

The project ID in the request URL and the token do not match.

+

Use the correct project ID to generate a token.

+

403

+

ModelArts.3555

+

Forbidden to access ECS.

+

You do not have the permission to access the ECS.

+

Authorize access to the ECS and try again.

+

403

+

ModelArts.3936

+

The app-auth API {id} does not belong to service {id}.

+

The API {ID} that supports application authentication does not belong to the service {ID}.

+

Check the request parameter or contact service O&M personnel.

+

404

+

ModelArts.3502

+

Service {name} does not exist.

+

The service {name} does not exist.

+

Ensure that the service name is correct.

+

404

+

ModelArts.3507

+

Model {name} does not exist.

+

The model {ID} does not exist.

+

Ensure that the model ID is correct.

+

404

+

ModelArts.3923

+

Application {id} does not exist.

+

The application {ID} does not exist.

+

Ensure that the application ID is correct.

+

404

+

ModelArts.3935

+

The app-auth API {id} does not exist.

+

The API {ID} that supports application authentication does not exist.

+

Ensure that the interface ID is correct.

+

409

+

ModelArts.3503

+

Service {name} already exists.

+

The service {name} already exists.

+

Change the service name and try again.

+

409

+

ModelArts.3929

+

Failed to create the application because {reason}.

+

Failed to create the application. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3501

+

Failed to deploy the service.

+

Failed to deploy the service.

+

Contact service O&M personnel.

+

500

+

ModelArts.3504

+

Failed to delete the service.

+

Failed to delete the service.

+

Contact service O&M personnel.

+

500

+

ModelArts.3505

+

Failed to update the service.

+

Failed to update the service.

+

Contact service O&M personnel.

+

500

+

ModelArts.3506

+

Model {id} does not exist.

+

Model {ID} does not exist.

+

Contact service O&M personnel.

+

500

+

ModelArts.3508

+

Failed to query the service monitoring information.

+

Failed to obtain the service monitoring information.

+

Contact service O&M personnel.

+

500

+

ModelArts.3509

+

Failed to query edge nodes.

+

Failed to query the node.

+

Contact service O&M personnel.

+

500

+

ModelArts.3511

+

Failed to query edge tags.

+

Failed to query the edge label.

+

Contact service O&M personnel.

+

500

+

ModelArts.3537

+

Failed to handle the CBC request.

+

Failed to process the CBC request.

+

Contact service O&M personnel.

+

500

+

ModelArts.3539

+

Failed to initialize service URL {url}.

+

Failed to initialize the service URL {URL}.

+

Ensure that the service URL is correct.

+

500

+

ModelArts.3543

+

Invalid CBC request.

+

The CBC request is invalid.

+

Try again or contact service O&M personnel.

+

500

+

ModelArts.3546

+

Failed to create the cluster.

+

Failed to create the dedicated resource pool.

+

Contact service O&M personnel.

+

500

+

ModelArts.3579

+

Failed to query the model information.

+

Failed to query the model information.

+

Contact service O&M personnel.

+

500

+

ModelArts.3581

+

Failed to create edge certificates.

+

Failed to create the edge certificate.

+

Contact service O&M personnel.

+

500

+

ModelArts.3802

+

Failed to create API group {name} because {reason}.

+

Failed to create the API group {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3803

+

Failed to register API {name} because {reason}.

+

Failed to register the API {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3805

+

Failed to create AI service {name} because {reason}.

+

Failed to create the AI service {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3806

+

Failed to update AI service {name} because {reason}.

+

Failed to update the AI service {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3807

+

Failed to delete AI service {name} because {reason}.

+

Failed to delete the AI service {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3808

+

Failed to create AI subservice {name} because {reason}.

+

Failed to create the sub-service {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3809

+

Failed to update AI subservice {name} because {reason}.

+

Failed to update the sub-service {name}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3810

+

Failed to create the agency because {reason}.

+

Failed to create the agency. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3819

+

Failed to delete the agency definition.

+

Failed to delete the service agency definition.

+

Contact service O&M personnel.

+

500

+

ModelArts.3858

+

Failed to bind task type to API {id} because {reason}.

+

Failed to bind task type to API {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3859

+

Failed to unbind task type from API {id} because {reason}.

+

Failed to unbind the task type from API {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3860

+

Failed to create the task type because {reason}.

+

Failed to create the task type. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3861

+

Failed to update the task type because {reason}.

+

Failed to update the task. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3891

+

Failed to operate the serverless cluster because {reason}.

+

Failed to operate the serverless cluster. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3893

+

Failed to forward the CBC request to the cloud service.

+

Failed to forward the CBC request to the cloud service.

+

Contact service O&M personnel.

+

500

+

ModelArts.3921

+

Failed to create the app-auth API because {reason}.

+

Failed to register the API that supports application authentication. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3924

+

Failed to create the application code for application {id} because {reason}.

+

Failed to create the AppCode of the application {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3925

+

Failed to delete app-auth api {id} because {reason}.

+

Failed to delete API {ID} that supports application authentication. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3926

+

Failed to reset the secret of application {id} because {reason}.

+

Failed to reset the private key of the application {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3927

+

Failed to reset the application code of application {id} because {reason}.

+

Failed to reset the AppCode of the application {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3928

+

Failed to obtain the app-auth API information because {reason}.

+

Failed to obtain information about the API that supports application authentication. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3932

+

Failed to obtain the application information because {reason}.

+

Failed to obtain the application information. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3933

+

Failed to delete application {id} because {reason}.

+

Failed to delete the application {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3934

+

Failed to delete the application code of application {id} because {reason}.

+

Failed to delete the AppCode of the application {ID}. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3937

+

Failed to authorize the app-auth API to the application because {reason}.

+

Failed to authorize the API to the application. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.3938

+

Failed to unbind the app-auth API from the application because {reason}.

+

Failed to cancel the authorization from the API to the application. Fault cause: {fault cause}.

+

Locate the fault based on the failure cause.

+

500

+

ModelArts.7001

+

Unknown workspace system error. Try again later or submit a service ticket for professional technical support.

+

An internal workspace error occurred.

+

Contact service O&M personnel.

+

400

+

ModelArts.7002

+

Not gray user, please contact the administrator

+

Workspace authentication error.

+

Contact the administrator to check whether the user has permissions on the workspace.

+

400

+

ModelArts.7003

+

no permission to get iam user list, please check permission

+

IAM permissions are incorrect.

+

Add the IAM permissions based on the error message.

+

400

+

ModelArts.7004

+

the status of workspace: {workspaceID} is abnormal, can not execute [action: {action}]

+

You are not allowed to create resources in a workspace that is not in the normal state, for example, a deleted or failed workspace.

+

Create resources in a normal workspace.

+

400

+

ModelArts.7005

+

workspace:{workspace} not existed

+

The workspace does not exist.

+

Ensure that the transferred workspace ID exists.

+

400

+

ModelArts.7006

+

workspace number over limit, limit: {limit}

+

The number of workspaces exceeds the upper limit.

+

Delete unnecessary workspaces.

+

500

+

ModelArts.7001

+

Workspace internal error

+

An unknown error occurs in the workspace.

+

System error. Contact technical support.

+

500

+

ModelArts.7008

+

workspace in read-only status, please wait a few minutes

+

The workspace is abnormal. Only read-only operations are supported.

+

System error. Contact technical support.

+

500

+

ModelArts.7009

+

call {apiType} api failed

+

The workspace is abnormal.

+

System error. Contact technical support.

+

500

+

ModelArts.7100

+

get enterprise project id failed

+

EPS request error.

+

System error. Contact technical support.

+

500

+

ModelArts.7101

+

Call pdp auth api failed.

+

PDP request error.

+

System error. Contact technical support.

+
+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0097.html b/modelarts/api-ref/modelarts_03_0097.html new file mode 100644 index 00000000..1e284f54 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0097.html @@ -0,0 +1,20 @@ + + +

Change History

+
+
+ + + + + + + +

Released On

+

Description

+

2021-04-30

+

This is the first official release.

+
+
+
+ diff --git a/modelarts/api-ref/modelarts_03_0108.html b/modelarts/api-ref/modelarts_03_0108.html new file mode 100644 index 00000000..3f052f28 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0108.html @@ -0,0 +1,25 @@ + + +

DevEnviron

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0109.html b/modelarts/api-ref/modelarts_03_0109.html new file mode 100644 index 00000000..1f4930bd --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0109.html @@ -0,0 +1,83 @@ + + +

Querying the Authentication Information of a Development Environment Instance

+

Function

This API is used to query the authentication information of a development environment instance, which is used to open the development environment instance.

+
+

URI

GET /v1/{project_id}/demanager/instances/{instance_id}/token

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID

+

instance_id

+

Yes

+

String

+

Instance ID. It is the ID of the development environment instance you create. The instance ID is contained in the response body returned after the instance has been created. For details, see Creating a Development Environment Instance. If the development environment instance is created on the ModelArts management console, you can view the instance ID on the console.

+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

token

+

String

+

Authentication token

+
+
+
+
+

Samples

The following shows how to obtain the authentication token of instance 6fa459ea-ee8a-3ca4-894e-db77e160355e.

+ +
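For reference, a minimal sketch of such a request in Python is shown below; the endpoint host, project ID, and IAM token are illustrative placeholders rather than values from this document.

import requests

# All values below are illustrative placeholders, not values taken from this document.
endpoint = "https://modelarts.example.com"            # region endpoint (assumption)
project_id = "your-project-id"
instance_id = "6fa459ea-ee8a-3ca4-894e-db77e160355e"

url = f"{endpoint}/v1/{project_id}/demanager/instances/{instance_id}/token"
headers = {"X-Auth-Token": "your-iam-token"}          # IAM token obtained beforehand

resp = requests.get(url, headers=headers)
resp.raise_for_status()
print(resp.json().get("token"))                       # authentication token described in Table 2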
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0110.html b/modelarts/api-ref/modelarts_03_0110.html new file mode 100644 index 00000000..d5e46fc8 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0110.html @@ -0,0 +1,1446 @@ + + +

Creating a Development Environment Instance

+

Function

This API is used to create a development environment instance for code development.

+

Calling this API is an asynchronous operation. The job status can be obtained by calling the API described in Querying the Details About a Development Environment Instance.

+
+
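Because the operation is asynchronous, a caller typically polls the instance details until the instance leaves its transient state. The following Python sketch illustrates one way to do this; the detail URL, status names, and polling interval are assumptions for illustration only.

import time
import requests

def wait_for_instance(endpoint, project_id, instance_id, token,
                      interval=10, timeout=600):
    """Poll the instance-details API until the instance leaves a transient state.

    The detail URL and the transient status names used here are assumptions
    for illustration; check the instance-details API for the exact values.
    """
    url = f"{endpoint}/v1/{project_id}/demanager/instances/{instance_id}"
    headers = {"X-Auth-Token": token}
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = requests.get(url, headers=headers).json().get("status")
        if status not in ("CREATING", "STARTING"):
            return status
        time.sleep(interval)
    raise TimeoutError("Instance did not reach a stable state in time")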

URI

POST /v1/{project_id}/demanager/instances

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

name

+

Yes

+

String

+

Instance name, which contains a maximum of 64 characters.

+

The value can contain letters, digits, hyphens (-), and underscores (_).

+

profile_id

+

Yes

+

String

+

Configuration ID. The options are as follows:

+
  • Ascend-Power-Engine 1.0(python3)
  • Multi-Engine 1.0 (python3)-cpu
  • Multi-Engine 1.0 (python3)-gpu
  • Multi-Engine 2.0 (python3)
+

description

+

No

+

String

+

Instance description. The value contains a maximum of 512 characters and cannot contain the following special characters: &<>"'/. By default, this parameter is left blank.

+

flavor

+

Yes

+

String

+

Instance flavor. The options are as follows:

+
  • modelarts.kat1.xlarge
  • modelarts.vm.cpu.2u
  • modelarts.vm.cpu.8u
  • modelarts.vm.gpu.v100
+

The preceding flavors are for reference only. The actual flavors depend on the region. If you need to use other flavors, contact the administrator to change the flavors.

+

spec

+

Yes

+

Object

+

Instance definition. Only Notebook is supported. For example, see Table 3.

+

workspace

+

No

+

Object

+

Workspace. The default workspace ID is 0. For details, see Table 7.

+

pool

+

No

+

Object

+

Resource pool. For details, see Table 8.

+

ai_project

+

No

+

Object

+

AI project. For details, see Table 10.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 notebook parameters

Parameter

+

Mandatory

+

Type

+

Description

+

storage

+

Yes

+

Object

+

Storage path. For details, see Table 4.

+

extend_storage

+

No

+

Array<Storage>

+

Extended storage list. For details, see Table 4. Extended storage supports only obsfs and is available only for certain dedicated resource pools.

+

auto_stop

+

No

+

Object

+

Auto stop parameter. For details, see Table 6.

+

annotations

+

No

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 4 storage parameters

Parameter

+

Mandatory

+

Type

+

Description

+

type

+

Yes

+

String

+

Storage type.

+

Only obs and evs are supported.

+

location

+

No

+

Object

+

Storage location. If type is set to obs, this parameter is mandatory. See Table 5. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + +
Table 5 location parameters

Parameter

+

Mandatory

+

Type

+

Description

+

path

+

No

+

String

+

Storage path

+
  • If type is set to obs, this parameter is mandatory. The value must be a valid OBS bucket path and end with a slash (/). The value must be a specific directory in an OBS bucket rather than the root directory of an OBS bucket.
+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 auto_stop parameters

Parameter

+

Mandatory

+

Type

+

Description

+

enable

+

No

+

Boolean

+

Whether to enable the auto stop function. The value true indicates that the function is enabled and the instance will automatically stop when the running duration is reached. The value false indicates that the function is disabled. The default value is false.

+

duration

+

No

+

Integer

+

Running duration, in seconds. The value ranges from 3,600 to 86,400. After this parameter is set, it is valid for each startup. This parameter is mandatory when enable is set to true.

+

prompt

+

No

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to determine whether to display a prompt again. The default value is true.

+
+
+ +
+ + + + + + + + + + + +
Table 7 workspace parameters

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8 poolSpec parameters

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

Yes

+

String

+

Resource pool ID.

+

type

+

No

+

String

+

Resource pool type.

+

name

+

No

+

String

+

Resource pool name.

+

owner

+

No

+

Object

+

Owner of the resource pool. For details, see Table 9.

+
+
+ +
+ + + + + + + + + + + +
Table 9 userSpec parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

No

+

String

+

ID of the project to which the resource belongs.

+
+
+ +
+ + + + + + + + + + + +
Table 10 AIProject parameters

Parameter

+

Mandatory

+

Type

+

Description

+

id

+

No

+

String

+

AI project ID. This parameter is reserved.

+
+
+

Response Body

Table 11 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 11 Parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

description

+

String

+

Instance description

+

status

+

String

+

Instance status

+

creation_timestamp

+

String

+

Time when an instance is created

+

latest_update_timestamp

+

String

+

Time when an instance is modified

+

profile

+

Object

+

Configuration information. For details, see Table 12.

+

flavor

+

String

+

Instance flavor

+

flavor_details

+

Object

+

For details about the flavor, see Table 16.

+

pool

+

Object

+

For details about the dedicated resource pool, see Table 17.

+

spec

+

Object

+

Instance definition. For details about the parameters of a notebook instance, see Table 19.

+

workspace

+

Object

+

Workspace. For details, see Table 24.

+

ai_project

+

Object

+

AI project. For details, see Table 25.

+

error_code

+

String

+

Error code. For details, see Error Codes.

+

queuing_info

+

Object

+

Queuing information. For details, see Table 26.

+

user

+

Object

+

User information. For details, see Table 27.

+

repository

+

Object

+

Git repository information. For details, see Table 28. This parameter cannot be used. It is automatically returned when the API is called.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 12 profile parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Configuration ID

+

name

+

String

+

Configuration name

+

description

+

String

+

Configuration description

+

de_type

+

String

+

Development environment type. Only Notebook is supported.

+

flavor_type

+

String

+

Hardware, which can be CPU, GPU, or Ascend.

+

provision

+

Object

+

Deployment information. For details, see Table 13.

+

labels

+

Map<String,String>

+

Label

+
+
+ +
+ + + + + + + + + + + + + +
Table 13 provision parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Deployment type. Only Docker is supported.

+

spec

+

Object

+

Deployment details. For details, see Table 14.

+
+
+ +
+ + + + + + + + + + + + + +
Table 14 spec parameters

Parameter

+

Type

+

Description

+

engine

+

String

+

Deployment engine. Only CCE is supported.

+

params

+

Object

+

Deployment parameters. Only Docker is supported. For details, see Table 15.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 15 Docker deployment parameters

Parameter

+

Type

+

Description

+

namespace

+

String

+

SWR organization name, which is globally unique

+

image_name

+

String

+

Image name

+

image_tag

+

String

+

Image tag

+

annotations

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 16 flavor_details parameters

Parameter

+

Type

+

Description

+

name

+

String

+

Flavor name

+

status

+

String

+

Flavor sale status. The options are as follows:

+
  • onSale
  • soldOut
+

queuing_num

+

Integer

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

queue_left_time

+

Integer

+

Left queuing time, in seconds

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

storage_list

+

Array<Storage type>

+

Supported storage type. The options are as follows:

+
  • obs
  • evs
+

is_permitted

+

Boolean

+

Whether the current user has the permission to use this flavor

+

type

+

String

+

Flavor type. The options are as follows:

+
  • GPU
  • CPU
  • ASCEND
+

params

+

Dict

+

Parameters describing the flavor

+

promo_type

+

String

+

Promotion type. The options are as follows:

+
  • Free
  • NoDiscount
+

instance_num

+

Integer

+

Number of instances of this flavor that the current user has created

+

duration

+

Integer

+

Auto stop time after startup, in seconds

+

store_time

+

Integer

+

Maximum retention period of an inactive instance of this flavor in the database, in hours

+

The default value is -1, indicating that the instance can be permanently saved.

+

billing_flavor

+

String

+

Billing specifications. If this field is left blank, the flavor name is used for billing.

+

billing_params

+

Integer

+

Billing ratio. This parameter is mandatory when billing_flavor is specified.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 17 pool parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of a resource pool

+

name

+

String

+

Name of a resource pool

+

type

+

String

+

Type of a resource pool. USER_DEFINED indicates a dedicated resource pool.

+

owner

+

Object

+

Owner of the resource pool. This parameter is mandatory when type is set to USER_DEFINED. For details, see Table 18.

+
+
+ +
+ + + + + + + + + +
Table 18 owner parameters

Parameter

+

Type

+

Description

+

project_id

+

String

+

Project ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 19 notebook parameters

Parameter

+

Type

+

Description

+

log_path

+

String

+

Path for storing custom image logs

+

custom_script_path

+

String

+

Path for storing custom initialization scripts used when a notebook instance is started

+

storage

+

Object

+

Storage path. For details, see Table 20.

+

credential

+

Object

+

AK and SK for accessing OBS. For details, see Table 30.

+

repository

+

Object

+

Git repository information. For details, see Table 28. This parameter cannot be used. It is automatically returned when the API is called.

+

resource_reserved_timestamp

+

Integer

+

Time when the resource is reserved

+

auto_stop

+

Object

+

Auto stop parameter. For details, see Table 23.

+

failed_reasons

+

Object

+

Cause for a creation or startup failure. For details, see Table 22.

+

annotations

+

Map<String,String>

+

Annotations

+

The generated URL cannot be directly accessed.

+

extend_params

+

Map<String,String>

+

Extended parameter

+
+
+ +
+ + + + + + + + + + + + + +
Table 20 storage parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Storage type.

+

Only obs and evs are supported.

+

location

+

Object

+

Storage location. If type is set to obs, this parameter is mandatory. See Table 21. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + +
Table 21 location parameters

Parameter

+

Type

+

Description

+

path

+

String

+

Storage path

+
  • If type is set to obs, this parameter is mandatory. The value must be a valid OBS bucket path and end with a slash (/). The value must be a specific directory in an OBS bucket rather than the root directory of an OBS bucket.
+

volume_size

+

Integer

+

If type is set to obs, this parameter does not need to be set.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 22 failed_reasons parameters

Parameter

+

Type

+

Description

+

code

+

String

+

Error code.

+

message

+

String

+

Error message

+

detail

+

Map<String,String>

+

Error details

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 23 auto_stop parameters

Parameter

+

Type

+

Description

+

enable

+

Boolean

+

Whether to enable the auto stop function

+

duration

+

Integer

+

Running duration, in seconds

+

prompt

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to use.

+

stop_timestamp

+

Integer

+

Time when the instance stops. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining time before actual stop, in seconds

+
+
+ +
+ + + + + + + + + +
Table 24 workspace parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID

+
+
+ +
+ + + + + + + + + +
Table 25 AIProject parameters

Parameter

+

Type

+

Description

+

id

+

String

+

AI project ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 26 queuing_info parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

de_type

+

String

+

Development environment type. By default, all types are returned.

+

Only Notebook is supported.

+

flavor

+

String

+

Instance flavor. By default, all types are returned.

+

flavor_details

+

Object

+

Flavor details, which display the flavor information and whether the flavor is sold out. For details, see Table 16.

+

status

+

String

+

Instance status. By default, all statuses are returned, including:

+
  • CREATE_QUEUING
  • START_QUEUING
+

begin_timestamp

+

Integer

+

Time when an instance starts queuing. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining queuing time, in seconds

+

end_timestamp

+

Integer

+

Time when an instance completes queuing. The value is a 13-digit timestamp.

+

rank

+

Integer

+

Ranking of an instance in a queue

+
+
+ +
+ + + + + + + + + + + + + +
Table 27 user parameters

Parameter

+

Type

+

Description

+

id

+

String

+

User ID

+

name

+

String

+

Username

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 28 repository parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Repository ID

+

branch

+

String

+

Repository branch

+

user_name

+

String

+

Repository username

+

user_email

+

String

+

Repository user mailbox

+

type

+

String

+

Repository type. The options are CodeClub and GitHub.

+

connection_info

+

Object

+

Repository link information. For details, see Table 29.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 29 connection_info parameters

Parameter

+

Type

+

Description

+

protocol

+

String

+

Repository link protocol. The options are ssh and https.

+

url

+

String

+

Repository link address

+

credential

+

Object

+

Certificate information. For details, see Table 30.

+
+
+ +
+ + + + + + + + + + + + + +
Table 30 credential parameters

Parameter

+

Type

+

Description

+

ssh_private_key

+

String

+

SSH private certificate

+

access_token

+

String

+

OAuth token of GitHub

+
+
+
+

Samples

The following shows how to create an instance named notebook-instance based on the profile whose ID is Python3-gpu.
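A request sketch is shown below. It assumes the creation path /v1/{project_id}/demanager/instances (the same base path used by the instance list API) and uses name and profile_id as illustrative field names derived from this sample's description; {endpoint}, {project_id}, and all field values are placeholders, so refer to the request body table of this API for the authoritative schema.

POST https://{endpoint}/v1/{project_id}/demanager/instances

{
    "name": "notebook-instance",
    "profile_id": "Python3-gpu",
    "description": "sample notebook instance"
}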

+ + +
+ +

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0111.html b/modelarts/api-ref/modelarts_03_0111.html new file mode 100644 index 00000000..baf29209 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0111.html @@ -0,0 +1,1224 @@ + + +

Querying a List of Development Environment Instances

+

Function

This API is used to query the development environment instances that meet the search criteria.

+
+

URI

GET /v1/{project_id}/demanager/instances?de_type={de_type}&provision_type={provision_type}&status={status}&sortby={sortby}&order={order}&offset={offset}&limit={limit}

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+
+
+
+

Request Body

Table 2 describes the request parameters.

+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

de_type

+

Yes

+

String

+

Development environment type. Only Notebook is supported. The first letter must be capitalized.

+

provision_type

+

No

+

String

+

Deployment type. Only Docker is supported.

+

status

+

No

+

String

+

Instance status. By default, all statuses are returned. The options are as follows:

+
  • CREATING: The instance is being created.
  • CREATE_FAILED: The instance fails to be created.
  • STARTING: The instance is being started.
  • RESTARTING: The instance is being restarted.
  • START_FAILED: The instance fails to be started.
  • RUNNING: The instance is running.
  • STOPPING: The instance is being stopped.
  • STOPPED: The instance has been stopped.
  • UNAVAILABLE: The instance is unavailable.
  • DELETED: The instance has been deleted.
  • RESIZING: The instance specifications are being changed.
  • RESIZE_FAILED: The instance specifications fail to be changed.
+

sortby

+

No

+

String

+

Sorting field. The value can be name or creation_timestamp. The default value is name.

+

order

+

No

+

String

+

Sorting mode. The value can be asc or desc. The default value is asc.

+

offset

+

No

+

Integer

+

Start index. The default value is 0.

+

limit

+

No

+

Integer

+

Number of returned result records. The value range is (0, ∞). The default value is 0, which indicates that all results are returned.

+

workspace_id

+

No

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+

show_self

+

No

+

String

+

Whether to display only the instances of the current user. The default value is true.

+

ai_project

+

No

+

String

+

AI project ID

+

pool_id

+

No

+

String

+

ID of a dedicated resource pool. This parameter can be used to query information about all instances in a dedicated resource pool.

+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

total_count

+

Integer

+

Total number of development environments.

+

instances

+

Array

+

Instance list. For details, see Table 4.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 4 instances parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

description

+

String

+

Instance description

+

status

+

String

+

Instance status

+

creation_timestamp

+

String

+

Time when an instance is created

+

profile

+

Object

+

Configuration information. For details, see Table 5.

+

flavor

+

String

+

Instance flavor

+

spec

+

Object

+

Instance definition. For details about parameters of a notebook instance, see Table 13.

+

workspace

+

Object

+

Workspace. For details, see Table 17.

+

latest_update_timestamp

+

String

+

Time when an instance is modified

+

flavor_details

+

Object

+

Flavor details. For details, see Table 9.

+

pool

+

Object

+

Dedicated resource pool. For details, see Table 10.

+

ai_project

+

Object

+

AI project. For details, see Table 11.

+

error_code

+

String

+

Error code. For details, see Error Codes.

+

queuing_info

+

Object

+

Queuing information. For details, see Table 18.

+

user

+

Object

+

User information. For details, see Table 19.

+

repository

+

Object

+

Git repository information. For details, see Table 20. This parameter cannot be set in a request; it is automatically returned when the API is called.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 profile parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Configuration ID

+

name

+

String

+

Configuration name

+

description

+

String

+

Configuration description

+

de_type

+

String

+

Development environment type. Only Notebook is supported.

+

provision

+

Object

+

Deployment information. For details, see Table 6.

+

labels

+

Map<String,String>

+

Label information, which can be extended

+

flavor_type

+

String

+

Hardware, which can be CPU, GPU, or Ascend.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 6 provision parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Deployment type. Only Docker is supported.

+

spec

+

Object

+

Deployment details. For details, see Table 7.

+

annotations

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 spec parameters

Parameter

+

Type

+

Description

+

engine

+

String

+

Deployment engine. Only CCE is supported.

+

params

+

Object

+

Deployment parameters. Only Docker is supported. For details, see Table 8.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 Docker deployment parameters

Parameter

+

Type

+

Description

+

namespace

+

String

+

SWR organization name, which is globally unique

+

image_name

+

String

+

Image name

+

image_tag

+

String

+

Image tag

+

annotations

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 9 flavor_details parameters

Parameter

+

Type

+

Description

+

name

+

String

+

Flavor name

+

status

+

String

+

Flavor sale status. The options are as follows:

+
  • onSale
  • soldOut
+

queuing_num

+

Integer

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

queue_left_time

+

Integer

+

Remaining queuing time, in seconds

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

storage_list

+

Array<Storage type>

+

Supported storage type. The options are obs, evs, and efs.

+

is_permitted

+

Boolean

+

Whether the current user has the permission to use this flavor

+

type

+

String

+

Flavor type. The options are as follows:

+
  • GPU
  • CPU
  • ASCEND
+

params

+

Dict

+

Parameters describing the flavor

+

promo_type

+

String

+

Promotion type. The options are as follows:

+
  • Free
  • NoDiscount
+

instance_num

+

Integer

+

Number of instances of this flavor created by the current user

+

duration

+

Integer

+

Auto stop time after startup, in seconds

+

store_time

+

Integer

+

Maximum retention period of an inactive instance of this flavor in the database, in hours

+

The default value is -1, indicating that the instance can be permanently saved.

+

billing_flavor

+

String

+

Billing specifications. If this field is left blank, the specification name is used for billing.

+

billing_params

+

Integer

+

Billing ratio. This parameter is mandatory when billing_flavor is specified.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 10 pool parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of a resource pool

+

name

+

String

+

Name of a resource pool

+

type

+

String

+

Type of a resource pool. USER_DEFINED indicates a dedicated resource pool.

+

owner

+

Object

+

This parameter is mandatory when type is set to USER_DEFINED. For details, see Table 12.

+
+
+ +
+ + + + + + + + + +
Table 11 AIProject parameters

Parameter

+

Type

+

Description

+

id

+

String

+

AI project ID

+
+
+ +
+ + + + + + + + + +
Table 12 owner parameters

Parameter

+

Type

+

Description

+

project_id

+

String

+

Project ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 13 notebook parameters

Parameter

+

Type

+

Description

+

auto_stop

+

Object

+

Auto stop parameter. For details, see Table 16.

+

annotations

+

Map<String,String>

+

Annotations

+

The generated URL cannot be directly accessed.

+

failed_reasons

+

Object

+

Cause for a creation or startup failure. See Table 15.

+

extend_params

+

Map<String,String>

+

Extended parameter

+
+
+ +
+ + + + + + + + + + + + + + + + +
Table 14 location parameters

Parameter

+

Mandatory

+

Type

+

Description

+

path

+

No

+

String

+

Storage path.

+
  • If type is set to obs, this parameter is mandatory. The value must be a valid OBS bucket path and end with a slash (/). The value must be a specific directory in an OBS bucket rather than the root directory of an OBS bucket.
+

volume_size

+

No

+

Integer

+

If type is set to obs, this parameter does not need to be set.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 15 failed_reasons parameters

Parameter

+

Type

+

Description

+

code

+

String

+

Error code

+

message

+

String

+

Error message

+

detail

+

Map<String,String>

+

Error details

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 16 auto_stop parameters

Parameter

+

Type

+

Description

+

enable

+

Boolean

+

Whether to enable the auto stop function

+

duration

+

Integer

+

Running duration, in seconds

+

prompt

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to use.

+

stop_timestamp

+

Integer

+

Time when the instance stops. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining time before actual stop, in seconds

+
+
+ +
+ + + + + + + + + +
Table 17 workspace parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 18 queuing_info parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

de_type

+

String

+

Development environment type. By default, all types are returned.

+

Only Notebook is supported.

+

flavor

+

String

+

Instance flavor. By default, all types are returned.

+

flavor_details

+

Object

+

Flavor details, which display the flavor information and whether the flavor is sold out. For details, see Table 9.

+

status

+

String

+

Instance status. By default, all statuses are returned, including:

+
  • CREATE_QUEUING
  • START_QUEUING
+

begin_timestamp

+

Integer

+

Time when an instance starts queuing. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining queuing time, in seconds

+

end_timestamp

+

Integer

+

Time when an instance completes queuing. The value is a 13-digit timestamp.

+

rank

+

Integer

+

Ranking of an instance in a queue

+
+
+ +
+ + + + + + + + + + + + + +
Table 19 user parameters

Parameter

+

Type

+

Description

+

id

+

String

+

User ID

+

name

+

String

+

Username

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 20 repository parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Repository ID

+

branch

+

String

+

Repository branch

+

user_name

+

String

+

Repository username

+

user_email

+

String

+

Repository user mailbox

+

type

+

String

+

Repository type. The options are CodeClub and GitHub.

+

connection_info

+

Object

+

Repository link information. For details, see Table 21.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 21 connection_info parameters

Parameter

+

Type

+

Description

+

protocol

+

String

+

Repository link protocol. The options are ssh and https.

+

url

+

String

+

Repository link address

+

credential

+

Object

+

Certificate information. For details, see Table 22.

+
+
+ +
+ + + + + + + + + + + + + +
Table 22 credential parameters

Parameter

+

Type

+

Description

+

ssh_private_key

+

String

+

SSH private certificate

+

access_token

+

String

+

OAuth token of GitHub

+
+
+
+

Samples

The following shows how to query the list of Notebook development environments.
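A sample request is sketched below; {endpoint} and {project_id} are placeholders, and the query parameters are those documented in Table 2.

GET https://{endpoint}/v1/{project_id}/demanager/instances?de_type=Notebook&provision_type=Docker&status=RUNNING&sortby=name&order=asc&offset=0&limit=10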

+ +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0112.html b/modelarts/api-ref/modelarts_03_0112.html new file mode 100644 index 00000000..bd8a6fa0 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0112.html @@ -0,0 +1,1070 @@ + + +

Querying the Details About a Development Environment Instance

+

Function

This API is used to query the details about a development environment instance.

+
+

URI

GET /v1/{project_id}/demanager/instances/{instance_id}

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

instance_id

+

Yes

+

String

+

Instance ID

+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

description

+

String

+

Instance description

+

status

+

String

+

Instance status

+

creation_timestamp

+

String

+

Time when an instance is created

+

profile

+

Object

+

Configuration information. For details, see Table 3.

+

flavor

+

String

+

Instance flavor

+

spec

+

Object

+

Instance specifications of the notebook type. For details, see Table 11.

+

workspace

+

Object

+

Workspace. For details, see Table 16.

+

latest_update_timestamp

+

String

+

Time when an instance is modified

+

flavor_details

+

Object

+

Flavor details. See Table 7.

+

pool

+

Object

+

Dedicated resource pool. See Table 8.

+

ai_project

+

Object

+

AI project. For details, see Table 9.

+

error_code

+

String

+

Error code. For details, see Error Codes.

+

queuing_info

+

Object

+

Queuing information. For details, see Table 17.

+

user

+

Object

+

User information. For details, see Table 18.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 profile parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Configuration ID

+

name

+

String

+

Configuration name

+

description

+

String

+

Configuration description

+

de_type

+

String

+

Development environment type. Only Notebook is supported.

+

provision

+

Object

+

Deployment information. For details, see Table 4.

+

labels

+

Map<String,String>

+

Label information

+

flavor_type

+

String

+

Hardware, which can be CPU, GPU, or Ascend.

+
+
+ +
+ + + + + + + + + + + + + +
Table 4 provision parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Deployment type. Only Docker is supported.

+

spec

+

Object

+

Deployment details. For details, see Table 5.

+
+
+ +
+ + + + + + + + + + + + + +
Table 5 spec parameters

Parameter

+

Type

+

Description

+

engine

+

String

+

Deployment engine. Only CCE is supported.

+

params

+

Object

+

Deployment parameters. Only Docker is supported. For details, see Table 6.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 6 Docker deployment parameters

Parameter

+

Type

+

Description

+

namespace

+

String

+

SWR organization name, which is globally unique

+

image_name

+

String

+

Image name

+

image_tag

+

String

+

Image tag

+

annotations

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7 flavor_details parameters

Parameter

+

Type

+

Description

+

name

+

String

+

Flavor name

+

status

+

String

+

Flavor sale status. The options are as follows:

+
  • onSale
  • soldOut
+

queuing_num

+

Integer

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

queue_left_time

+

Integer

+

Remaining queuing time, in seconds

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

storage_list

+

Array<Storage type>

+

Supported storage type. The options are obs, evs, and efs.

+

is_permitted

+

Boolean

+

Whether the current user has the permission to use this flavor

+

type

+

String

+

Flavor type. The options are as follows:

+
  • GPU
  • CPU
  • ASCEND
+

params

+

Dict

+

Parameters describing the flavor

+

promo_type

+

String

+

Promotion type. The options are as follows:

+
  • Free
  • NoDiscount
+

instance_num

+

Integer

+

Number of instances of this flavor created by the current user

+

duration

+

Integer

+

Auto stop time after startup, in seconds

+

store_time

+

Integer

+

Maximum retention period of an inactive instance of this flavor in the database, in hours

+

The default value is -1, indicating that the instance can be permanently saved.

+

billing_flavor

+

String

+

Billing specifications. If this field is left blank, the specification name is used for billing.

+

billing_params

+

Integer

+

Billing ratio. This parameter is mandatory when billing_flavor is specified.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 8 pool parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of a resource pool

+

name

+

String

+

Name of a resource pool

+

type

+

String

+

Type of a resource pool. USER_DEFINED indicates a dedicated resource pool.

+

owner

+

Object

+

This parameter is mandatory when type is set to USER_DEFINED. For details, see Table 10.

+
+
+ +
+ + + + + + + + + +
Table 9 AIProject parameters

Parameter

+

Type

+

Description

+

id

+

String

+

AI project ID

+
+
+ +
+ + + + + + + + + +
Table 10 owner parameters

Parameter

+

Type

+

Description

+

project_id

+

String

+

Project ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 11 notebook parameters

Parameter

+

Type

+

Description

+

log_path

+

String

+

Path for storing custom image logs

+

custom_script_path

+

String

+

Path for storing custom initialization scripts used when a notebook instance is started

+

storage

+

Object

+

Storage path. For details, see Table 20.

+

credential

+

Object

+

AK and SK for accessing OBS. For details, see Table 30.

+

repository

+

Object

+

Git repository information. For details, see Table 28. This parameter cannot be set in a request; it is automatically returned when the API is called.

+

resource_reserved_timestamp

+

Integer

+

Time when the resource is reserved

+

auto_stop

+

Object

+

Auto stop parameter. For details, see Table 23.

+

failed_reasons

+

Object

+

Cause for a creation or startup failure. For details, see Table 22.

+

annotations

+

Map<String,String>

+

Annotations

+

The generated URL cannot be directly accessed.

+

extend_params

+

Map<String,String>

+

Extended parameter

+
+
+ +
+ + + + + + + + + + + + + +
Table 12 storage parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Storage type.

+

Only obs and evs are supported.

+

location

+

Object

+

Storage location. If type is set to obs, this parameter is mandatory. See Table 21. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + + + +
Table 13 location parameters

Parameter

+

Type

+

Description

+

path

+

String

+

Storage path

+
  • If type is set to obs, this parameter is mandatory. The value must be a valid OBS bucket path and end with a slash (/). The value must be a specific directory in an OBS bucket rather than the root directory of an OBS bucket.
+

volume_size

+

Integer

+

If type is set to obs, this parameter does not need to be set.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 auto_stop parameters

Parameter

+

Type

+

Description

+

enable

+

Boolean

+

Whether to enable the auto stop function

+

duration

+

Integer

+

Running duration, in seconds

+

prompt

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to use.

+

stop_timestamp

+

Integer

+

Time when the instance stops. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining time before actual stop, in seconds

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 15 failed_reasons parameters

Parameter

+

Type

+

Description

+

code

+

String

+

Error code

+

message

+

String

+

Error message

+

detail

+

Map<String,String>

+

Error details

+
+
+ +
+ + + + + + + + + +
Table 16 workspace parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 17 queuing_info parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

de_type

+

String

+

Development environment type. By default, all types are returned.

+

Only Notebook is supported.

+

flavor

+

String

+

Instance flavor. By default, all types are returned.

+

flavor_details

+

Object

+

Flavor details, which display the flavor information and whether the flavor is sold out. For details, see Table 16.

+

status

+

String

+

Instance status. By default, all statuses are returned, including:

+
  • CREATE_QUEUING
  • START_QUEUING
+

begin_timestamp

+

Integer

+

Time when an instance starts queuing. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining queuing time, in seconds

+

end_timestamp

+

Integer

+

Time when an instance completes queuing. The value is a 13-digit timestamp.

+

rank

+

Integer

+

Ranking of an instance in a queue

+
+
+ +
+ + + + + + + + + + + + + +
Table 18 user parameters

Parameter

+

Type

+

Description

+

id

+

String

+

User ID

+

name

+

String

+

Username

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 19 repository parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Repository ID

+

branch

+

String

+

Repository branch

+

user_name

+

String

+

Repository username

+

user_email

+

String

+

Repository user mailbox

+

type

+

String

+

Repository type. The options are CodeClub and GitHub.

+

connection_info

+

Object

+

Repository link information. For details, see Table 29.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 20 connection_info parameters

Parameter

+

Type

+

Description

+

protocol

+

String

+

Repository link protocol. The options are ssh and https.

+

url

+

String

+

Repository link address

+

credential

+

Object

+

Certificate information. For details, see Table 30.

+
+
+ +
+ + + + + + + + + + + + + +
Table 21 credential parameters

Parameter

+

Type

+

Description

+

ssh_private_key

+

String

+

SSH private certificate

+

access_token

+

String

+

OAuth token of GitHub

+
+
+
+

Samples

The following shows how to obtain the details about instance 6fa459ea-ee8a-3ca4-894e-db77e160355e.
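A sample request is sketched below; {endpoint} and {project_id} are placeholders.

GET https://{endpoint}/v1/{project_id}/demanager/instances/6fa459ea-ee8a-3ca4-894e-db77e160355e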

+ + +
+ +

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0113.html b/modelarts/api-ref/modelarts_03_0113.html new file mode 100644 index 00000000..4ba2f8db --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0113.html @@ -0,0 +1,1173 @@ + + +

Modifying the Description of a Development Environment Instance

+

Function

This API is used to modify the description of a development environment instance or information about the auto stop function.

+
+

URI

PUT /v1/{project_id}/demanager/instances/{instance_id}

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

instance_id

+

Yes

+

String

+

Instance ID

+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

description

+

No

+

String

+

Configuration description. The value contains a maximum of 512 characters and cannot contain the following special characters: &<>"'/.

+

spec

+

No

+

Object

+

Instance definition. For details, see Table 3.

+
+
+
+ +
+ + + + + + + + + + + +
Table 3 spec parameters

Parameter

+

Mandatory

+

Type

+

Description

+

auto_stop

+

No

+

Object

+

Auto stop parameter. For details, see Table 4.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 4 auto_stop parameters

Parameter

+

Mandatory

+

Type

+

Description

+

enable

+

No

+

Boolean

+

Whether to enable the auto stop function. The value true indicates that the function is enabled and the instance will automatically stop when the running duration is reached. The value false indicates that the function is disabled. The default value is false.

+

duration

+

No

+

Integer

+

Running duration, in seconds. The value ranges from 3,600 to 86,400. After this parameter is set, it is valid for each startup. This parameter is mandatory when enable is set to true. If the current instance status is Running, the modifications of this parameter take effect only after the next startup.

+

prompt

+

No

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to determine whether to display a prompt again. The default value is true.

+
+
+
+

Response Body

Table 5 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5 Parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

description

+

String

+

Instance description

+

status

+

String

+

Instance status

+

creation_timestamp

+

String

+

Time when an instance is created

+

latest_update_timestamp

+

String

+

Time when an instance is modified

+

profile

+

Object

+

Configuration information. For details, see Table 12.

+

flavor

+

String

+

Instance flavor

+

flavor_details

+

Object

+

For details about the flavor, see Table 16.

+

pool

+

Object

+

For details about the dedicated resource pool, see Table 17.

+

spec

+

Object

+

Instance definition. For details about parameters of a notebook instance, see Table 19.

+

workspace

+

Object

+

Workspace. For details, see Table 24.

+

ai_project

+

Object

+

AI project. For details, see Table 25.

+

error_code

+

String

+

Error code. For details, see Error Codes.

+

queuing_info

+

Object

+

Queuing information. For details, see Table 26.

+

user

+

Object

+

User information. For details, see Table 27.

+

repository

+

Object

+

Git repository information. For details, see Table 28. This parameter cannot be set in a request; it is automatically returned when the API is called.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 6 profile parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Configuration ID

+

name

+

String

+

Configuration name

+

description

+

String

+

Configuration description

+

de_type

+

String

+

Development environment type. Only Notebook is supported.

+

provision

+

Object

+

Deployment information. For details, see Table 4.

+

labels

+

Map<String,String>

+

Label information

+

flavor_type

+

String

+

Hardware, which can be CPU, GPU, or Ascend.

+
+
+ +
+ + + + + + + + + + + + + +
Table 7 provision parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Deployment type. Only Docker is supported.

+

spec

+

Object

+

Deployment details. For details, see Table 8.

+
+
+ +
+ + + + + + + + + + + + + +
Table 8 spec parameters

Parameter

+

Type

+

Description

+

engine

+

String

+

Deployment engine. Only CCE is supported.

+

params

+

Object

+

Deployment parameters. Only Docker is supported. For details, see Table 9.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 9 Docker deployment parameters

Parameter

+

Type

+

Description

+

namespace

+

String

+

SWR organization name, which is globally unique

+

image_name

+

String

+

Image name

+

image_tag

+

String

+

Image tag

+

annotations

+

Map<String,String>

+

Label information, which can be extended. By default, this parameter is left blank.

+
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 10 flavor_details parameters

Parameter

+

Type

+

Description

+

name

+

String

+

Flavor name

+

status

+

String

+

Flavor sale status. The options are as follows:

+
  • onSale
  • soldOut
+

queuing_num

+

Integer

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

queue_left_time

+

Integer

+

Remaining queuing time, in seconds

+

This parameter is mandatory when promo_type is set to Free and status is set to soldOut.

+

storage_list

+

Array<Storage type>

+

Supported storage type. The options are as follows:

+
  • obs
  • evs
+

is_permitted

+

Boolean

+

Whether the current user has the permission to use this flavor

+

type

+

String

+

Flavor type. The options are as follows:

+
  • GPU
  • CPU
  • ASCEND
+

params

+

Dict

+

Parameters describing the flavor

+

promo_type

+

String

+

Promotion type. The options are as follows:

+
  • Free
  • NoDiscount
+

instance_num

+

Integer

+

Number of instances of this flavor created by the current user

+

duration

+

Integer

+

Auto stop time after startup, in seconds

+

store_time

+

Integer

+

Maximum retention period of an inactive instance of this flavor in the database, in hours

+

The default value is -1, indicating that the instance can be permanently saved.

+

billing_flavor

+

String

+

Billing specifications. If this field is left blank, the specification name is used for billing.

+

billing_params

+

Integer

+

Billing ratio. This parameter is mandatory when billing_flavor is specified.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + +
Table 11 pool parameters

Parameter

+

Type

+

Description

+

id

+

String

+

ID of a resource pool

+

name

+

String

+

Name of a resource pool

+

type

+

String

+

Type of a resource pool. USER_DEFINED indicates a dedicated resource pool.

+

owner

+

Object

+

This parameter is mandatory when type is set to USER_DEFINED. For details, see Table 13.

+
+
+ +
+ + + + + + + + + +
Table 12 AIProject parameters

Parameter

+

Type

+

Description

+

id

+

String

+

AI project ID.

+
+
+ +
+ + + + + + + + + + + +
Table 13 owner parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 14 notebook parameters

Parameter

+

Type

+

Description

+

log_path

+

String

+

Path for storing custom image logs

+

custom_script_path

+

String

+

Path for storing custom initialization scripts used when a notebook instance is started

+

storage

+

Object

+

Storage path. For details, see Table 20.

+

credential

+

Object

+

AK and SK for accessing OBS. For details, see Table 30.

+

repository

+

Object

+

Git repository information. For details, see Table 28. This parameter cannot be set in a request; it is automatically returned when the API is called.

+

resource_reserved_timestamp

+

Integer

+

Time when the resource is reserved

+

auto_stop

+

Object

+

Auto stop parameter. For details, see Table 23.

+

failed_reasons

+

Object

+

Cause for a creation or startup failure. For details, see Table 22.

+

annotations

+

Map<String,String>

+

Annotations

+

The generated URL cannot be directly accessed.

+

extend_params

+

Map<String,String>

+

Extended parameter

+
+
+ +
+ + + + + + + + + + + + + +
Table 15 storage parameters

Parameter

+

Type

+

Description

+

type

+

String

+

Storage type. Only obs and evs are supported.

+

location

+

Object

+

Storage location. If type is set to obs, this parameter is mandatory. See Table 16. By default, this parameter is left blank.

+
+
+ +
+ + + + + + + + + + + +
Table 16 location parameters

Parameter

+

Mandatory

+

Type

+

Description

+

path

+

No

+

String

+

Storage path

+
  • If type is set to obs, this parameter is mandatory. The value must be a valid OBS bucket path and end with a slash (/). The value must be a specific directory in an OBS bucket rather than the root directory of an OBS bucket.
+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 17 auto_stop parameters

Parameter

+

Type

+

Description

+

enable

+

Boolean

+

Whether to enable the auto stop function

+

duration

+

Integer

+

Running duration, in seconds

+

prompt

+

Boolean

+

Whether to display a prompt again. This parameter is provided for the console to use.

+

stop_timestamp

+

Integer

+

Time when the instance stops. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining time before actual stop, in seconds

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 18 failed_reasons parameters

Parameter

+

Type

+

Description

+

code

+

String

+

Error code

+

message

+

String

+

Error message

+

detail

+

Map<String,String>

+

Error details

+
+
+ +
+ + + + + + + + + +
Table 19 workspace parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Workspace ID. If no workspace is created, the default value is 0. If a workspace is created and used, use the actual value.

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 20 queuing_info parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Instance ID

+

name

+

String

+

Instance name

+

de_type

+

String

+

Development environment type. By default, all types are returned.

+

Only Notebook is supported.

+

flavor

+

String

+

Instance flavor. By default, all types are returned.

+

flavor_details

+

Object

+

Flavor details, which display the flavor information and whether the flavor is sold out. For details, see Table 16.

+

status

+

String

+

Instance status. By default, all statuses are returned, including:

+
  • CREATE_QUEUING
  • START_QUEUING
+

begin_timestamp

+

Integer

+

Time when an instance starts queuing. The value is a 13-digit timestamp.

+

remain_time

+

Integer

+

Remaining queuing time, in seconds

+

end_timestamp

+

Integer

+

Time when an instance completes queuing. The value is a 13-digit timestamp.

+

rank

+

Integer

+

Ranking of an instance in a queue

+
+
+ +
+ + + + + + + + + + + + + +
Table 21 user parameters

Parameter

+

Type

+

Description

+

id

+

String

+

User ID

+

name

+

String

+

Username

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 22 repository parameters

Parameter

+

Type

+

Description

+

id

+

String

+

Repository ID

+

branch

+

String

+

Repository branch

+

user_name

+

String

+

Repository username

+

user_email

+

String

+

Repository user mailbox

+

type

+

String

+

Repository type. The options are CodeClub and GitHub.

+

connection_info

+

Object

+

Repository link information. For details, see Table 29.

+
+
+ +
+ + + + + + + + + + + + + + + + + +
Table 23 connection_info parameters

Parameter

+

Type

+

Description

+

protocol

+

String

+

Repository link protocol. The options are ssh and https.

+

url

+

String

+

Repository link address

+

credential

+

Object

+

Certificate information. For details, see Table 30.

+
+
+ +
+ + + + + + + + + + + + + +
Table 24 credential parameters

Parameter

+

Type

+

Description

+

ssh_private_key

+

String

+

SSH private certificate

+

access_token

+

String

+

OAuth token of GitHub

+
+
+
+

Samples

The following shows how to modify the details about instance 6fa459ea-ee8a-3ca4-894e-db77e160355e.
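A request sketch is shown below; {endpoint} and {project_id} are placeholders, and the body uses the description and spec.auto_stop parameters documented in Table 2 to Table 4.

PUT https://{endpoint}/v1/{project_id}/demanager/instances/6fa459ea-ee8a-3ca4-894e-db77e160355e

{
    "description": "new instance description",
    "spec": {
        "auto_stop": {
            "enable": true,
            "duration": 3600
        }
    }
}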

+ +
+ +

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0114.html b/modelarts/api-ref/modelarts_03_0114.html new file mode 100644 index 00000000..8cfda18f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0114.html @@ -0,0 +1,84 @@ + + +

Deleting a Development Environment Instance

+

Function

This API is used to delete a development environment instance.

+
+

URI

DELETE /v1/{project_id}/demanager/instances/{instance_id}

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

instance_id

+

Yes

+

String

+

Instance ID

+
+
+
+

Request Body

None

+
+

Response Body

Table 2 describes the response parameters.

+ +
+ + + + + + + + + +
Table 2 Parameter description

Parameter

+

Type

+

Description

+

instance_id

+

String

+

Instance ID

+
+
+
+

Samples

The following shows how to delete instance 6fa459ea-ee8a-3ca4-894e-db77e160355e.
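A sample request is sketched below; {endpoint} and {project_id} are placeholders.

DELETE https://{endpoint}/v1/{project_id}/demanager/instances/6fa459ea-ee8a-3ca4-894e-db77e160355e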

+ + +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0115.html b/modelarts/api-ref/modelarts_03_0115.html new file mode 100644 index 00000000..8ab2f021 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0115.html @@ -0,0 +1,118 @@ + + +

Managing a Development Environment Instance

+

Function

This API is used to start or stop a notebook instance.

+
+

URI

POST /v1/{project_id}/demanager/instances/{instance_id}/action

+
+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

instance_id

+

Yes

+

String

+

Instance ID

+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

action

+

Yes

+

String

+

Operation on a development environment instance. The options are as follows:

+
  • start
  • stop
+
+
+
+
+

Response Body

Table 3 describes the response parameters.

+ +
+ + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

current_status

+

String

+

Current status of an instance

+

previous_state

+

String

+

Previous status of an instance

+
+
+
+

Samples

The following shows how to start instance 6fa459ea-ee8a-3ca4-894e-db77e160355e.
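A request sketch is shown below; {endpoint} and {project_id} are placeholders, and the action value is one of the options documented in Table 2.

POST https://{endpoint}/v1/{project_id}/demanager/instances/6fa459ea-ee8a-3ca4-894e-db77e160355e/action

{
    "action": "start"
}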

+ + +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0139.html b/modelarts/api-ref/modelarts_03_0139.html new file mode 100644 index 00000000..e4528fe5 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0139.html @@ -0,0 +1,17 @@ + + +

Before You Start

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0140.html b/modelarts/api-ref/modelarts_03_0140.html new file mode 100644 index 00000000..f15c7c5e --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0140.html @@ -0,0 +1,11 @@ + + +

API Calling

+

ModelArts supports Representational State Transfer (REST) APIs, allowing you to call APIs using HTTPS. For details about API calling, see Calling APIs.

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0141.html b/modelarts/api-ref/modelarts_03_0141.html new file mode 100644 index 00000000..9c3ff23c --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0141.html @@ -0,0 +1,47 @@ + + +

Endpoints

+

Endpoints are the request addresses for calling APIs. Endpoints vary depending on services and regions. To obtain the regions and endpoints, contact the enterprise administrator.

+

A service endpoint consists of the service name, region ID, and external domain name in the format of "{service_name}.{region_id}.{external_domain_name}". For details about how to obtain each parameter, see Table 1.

+ +
+ + + + + + + + + + + + + + + + + +
Table 1 Obtaining an endpoint

Parameter

+

Description

+

How to Obtain

+

service_name

+

Abbreviation of a case-insensitive service name

+

modelarts for ModelArts by default.

+

region_id

+

Region ID

+

Obtain the value from the administrator.

+

external_domain_name

+

External domain name suffix

+

Obtain the value from the administrator.

+
+
+

If an endpoint uses a domain name, configure the hosts file in the format of "{float-ip} {service_name}.{region_id}.{external_domain_name}" on the local PC. Obtain float-ip from the administrator.
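For example, with a hypothetical floating IP address 192.168.1.10, region ID region01, and external domain name suffix example.com, the hosts entry would look like this:

192.168.1.10 modelarts.region01.example.com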

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0143.html b/modelarts/api-ref/modelarts_03_0143.html new file mode 100644 index 00000000..43e48b4c --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0143.html @@ -0,0 +1,16 @@ + + +

Basic Concepts

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0144.html b/modelarts/api-ref/modelarts_03_0144.html new file mode 100644 index 00000000..7a220843 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0144.html @@ -0,0 +1,15 @@ + + +

Calling APIs

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0147.html b/modelarts/api-ref/modelarts_03_0147.html new file mode 100644 index 00000000..da19ed5d --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0147.html @@ -0,0 +1,40 @@ + + +

Obtaining a Project ID and Name

+

Scenarios

A project ID or name is required for some requests when an API is called. Therefore, obtain the project ID and name before calling the API. Use either of the following methods:

+ +
+

Obtaining a Project ID and Name from the Console

To do so, perform the following operations:

+
  1. Log in to the console.
  2. In the upper right corner, click your account avatar icon and choose My Settings from the drop-down list.
  3. On the My Settings page, go to the Project List tab page, which is displayed by default. View the project ID and name in the project list.
+
+

Obtaining a Project ID by Calling an API

The API for obtaining a project ID is GET https://{iam-endpoint}/v3/projects. To obtain {iam-endpoint}, see Request URI and Endpoints.

+

The following is an example response. For example, if ModelArts is deployed in the xxx region, the value of name in the response body is xxx. The value of id in projects is the project ID.

+
{
    "projects": [{
        "domain_id": "65382450e8f64ac0870cd180d14e684b",
        "is_domain": false,
        "parent_id": "65382450e8f64ac0870cd180d14e684b",
        "name": "xxx",
        "description": "",
        "links": {
            "next": null,
            "previous": null,
            "self": "https://www.example.com/v3/projects/a4a5d4098fb4474fa22cd05f897d6b99"
        },
        "id": "a4a5d4098fb4474fa22cd05f897d6b99",
        "enabled": true
    }],
    "links": {
        "next": null,
        "previous": null,
        "self": "https://www.example.com/v3/projects"
    }
}
+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0148.html b/modelarts/api-ref/modelarts_03_0148.html new file mode 100644 index 00000000..f31c13b1 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0148.html @@ -0,0 +1,14 @@ + + +

Obtaining an Account Name and ID

+

When you call APIs, certain requests require the account name and ID. To obtain an account name and ID, do as follows:

+
  1. Sign up and log in to the console.
  2. Hover the cursor on the username and choose My Credentials from the drop-down list.

    On the API Credentials page, view the account name and ID.

    +
    Figure 1 Viewing the account name and ID
    +
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0149.html b/modelarts/api-ref/modelarts_03_0149.html new file mode 100644 index 00000000..48fc9ebd --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0149.html @@ -0,0 +1,193 @@ + + +

Querying Training Job Logs

+

Function

This API is used to query detailed information about training job logs by row.

+
+

URI

GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/aom-log

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 2 Parameters

Parameter

+

Mandatory

+

Type

+

Description

+

base_line

+

No

+

String

+

Base line of the log, which is obtained from an API response. If the value is empty, the latest log is obtained.

+

lines

+

No

+

Integer

+

Length of the obtained log. The default value is 50 lines. The value range is [0, 500].

+

log_file

+

Yes

+

String

+

Name of the log file to be viewed. For details about how to obtain the log file name, see Obtaining the Name of a Training Job Log File.

+

order

+

No

+

String

+

Log query direction

+
  • desc: Querying next records
  • asc: Querying previous records
+
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameters

Parameter

+

Type

+

Description

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

content

+

String

+

Content of the requested log

+

lines

+

Integer

+

Lines of the obtained logs

+

start_line

+

String

+

Start position of the obtained log

+

end_line

+

String

+

End position of the obtained log

+

is_success

+

Boolean

+

Whether the request is successful

+
+
+
+
+

Samples

The following shows how to query the logs contained in log1.log of the job whose job_id is 10 and version_id is 10.
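A request sketch is shown below; {endpoint} and {project_id} are placeholders, and the documented parameters (log_file, lines, order) are shown as query parameters of the GET request, which is an assumption since this section lists them under Request Body.

GET https://{endpoint}/v1/{project_id}/training-jobs/10/versions/10/aom-log?log_file=log1.log&lines=50&order=desc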

+ + +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0150.html b/modelarts/api-ref/modelarts_03_0150.html new file mode 100644 index 00000000..bf71633f --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0150.html @@ -0,0 +1,193 @@ + + +

Querying Monitoring Information About a Single Container of a Job

+

Function

This API is used to query monitoring information about a single container of a job.

+
+

URI

GET /v1/{project_id}/training-jobs/{job_id}/versions/{version_id}/pod/{pod_name}/metric-statistic

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID

+

job_id

+

Yes

+

Long

+

ID of a training job

+

version_id

+

Yes

+

Long

+

Version ID of a training job

+

pod_name

+

Yes

+

String

+

Container name, which is the same as the job log name. For details about how to obtain the value, see Obtaining the Name of a Training Job Log File.

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

metrics

+

No

+

String

+

Metrics to be queried. Separate metrics by commas (,), for example, CpuUsage,MemUsage. If this parameter is left blank, all metrics are queried.

+

Options:

+
  • CpuUsage
  • MemUsage
  • DiskReadRate
  • DiskWriteRate
  • RecvBytesRate
  • SendBytesRate
  • GpuUtil
  • GpuMemUsage
+

statistic_type

+

No

+

String

+

Metric statistics method, indicating whether to collect metric statistics based on a single GPU. This parameter applies only to GPU metric statistics.

+
  • all: Obtain the average value of the metric.
  • each: Obtain the metric monitoring information about each GPU.
+
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameter description

Parameter

+

Type

+

Description

+

error_message

+

String

+

Error message when the API call fails.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code when the API call fails. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

metrics

+

JSON Array

+

Metric monitoring details. For details, see Table 4.

+

interval

+

Integer

+

Query interval, in minutes.

+
+
+
+ +
+ + + + + + + + + + + + + +
Table 4 metrics data structure

Parameter

+

Type

+

Description

+

metric

+

String

+

Monitoring metrics

+

value

+

JSON Array

+

Sequence of the obtained metric value. The element is of the String type.

+
+
+
+

Samples

The following shows how to query monitoring information about a container of the job whose job_id is 10 and version_id is 10.
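A request sketch is shown below; {endpoint}, {project_id}, and {pod_name} are placeholders, and the metrics and statistic_type values are taken from the options documented in Table 2.

GET https://{endpoint}/v1/{project_id}/training-jobs/10/versions/10/pod/{pod_name}/metric-statistic?metrics=CpuUsage,MemUsage&statistic_type=all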

+ + +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0151.html b/modelarts/api-ref/modelarts_03_0151.html new file mode 100644 index 00000000..e4f35a5d --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0151.html @@ -0,0 +1,184 @@ + + +

Querying Monitoring Information About Resource Pool Nodes

+

Function

This API is used to query monitoring information about resource pool nodes.

+
+

URI

GET /v1/{project_id}/pools/{pool_id}/nodes/{node_ip}/metric-statistic

+
Table 1 describes the required parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID

+

pool_id

+

Yes

+

String

+

ID of a dedicated resource pool

+

node_ip

+

Yes

+

String

+

IP address of a resource pool node, which is obtained from the response of the pool details query API

+
+
+
+
+

Request Body

Table 2 describes the request parameters. +
+ + + + + + + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

metrics

+

No

+

String

+

Metrics to be queried. Separate metrics by commas (,), for example, CpuUsage,MemUsage. If this parameter is left blank, all metrics are queried.

+

Options:

+
  • CpuUsage: CPU usage
  • MemUsage: memory usage
  • DiskReadRate: disk read rate
  • DiskWriteRate: disk write rate
  • RecvBytesRate: network receiving rate
  • SendBytesRate: network sending rate
  • GpuUtil: GPU usage
  • GpuMemUsage: GPU memory usage
+

statistic_type

+

No

+

String

+

Metric statistics method, indicating whether to collect metric statistics based on a single GPU. This parameter applies only to GPU metric statistics.

+
  • all: Obtain the average value of the metric.
  • each: Obtain the metric monitoring information about each GPU.
+
+
+
+
+

Response Body

Table 3 describes the response parameters. +
+ + + + + + + + + + + + + + + + + + + + + +
Table 3 Parameter description

Parameter

+

Type

+

Description

+

error_message

+

String

+

Error message of a failed API call.

+

This parameter is not included when the API call succeeds.

+

error_code

+

String

+

Error code of a failed API call. For details, see Error Codes.

+

This parameter is not included when the API call succeeds.

+

metrics

+

JSON Array

+

Metric monitoring details. For details, see Table 4.

+

interval

+

Integer

+

Query interval, in minutes.

+
+
+
+ +
+ + + + + + + + + + + + + +
Table 4 metrics data structure

Parameter

+

Type

+

Description

+

metric

+

String

+

Monitoring metrics

+

value

+

JSON Array

+

Sequence of the obtained metric value. The element is of the String type.

+
+
+
+

Samples

The following example queries monitoring information about node 192.168.1.1 in the dedicated resource pool poolabcd.
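A request sketch is shown below; {endpoint} and {project_id} are placeholders, and the metrics and statistic_type values are taken from the options documented in Table 2.

GET https://{endpoint}/v1/{project_id}/pools/poolabcd/nodes/192.168.1.1/metric-statistic?metrics=GpuUtil,GpuMemUsage&statistic_type=each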

+ + +
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0152.html b/modelarts/api-ref/modelarts_03_0152.html new file mode 100644 index 00000000..882e2fc8 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0152.html @@ -0,0 +1,113 @@ + + +

Restarting an ML Studio Instance

+

Function

This API is used to restart an ML Studio development environment instance.

+
+

URI

POST /v1/{project_id}/demanager/instances/{instance_id}/action

+

Table 1 describes the required parameters.

+
+ +
+ + + + + + + + + + + + + + + + +
Table 1 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

project_id

+

Yes

+

String

+

Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.

+

instance_id

+

Yes

+

String

+

Instance ID

+
+
+

Request Body

Table 2 describes the request parameters.

+
+ +
+ + + + + + + + + + + +
Table 2 Parameter description

Parameter

+

Mandatory

+

Type

+

Description

+

action

+

Yes

+

String

+

Operation on a development environment instance. The value can be restart.

+
+
+

Response Body

Table 3 describes the response parameters.

+
+ +
+ + + + + + + + + + + + + +
Table 3 Parameter description

Parameter

+

Type

+

Description

+

current_status

+

String

+

Current status of an instance

+

previous_state

+

String

+

Previous status of an instance

+
+
+

Samples

The following shows how to restart the ML Studio instance whose ID is 47cf4ff3-0c59-44fe-9821-2840a34c02a9.
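A request sketch is shown below; {endpoint} and {project_id} are placeholders, and the action value is the restart option documented in Table 2.

POST https://{endpoint}/v1/{project_id}/demanager/instances/47cf4ff3-0c59-44fe-9821-2840a34c02a9/action

{
    "action": "restart"
}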

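A minimal sketch of such a request and a possible response, assuming the {ma_endpoint} and {project_id} placeholders used elsewhere in this document; the status values are illustrative only:

POST https://{ma_endpoint}/v1/{project_id}/demanager/instances/47cf4ff3-0c59-44fe-9821-2840a34c02a9/action

{
  "action": "restart"
}

Sample response:

{
  "current_status": "STARTING",
  "previous_state": "RUNNING"
}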

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0153.html b/modelarts/api-ref/modelarts_03_0153.html new file mode 100644 index 00000000..c61ef90d --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0153.html @@ -0,0 +1,22 @@ + + +

Models

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/modelarts_03_0155.html b/modelarts/api-ref/modelarts_03_0155.html new file mode 100644 index 00000000..5ec8f7eb --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0155.html @@ -0,0 +1,270 @@ + + +

Querying Service Event Logs

+

Function

This API is used to query service event logs, including service operation records, key actions during deployment, and deployment failure causes.

+
+

URI

GET /v1/{project_id}/services/{service_id}/events

+
Table 1 describes the required parameters.

Table 1 Parameters

• project_id (Mandatory, String): Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.
• service_id (Mandatory, String): Service ID
Table 2 Parameters

• event_type (Optional, String): Type of the event to be filtered. By default, the event type is not filtered. Options:
  • normal: normal events
  • abnormal: abnormal events
• start_time (Optional, Number): Start time of the events to be filtered. The value is the number of milliseconds elapsed since '1970.1.1 0:0:0 UTC'.
• end_time (Optional, Number): End time of the events to be filtered. The value is the number of milliseconds elapsed since '1970.1.1 0:0:0 UTC'.
• offset (Optional, Integer): Start page of the paging list. Default value: 0
• limit (Optional, Integer): Maximum number of records returned on each page. Default value: 1000
• sort_by (Optional, String): Sorting field. The default value is occur_time.
• order (Optional, String): Sorting mode. The default value is desc. Options:
  • asc: ascending order
  • desc: descending order

Request Body

None

+
+

Response Body

Table 3 describes the response parameters.

Table 3 Parameters

• service_id (String): Service ID
• service_name (String): Service name
• events (event array): Event logs. For details, see Table 4.
• total_count (Integer): Total number of events that meet the search criteria when no paging is implemented
• count (Integer): Number of events in the query result
Table 4 event structure

• occur_time (Number): Time when the event occurred. The value is the number of milliseconds elapsed since '1970.1.1 0:0:0 UTC'.
• event_type (String): Event type. Possible values are normal and abnormal, indicating whether the event is normal or abnormal.
• event_info (String): Event information, including service operation records, key actions during deployment, and deployment failure causes.

Samples

The following example queries event information of the service whose ID is 35de3ca9-1bca-4ae7-9cb0-914f30fa7d3e.

+
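A minimal sketch of such a request and a possible response, assuming the {ma_endpoint} and {project_id} placeholders used elsewhere in this document; the service name and event contents are illustrative only:

GET https://{ma_endpoint}/v1/{project_id}/services/35de3ca9-1bca-4ae7-9cb0-914f30fa7d3e/events?event_type=normal&limit=10

Sample response:

{
  "service_id": "35de3ca9-1bca-4ae7-9cb0-914f30fa7d3e",
  "service_name": "service-demo",
  "total_count": 1,
  "count": 1,
  "events": [
    {
      "occur_time": 1562597251764,
      "event_type": "normal",
      "event_info": "start to deploy service"
    }
  ]
}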

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0200.html b/modelarts/api-ref/modelarts_03_0200.html new file mode 100644 index 00000000..2678a425 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0200.html @@ -0,0 +1,180 @@ + + +

Querying Supported Service Deployment Specifications

+

Function

This API is used to query supported service deployment specifications.

+
+

URI

+

GET /v1/{project_id}/services/specifications

Table 1 Parameters

• project_id (Mandatory, String): Project ID. For details about how to obtain the project ID, see Obtaining a Project ID and Name.
• is_personal_cluster (Optional, Boolean): Whether to query the service deployment specifications supported by a dedicated resource pool. The default value is false.
• infer_type (Optional, String): Inference mode. The default value is real-time. The value can be real-time or batch.

Request Body

None

+
+

Response Body

+
Table 2 Parameters

• specifications (specification array): Supported service deployment specifications. For details, see Table 3.
Table 3 specification parameters

• specification (String): Unique specifications ID
• billing_spec (String): Unique ID of the billing specifications
• spec_status (String): Specifications status. Options:
  • normal: The specifications are normal.
  • sellout: The specifications cannot be used to deploy services because they are sold out.
• is_open (Boolean): Whether the specifications are enabled. The default value is true. If this parameter is set to false, you need to submit a service ticket to apply for the specifications.
• source_type (String): Type of the model to which the specifications apply.
  • Empty: indicates a model generated by the user.
• is_free (Boolean): Whether the flavor is free of charge. The value true indicates that the flavor is free of charge.
• over_quota (Boolean): Whether the quota exceeds the upper limit. The value true indicates that the quota exceeds the upper limit.
• extend_params (Integer): Billing item
• display_en (String): Specifications description in English
• display_cn (String): Specifications description in Chinese

Samples

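A minimal sketch of a request and a possible response, assuming the {ma_endpoint} and {project_id} placeholders used elsewhere in this document; the specification values below are illustrative only:

GET https://{ma_endpoint}/v1/{project_id}/services/specifications

Sample response:

{
  "specifications": [
    {
      "specification": "modelarts.vm.cpu.2u",
      "billing_spec": "modelarts.vm.cpu.2u",
      "spec_status": "normal",
      "is_open": true,
      "source_type": "",
      "is_free": false,
      "over_quota": false,
      "extend_params": 1,
      "display_en": "CPU: 2 vCPUs 8 GB",
      "display_cn": "CPU: 2 vCPUs 8 GB"
    }
  ]
}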
+

Status Code

For details about the status code, see Status Code.

+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0202.html b/modelarts/api-ref/modelarts_03_0202.html new file mode 100644 index 00000000..c3f40d1b --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0202.html @@ -0,0 +1,39 @@ + + +

Data Management

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0299.html b/modelarts/api-ref/modelarts_03_0299.html new file mode 100644 index 00000000..ba8cc947 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0299.html @@ -0,0 +1,414 @@ + + +

Data Management APIs

+
+
Table 1 Data management APIs

Type

+

API

+

Description

+

Dataset Management

+

Querying the Dataset List

+

Query created datasets by page based on specified conditions.

+

Creating a Dataset

+

Create a new dataset and determine whether to enable team labeling as needed.

+
  • Enable team labeling. The subsequent operations vary depending on the specified role.

    If a team is specified to assign labeling tasks, the team labeling task is started after the dataset is created.

    +

    If a task manager is specified to assign labeling tasks, the manager calls the API in Starting a Team Labeling Task to assign and start the team labeling tasks.

    +
  • Disable team labeling. To enable team labeling later, call the API in Creating a Team Labeling Task to create team labeling tasks for the dataset.
+

Querying Details About a Dataset

+

Query details about a dataset, including the dataset name, type, and version name based on the dataset ID.

+

Modifying a Dataset

+

Modify basic information about a dataset, such as the dataset name, description, version, or labels.

+

The modification takes effect in the entire dataset, including the samples in the dataset.

+

Deleting a Dataset

+

Delete a dataset based on the dataset ID to release resources.

+

Querying Dataset Statistics

+

Query dataset statistics, such as sample statistics, label statistics, or hard examples based on specified conditions.

+

Querying the Monitoring Data of a Dataset

+

Query the monitoring data of a dataset within a specified period, such as the number of labeled samples, number of unlabeled samples, and total number of samples at each time point within the period.

+

Data Synchronization

+

Synchronizing a Dataset

+

Synchronize data and labels from the dataset input path to the dataset.

+

Querying the Status of a Dataset Synchronization Task

+

Query the status of a data source synchronization task based on the dataset ID.

+

Labeling Data

+

Updating Sample Labels in Batches

+

Label multiple samples in a dataset in batches.

+
  • Label unlabeled samples. You can use an existing label or create a new label.
  • Add, modify, or delete labels for labeled samples. You can use an existing label or newly added label to modify an original label. Additionally, you can add an existing label or a new label to a sample.
+

This API uses a new label list to overwrite the original one to update the sample label. For example, if an empty label list is used to overwrite the original one, all sample labels will be deleted.

+

Querying the Sample List

+

Query dataset samples by page based on specified conditions.

+

Adding Samples in Batches

+

Add samples to a dataset in batches for data labeling.

+

Deleting Samples in Batches

+

Delete unused samples from a dataset in batches.

+

Querying Details About a Sample

+

Query a single sample based on the sample ID, including the sample status and labels.

+

Querying Sample Search Criteria

+

Obtain sample search criteria, such as the label list and attribute key-value pairs of the dataset based on the dataset ID.

+

Label Management

+

Querying the Dataset Label List

+

Query the labels in a specified dataset version.

+

Creating a Dataset Label

+

During dataset labeling, new labels can be created. This function is available only in datasets of the text classification and named entity types.

+

Modifying Labels in Batches

+

Modify dataset labels in batches. The modification takes effect in the entire dataset, including the samples in the dataset.

+

Deleting Labels in Batches

+

Delete dataset labels in batches. You can determine whether to also delete the samples with these labels.

+

Updating a Label by Label Name

+

Modify a label in a dataset based on the label name. The modification takes effect in the entire dataset, including the samples in the dataset.

+

Deleting a Label and the Files with This Label Only

+

Delete a label in a dataset based on the label name. You can determine whether to also delete the samples with this label.

+

Importing Data

+

Querying the Dataset Import Task List

+

Query historical tasks imported to a dataset by page based on the dataset ID.

+

Creating a Dataset Import Task

+

Create a dataset import task to import labels and data (such as Manifest files and OBS data) from a storage system to the dataset.

+

Querying Details About a Dataset Import Task

+

Query details about a dataset import task based on the dataset ID and task ID to learn about the data source, import mode, and task status.

+

Exporting Data

+

Querying the Dataset Export Task List

+

Query historical tasks exported from a dataset by page based on the dataset ID.

+

Creating a Dataset Export Task

+

Export certain data as a new dataset or to OBS.

+

Querying the Status of a Dataset Export Task

+

Query details about a dataset export task based on the dataset ID and task ID to learn about the export type, task status, and number of samples.

+

Publishing a Dataset

+

Creating a Dataset Labeling Version

+

Publish the labeled dataset as a new version for model building.

+

Managing Dataset Versions

+

Querying the Dataset Version List

+

Query the versions of a dataset based on the dataset ID to learn about the dataset version evolution.

+

Creating a Dataset Labeling Version

+

Publish a modified dataset as a new version. The modification includes labeling samples, adding samples, and deleting samples in the dataset.

+

Querying Details About a Dataset Labeling Version

+

Query details about a specified dataset labeling version, including the name, description, number of files, and storage path based on the dataset ID and version ID.

+

Deleting a Dataset Labeling Version

+

Delete a dataset version based on the dataset ID and version ID.

+

Auto Labeling

+

Querying the Auto Labeling Sample List

+

Query the to-be-confirmed auto labeling samples in a dataset by page based on the dataset ID.

+

Querying Details About an Auto Labeling Sample

+

Query information of a single auto labeling sample based on the dataset ID and sample ID, such as the sample labels, hard example details, and sample type.

+

Querying Auto Labeling Tasks by Page

+

Query all auto labeling tasks by page based on the dataset ID.

+

Starting an Auto Labeling Task

+

Start an auto labeling task for unlabeled data to quickly label the data. After the auto labeling task is complete, call the API in Updating Sample Labels in Batches to check the labeling result.

+

Querying Details About an Auto Labeling Task

+

Query details about an auto labeling task based on the dataset ID and task ID to learn about the task configuration, name, and status.

+

Stopping an Auto Labeling Task

+

Stop an ongoing auto labeling task based on the dataset ID and task ID.

+

Auto Grouping

+

Querying Auto Grouping Tasks by Page

+

Query all auto grouping tasks by page based on the dataset ID.

+

In auto grouping, unlabeled images are clustered using a clustering algorithm and then processed based on the clustering result. Images can be labeled by group or cleaned.

+

Starting an Auto Grouping Task

+

Execute an auto grouping task based on selected data samples to improve data labeling efficiency.

+

Querying Details About an Auto Grouping Task

+

Query details about an auto grouping task based on the dataset ID and task ID to learn about the task configuration, name, and status.

+

Stopping an Auto Grouping Task

+

Stop an ongoing auto grouping task based on the dataset ID and task ID.

+

Team Labeling

+

Querying Statistics for a Team Labeling Task

+

Query statistics for a team labeling task on the data labeling platform, such as the sample statistics, label statistics, and hard example set based on the dataset ID and team labeling task ID.

+

Querying Statistics for the Member Labeling Progresses in a Team Labeling Task

+

Query statistics for the member labeling progresses in a team labeling task based on the dataset ID and team labeling task ID.

+

Querying the Team Labeling Task List of a Dataset

+

Query the team labeling tasks of a dataset based on the dataset ID.

+

Creating a Team Labeling Task

+

Create a team labeling task based on an existing dataset so that multiple members can concurrently label the dataset.

+
  • If a team is specified to assign labeling tasks, the team labeling task is started after the task is created.
  • If a task manager is specified to assign labeling tasks, the manager calls the API in Starting a Team Labeling Task to assign and start the team labeling tasks.
+

Querying Details About a Team Labeling Task

+

Query details about a team labeling task based on the dataset ID and team labeling task ID, including the task name, data, and team information.

+

Starting a Team Labeling Task

+

The team labeling task manager assigns and starts a team labeling task on the data labeling platform based on the dataset ID and team labeling task ID.

+

Updating a Team Labeling Task

+

Update the description, name, and team information of a team labeling task based on the dataset ID and team labeling task ID.

+

Deleting a Team Labeling Task

+

Delete a team labeling task based on the dataset ID and team labeling task ID.

+

Creating a Team Labeling Acceptance Task

+

Initiate an acceptance task for a team labeling task based on the dataset ID and team labeling task ID.

+

Querying the Acceptance Report of a Team Labeling Task

+

Query the acceptance report and statistics for a team labeling task based on the dataset ID and team labeling task ID.

+

Updating the Status of a Team Labeling Acceptance Task

+

Determine the acceptance scope for a team labeling task, including all labeled data, and update the sample data accordingly.

+

Querying the Sample List of a Team Labeling Task by Page

+

Query the samples of a team labeling task on the data labeling platform by page based on the dataset ID and team labeling task ID.

+

Querying Details About a Team Labeling Sample

+

Query details about a sample in a team labeling task on the data labeling platform based on the dataset ID, team labeling task ID, and sample ID.

+

Querying Team Labeling Tasks by Team Member

+

Members in a team labeling task query all team labeling tasks on the data labeling platform by page.

+

Submitting Sample Review Comments for Acceptance

+

During the acceptance of a team labeling task, provide review comments on samples, including the review result and score.

+

Reviewing Team Labeling Results

+

The manager of a team labeling task reviews the team labeling task on the data labeling platform based on the dataset ID and team labeling task ID, determines the review result, and provides review comments.

+

Updating Labels of Team Labeling Samples in Batches

+

Update sample labels on the data labeling platform in batches, including adding, modifying, and deleting sample labels. Only labels that already exist in the dataset can be added or modified.

+

Labeling Team

+

Querying the Labeling Team List

+

Query all labeling teams by page.

+

Creating a Labeling Team

+

Add a labeling team.

+

Querying Details About a Labeling Team

+

Query details about a labeling team, including the team name, description, and total number of members based on the team ID.

+

Updating a Labeling Team

+

Update the name and description of a labeling team based on the team ID.

+

Deleting a Labeling Team

+

Delete a labeling team based on the team ID.

+

Sending an Email to Labeling Team Members

+

Enable automatic email sending to members in a labeling team to notify them of starting the team labeling task after the task is created.

+

Querying All Labeling Team Members

+

Query all labeling team members by page based on specified conditions.

+

Querying Members in a Labeling Team

+

Query members in a labeling team by page based on the team ID.

+

Creating a Labeling Team Member

+

Add new members to a labeling team.

+

Deleting Labeling Team Members in Batches

+

Delete multiple members from a labeling team in batches.

+

Querying Details About a Labeling Team Member

+

Query details about a member in a labeling team, including the member description, email address, and role based on the team ID and member ID.

+

Updating Labeling Team Members

+

Update the description and role of a member in a labeling team based on the team ID and member ID.

+

Deleting a Labeling Team Member

+

Delete a member from a labeling team based on the team ID and member ID.

+
+
+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0400.html b/modelarts/api-ref/modelarts_03_0400.html new file mode 100644 index 00000000..e6f8a89d --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0400.html @@ -0,0 +1,12 @@ + + +

Application Cases

+

+
+
+ +
+ diff --git a/modelarts/api-ref/modelarts_03_0401.html b/modelarts/api-ref/modelarts_03_0401.html new file mode 100644 index 00000000..f2a129e4 --- /dev/null +++ b/modelarts/api-ref/modelarts_03_0401.html @@ -0,0 +1,307 @@ + + +

Creating a Training Job Using the TensorFlow Framework

+

Overview

This section describes how to train a model on ModelArts by calling a series of APIs.

+

The process for creating a training job using the TensorFlow framework is as follows:

+
  1. Call the API in Authentication to obtain the user token, which will be put into the request header for authentication in a subsequent request.
  2. Call the API in Querying Job Resource Specifications to obtain the resource flavors available for training jobs.
  3. Call the API in Querying Job Engine Specifications to view the engine types and versions available for training jobs.
  4. Call the API in Creating a Training Job to create a training job.
  5. Call the API in Querying the Details About a Training Job Version to query the details about the training job based on the job ID.
  6. Call the API in Obtaining the Name of a Training Job Log File to obtain the name of the training job log file.
  7. Call the API in Querying Training Job Logs to view the log details of the training job.
  8. Call the API in Deleting a Training Job to delete the training job if it is no longer needed.
+
+

Prerequisites

+
+

Procedure

  1. Call the API in Authentication to obtain the user token.
    1. Request body:

      URI format: POST https://{iam_endpoint}/v3/auth/tokens

      +

      Request header: Content-Type → application/json

      +
      Request body:
      {
      +  "auth": {
      +    "identity": {
      +      "methods": ["password"],
      +      "password": {
      +        "user": {
      +          "name": "username", 
      +          "password": "**********",
      +          "domain": {
      +            "name": "domainname"  
      +          }
      +        }
      +      }
      +    },
      +    "scope": {
      +      "project": {
      +        "name": ""  
      +      }
      +    }
      +  }
      +}
      +
      +
      Set the fields in bold italics based on the site requirements.
      • Replace iam_endpoint with the IAM endpoint.
      • Replace username with the IAM username.
      • Replace ******** with the login password of the user.
      • Replace domainname with the account to which the user belongs.
      • Replace with the project name, which indicates the zone where the service is deployed.
      +
      +
    2. The status code 201 Created is returned. The value of X-Subject-Token in the response header is the token.
      x-subject-token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...
      +
    +
  2. Call the API in Querying Job Resource Specifications to obtain the resource flavors available for training jobs.
    1. Request body:

      URI format: GET https://{ma_endpoint}/v1/{project_id}/job/resource-specs?job_type=train

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +
      Set the fields in bold italics based on the site requirements.
      • Replace ma_endpoint with the ModelArts endpoint.
      • Replace project_id with the project ID of the user.
      • Set X-auth-Token to the token obtained in 1.
      +
      +
    2. The status code 200 OK is returned. The response body is as follows:
      {
      +  "specs": [
      +    ......
      +    {
      +      "spec_id": 7,
      +      "core": "2",
      +      "cpu": "8",
      +      "gpu_num": 0,
      +      "gpu_type": "",
      +      "spec_code": "modelarts.vm.cpu.2u",
      +      "unit_num": 1,
      +      "max_num": 1,
      +      "storage": "",
      +      "interface_type": 1,
      +      "no_resource": false
      +    },
      +    {
      +      "spec_id": 27,
      +      "core": "8",
      +      "cpu": "32",
      +      "gpu_num": 0,
      +      "gpu_type": "",
      +      "spec_code": "modelarts.vm.cpu.8u",
      +      "unit_num": 1,
      +      "max_num": 1,
      +      "storage": "",
      +      "interface_type": 1,
      +      "no_resource": false
      +    }
      +  ],
      +  "is_success": true,
      +  "spec_total_count": 5
      +}
      +
      • Select and record the flavor type required for creating the training job based on the spec_code field. This section uses modelarts.vm.cpu.8u as an example and records the value of the max_num field as 1.
      • The no_resource field is used to determine whether resources are sufficient. Value false indicates that resources are available.
      +
    +
  3. Call the API in Querying Job Engine Specifications to view the engine types and versions available for training jobs.
    1. Request body:

      URI format: GET https://{ma_endpoint}/v1/{project_id}/job/ai-engines?job_type=train

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +
      Set the fields in bold italics based on the site requirements.
      • Replace ma_endpoint with the ModelArts endpoint.
      • Replace project_id with the project ID of the user.
      • Set X-auth-Token to the token obtained in 1.
      +
      +
    2. The status code 200 OK is returned. The response body is as follows:
      {
      +  "engines": [
      +    {
      +      "engine_type": 13,
      +      "engine_name": "Ascend-Powered-Engine",
      +      "engine_id": 130,
      +      "engine_version": "TF-1.15-python3.7-aarch64"
      +    },
      +    ......
      +    {
      +      "engine_type": 1,
      +      "engine_name": "TensorFlow",
      +      "engine_id": 3,
      +      "engine_version": "TF-1.8.0-python2.7"
      +    },
      +    {
      +      "engine_type": 1,
      +      "engine_name": "TensorFlow",
      +      "engine_id": 4,
      +      "engine_version": "TF-1.8.0-python3.6"
      +    },
      +    ......
      +    {
      +      "engine_type": 9,
      +      "engine_name": "XGBoost-Sklearn",
      +      "engine_id": 100,
      +      "engine_version": "XGBoost-0.80-Sklearn-0.18.1-python3.6"
      +    }
      +  ],
      +  "is_success": true
      +}
      +

      Select the engine flavor required for creating a training job based on the engine_name and engine_version fields and record engine_id. This section describes how to create a job based on the TensorFlow engine. Record engine_id as 4.

      +
    +
  4. Call the API in Creating a Training Job to create a training job named jobtest_TF based on the TensorFlow framework.
    1. Request body:

      URI format: POST https://{ma_endpoint}/v1/{project_id}/training-jobs

      +
      Request header:
      • X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...
      • Content-Type →application/json
      +
      +
      Request body:
      {
      +    "job_name": "jobtest_TF",
      +    "job_desc": "using TensorFlow for handwritten digit recognition",
      +    "config": {
      +        "worker_server_num": 1,
      +        "parameter": [],
      +        "flavor": {
      +            "code": "modelarts.vm.cpu.8u"
      +        },
      +        "train_url": "/test-modelarts/mnist-model/output/",
      +        "engine_id": 4,
      +        "app_url": "/test-modelarts/mnist-tensorflow-code/",
      +        "boot_file_url": "/test-modelarts/mnist-tensorflow-code/train_mnist_tf.py",
      +        "data_source": [
      +            {
      +                "type": "obs",
      +                "data_url": "/test-modelarts/dataset-mnist/"
      +            }
      +        ]
      +    },
      +    "notification": {
      +        "topic_urn": "",
      +        "events": []
      +    },
      +    "workspace_id": "0"
      +}
      +
      +
      Set the fields in bold italics based on the site requirements.
      • Set job_name and job_desc to the name and description of the training job.
      • Set worker_server_num and code to the values of max_num and spec_code obtained in 2.
      • Set engine_id to the engine ID obtained in 3.
      • Set train_url to the output directory of the training job.
      • Set app_url and boot_file_url to the code directory and code boot file of the training job, respectively.
      • Set data_url to the dataset directory used by the training job.
      +
      +
    2. The status code 200 OK is returned, indicating that the training job has been created. The response body is as follows:
      {
      +  "version_name": "V0001",
      +  "job_name": "jobtest_TF",
      +  "create_time": 1609121837000,
      +  "job_id": 567524,
      +  "resource_id": "jobaedef089",
      +  "version_id": 1108482,
      +  "is_success": true,
      +  "status": 1
      +}
      +
      • Record the values of job_id (training job ID) and version_id (training job version ID) for future use.
      • The value of status is 1, indicating that the training job is being initialized.
      +
    +
  5. Call the API in Querying the Details About a Training Job Version to query the details about the training job based on the job ID.
    1. Request body:

      URI format: GET https://{ma_endpoint}/v1/{project_id}/training-jobs/567524/versions/1108482

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +
      Set the fields in bold italics based on the site requirements.
      • Replace 567524 with the value of job_id recorded in 4.
      • Replace 1108482 with the value of version_id recorded in 4.
      +
      +
    2. The status code 200 OK is returned. The response body is as follows:
      {
      +  "dataset_name": null,
      +  "duration": 1326,
      +  "spec_code": "modelarts.vm.cpu.8u",
      +  "parameter": [],
      +  "start_time": 1609121913000,
      +  "model_outputs": [],
      +  "engine_name": "TensorFlow",
      +  "error_result": null,
      +  "gpu_type": "",
      +  "user_frame_image": null,
      +  "gpu": null,
      +  "dataset_id": null,
      +  "nas_mount_path": null,
      +  "task_summary": {},
      +  "max_num": 1,
      +  "model_metric_list": "{}",
      +  "is_zombie": null,
      +  "flavor_code": "modelarts.vm.cpu.8u",
      +  "gpu_num": 0,
      +  "train_url": "/test-modelarts/mnist-model/output/",
      +  "engine_type": 1,
      +  "job_name": "jobtest_TF",
      +  "nas_type": "efs",
      +  "outputs": null,
      +  "job_id": 567524,
      +  "data_url": "/test-modelarts/dataset-mnist/",
      +  "log_url": null,
      +  "boot_file_url": "/test-modelarts/mnist-tensorflow-code/train_mnist_tf.py",
      +  "volumes": null,
      +  "dataset_version_id": null,
      +  "algorithm_id": null,
      +  "worker_server_num": 1,
      +  "pool_type": "SYSTEM_DEFINED",
      +  "autosearch_config": null,
      +  "job_desc": "using TensorFlow for handwritten digit recognition",
      +  "inputs": null,
      +  "model_id": null,
      +  "dataset_version_name": null,
      +  "pool_name": "hec-train-pub-cpu",
      +  "engine_version": "TF-1.8.0-python3.6",
      +  "system_metric_list": {
      +    "recvBytesRate": [
      +      "0",
      +      "0"
      +    ],
      +    "cpuUsage": [
      +      "0",
      +      "0"
      +    ],
      +    "sendBytesRate": [
      +      "0",
      +      "0"
      +    ],
      +    "memUsage": [
      +      "0",
      +      "0"
      +    ],
      +    "gpuUtil": [
      +      "0",
      +      "0"
      +    ],
      +    "gpuMemUsage": [
      +      "0",
      +      "0"
      +    ],
      +    "interval": 1,
      +    "diskWriteRate": [
      +      "0",
      +      "0"
      +    ],
      +    "diskReadRate": [
      +      "0",
      +      "0"
      +    ]
      +  },
      +  "retrain_model_id": null,
      +  "version_name": "V0001",
      +  "pod_version": "1.8.0-cp36",
      +  "engine_id": 4,
      +  "status": 10,
      +  "cpu": "32",
      +  "user_image_url": null,
      +  "spec_id": 27,
      +  "is_success": true,
      +  "storage": "",
      +  "nas_share_addr": null,
      +  "version_id": 1108482,
      +  "no_resource": false,
      +  "user_command": null,
      +  "resource_id": "jobaedef089",
      +  "core": "8",
      +  "npu_info": null,
      +  "app_url": "/test-modelarts/mnist-tensorflow-code/",
      +  "data_source": [
      +    {
      +      "type": "obs",
      +      "data_url": "/test-modelarts/dataset-mnist/"
      +    }
      +  ],
      +  "pre_version_id": null,
      +  "create_time": 1609121837000,
      +  "job_type": 1,
      +  "pool_id": "pool7d1e384a"
      +}
      +

      You can learn about the version details of the training job based on the response. The value of status is 10, indicating that the training job is successful.

      +
    +
  6. Call the API in Obtaining the Name of a Training Job Log File to obtain the name of the training job log file.
    1. Request body:

      URI format: GET https://{ma_endpoint}/v1/{project_id}/training-jobs/567524/versions/1108482/log/file-names

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +

      Set the fields in bold italics based on the site requirements.

      +
    2. The status code 200 OK is returned. The response body is as follows:
      {
      +  "is_success": true,
      +  "log_file_list": [
      +    "job-jobtest-tf.0"
      +  ]
      +}
      +

      Only one log file named job-jobtest-tf.0 exists.

      +
    +
  7. Call the API in Querying Training Job Logs to query eight lines of the training job log file.
    1. Request body:

      URI format: GET https://{ma_endpoint}/v1/{project_id}/training-jobs/567524/versions/1108482/aom-log?log_file=job-jobtest-tf.0&lines=8&order=desc

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +
      Set the fields in bold italics based on the site requirements.
      • Set log_file to the name of the log file obtained in 6.
      • Set lines to the rows to be obtained in the log file.
      • Set order to the log query direction.
      +
      +
    2. The status code 200 OK is returned. The response body is as follows:
      {
      +  "start_line": "1609121886518240330",
      +  "lines": 8,
      +  "is_success": true,
      +  "end_line": "1609121900042593083",
      +  "content": "Done exporting!\n\n[Modelarts Service Log]Training completed.\n\n[ModelArts Service Log]modelarts-pipe: will create log file /tmp/log/jobtest_TF.log\n\n[ModelArts Service Log]modelarts-pipe: will create log file /tmp/log/jobtest_TF.log\n\n[ModelArts Service Log]modelarts-pipe: will write log file /tmp/log/jobtest_TF.log\n\n[ModelArts Service Log]modelarts-pipe: param for max log length: 1073741824\n\n[ModelArts Service Log]modelarts-pipe: param for whether exit on overflow: 0\n\n[ModelArts Service Log]modelarts-pipe: total length: 23303\n"
      +}
      +
    +
  8. Call the API in Deleting a Training Job to delete the training job if it is no longer needed.
    1. Request body:

      URI format: DELETE https://{ma_endpoint}/v1/{project_id}/training-jobs/567524

      +

      Request header: X-auth-Token →MIIZmgYJKoZIhvcNAQcCoIIZizCCGYcCAQExDTALBglghkgBZQMEAgEwgXXXXXX...

      +

      Set the fields in bold italics based on the site requirements.

      +
    2. The status code 200 OK is returned, indicating that the job has been deleted. The response is as follows:
      {
      +  "is_success": true
      +}
      +
    +
+
+
+
+ +
+ diff --git a/modelarts/api-ref/process_task.html b/modelarts/api-ref/process_task.html new file mode 100644 index 00000000..fdd5c8f6 --- /dev/null +++ b/modelarts/api-ref/process_task.html @@ -0,0 +1,38 @@ + + +

Processing Task

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/sample_management.html b/modelarts/api-ref/sample_management.html new file mode 100644 index 00000000..2d51b170 --- /dev/null +++ b/modelarts/api-ref/sample_management.html @@ -0,0 +1,28 @@ + + +

Sample Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/workforce_management.html b/modelarts/api-ref/workforce_management.html new file mode 100644 index 00000000..e30624ad --- /dev/null +++ b/modelarts/api-ref/workforce_management.html @@ -0,0 +1,24 @@ + + +

Labeling Team Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/workforce_process_management.html b/modelarts/api-ref/workforce_process_management.html new file mode 100644 index 00000000..ddf9ec50 --- /dev/null +++ b/modelarts/api-ref/workforce_process_management.html @@ -0,0 +1,20 @@ + + +

Team Labeling Process Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/workforce_worker_management.html b/modelarts/api-ref/workforce_worker_management.html new file mode 100644 index 00000000..f1a57b1b --- /dev/null +++ b/modelarts/api-ref/workforce_worker_management.html @@ -0,0 +1,30 @@ + + +

Labeling Team Member Management

+

+
+
+ + + +
+ diff --git a/modelarts/api-ref/workspace.html b/modelarts/api-ref/workspace.html new file mode 100644 index 00000000..c653a722 --- /dev/null +++ b/modelarts/api-ref/workspace.html @@ -0,0 +1,24 @@ + + +

Workspace Management

+

+
+
+ +
+