diff --git a/otc_doc_convertor/__init__.py b/otc_doc_convertor/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/otc_doc_convertor/comparator.py b/otc_doc_convertor/comparator.py
deleted file mode 100644
index 69014754..00000000
--- a/otc_doc_convertor/comparator.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import argparse
-import logging
-import requests
-import pathlib
-from bs4 import BeautifulSoup
-
-
-def body_filter(tag):
- return (
- tag.name == "div"
- and tag.has_attr("id")
- and tag["id"].startswith("body")
- )
-
-
-def simplify_body(data):
- return data.get_text().replace(" ", "")
-
-
-class OTCComparator:
-
- def compare(self, url_prefix, file_path, file_name):
- try:
- data = requests.get(
- f"https://docs.otc.t-systems.com/{url_prefix}/"
- f"{file_name}.json")
- page_data = None
- for item in data.json():
- if (
- item.get("url").endswith(f"{file_name}.html")
- and item['content']
- ):
- page_data = item["content"]
- break
- original = BeautifulSoup(page_data, 'html.parser')
- with open(f"{file_path}/{file_name}.html", "r") as f:
- new_content = f.read()
- new = BeautifulSoup(new_content, 'html.parser')
- t1 = original.find(body_filter)
- t2 = new.find(body_filter)
- if t1 != t2:
- if simplify_body(t1) == simplify_body(t2):
- logging.error(
- "File %s is not matching, but "
- "plain text matches" % file_name)
- return True
- else:
- logging.error("File %s mismatches" % file_name)
- logging.debug(
- "Proposed content: %s" %
- t2.get_text().encode("unicode_escape").decode("utf-8"))
- logging.debug(
- "Current content: %s" %
- t1.get_text().encode("unicode_escape").decode("utf-8"))
- return False
- else:
- logging.info("Content matches")
- return True
- except Exception as ex:
- logging.error("Content comparison error %s" % ex)
- return False
-
- def main(self):
- logging.basicConfig(level=logging.DEBUG)
- parser = argparse.ArgumentParser(description="Compare document data.")
- parser.add_argument(
- "path",
- type=str,
- help="Path to the document content (i.e. docs/ecs/api-ref")
- parser.add_argument(
- "url",
- type=str,
- help="url prefix in the helpcenter (i.e. api/ecs)")
- args = parser.parse_args()
- match = True
-
- for f in pathlib.Path(args.path).glob("*.html"):
- logging.info(f"Comparing {f.name}")
- if not self.compare(
- args.url, args.path, f.name.replace(".html", "")):
- match = False
-
- if not match:
- logging.error("Comparison showed deviations")
- exit(1)
- else:
- logging.info("No deviations found")
-
-
-def main():
- OTCComparator().main()
-
-
-if __name__ == '__main__':
- main()
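
For orientation, the comparator removed above was exposed as the `otc-convert-compare` console script (its entry point is dropped from setup.cfg further down in this change). A minimal usage sketch of the equivalent direct call, assuming the old package is still importable locally; the path `docs/ecs/api-ref` and prefix `api/ecs` are only the illustrative values from the argparse help, and the page name `overview` is hypothetical:

    # Minimal sketch (assumption: the old otc_doc_convertor package is installed).
    # compare() fetches https://docs.otc.t-systems.com/<url_prefix>/<file_name>.json
    # and diffs the <div id="body..."> content against the local HTML export.
    from otc_doc_convertor.comparator import OTCComparator

    comparator = OTCComparator()
    ok = comparator.compare(
        url_prefix="api/ecs",          # helpcenter prefix (illustrative)
        file_path="docs/ecs/api-ref",  # local export directory (illustrative)
        file_name="overview",          # hypothetical page name, without .html
    )
    print("matches" if ok else "deviates")
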
diff --git a/otc_doc_convertor/convertor.py b/otc_doc_convertor/convertor.py
deleted file mode 100644
index 305c0c41..00000000
--- a/otc_doc_convertor/convertor.py
+++ /dev/null
@@ -1,777 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import bs4
-import json
-import logging
-import os
-import pathlib
-import re
-import shutil
-
-from jinja2 import FileSystemLoader, Environment, select_autoescape
-
-
-class OTCDocConvertor:
- def __init__(self):
- self.doc_anchors = dict()
- self.doc_links = dict()
-
- @staticmethod
- def get_new_name(current_name):
- new_name = current_name.replace(" - ", "_")
- # This is a unicode char
- new_name = new_name.replace("–", "_")
- new_name = new_name.replace(" ", "_")
- new_name = new_name.replace("/", "_")
- new_name = new_name.replace("=", "_")
- new_name = new_name.replace("+", "")
- new_name = new_name.replace("'", "")
- new_name = new_name.replace('"', "")
- new_name = new_name.replace("`", "")
- new_name = new_name.replace("´", "")
- new_name = new_name.replace(":", "")
- new_name = new_name.replace("?", "")
- new_name = new_name.replace("(", "")
- new_name = new_name.replace(")", "")
- new_name = new_name.replace(",", "")
- new_name = new_name.replace("!", "")
- new_name = new_name.replace("<", "")
- new_name = new_name.replace(">", "")
- new_name = new_name.replace("$", "")
- new_name = new_name.replace("#", "sharp")
- new_name = new_name.replace("%", "pct")
- new_name = new_name.replace('_&_', '_and_')
- new_name = re.sub(r'(\w+)&(\w+)', r'\1_and_\2', new_name)
- new_name = re.sub('(_+)', '_', new_name)
- new_name = new_name.lower()
- return new_name
-
- @staticmethod
- def build_doc_tree(metadata):
- flat_tree = dict()
- for k, v in metadata.items():
- parent_id = v.get("p_code")
- if not parent_id:
- parent_id = 0
-
- if parent_id not in flat_tree:
- flat_tree[parent_id] = list()
- flat_tree[parent_id].append(v)
- return flat_tree
-
- @classmethod
- def get_target_path(cls, code, metadata):
- if code in metadata:
- current = metadata[code]
- if not current.get("p_code"):
- return current["new_name"]
- else:
- return "{0}/{1}".format(
- cls.get_target_path(current["p_code"], metadata),
- current["new_name"],
- )
- else:
- return ""
-
- def make_label(self, soup, name):
- label = soup.new_tag("p")
- label.string = f"..\\_{name.lower()}:"
- return label
-
- def is_element_referred(self, ref, fname):
- return (
- ref in self.doc_links
- or "#" + ref in self.doc_links
- or fname.lower() + "#" + ref in self.doc_links
- )
-
- def rawize_me(self, soup, expressions):
- for to_rawize in expressions:
- for p in soup.body.find_all(string=re.compile(to_rawize)):
- if p.string and p.parent.name not in [
- "b", "strong", "pre", "code"]:
- curr = p.string
- part = re.search(to_rawize, curr)
- # We should not escape inside of bold - this is wrong
- if len(part.groups()) > 0:
- logging.debug(
- "Found element to rawize %s", part.group(1)
- )
- new = curr.replace(
-                            part.group(1), f"<code>{part.group(1)}</code>"
-                        )
- logging.debug("Replacing string with: %s", new)
- p.replace_with(bs4.BeautifulSoup(new, "html.parser"))
- logging.debug("Replacing string with: %s", p.string)
- else:
- logging.error(
- "Cannot find string for rawization anymore"
- )
-
- def streamline_html(self, soup, file_name, args=None):
- # Drop eventual header duplicated anchors
- fname = file_name.replace(".html", "").lower()
- page_anchors = set()
- met_page_anchors = dict()
- for lnk in soup.body.find_all("a"):
- name = None
- if "name" in lnk.attrs and lnk.string is None:
- name = lnk.attrs["name"].lower()
- if name in met_page_anchors:
- # Such anchor already existed on this page, drop it
- lnk.decompose()
- met_page_anchors[name] = True
-
- if name and name.lower() == fname:
- lnk.decompose()
-
- # Process divs
- for i in soup.body.find_all("div"):
- if i.decomposed:
- # if we decompose a later in the code it may still be returned
- # here in the list. Skip those
- continue
- if "note" in i.get("class", []):
- # Notes
- del i["id"]
- if i.img:
- i.img.decompose()
- notetitle = i.find("span", class_="notetitle")
- notebody = i.find(class_="notebody")
- if not (notebody and notebody.get_text()):
- # Some smart people make empty notes. Since this is
- # breaking layout we need to drop those
- i.decompose()
- elif notetitle:
- title = soup.new_tag("div")
- title["class"] = "title"
- title.string = "Note:"
- notetitle.replace_with(title)
- elif "warning" in i.get("class", []):
- # Warnings
- del i["id"]
- if i.img:
- i.img.decompose()
- eltitle = i.find("span", class_="warningtitle")
- if eltitle:
- title = soup.new_tag("div")
- title["class"] = "title"
- title.string = "Warning:"
- eltitle.replace_with(title)
- elif "notice" in i.get("class", []):
- # Notices
- del i["id"]
- if i.img:
- i.img.decompose()
- i["class"] = "important"
- elif "caution" in i.get("class", []):
- # Cautions
- del i["id"]
- if i.img:
- i.img.decompose()
- elif "fignone" in i.get("class", []):
- # Figures
- # When we found figure generate local label (anchor)
- if i.get("id"):
- logging.debug("place figure label")
- i.insert_before(self.make_label(soup, i.get("id")))
- figure = soup.new_tag("figure")
- img = i.find("img")
- cap = i.find("span", class_="figcap")
- if cap is not None:
- cap.name = "figcaption"
- figure.append(cap)
- if img:
- # Store all referred images for copying
- self.doc_images.add(img["src"])
- img["src"] = (
- "/_static/images/"
- + os.path.basename(img["src"])
- )
- del img["width"]
- del img["height"]
- del img["class"]
- del img["title"]
- del img["name"]
- del img["id"]
- figure.append(img)
- i.replace_with(figure)
- elif "section" in i.get("class", []):
- # Sections
- if i.get("id"):
- # When we found section generate local label (anchor)
- sec_id = i.get("id").lower()
- if self.is_element_referred(sec_id, file_name):
- page_anchors.add(sec_id)
- i.insert_before(self.make_label(soup, sec_id))
- i.unwrap()
- elif i.get("id") and i.get("id").startswith("body"):
- i.unwrap()
- else:
- i.name = "p"
-
- # Process remaining images
- for img in soup.body.find_all("img"):
- if img["src"] and not img["src"].startswith("/_static/images"):
- self.doc_images.add(img["src"])
- img["src"] = "/_static/images/" + os.path.basename(img["src"])
- del img["width"]
- del img["height"]
- del img["class"]
- del img["title"]
- del img["id"]
-
- # Drop strong in table headers "/"
- for th in soup.body.find_all("th"):
- if th.p and th.p.strong:
- th.p.strong.unwrap()
-
- if args and args.improve_table_headers:
- # Add spaces around "/"
- for th in soup.body.find_all("th"):
- if hasattr(th, "p") and th.p.string:
- th.p.string = re.sub(r"\b/\b", " / ", th.p.string)
-
- # Drop strong around links "/"
- for strong in soup.body.find_all("strong"):
- if strong.a:
- strong.unwrap()
-
- # table anchors - some tables are referred. Some are having anchor in
- # front, some not. In order to cope with that we analyze every table
- # and if it is referred - prepend anchor. Next anchor processing will
- # skiip it, since such anchor is already placed on the page
- for table in soup.body.find_all("table"):
- # Verify this is really called from somewhere:
- if table.get("id"):
- local_ref = table["id"].lower()
- if self.is_element_referred(local_ref, file_name):
- # We now know something in the document wants this anchor -
- # replace it with label
- if local_ref not in page_anchors:
- lnk = bs4.BeautifulSoup(
- f"
..\\_{local_ref}:
", "html.parser" - ) - table.insert_before(lnk) - page_anchors.add(local_ref) - else: - logging.debug( - "Not placing replaced anchor %s " - "since it already existed", - local_ref, - ) - - # local anchors - for lnk in soup.body.find_all("a"): - if ( - lnk.string is None - and lnk.has_attr("name") - and not re.match(r"^li\d+$", lnk.attrs["name"]) - # anywhere section - and not re.match(r".*section\d+$", lnk.attrs["name"]) - # starts with table - and not re.match(r"^table\d+$", lnk.attrs["name"]) - ): - # Verify this is really called from somewhere: - local_ref = lnk["name"].lower() - if self.is_element_referred(local_ref, file_name): - # We now know something in the document wants this anchor - - # replace it with label - if local_ref not in page_anchors: - logging.debug("Adding anchor") - lnk.name = "p" - lnk.string = f"..\\_{local_ref}:" - del lnk["name"] - page_anchors.add(local_ref) - else: - logging.debug( - "Not placing replaced anchor %s " - " since it already existed", - local_ref, - ) - else: - logging.debug("Dropping unreferred link %s", lnk) - - # Undeline element should not be used at all - for underline in soup.body.find_all("u"): - underline.unwrap() - - for li in soup.body.find_all("li"): - if not li.get("id"): - continue - local_ref = li.get("id").lower() - # Delete li ID to prevent broken RST containers - del li["id"] - - if ( - self.is_element_referred(local_ref, file_name) - and local_ref not in page_anchors - ): - # LI item referred, but no anchor present - logging.debug("Adding missing li %s anchor", local_ref) - li.insert(0, self.make_label(soup, local_ref)) - page_anchors.add(local_ref) - - # Sometimes we have code blocks with line numbers. - #1 - # 2 - # 3 | ....
-            # {em.string}"
-            em.replace_with(bs4.BeautifulSoup(new, "html.parser"))
-
-        # Incredibly dirty hacks:
-        rawize_expressions = [
-            # DWS Dev Guide harcodes
-            r"^(1\|\"iamtext\"\|\"iamvarchar\"\|2006-07-07\|12:00:00)$",
-            r"^(2\|\"iamtext\"\|\"iamvarchar\"\|2022-07-07\|19:00:02)$",
-            r"(\*max_files_per_process\*3)",
-            r"(&&, &&&, .* <#>)",
-            # r"(\*\+)",
-            r"(-\|-)",
-            r"(^-{8}$)"
-        ]
-        self.rawize_me(soup, rawize_expressions)
-
-        # Special asterisks treatement
-        escape_asterisk_re = r"\((\*)[\.,]"
-        self.rawize_me(soup, [escape_asterisk_re])
-
-        # And now remaining specialities
-        rawize_strings = [
-            # "\*\*\*\*\*\*",
-            # r"([\\\/\:\*\?\"\~|<>]{4,})"
-            # ModelArts UMN contain this "unallowed" sequence
-            r"(\\/:\*\?\"<>\|)",
-            # CSS UMN contain "SELECT exclude('*name') FROM my-index"
-            r"(\*name)",
-            # DMS UMN contain: (`~!@#$%^&*()-_=+\|[{}]:'",<.>/?)
-            r"\(([\W\x60_]{10,})\)",
-            # MRS UMN contain: /:*?"<>|\\;&,'`!{}[]$%+
-            r"\s([^a-zA-Z0-9\s]{8,})",
-            # MRS operation guide contain: /*+ MAPJOIN(join_table) \*/
-            # RDS UMN contain: /*FORCE_MASTER*/
-            r"(/\*.{5,}\*/)",
-            # BMS API contain sequence in a dedicated paragraph
-            r"^([^a-zA-Z0-9\s]{10,})$",
-            # OBS special chars - "\$" "\\" etc
-            r"^(\\[\$\\bfnrtvu]{1})$",
-            # CES contains: urn:smn:([a-z]|[A-Z]|[0-9]|\\-){1,32}:....
-            r"\s(urn:smn:\(.*)\.",
-            # "-" only (in tables) is considered as list
-            r"^(-)$",
-            # MRS component guide has: "./mydate_\\\\d*/"
-            r"\w(_)\\",
-            # Pandoc is not properly escaping _ before special chars what makes
-            # it invalid for Sphinx. A bit weird regex:
-            # "[:space:][:word:][:underscore:][:comma:]"
-            r"\s([\w_]+_)[,]",
-            # DWS dirty-fixes part 2
-            r"/(\*\+)",
-        ]
-        self.rawize_me(soup, rawize_strings)
-
-        # Pandoc seem to be not escaping properly asterisks which are
-        # immediately following non word chars
-        # (https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#toc-entry-44)
-        # NOTE(gtema):
-        # 1. this is on purpose placed here since we want to have some special
-        # escapings above
-        # 2. we are not escaping asterisks at the end of the paragraphs (pandoc
-        # deals correctly with that)
-        re_escape = re.compile(r"([-/'\"<\([{])(\*+)(.)")
-        for p in soup.body.find_all(string=re_escape):
-            if p.string and p.parent.name == "p":
-                p.string.replace_with(
-                    re.sub(re_escape, r"\1``\2``\3", p.string))
-
-        # Special case for multiple asterisks and colons like ecs:*:*
-        re_escape = re.compile(r"([:])(\*+)")
-        re_escape_new = re.compile(r"([:])(\*)[^$]")
-        for p in soup.body.find_all(string=re_escape):
-            if p.string and (p.parent.name == "p" or p.parent.name == "li"):
-                string = p.string
-                while re.search(re_escape, string):
-                    if re.search(re_escape_new, string):
-                        string = re.sub(
-                            re_escape, r"\1``\2``", string, count=1)
-                    else:
-                        break
-                p.string.replace_with(string)
-
-        # Drop parent link at the bottom of the page
-        for parent in soup.body.find_all("p", class_="familylinks"):
-            parent.decompose()
-
-        return soup.body
-
-    def main(self):
-        logging.basicConfig(level=logging.DEBUG)
-        parser = argparse.ArgumentParser(description="Process links.")
-        parser.add_argument("path", type=str, help="path to the files")
-        parser.add_argument(
-            "--improve-table-headers",
-            action="store_true",
-            help="Improve table headers by enforcing spaces around `/`",
-        )
-        parser.add_argument(
-            "--pygments-lexer", help="Set particular code-block lexer language"
-        )
-        parser.add_argument(
-            "--dest", help="Directory to write resulting files"
-        )
-        parser.add_argument("--title", required=True, help="Document title")
-        parser.add_argument(
-            "--service", help="Service to which the document belongs to"
-        )
-        parser.add_argument("--repo-name", help="Service repository")
-        parser.add_argument("--pdf-name", help="PDF File name")
-        parser.add_argument(
-            "--templates-location",
-            default="templates",
-            help="Location of additional templates",
-        )
-        self.args = parser.parse_args()
-        if self.args.dest:
-            dest = pathlib.Path(self.args.dest)
-        else:
-            dest = pathlib.Path(self.args.path, "result")
-        dest.mkdir(parents=True, exist_ok=True)
-
-        metadata_file = pathlib.Path(self.args.path, "CLASS.TXT.json")
-        meta_data = dict()
-
-        if not metadata_file.exists():
-            logging.warning(
-                f"CLASS.TXT.json file is missing in {self.args.path}, "
-                f"assuming initial import"
-            )
-            with open(pathlib.Path(dest, "index.rst"), "w") as index:
-                index.write("=" * (len(self.args.title)) + "\n")
-                index.write(self.args.title + "\n")
-                index.write("=" * (len(self.args.title)) + "\n")
-                index.write("\n")
-        else:
-            meta_data = json.loads(open(metadata_file).read())
-        metadata_by_uri = dict()
-        metadata_by_code = dict()
-        self.doc_images = set()
-        for f in meta_data:
-            f["new_name"] = self.get_new_name(f["title"])
-            metadata_by_uri[f["uri"]] = f
-            metadata_by_code[f.get("code")] = f
-
-        tree = self.build_doc_tree(metadata_by_code)
-
-        pathlib.Path(self.args.path, "temp/").mkdir(
-            parents=True, exist_ok=True
-        )
-
-        # Scan all docs for anchors
-        for f in pathlib.Path(self.args.path).glob("*.html"):
-            if f.name not in metadata_by_uri:
-                continue
-            # Registering section links
-            with open(f, "r") as reader:
-                logging.debug(f"Scanning {f.name}")
-                content = reader.read()
-                soup = bs4.BeautifulSoup(content, "lxml")
-                for lnk in soup.body.find_all("a"):
-                    if "name" in lnk.attrs and lnk.string is None:
-                        anchor = lnk.attrs["name"]
-                        title = re.sub("[ _:]", "-", anchor)
-                        res = dict(
-                            fname=f.name.lower(),
-                            title=title,
-                            replace=title.lower()
-                        )
-                        self.doc_anchors[anchor] = res
-                    if "href" in lnk.attrs and lnk["href"]:
-                        self.doc_links[lnk["href"].lower()] = f.name
-
-        for f in pathlib.Path(self.args.path).glob("*.html"):
-            if f.name not in metadata_by_uri:
-                continue
-            _target = metadata_by_uri[f.name]
-            target = _target["new_name"]
-            target_path = self.get_target_path(
-                _target["p_code"], metadata_by_code
-            )
-            pathlib.Path(self.args.path, "temp").mkdir(
-                parents=True, exist_ok=True
-            )
-            pathlib.Path(self.args.path, "tmp_result/" + target_path).mkdir(
-                parents=True, exist_ok=True
-            )
-            pathlib.Path(dest, target_path).mkdir(parents=True, exist_ok=True)
-
-            # Pre-processing of html content
-            with open(f, "r") as reader, open(
-                pathlib.Path(self.args.path, f"temp/{target}.tmp"), "w"
-            ) as writer:
-                # if f.name not in [
-                # ]:
-                #     continue
-                logging.info(f"Pre-Processing {f} as {target}")
-                content = reader.read()
-                # Preprocess - Fix space inside link and not text
-                # i.e. `
|
s3:* | |
-        s3:* | If the VPC, subnet, and security group are displayed in the DB
-        instance list, you need to configure vpc:*:get and
-        vpc:*:list. |
- """
- expected = """
-        If the VPC, subnet, and security group are displayed in the DB
-        instance list, you need to configure vpc:``*``:get and
-        vpc:``*``:list. |
- """
- soup = bs4.BeautifulSoup(test_data, 'lxml')
- res = self.convertor.streamline_html(soup, "dummy")
- self.assertEqual(str(res.find('td')), expected.strip())
-
- def test_streamline_html_escape_4(self):
- test_data = """
-         |
- """
- expected = """
-         |
- """
- soup = bs4.BeautifulSoup(test_data, 'lxml')
- res = self.convertor.streamline_html(soup, "dummy")
- self.assertEqual(str(res.find('td')), expected.strip())
-
- def test_streamline_html_escape_5(self):
- test_data = """
- If the VPC, subnet, and security group are displayed in the DB instance list, you need to configure vpc:*:get and vpc:*:list. |
- """ # noqa
- expected = """
- If the VPC, subnet, and security group are displayed in the DB instance list, you need to configure vpc:``*``:get and vpc:``*``:list. |
- """ # noqa
- soup = bs4.BeautifulSoup(test_data, 'lxml')
- res = self.convertor.streamline_html(soup, "dummy")
- self.assertEqual(
- str(res.find('td')),
- expected.strip(),
- )
-
- def test_streamline_html_escape_12(self):
-        test_data = """
- Mandatory |
- """ # noqa
- expected = """
- Mandatory |
- """ # noqa
- soup = bs4.BeautifulSoup(test_data, 'lxml')
- res = self.convertor.streamline_html(soup, "dummy")
- self.assertEqual(
- str(res.find('th')),
- expected.strip(),
- )
-
- def test_streamline_html_escape_17(self):
-        test_data = """
-        Mandatory |
-        """ # noqa
-        expected = """
-        Mandatory |
-        """ # noqa
-        soup = bs4.BeautifulSoup(test_data, 'lxml')
-        res = self.convertor.streamline_html(soup, "dummy")
-        self.assertEqual(
-            str(res.find('th')),
-            expected.strip(),
-        )
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index eb93ac2b..2203f28c 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -5,12 +5,10 @@
     - ensure-pip
     - ensure-virtualenv
     - role: "ensure-pandoc"
-      vars:
-        ensure_pandoc_version: "2.19.2"
   tasks:
     - name: Install convertor
       pip:
         chdir: "{{ zuul.project.src_dir }}"
         virtualenv: "{{ ansible_user_dir }}/.venv"
-        name: .
+        name: "{{ ansible_user_dir }}/{{ zuul.projects['gitea.eco.tsi-dev.otc-service.com/docs/doc-convertor'].src_dir }}"
         editable: "yes"
diff --git a/setup.cfg b/setup.cfg
index a7519989..df8c8162 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,11 @@
 [metadata]
-name = otc-doc-convertor
+name = otc-doc-exports
 author = Open Telekom Cloud - Ecosystem Squad
 author_email = dl-pbcotcdeleco@t-systems.com
-description = Python program to convert docs exported in HTML into RST
+description = Doc sources (HTML) to track changes in the vendors documentation system
 description_file = README.md
-home_page = https://github.com/opentelekomcloud-docs/doc-exports
+home_page = https://gitea.eco.tsi-dev.otc-service.com/docs/doc-exports
 classifier =
     License :: OSI Approved :: Apache Software License
     Operating System :: POSIX :: Linux
@@ -17,12 +17,3 @@ classifier =
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
-keywords = Sphinx, search, python
-
-[options]
-packages = otc_doc_convertor
-
-[options.entry_points]
-console_scripts =
-    otc-convert-doc = otc_doc_convertor.convertor:main
-    otc-convert-compare = otc_doc_convertor.comparator:main
diff --git a/templates/conf.py b/templates/conf.py
index 8ce7e2dd..11f08baa 100644
--- a/templates/conf.py
+++ b/templates/conf.py
@@ -18,7 +18,7 @@ import os
 import sys
 
 extensions = [
-    'otcdocstheme'
+    'otcdocstheme',
 ]
 
 otcdocs_auto_name = False
diff --git a/zuul.yaml b/zuul.yaml
index 0305fd5d..08748af8 100644
--- a/zuul.yaml
+++ b/zuul.yaml
@@ -10,6 +10,8 @@
     pre-run: playbooks/pre.yaml
     run: playbooks/run.yaml
     post-run: playbooks/post.yaml
+    required-projects:
+      - name: docs/doc-convertor
     vars:
       docs_update_data_file: "metadata.yaml"

@@ -20,7 +22,6 @@
       Convert Application doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/aom
       - docs/cse
       - docs/dms
@@ -39,7 +40,6 @@
       Convert BigData doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/apig
       - docs/css
       - docs/dataartsstudio
@@ -62,7 +62,6 @@
       Convert Compute doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/as
       - docs/bms
       - docs/deh
@@ -82,7 +81,6 @@
       Convert Container doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/cce
       - docs/cci
       - docs/swr
@@ -100,7 +98,6 @@
       Convert Database doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/das
       - docs/dcs
       - docs/ddm
@@ -125,7 +122,6 @@
       Convert Network doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/cdn
       - docs/dc
       - docs/dns
@@ -151,7 +147,6 @@
       Convert Management doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/ces
       - docs/cts
       - docs/lts
@@ -172,7 +167,6 @@
       Convert Security doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/antiddos
       - docs/dbss
       - docs/iam
@@ -193,7 +187,6 @@
       Convert Storage doc exports from html to rst and generate
       corresponding rst patch files.
     files:
-      - otc_doc_convertor/convertor.py
       - docs/cbr
       - docs/evs
       - docs/obs
@@ -208,22 +201,6 @@
     vars:
       docs_service_category: "storage"

-- job:
-    name: otc-doc-exports-compare-current-hc
-    parent: unittests
-    voting: false
-    description: |
-      Verify whether content is matching the one published in the current
-      HelpCenter.
-    files:
-      - docs
-      - otc_doc_convertor/comparator.py
-      - roles/compare
-    vars:
-      docs_update_data_file: "metadata.yaml"
-    pre-run: playbooks/pre.yaml
-    run: playbooks/compare.yaml
-
 - project:
     merge-mode: squash-merge
     default-branch: main
@@ -231,14 +208,11 @@
       docs_rst_location: "docs"
       docs_base_location: "base"
       docs_new_location: "new"
-      ensure_pandoc_version: "3.1"
+      ensure_pandoc_version: "2.19.2"
       propose_change_git_provider: "gitea"
       propose_change_git_baseurl: "gitea.eco.tsi-dev.otc-service.com"
     check:
       jobs:
-        - otc-tox-py39
-        - otc-tox-pep8:
-            nodeset: ubuntu-focal
         - otc-doc-exports-convert-application
         - otc-doc-exports-convert-big-data
         - otc-doc-exports-convert-compute
@@ -284,11 +258,8 @@
             dependencies:
              - name: otc-doc-exports-convert-storage
                soft: true
-        # - otc-doc-exports-compare-current-hc
     gate:
       jobs:
-        - otc-tox-pep8:
-            nodeset: ubuntu-focal
         - otc-doc-exports-convert-application
         - otc-doc-exports-convert-big-data
         - otc-doc-exports-convert-compute
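
The conversion logic itself now lives in the separate docs/doc-convertor project (see the new required-projects entry in zuul.yaml and the changed pip install path in playbooks/pre.yaml). The file-name normalization that the deleted OTCDocConvertor.get_new_name() applied to exported page titles can be summarised with the following standalone sketch; it is a re-implementation for illustration only, and the sample titles are invented:

    # Sketch of the title -> file name mapping performed by the removed
    # OTCDocConvertor.get_new_name(); not the authoritative code, which now
    # lives in docs/doc-convertor.
    import re

    def normalize_title(title: str) -> str:
        name = title.replace(" - ", "_").replace("–", "_").replace(" ", "_")
        name = name.replace("/", "_").replace("=", "_")
        for ch in "+'\"`´:?(),!<>$":      # characters that are simply dropped
            name = name.replace(ch, "")
        name = name.replace("#", "sharp").replace("%", "pct")
        name = name.replace("_&_", "_and_")
        name = re.sub(r"(\w+)&(\w+)", r"\1_and_\2", name)
        name = re.sub("(_+)", "_", name)  # collapse repeated underscores
        return name.lower()

    # Hypothetical exported titles:
    print(normalize_title("Creating an ECS - API Usage"))  # creating_an_ecs_api_usage
    print(normalize_title("Quotas & Constraints"))         # quotas_and_constraints
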