From cbfff7d7b67821f80cb50e943e10aa810eeaf49a Mon Sep 17 00:00:00 2001 From: Andrew Scheller Date: Mon, 12 Aug 2024 17:52:15 +0100 Subject: [PATCH] Standardise whitespace and shebang in Python scripts --- scripts/create_output_supplemental_data.py | 34 +-- scripts/postprocess_doxygen_adoc.py | 264 +++++++++---------- scripts/postprocess_doxygen_xml.py | 284 ++++++++++----------- scripts/tests/test_doxygen_adoc.py | 152 +++++------ tests/test_create_build_adoc.py | 2 + tests/test_create_build_adoc_doxygen.py | 2 + tests/test_create_build_adoc_include.py | 2 + tests/test_create_nav.py | 2 + 8 files changed, 377 insertions(+), 365 deletions(-) diff --git a/scripts/create_output_supplemental_data.py b/scripts/create_output_supplemental_data.py index b24e05e736..4052046f1c 100755 --- a/scripts/create_output_supplemental_data.py +++ b/scripts/create_output_supplemental_data.py @@ -6,24 +6,24 @@ import re def get_release_version(doxyfile_path): - version = "unknown" - with open(doxyfile_path) as f: - doxy_content = f.read() - version_search = re.search(r"(\nPROJECT_NUMBER\s*=\s*)([\d.]+)", doxy_content) - if version_search is not None: - version = version_search.group(2) - return version + version = "unknown" + with open(doxyfile_path) as f: + doxy_content = f.read() + version_search = re.search(r"(\nPROJECT_NUMBER\s*=\s*)([\d.]+)", doxy_content) + if version_search is not None: + version = version_search.group(2) + return version def write_new_data_file(output_json_file, data_obj): - f = open(output_json_file, 'w') - f.write(json.dumps(data_obj)) - f.close() + f = open(output_json_file, 'w') + f.write(json.dumps(data_obj)) + f.close() if __name__ == "__main__": - # read the doxygen config file - doxyfile_path = sys.argv[1] - # output the new data file - output_json_file = sys.argv[2] - version = get_release_version(doxyfile_path) - data_obj = {"pico_sdk_release": version} - write_new_data_file(output_json_file, data_obj) + # read the doxygen config file + doxyfile_path = sys.argv[1] + # output the new data file + output_json_file = sys.argv[2] + version = get_release_version(doxyfile_path) + data_obj = {"pico_sdk_release": version} + write_new_data_file(output_json_file, data_obj) diff --git a/scripts/postprocess_doxygen_adoc.py b/scripts/postprocess_doxygen_adoc.py index bf6d4fe0a4..eab442bf03 100644 --- a/scripts/postprocess_doxygen_adoc.py +++ b/scripts/postprocess_doxygen_adoc.py @@ -1,149 +1,151 @@ +#!/usr/bin/env python3 + import re import sys import os import json def cleanup_text_page(adoc_file, output_adoc_path, link_targets): - filename = os.path.basename(adoc_file) - with open(adoc_file) as f: - adoc_content = f.read() - # remove any errant spaces before anchors - adoc_content = re.sub(r'( +)(\[\[[^[]*?\]\])', "\\2", adoc_content) - # collect link targets - for line in adoc_content.split('\n'): - link_targets = collect_link_target(line, filename) - with open(adoc_file, 'w') as f: - f.write(adoc_content) - return link_targets + filename = os.path.basename(adoc_file) + with open(adoc_file) as f: + adoc_content = f.read() + # remove any errant spaces before anchors + adoc_content = re.sub(r'( +)(\[\[[^[]*?\]\])', "\\2", adoc_content) + # collect link targets + for line in adoc_content.split('\n'): + link_targets = collect_link_target(line, filename) + with open(adoc_file, 'w') as f: + f.write(adoc_content) + return link_targets def collect_link_target(line, chapter_filename): - # collect a list of all link targets, so we can fix internal links - l = re.search(r'(#)([^,\]]+)([,\]])', 
line)
- if l is not None:
- link_targets[l.group(2)] = chapter_filename
- return link_targets
+ # collect a list of all link targets, so we can fix internal links
+ l = re.search(r'(#)([^,\]]+)([,\]])', line)
+ if l is not None:
+ link_targets[l.group(2)] = chapter_filename
+ return link_targets
 def resolve_links(adoc_file, link_targets):
- filename = os.path.basename(adoc_file)
- with open(adoc_file) as f:
- adoc_content = f.read()
- output_content = []
- for line in adoc_content.split('\n'):
- # e.g., <>
- m = re.search("(<<)([^,]+)(,?[^>]*>>)", line)
- if m is not None:
- target = m.group(2)
- # only resolve link if it points to another file
- if target in link_targets and link_targets[target] != filename:
- new_target = link_targets[target]+"#"+target
- line = re.sub("(<<)([^,]+)(,?[^>]*>>)", f"\\1{new_target}\\3", line)
- output_content.append(line)
- with open(adoc_file, 'w') as f:
- f.write('\n'.join(output_content))
- return
+ filename = os.path.basename(adoc_file)
+ with open(adoc_file) as f:
+ adoc_content = f.read()
+ output_content = []
+ for line in adoc_content.split('\n'):
+ # e.g., <>
+ m = re.search("(<<)([^,]+)(,?[^>]*>>)", line)
+ if m is not None:
+ target = m.group(2)
+ # only resolve link if it points to another file
+ if target in link_targets and link_targets[target] != filename:
+ new_target = link_targets[target]+"#"+target
+ line = re.sub("(<<)([^,]+)(,?[^>]*>>)", f"\\1{new_target}\\3", line)
+ output_content.append(line)
+ with open(adoc_file, 'w') as f:
+ f.write('\n'.join(output_content))
+ return
 def build_json(sections, output_path):
- json_path = os.path.join(output_path, "picosdk_index.json")
- with open(json_path, 'w') as f:
- f.write(json.dumps(sections, indent="\t"))
- return
+ json_path = os.path.join(output_path, "picosdk_index.json")
+ with open(json_path, 'w') as f:
+ f.write(json.dumps(sections, indent="\t"))
+ return
 def tag_content(adoc_content):
- # this is dependent on the same order of attributes every time
- ids_to_tag = re.findall(r'(\[#)(.*?)(,.*?contextspecific,tag=)(.*?)(,type=)(.*?)(\])', adoc_content)
- for this_id in ids_to_tag:
- tag = re.sub("PICO_", "", this_id[3])
- img = f" [.contexttag {tag}]*{tag}*"
- # `void <> ()`:: An rp2040 function.
- adoc_content = re.sub(rf'(\n`.*?<<{this_id[1]},.*?`)(::)', f"\\1{img}\\2", adoc_content)
- # |<<group_hardware_base,hardware_base>>\n|Low-level types and (atomic) accessors for memory-mapped hardware registers.
- adoc_content = re.sub(rf'(\n\|<<{this_id[1]},.*?>>\n\|.*?)(\n)', f"\\1{img}\\2", adoc_content)
- # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n=== anonymous enum
- HEADING_RE = re.compile(r'(\[#.*?role=contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*=+\s+\S*?)(\n)')
- # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n*anonymous enum*
- H6_HEADING_RE = re.compile(r'(\[#.*?role=h6 contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*\*\S+.*?)(\n)')
- # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n----
- NONHEADING_RE = re.compile(r'(\[#.*?role=h?6?\s?contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*[^=\*])')
- adoc_content = re.sub(HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
- adoc_content = re.sub(H6_HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
- adoc_content = re.sub(NONHEADING_RE, f'[.contexttag \\2]*\\2*\n\n\\1\\2\\3', adoc_content)
- return adoc_content
+ # this is dependent on the same order of attributes every time
+ ids_to_tag = re.findall(r'(\[#)(.*?)(,.*?contextspecific,tag=)(.*?)(,type=)(.*?)(\])', adoc_content)
+ for this_id in ids_to_tag:
+ tag = re.sub("PICO_", "", this_id[3])
+ img = f" [.contexttag {tag}]*{tag}*"
+ # `void <> ()`:: An rp2040 function.
+ adoc_content = re.sub(rf'(\n`.*?<<{this_id[1]},.*?`)(::)', f"\\1{img}\\2", adoc_content)
+ # |<<group_hardware_base,hardware_base>>\n|Low-level types and (atomic) accessors for memory-mapped hardware registers.
+ adoc_content = re.sub(rf'(\n\|<<{this_id[1]},.*?>>\n\|.*?)(\n)', f"\\1{img}\\2", adoc_content)
+ # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n=== anonymous enum
+ HEADING_RE = re.compile(r'(\[#.*?role=contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*=+\s+\S*?)(\n)')
+ # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n*anonymous enum*
+ H6_HEADING_RE = re.compile(r'(\[#.*?role=h6 contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*\*\S+.*?)(\n)')
+ # [#group_cyw43_ll_1ga0411cd49bb5b71852cecd93bcbf0ca2d,role=h6 contextspecific,tag=PICO_RP2040,type=PICO_RP2040]\n----
+ NONHEADING_RE = re.compile(r'(\[#.*?role=h?6?\s?contextspecific.*?tag=P?I?C?O?_?)(.*?)(,.*?\]\s*?\n\s*[^=\*])')
+ adoc_content = re.sub(HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
+ adoc_content = re.sub(H6_HEADING_RE, f'\\1\\2\\3 [.contexttag \\2]*\\2*\n', adoc_content)
+ adoc_content = re.sub(NONHEADING_RE, f'[.contexttag \\2]*\\2*\n\n\\1\\2\\3', adoc_content)
+ return adoc_content
 def postprocess_doxygen_adoc(adoc_file, output_adoc_path, link_targets):
- output_path = re.sub(r'[^/]+$', "", adoc_file)
- sections = [{
- "group_id": "index_doxygen",
- "name": "Introduction",
- "description": "An introduction to the Pico SDK",
- "html": "index_doxygen.html",
- "subitems": []
- }]
- with open(adoc_file) as f:
- adoc_content = f.read()
- # first, lets add any tags
- adoc_content = tag_content(adoc_content)
- # now split the file into top-level sections:
- # toolchain expects all headings to be two levels lower
- adoc_content = re.sub(r'(\n==)(=+ \S+)', "\n\\2", adoc_content)
- # then make it easier to match the chapter breaks
- adoc_content = re.sub(r'(\[#.*?,reftext=".*?"\])(\s*\n)(= )', "\\1\\3", adoc_content)
- # find all the chapter descriptions, to use later
- descriptions = 
re.findall(r'(\[#.*?,reftext=".*?"\])(= .*?\n\s*\n)(.*?)(\n)', adoc_content) - CHAPTER_START_RE = re.compile(r'(\[#)(.*?)(,reftext=".*?"\]= )(.*?$)') - # check line by line; if the line matches our chapter break, - # then pull all following lines into the chapter list until a new match. - chapter_filename = "all_groups.adoc" - current_chapter = None - chapter_dict = {} - counter = 0 - for line in adoc_content.split('\n'): - link_targets = collect_link_target(line, chapter_filename) - m = CHAPTER_START_RE.match(line) - if m is not None: - # write the previous chapter - if current_chapter is not None: - with open(chapter_path, 'w') as f: - f.write('\n'.join(current_chapter)) - # start the new chapter - current_chapter = [] - # set the data for this chapter - group_id = re.sub("^group_+", "", m.group(2)) - chapter_filename = group_id+".adoc" - chapter_path = os.path.join(output_path, chapter_filename) - chapter_dict = { - "group_id": group_id, - "html": group_id+".html", - "name": m.group(4), - "subitems": [], - "description": descriptions[counter][2] - } - sections.append(chapter_dict) - # re-split the line into 2 - start_line = re.sub("= ", "\n= ", line) - current_chapter.append(start_line) - counter += 1 - else: - current_chapter.append(line) - # write the last chapter - if current_chapter is not None: - with open(chapter_path, 'w') as f: - f.write('\n'.join(current_chapter)) - build_json(sections, output_path) - os.remove(adoc_file) - return link_targets + output_path = re.sub(r'[^/]+$', "", adoc_file) + sections = [{ + "group_id": "index_doxygen", + "name": "Introduction", + "description": "An introduction to the Pico SDK", + "html": "index_doxygen.html", + "subitems": [] + }] + with open(adoc_file) as f: + adoc_content = f.read() + # first, lets add any tags + adoc_content = tag_content(adoc_content) + # now split the file into top-level sections: + # toolchain expects all headings to be two levels lower + adoc_content = re.sub(r'(\n==)(=+ \S+)', "\n\\2", adoc_content) + # then make it easier to match the chapter breaks + adoc_content = re.sub(r'(\[#.*?,reftext=".*?"\])(\s*\n)(= )', "\\1\\3", adoc_content) + # find all the chapter descriptions, to use later + descriptions = re.findall(r'(\[#.*?,reftext=".*?"\])(= .*?\n\s*\n)(.*?)(\n)', adoc_content) + CHAPTER_START_RE = re.compile(r'(\[#)(.*?)(,reftext=".*?"\]= )(.*?$)') + # check line by line; if the line matches our chapter break, + # then pull all following lines into the chapter list until a new match. 
+ chapter_filename = "all_groups.adoc" + current_chapter = None + chapter_dict = {} + counter = 0 + for line in adoc_content.split('\n'): + link_targets = collect_link_target(line, chapter_filename) + m = CHAPTER_START_RE.match(line) + if m is not None: + # write the previous chapter + if current_chapter is not None: + with open(chapter_path, 'w') as f: + f.write('\n'.join(current_chapter)) + # start the new chapter + current_chapter = [] + # set the data for this chapter + group_id = re.sub("^group_+", "", m.group(2)) + chapter_filename = group_id+".adoc" + chapter_path = os.path.join(output_path, chapter_filename) + chapter_dict = { + "group_id": group_id, + "html": group_id+".html", + "name": m.group(4), + "subitems": [], + "description": descriptions[counter][2] + } + sections.append(chapter_dict) + # re-split the line into 2 + start_line = re.sub("= ", "\n= ", line) + current_chapter.append(start_line) + counter += 1 + else: + current_chapter.append(line) + # write the last chapter + if current_chapter is not None: + with open(chapter_path, 'w') as f: + f.write('\n'.join(current_chapter)) + build_json(sections, output_path) + os.remove(adoc_file) + return link_targets if __name__ == '__main__': - output_adoc_path = sys.argv[1] - adoc_files = [f for f in os.listdir(output_adoc_path) if re.search(".adoc", f) is not None] - link_targets = {} - for adoc_file in adoc_files: - adoc_filepath = os.path.join(output_adoc_path, adoc_file) - if re.search("all_groups.adoc", adoc_file) is not None: - link_targets = postprocess_doxygen_adoc(adoc_filepath, output_adoc_path, link_targets) - else: - link_targets = cleanup_text_page(adoc_filepath, output_adoc_path, link_targets) - # now that we have a complete list of all link targets, resolve all internal links - adoc_files = [f for f in os.listdir(output_adoc_path) if re.search(".adoc", f) is not None] - for adoc_file in adoc_files: - adoc_filepath = os.path.join(output_adoc_path, adoc_file) - resolve_links(adoc_filepath, link_targets) + output_adoc_path = sys.argv[1] + adoc_files = [f for f in os.listdir(output_adoc_path) if re.search(".adoc", f) is not None] + link_targets = {} + for adoc_file in adoc_files: + adoc_filepath = os.path.join(output_adoc_path, adoc_file) + if re.search("all_groups.adoc", adoc_file) is not None: + link_targets = postprocess_doxygen_adoc(adoc_filepath, output_adoc_path, link_targets) + else: + link_targets = cleanup_text_page(adoc_filepath, output_adoc_path, link_targets) + # now that we have a complete list of all link targets, resolve all internal links + adoc_files = [f for f in os.listdir(output_adoc_path) if re.search(".adoc", f) is not None] + for adoc_file in adoc_files: + adoc_filepath = os.path.join(output_adoc_path, adoc_file) + resolve_links(adoc_filepath, link_targets) diff --git a/scripts/postprocess_doxygen_xml.py b/scripts/postprocess_doxygen_xml.py index b0f0b9e165..b2ae893142 100755 --- a/scripts/postprocess_doxygen_xml.py +++ b/scripts/postprocess_doxygen_xml.py @@ -13,157 +13,157 @@ # instead of searching every xml every time, make a list of available functions in each xml def compile_id_list(xml_content): - # get any element that has an id - els = xml_content.find_all(id=True) - id_list = [x["id"] for x in els] - return id_list + # get any element that has an id + els = xml_content.find_all(id=True) + id_list = [x["id"] for x in els] + return id_list def insert_example_code_from_file(combined_content): - els = combined_content.doxygen.find_all("programlisting") - all_examples = {} - # get the examples 
path
- examples_path = re.sub(r"/scripts/.+$", "/lib/pico-examples", os.path.realpath(__file__))
- # get a recursive list of all files in examples
- for f in os.walk(examples_path):
- for filename in f[2]:
- if filename in all_examples:
- all_examples[filename].append(os.path.join(f[0], filename))
- else:
- all_examples[filename] = [os.path.join(f[0], filename)]
- for el in els:
- if el.get("filename") is not None:
- filename = el.get("filename")
- # find the file here or in examples
- if filename in all_examples:
- with open(all_examples[filename][0]) as f:
- example_content = f.read()
- example_lines = example_content.split("\n")
- for line in example_lines:
- codeline = BeautifulSoup("<codeline>"+html.escape(line)+"</codeline>", 'xml')
- el.append(codeline)
- return combined_content
+ els = combined_content.doxygen.find_all("programlisting")
+ all_examples = {}
+ # get the examples path
+ examples_path = re.sub(r"/scripts/.+$", "/lib/pico-examples", os.path.realpath(__file__))
+ # get a recursive list of all files in examples
+ for f in os.walk(examples_path):
+ for filename in f[2]:
+ if filename in all_examples:
+ all_examples[filename].append(os.path.join(f[0], filename))
+ else:
+ all_examples[filename] = [os.path.join(f[0], filename)]
+ for el in els:
+ if el.get("filename") is not None:
+ filename = el.get("filename")
+ # find the file here or in examples
+ if filename in all_examples:
+ with open(all_examples[filename][0]) as f:
+ example_content = f.read()
+ example_lines = example_content.split("\n")
+ for line in example_lines:
+ codeline = BeautifulSoup("<codeline>"+html.escape(line)+"</codeline>", 'xml')
+ el.append(codeline)
+ return combined_content
 def walk_and_tag_xml_tree(el, output_contexts, all_contexts):
- """
- Process an individual xml file, adding context-specific tags as needed.

- For performance purposes (to avoid traversing multiple dicts for every element),
- we use element IDs as the key, and the contexts it belongs to as the value. 
+ Thus, output_contexts will look something like this: + { + "group__hardware__gpio_1gaecd01f57f1cac060abe836793f7bea18": [ + "PICO_RP2040", + "FOO" + ], + "group__hardware__gpio_1ga7becbc8db22ff0a54707029a2c0010e6": [ + "PICO_RP2040" + ], + "group__hardware__gpio_1ga192335a098d40e08b23cc6d4e0513786": [ + "PICO_RP2040" + ], + "group__hardware__gpio_1ga8510fa7c1bf1c6e355631b0a2861b22b": [ + "FOO", + "BAR" + ], + "group__hardware__gpio_1ga5d7dbadb2233e2e6627e9101411beb27": [ + "FOO" + ] + } + """ + targets = [] + if el.get('id') is not None: + myid = el["id"] + if myid in output_contexts: + targets = output_contexts[myid] + # if this content is in all contexts, no label is required + if len(targets) > 0 and len(targets) < len(all_contexts): + el["role"] = "contextspecific" + el["tag"] = ', '.join(targets) + if len(targets) > 1: + el["type"] = "multi" + else: + el["type"] = targets[0] + # only check nested children if the parent has NOT been tagged as context-specific + else: + # for child in el.iterchildren(): + for child in el.find_all(True, recursive=False): + walk_and_tag_xml_tree(child, output_contexts, all_contexts) + else: + for child in el.find_all(True, recursive=False): + walk_and_tag_xml_tree(child, output_contexts, all_contexts) + return def postprocess_doxygen_xml_file(combined_xmlfile, xmlfiles, output_context_paths): - """ - Process an individual xml file, adding context-specific tags as needed. + """ + Process an individual xml file, adding context-specific tags as needed. - xmlfiles will look something like this: - { - "PICO_RP2040": "/path/to/PICO_RP2040/myfilename.xml", - "FOO": "/path/to/FOO/myfilename.xml" - } - """ - output_contexts = {} - for item in xmlfiles: - label = item - # parse the xml file - with open(xmlfiles[item], encoding="utf-8") as f: - xml_content = BeautifulSoup(f, 'xml') - # compile a list of all element ids within the file - id_list = compile_id_list(xml_content.doxygen) - # create the map of ids and their contexts (see example above) - for myid in id_list: - if myid in output_contexts: - output_contexts[myid].append(label) - else: - output_contexts[myid] = [label] - with open(combined_xmlfile, encoding="utf-8") as f: - combined_content = BeautifulSoup(f, 'xml') - # start with top-level children, and then walk the tree as appropriate - els = combined_content.doxygen.find_all(True, recursive=False) - for el in els: - walk_and_tag_xml_tree(el, output_contexts, list(output_context_paths.keys())) - combined_content = insert_example_code_from_file(combined_content) - return str(combined_content) + xmlfiles will look something like this: + { + "PICO_RP2040": "/path/to/PICO_RP2040/myfilename.xml", + "FOO": "/path/to/FOO/myfilename.xml" + } + """ + output_contexts = {} + for item in xmlfiles: + label = item + # parse the xml file + with open(xmlfiles[item], encoding="utf-8") as f: + xml_content = BeautifulSoup(f, 'xml') + # compile a list of all element ids within the file + id_list = compile_id_list(xml_content.doxygen) + # create the map of ids and their contexts (see example above) + for myid in id_list: + if myid in output_contexts: + output_contexts[myid].append(label) + else: + output_contexts[myid] = [label] + with open(combined_xmlfile, encoding="utf-8") as f: + combined_content = BeautifulSoup(f, 'xml') + # start with top-level children, and then walk the tree as appropriate + els = combined_content.doxygen.find_all(True, recursive=False) + for el in els: + walk_and_tag_xml_tree(el, output_contexts, list(output_context_paths.keys())) + 
combined_content = insert_example_code_from_file(combined_content) + return str(combined_content) def postprocess_doxygen_xml(xml_path): - """ - Expectation is that xml for each context will be generated - within a subfolder titled with the context name, e.g.: - - doxygen_build/ - - combined/ - - PICO_RP2040/ - - FOO/ - """ - # collect a list of all context-specific subdirs - skip = ["index.xml", "Doxyfile.xml"] - output_context_paths = {} - combined_output_path = None - for item in list(filter(lambda x: os.path.isdir(os.path.join(xml_path, x)), os.listdir(xml_path))): - if item == "combined": - # if doxygen ever changes the output path for the xml, this will need to be updated - combined_output_path = os.path.join(xml_path, item, "docs", "doxygen", "xml") - else: - # same as above - output_context_paths[item] = os.path.join(xml_path, item, "docs", "doxygen", "xml") - # we need to process all generated xml files - for combined_xmlfile in list(filter(lambda x: re.search(r'\.xml$', x) is not None, os.listdir(combined_output_path))): - # skip the index -- it's just a listing - if combined_xmlfile not in skip: - xmlfiles = {} - # get all context-specific versions of this file - for context in output_context_paths: - if os.path.isfile(os.path.join(output_context_paths[context], combined_xmlfile)): - xmlfiles[context] = os.path.join(output_context_paths[context], combined_xmlfile) - combined_content = postprocess_doxygen_xml_file(os.path.join(combined_output_path, combined_xmlfile), xmlfiles, output_context_paths) - # write the output - with open(os.path.join(combined_output_path, combined_xmlfile), 'w') as f: - f.write(combined_content) - return + """ + Expectation is that xml for each context will be generated + within a subfolder titled with the context name, e.g.: + - doxygen_build/ + - combined/ + - PICO_RP2040/ + - FOO/ + """ + # collect a list of all context-specific subdirs + skip = ["index.xml", "Doxyfile.xml"] + output_context_paths = {} + combined_output_path = None + for item in list(filter(lambda x: os.path.isdir(os.path.join(xml_path, x)), os.listdir(xml_path))): + if item == "combined": + # if doxygen ever changes the output path for the xml, this will need to be updated + combined_output_path = os.path.join(xml_path, item, "docs", "doxygen", "xml") + else: + # same as above + output_context_paths[item] = os.path.join(xml_path, item, "docs", "doxygen", "xml") + # we need to process all generated xml files + for combined_xmlfile in list(filter(lambda x: re.search(r'\.xml$', x) is not None, os.listdir(combined_output_path))): + # skip the index -- it's just a listing + if combined_xmlfile not in skip: + xmlfiles = {} + # get all context-specific versions of this file + for context in output_context_paths: + if os.path.isfile(os.path.join(output_context_paths[context], combined_xmlfile)): + xmlfiles[context] = os.path.join(output_context_paths[context], combined_xmlfile) + combined_content = postprocess_doxygen_xml_file(os.path.join(combined_output_path, combined_xmlfile), xmlfiles, output_context_paths) + # write the output + with open(os.path.join(combined_output_path, combined_xmlfile), 'w') as f: + f.write(combined_content) + return if __name__ == '__main__': - xml_path = sys.argv[1] - file_path = os.path.realpath(__file__) - # splitting thse subs into two parts to make testing easier - # xml_path = re.sub(r'/documentation-toolchain/.*?$', "/"+xml_path, re.sub(r'/lib/', "/", file_path)) - postprocess_doxygen_xml(xml_path) + xml_path = sys.argv[1] + file_path = 
os.path.realpath(__file__) + # splitting thse subs into two parts to make testing easier + # xml_path = re.sub(r'/documentation-toolchain/.*?$', "/"+xml_path, re.sub(r'/lib/', "/", file_path)) + postprocess_doxygen_xml(xml_path) diff --git a/scripts/tests/test_doxygen_adoc.py b/scripts/tests/test_doxygen_adoc.py index e4ce20b5e2..9c25f6cc76 100644 --- a/scripts/tests/test_doxygen_adoc.py +++ b/scripts/tests/test_doxygen_adoc.py @@ -1,4 +1,6 @@ -import os +#!/usr/bin/env python3 + +import os import re import unittest from pathlib import Path @@ -6,85 +8,85 @@ # to run: on the command line, from the /scripts dir: python3 -m unittest tests.test_doxygen_adoc class TestDoxygenAdoc(unittest.TestCase): - def setUp(self): - self.current_file = os.path.realpath(__file__) - self.current_dir = Path(self.current_file).parent.absolute() - self.parent_dir = re.sub("/tests", "", str(self.current_dir)) + def setUp(self): + self.current_file = os.path.realpath(__file__) + self.current_dir = Path(self.current_file).parent.absolute() + self.parent_dir = re.sub("/tests", "", str(self.current_dir)) - def tearDown(self): - pass + def tearDown(self): + pass - def test_doxygen_adoc_variables(self): - # run AFTER the content has been built; - # test will fail if ANY of the below are different or missing - expected = { - "pico-sdk/index_doxygen.adoc" : [ - ":doctitle: Introduction - Raspberry Pi Documentation", - ":page-sub_title: Introduction" - ], - "pico-sdk/hardware.adoc": [ - ":doctitle: Hardware APIs - Raspberry Pi Documentation", - ":page-sub_title: Hardware APIs" - ], - "pico-sdk/high_level.adoc": [ - ":doctitle: High Level APIs - Raspberry Pi Documentation", - ":page-sub_title: High Level APIs" - ], - "pico-sdk/third_party.adoc": [ - ":doctitle: Third-party Libraries - Raspberry Pi Documentation", - ":page-sub_title: Third-party Libraries" - ], - "pico-sdk/networking.adoc": [ - ":doctitle: Networking Libraries - Raspberry Pi Documentation", - ":page-sub_title: Networking Libraries" - ], - "pico-sdk/runtime.adoc": [ - ":doctitle: Runtime Infrastructure - Raspberry Pi Documentation", - ":page-sub_title: Runtime Infrastructure" - ], - "pico-sdk/misc.adoc": [ - ":doctitle: External API Headers - Raspberry Pi Documentation", - ":page-sub_title: External API Headers" - ] - } + def test_doxygen_adoc_variables(self): + # run AFTER the content has been built; + # test will fail if ANY of the below are different or missing + expected = { + "pico-sdk/index_doxygen.adoc" : [ + ":doctitle: Introduction - Raspberry Pi Documentation", + ":page-sub_title: Introduction" + ], + "pico-sdk/hardware.adoc": [ + ":doctitle: Hardware APIs - Raspberry Pi Documentation", + ":page-sub_title: Hardware APIs" + ], + "pico-sdk/high_level.adoc": [ + ":doctitle: High Level APIs - Raspberry Pi Documentation", + ":page-sub_title: High Level APIs" + ], + "pico-sdk/third_party.adoc": [ + ":doctitle: Third-party Libraries - Raspberry Pi Documentation", + ":page-sub_title: Third-party Libraries" + ], + "pico-sdk/networking.adoc": [ + ":doctitle: Networking Libraries - Raspberry Pi Documentation", + ":page-sub_title: Networking Libraries" + ], + "pico-sdk/runtime.adoc": [ + ":doctitle: Runtime Infrastructure - Raspberry Pi Documentation", + ":page-sub_title: Runtime Infrastructure" + ], + "pico-sdk/misc.adoc": [ + ":doctitle: External API Headers - Raspberry Pi Documentation", + ":page-sub_title: External API Headers" + ] + } - # get the appropriate working dir - file_path = os.path.join(self.parent_dir, "..", "build", "jekyll") + # get the 
appropriate working dir + file_path = os.path.join(self.parent_dir, "..", "build", "jekyll") - for item in expected: - print("FILE: ", item) - # find the file - this_path = os.path.join(file_path, item) - # make sure the file exists - if os.path.isfile(this_path): - # open the file and read the content - with open(this_path) as f: - content = f.read() - # find each expected line - for line in expected[item]: - print("LOOKING FOR: ", line) - match = re.search(line, content, re.M) - self.assertTrue(match is not None) - else: - print("Could not find this file. did you run `make` first?") + for item in expected: + print("FILE: ", item) + # find the file + this_path = os.path.join(file_path, item) + # make sure the file exists + if os.path.isfile(this_path): + # open the file and read the content + with open(this_path) as f: + content = f.read() + # find each expected line + for line in expected[item]: + print("LOOKING FOR: ", line) + match = re.search(line, content, re.M) + self.assertTrue(match is not None) + else: + print("Could not find this file. did you run `make` first?") def run_doxygen_adoc_tests(event, context): - suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestDoxygenAdoc) - result = unittest.TextTestRunner(verbosity=2).run(suite) - if result.wasSuccessful(): - body = { "message": "Tests passed!" } - response = { - "statusCode": 200, - "body": json.dumps(body) - } - return response - else : - body = { "message": "Tests failed!" } - response = { - "statusCode": 500, - "body": json.dumps(body) - } - return response + suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestDoxygenAdoc) + result = unittest.TextTestRunner(verbosity=2).run(suite) + if result.wasSuccessful(): + body = { "message": "Tests passed!" } + response = { + "statusCode": 200, + "body": json.dumps(body) + } + return response + else : + body = { "message": "Tests failed!" } + response = { + "statusCode": 500, + "body": json.dumps(body) + } + return response if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tests/test_create_build_adoc.py b/tests/test_create_build_adoc.py index 30cf7ba4ed..772bb816bd 100755 --- a/tests/test_create_build_adoc.py +++ b/tests/test_create_build_adoc.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import os import re import subprocess diff --git a/tests/test_create_build_adoc_doxygen.py b/tests/test_create_build_adoc_doxygen.py index 97ccee44cf..0686530dff 100644 --- a/tests/test_create_build_adoc_doxygen.py +++ b/tests/test_create_build_adoc_doxygen.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import os import re import subprocess diff --git a/tests/test_create_build_adoc_include.py b/tests/test_create_build_adoc_include.py index e8d6eb5247..ed33677d55 100644 --- a/tests/test_create_build_adoc_include.py +++ b/tests/test_create_build_adoc_include.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import os import re import subprocess diff --git a/tests/test_create_nav.py b/tests/test_create_nav.py index 8ff5e33a23..0a307048d0 100644 --- a/tests/test_create_nav.py +++ b/tests/test_create_nav.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import os import re import subprocess