#!/usr/bin/env python3
#
# Copyright (C) 2022 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# pyodide support status: Yes
"""
This program supports running in a pyodide environment.
On the JS side, you can pass params by adding the following code before this script:
```js
params = "${Base64.encode(params)}"
```
"""
import re
import sys

from urllib.parse import urljoin
from subprocess import check_output

from typing import Optional, Union

try:
    import locale  # module missing in Jython

    locale.setlocale(locale.LC_ALL, '')
except ImportError:
    pass
except locale.Error:
    pass

from docutils.core import publish_string
from sphinx.ext.intersphinx import fetch_inventory
from bs4 import BeautifulSoup


class RSTNormalizer:
    """Rewrite Sphinx-specific RST constructs so plain docutils can render them."""

    def __init__(self, url_base, objects_inv: Union[dict, str]):
        self.url_base = url_base

        if isinstance(objects_inv, dict):
            # already-parsed inventory data
            self.data = objects_inv
        elif isinstance(objects_inv, str):
            # URL (or path) of an objects.inv file to download and parse
            self.data = self.__convert_inv(objects_inv)
        else:
            raise NotImplementedError

        self.fake_roles = [
            self.fake_role('option', 'std:cmdoption'),
            self.fake_role('ref', 'std:label')
        ]

    @staticmethod
    def __convert_inv(url):
        try:
            inv_data = RSTNormalizer.__parse_inv_file(url)
        except Exception as e:
            print(e)
            print('Failed to download inventory data; document links will point to the search page.')
            inv_data = None
        return inv_data

    @staticmethod
    def __parse_inv_file(filename):
        # Minimal stand-ins for the Sphinx application object expected by fetch_inventory().
        class MockConfig:
            intersphinx_timeout: Optional[int] = None
            tls_verify = False
            user_agent = None

        class MockApp:
            srcdir = ''
            config = MockConfig()

            def warn(self, msg: str) -> None:
                print(msg, file=sys.stderr)

        invdata = fetch_inventory(MockApp(), '', filename)  # type: ignore
        result = {}
        for key in sorted(invdata or {}):
            result[key] = {}
            data = result[key]
            for entry, einfo in sorted(invdata[key].items()):
                data[entry] = {"title": einfo[3] if einfo[3] != '-' else '', "link": einfo[2]}
        return result

    def normalize(self, rest_text):
        # Turn ".. option:: NAME" directives into plain section titles.
        result = re.sub(r'\.\. option::[ \t]+(\S+)', lambda x: x.group(1) + f'\n{len(x.group(1)) * "-"}', rest_text)
        for fake_role_handle in self.fake_roles:
            result = fake_role_handle(result)

        return result

    def url(self, key, data):
        title = data["title"] if data["title"] else key
        url = urljoin(self.url_base, data['link'])
        return f'`{title} <{url}>`_'

    def fake_role(self, rest_key, json_key):
        # Build a handler that replaces ":<rest_key>:`target`" roles with plain RST links.
        def handle_role(rest_text):
            def re_sub(match):
                key = match.group(1)
                if self.data is None or json_key not in self.data or key not in self.data[json_key]:
                    # Fall back to the documentation search page when the target is unknown.
                    return self.url(key, {'title': '', 'link': f'search.html?q={key}&check_keywords=yes&area=default'})
                return self.url(key, self.data[json_key][key])

            return re.sub(f':{rest_key}:' + r'`(.+?)`', re_sub, rest_text)

        return handle_role


class ACRNDocumentStringConvertor:
    def __init__(self, objects_inv: Optional[dict] = None):
        self.version = self.get_acrn_document_version()
        self.url_base = 'https://projectacrn.github.io/{}/'.format(self.version)

        self.objects_inv = objects_inv
        if self.objects_inv is None:
            self.objects_inv = urljoin(self.url_base, 'objects.inv')

        self.rst_normalizer = RSTNormalizer(self.url_base, self.objects_inv)

    @staticmethod
    def get_acrn_document_version(default_version='latest'):
        version = default_version
        try:
            branch_name = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode().strip()
            version = re.match(r"^release_(\d\.\d)$", branch_name).group(1)
        except Exception:
            print("Can't detect the current acrn-hypervisor version; document links will point to 'latest'.")

        return version

    def convert(self, docstring):
        rst = self.rst_normalizer.normalize(docstring)
        html = publish_string(rst, writer_name='html5').decode('utf-8')
        soup = BeautifulSoup(html, 'lxml')
        for link in soup.select('a'):
            link['target'] = '_blank'
        try:
            fragment = soup.select_one('div.document').prettify()
        except AttributeError:
            # Newer docutils html5 writers wrap the body in <main> instead of <div class="document">.
            fragment = '\n'.join([str(x) for x in soup.select_one('main').children]).strip()
        return fragment


def main():
    with open('configdoc.txt') as f:
        params = {
            "text": f.read(),
            "objectsInv": None
        }
    doc_html = ACRNDocumentStringConvertor(params['objectsInv']).convert(params['text'])
    return doc_html
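

# Usage sketch (not part of the original tool's flow): a minimal, hedged example
# of driving ACRNDocumentStringConvertor with a hand-built, pre-parsed inventory
# dict instead of downloading objects.inv. The 'sample-label' entry and the sample
# docstring below are made-up assumptions for illustration only.
if __name__ == '__main__':
    sample_inventory = {
        'std:label': {
            # Hypothetical label; real keys come from the ACRN objects.inv.
            'sample-label': {'title': 'Sample Guide', 'link': 'sample-guide.html'},
        },
        'std:cmdoption': {},
    }
    # One known target (resolved via the inventory) and one unknown target
    # (rewritten into a search-page link).
    sample_docstring = 'See :ref:`sample-label` for details, or :ref:`unknown-label` for the fallback.'
    convertor = ACRNDocumentStringConvertor(sample_inventory)
    print(convertor.convert(sample_docstring))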