From 470ddbc4585c792e1cb543600e869c3e7b04e0bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=98JoinTyang=E2=80=99?= Date: Wed, 13 Sep 2023 16:59:35 +0800 Subject: [PATCH] convert markdown and sdoc --- app/app.py | 3 + app/config.py | 3 + file_converter/__init__.py | 1 + file_converter/apis.py | 83 +++++++ file_converter/converter_server.py | 27 +++ file_converter/markdown_converter.py | 289 +++++++++++++++++++++++ file_converter/sdoc_converter.py | 330 +++++++++++++++++++++++++++ file_converter/utils.py | 56 +++++ main.py | 2 + requirements.txt | 4 + 10 files changed, 798 insertions(+) create mode 100644 file_converter/__init__.py create mode 100644 file_converter/apis.py create mode 100644 file_converter/converter_server.py create mode 100644 file_converter/markdown_converter.py create mode 100644 file_converter/sdoc_converter.py create mode 100644 file_converter/utils.py diff --git a/app/app.py b/app/app.py index ad0e5ebf..81578dd7 100644 --- a/app/app.py +++ b/app/app.py @@ -3,6 +3,7 @@ VirusScanner, Statistics, CountUserActivity, CountTrafficInfo, ContentScanner,\ WorkWinxinNoticeSender, FileUpdatesSender, RepoOldFileAutoDelScanner,\ DeletedFilesCountCleaner +from seafevents.file_converter.converter_server import ConverterServer class App(object): @@ -17,6 +18,7 @@ def __init__(self, config, ccnet_config, seafile_config, self._events_handler = EventsHandler(config) self._count_traffic_task = CountTrafficInfo(config) self._update_login_record_task = CountUserActivity(config) + self._converter_server = ConverterServer(config) if self._bg_tasks_enabled: self._index_updater = IndexUpdater(config) @@ -35,6 +37,7 @@ def serve_forever(self): self._events_handler.start() self._update_login_record_task.start() self._count_traffic_task.start() + self._converter_server.start() if self._bg_tasks_enabled: self._file_updates_sender.start() diff --git a/app/config.py b/app/config.py index acd4853b..235a657e 100644 --- a/app/config.py +++ b/app/config.py @@ -22,6 +22,9 @@ DTABLE_WEB_SERVER = getattr(seahub_settings, 'DTABLE_WEB_SERVER', None) SEATABLE_EX_PROPS_BASE_API_TOKEN = getattr(seahub_settings, 'SEATABLE_EX_PROPS_BASE_API_TOKEN', None) EX_PROPS_TABLE = getattr(seahub_settings, 'EX_PROPS_TABLE', None) + SECRET_KEY = getattr(seahub_settings, 'SECRET_KEY', '') + FILE_SERVER_ROOT = getattr(seahub_settings, 'FILE_SERVER_ROOT', '') + SEAHUB_SERVER = getattr(seahub_settings, 'SERVICE_URL', '') except ImportError: logger.critical("Can not import seahub settings.") raise RuntimeError("Can not import seahub settings.") diff --git a/file_converter/__init__.py b/file_converter/__init__.py new file mode 100644 index 00000000..40a96afc --- /dev/null +++ b/file_converter/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/file_converter/apis.py b/file_converter/apis.py new file mode 100644 index 00000000..996db164 --- /dev/null +++ b/file_converter/apis.py @@ -0,0 +1,83 @@ +import json +import logging +import os +import jwt +from pathlib import Path + +from flask import request, Flask + +from seafevents.file_converter.sdoc_converter import md2sdoc +from seafevents.file_converter.markdown_converter import sdoc2md +from seafevents.file_converter.utils import get_file_by_token, upload_file_by_token +from seafevents.app.config import SECRET_KEY + +logger = logging.getLogger(__name__) +flask_app = Flask(__name__) + +def check_auth_token(req): + auth = req.headers.get('Authorization', '').split() + if not auth or auth[0].lower() != 'token' or len(auth) != 2: + return False + + token = auth[1] + if not 
token: + return False + + private_key = SECRET_KEY + try: + jwt.decode(token, private_key, algorithms=['HS256']) + except (jwt.ExpiredSignatureError, jwt.InvalidSignatureError) as e: + return False + + return True + + +@flask_app.route('/api/v1/file-convert/', methods=['POST']) +def convert_markdown_to_sdoc(): + is_valid = check_auth_token(request) + if not is_valid: + return {'error_msg': 'Permission denied'}, 403 + try: + data = json.loads(request.data) + except Exception as e: + logger.exception(e) + return {'error_msg': 'Bad request.'}, 400 + + path = data.get('path') + username = data.get('username') + doc_uuid = data.get('doc_uuid') + + extension = Path(path).suffix + if extension not in ['.md', '.sdoc']: + return {'error_msg': 'path invalid.'}, 400 + + download_token = data.get('download_token') + upload_token = data.get('upload_token') + + file_content = get_file_by_token(path, download_token).decode() + + parent_dir = os.path.dirname(path) + file_name = os.path.basename(path) + + if extension == '.md': + if file_content: + file_content = md2sdoc(file_content, username=username) + file_name = file_name[:-2] + 'sdoc' + else: + if file_content: + file_content = json.loads(file_content) + file_content = sdoc2md(file_content, doc_uuid=doc_uuid) + file_name = file_name[:-4] + 'md' + + try: + resp = upload_file_by_token(parent_dir, file_name, upload_token, file_content) + if not resp.ok: + logger.error(resp.text) + return {'error_msg': resp.text}, 500 + + except Exception as e: + logger.error(e) + error_msg = 'Internal Server Error' + return {'error_msg': error_msg}, 500 + + return {'success': True}, 200 diff --git a/file_converter/converter_server.py b/file_converter/converter_server.py new file mode 100644 index 00000000..d88a137d --- /dev/null +++ b/file_converter/converter_server.py @@ -0,0 +1,27 @@ +from threading import Thread +from gevent.pywsgi import WSGIServer + +from seafevents.file_converter.apis import flask_app + + +class ConverterServer(Thread): + + def __init__(self, config): + Thread.__init__(self) + self._parse_config(config) + + self._server = WSGIServer((self._host, int(self._port)), flask_app) + + def _parse_config(self, config): + if config.has_option('FILE CONVERTER', 'host'): + self._host = config.get('FILE CONVERTER', 'host') + else: + self._host = '127.0.0.1' + + if config.has_option('FILE CONVERTER', 'port'): + self._port = config.getint('FILE CONVERTER', 'port') + else: + self._port = '8888' + + def run(self): + self._server.serve_forever() diff --git a/file_converter/markdown_converter.py b/file_converter/markdown_converter.py new file mode 100644 index 00000000..90f06bcd --- /dev/null +++ b/file_converter/markdown_converter.py @@ -0,0 +1,289 @@ +from html2text import HTML2Text +from seafevents.file_converter.utils import trans_img_path_to_url + +md_hander = HTML2Text(bodywidth=0) # no wrapping length + + +HEADER_LABEL = [ + 'header1', + 'header2', + 'header3', + 'header4', + 'header5', + 'header6', +] + +def _handle_text_style(json_data_text, return_null=False): + text = json_data_text.get('text', '') + pure_text = text + bold = json_data_text.get('BOLD') + italic = json_data_text.get('ITALIC') + + if italic: + text = "_%s_" % text + if bold: + text = "**%s**" % text + + if (not text) and return_null: + text = '.' + return text, pure_text + +# sdoc 2 html dom +# 1. 
header +def _handle_header_dom(header_json, header_type): + output = '' + for child in header_json['children']: + if 'text' in child: + output += child.get('text') + else: + child_type = child.get('type') + if child_type == 'link': + output += _handle_link_dom(child) + + tag = { + "header1": "

<h1>%s</h1>",
+        "header2": "<h2>%s</h2>",
+        "header3": "<h3>%s</h3>",
+        "header4": "<h4>%s</h4>",
+        "header5": "<h5>%s</h5>",
+        "header6": "<h6>%s</h6>",
", + + }.get(header_type) + return tag % output + +# 2 image +def _handle_img_dom(img_json, doc_uuid=''): + + output = '' + url = img_json.get('data', {}).get('src') + if doc_uuid: + url = trans_img_path_to_url(url, doc_uuid) + + output += '' % url + return output + + +# 3 list including ordered / unordered list +def _handle_list_dom(list_json, tag='', ordered=False): + for list_item in list_json['children']: + item_eles = list_item['children'] + text = '' + for lic in item_eles: + + if lic.get('type') == 'unordered_list': + tag += _handle_list_dom(lic, '') + if lic.get('type') == 'ordered_list': + tag += _handle_list_dom(lic, '', True) + + if lic.get('type') == 'list-lic': + for item in lic['children']: + if 'text' in item: + text += _handle_text_style(item)[0] + else: + item_type = item.get('type') + if item_type == 'link': + text_name = item['children'][0]['text'] + text_url = item.get('href') + text += "%s" % (text_url, text_name) + tag += "
  • %s

  • " % text + if ordered: + res = "" % tag + else: + res = "" % tag + return res + +# 4 checkbox +def _handle_check_list_dom(check_list_json): + output = "" + checked = check_list_json.get('checked') + for child in check_list_json['children']: + if 'text' in child: + output += _handle_text_style(child)[0] + else: + child_type = child.get('type') + if child_type == 'link': + output += _handle_link_dom(child) + + if checked: + output = "

    * [x] %s

    " % output + else: + output = "

    * [ ] %s

    " % output + + return output + +# 5 blockquote +def _handle_blockquote_dom(blockquote_json): + output = "" + for child in blockquote_json['children']: + child_type = child.get('type') + if child_type in ['ordered_list', 'unordered_list']: + output += _handle_list_dom(child, '', child_type == 'ordered_list') + + if child_type == 'link': + text_name = child['children'][0]['text'] + text_url = child.get('href') + output += "%s" % (text_url, text_name) + + if child_type == 'paragraph': + output += '%s' % _handle_pagragh_dom(child) + + if child_type == 'check-list-item': + output += '%s' % _handle_check_list_dom(child) + + if child_type == 'image': + output += _handle_img_dom(child) + + if 'text' in child: + text = child.get('text') + text_list = text.split("\n") + output += ''.join(['

    %s

    ' % t for t in text_list if t.strip()]) + + tag = "
    %s
    " % output + return tag + +# 6 url link +def _handle_link_dom(link_json): + href = link_json.get('href') + link_child = link_json['children'][0] + + res = "%s" % (href, link_child.get('text')) + return res + + +# 7 pagragh +def _handle_pagragh_dom(pagragh_json, doc_uuid=''): + output = '' + for child in pagragh_json['children']: + if 'text' in child: + output += _handle_text_style(child)[0] + else: + child_type = child.get('type') + if child_type == 'link': + output += _handle_link_dom(child) + if child_type == 'image': + output += _handle_img_dom(child, doc_uuid) + + + result = "

    %s

    " % output + return result.replace("\n", "") + + + +def _handle_table_cell_dom(table_cell_json): + output = '' + for child in table_cell_json['children']: + if 'text' in child: + output += _handle_text_style(child)[0] + else: + child_type = child.get('type') + if child_type == 'link': + output += _handle_link_dom(child) + + return output + + +# html2markdown +def handle_header(header_json, header_type): + dom = _handle_header_dom(header_json, header_type) + return md_hander.handle(dom) + + +def handle_img(img_json): + dom = _handle_img_dom(img_json) + return dom + + +def handle_check_list(check_list_json): + return md_hander.handle(_handle_check_list_dom(check_list_json)) + + +def handle_paragraph(paragraph_json, doc_uuid=''): + dom = _handle_pagragh_dom(paragraph_json, doc_uuid) + return md_hander.handle(dom) + + +def handle_list(json_data, ordered=False): + html = _handle_list_dom(json_data, '', ordered) + md = md_hander.handle(html) + return md + + +def handle_codeblock(code_bloc_json): + lang = code_bloc_json.get('language', '') + output = "" + for child in code_bloc_json.get('children'): + if 'children' in child: + output += "%s\n" % child.get('children', '')[0].get('text') + return "```%s\n%s```" % (lang, output) + + +def handle_blockquote(json_data): + html = _handle_blockquote_dom(json_data) + md = md_hander.handle(html) + return md + + +def handle_table(table_json): + th_headers = '' + th_body = '' + first_table_row = table_json['children'][0] + other_table_rows = table_json['children'][1:] + + for first_table_cell in first_table_row['children']: + th_headers += "%s" % _handle_table_cell_dom(first_table_cell) + + for table_row in other_table_rows: + td = '' + for table_cell in table_row['children']: + td += "%s" % _handle_table_cell_dom(table_cell) + th_body += "%s" % td + + html = "
<table><tr>%s</tr>%s</table>
    " % (th_headers, th_body) + return md_hander.handle(html) + + +# +def json2md(json_data, doc_uuid=''): + doc_type = json_data.get('type') + markdown_output = '' + if doc_type in HEADER_LABEL: + output = handle_header(json_data, doc_type) + markdown_output += output + + if doc_type == 'check-list-item': + output = handle_check_list(json_data) + markdown_output += output + + if doc_type == 'paragraph': + output = handle_paragraph(json_data, doc_uuid) + markdown_output += output + + if doc_type == 'code-block': + output = handle_codeblock(json_data) + markdown_output += output + + if doc_type == 'table': + output = handle_table(json_data) + markdown_output += output + + if doc_type == 'unordered_list': + output = handle_list(json_data) + markdown_output += output + + if doc_type == 'ordered_list': + output = handle_list(json_data, ordered=True) + markdown_output += output + + if doc_type == 'blockquote': + output = handle_blockquote(json_data) + markdown_output += output + + return markdown_output + +def sdoc2md(json_tree, doc_uuid=''): + results = [] + for sub in json_tree.get('children'): + results.append(json2md(sub, doc_uuid)) + + markdown_text = "\n".join(results) + return markdown_text diff --git a/file_converter/sdoc_converter.py b/file_converter/sdoc_converter.py new file mode 100644 index 00000000..b6dd0a73 --- /dev/null +++ b/file_converter/sdoc_converter.py @@ -0,0 +1,330 @@ +import pypandoc +import json +import random +import string +import re +from seafevents.file_converter.utils import IMAGE_PATTERN + + +def get_random_id(): + ran_str = ''.join(random.sample(string.ascii_letters + string.digits, 22)) + return ran_str + + +def parse_italic(italic_json, json_doc={}): + json_doc['ITALIC'] = True + children = italic_json['c'] + for item in children: + if item['t'] == 'Strong': + parse_strong(item, json_doc) + if item['t'] == 'Str': + json_doc['text'] = item['c'] + json_doc['id'] = get_random_id() + return json_doc + +def parse_strong(strong_json, json_doc={}): + json_doc['BOLD'] = True + children = strong_json['c'] + for item in children: + if item['t'] == 'Emph': + parse_italic(item, json_doc) + if item['t'] == 'Str': + json_doc['text'] = item['c'] + json_doc['id'] = get_random_id() + + return json_doc + +def parse_plain(plain): + l = [] + for item in plain['c']: + if item['t'] == 'Str': + l.append({'text': item['c'], 'id': get_random_id()}) + if item['t'] == 'Space': + l.append({'text': ' ', 'id': get_random_id()}) + if item['t'] == 'Link': + l.append(parse_link(item)) + if item['t'] == 'Code': + l.append(parse_inline_code(item)) + return l + + +def parse_link(link_json): + link_url = link_json['c'][2][0] + link_main = link_json['c'][1] + sdoc_json = { + 'type': 'link', + 'href': link_url, + 'children': [], + 'id': get_random_id(), + } + for item in link_main: + if item['t'] == 'Str': + sdoc_json['children'].append({'text': item['c'], 'id': get_random_id()}) + sdoc_json['title'] = item['c'] + + return sdoc_json + +def parse_header(header_json): + header_level = header_json['c'][0] + sdoc_json = { + 'type': 'header%s' % header_level, + 'children': [], + 'id':get_random_id() + } + header_structure = header_json['c'][2] + for item in header_structure: + if item['t'] == 'Str': + sdoc_json['children'].append({'text': item['c'], "id": get_random_id()}) + if item['t'] == 'Space': + sdoc_json['children'].append({'text': ' ', 'id': get_random_id()}) + if item['t'] == 'Link': + sdoc_json['children'].append(parse_link(item)) + if item['t'] == 'Strong': + 
sdoc_json['children'].append(parse_strong(item, {})) + + return sdoc_json + +def parse_image(image_json): + image_link = image_json['c'][2][0] + sdoc_json = { + 'type': 'image', + 'children': [{'id': get_random_id(), 'text': ''}], + 'id': get_random_id(), + 'data': {'src': image_link}, + } + return sdoc_json + +def parse_raw_inline(inline_json): + try: + txt_type = inline_json['c'][0] + if txt_type == 'html': + img_html = inline_json['c'][1] + image_link_res = re.findall(IMAGE_PATTERN, img_html) + if image_link_res: + image_link = image_link_res[0] + sdoc_json = image_link and { + 'type': 'image', + 'children': [{'id': get_random_id(), 'text': ''}], + 'id': get_random_id(), + 'data': {'src': image_link}, + } + return sdoc_json + except: + return None + return None + + +def parse_inline_code(code_json): + code_text = code_json['c'][-1] + sdoc_json = { + 'text': code_text, + 'id': get_random_id() + } + return sdoc_json + +def parse_list_sub(list_json): + sdoc_json = {'type':'unordered_list', 'id': get_random_id(), 'children': []} + for items in list_json['c']: + list_item = {'type':'list-item', 'id': get_random_id(), 'children': []} + for item in items: + if item['t'] in ['Plain', 'Para']: + list_item['children'].append({'type': 'list-lic', 'children': parse_plain(item), 'id': get_random_id()}) + if item['t'] == 'BulletList': + list_item['children'].append(parse_list(item)) + sdoc_json['children'].append(list_item) + return sdoc_json + +def parse_list(list_json): + sdoc_json = {'type':'unordered_list', 'id': get_random_id(), 'children': []} + for items in list_json['c']: + list_item = {'type':'list-item', 'id': get_random_id(), 'children': []} + for item in items: + if item['t'] in ['Plain', 'Para']: + list_item['children'].append({'type': 'list-lic', 'children': parse_plain(item), 'id': get_random_id()}) + if item['t'] == 'BulletList': + list_item['children'].append(parse_list_sub(item)) + sdoc_json['children'].append(list_item) + return sdoc_json + + +def parse_codeblock(code_json): + try: + lang = code_json['c'][0][1][0] + except: + lang = '' + + sdoc_json = { + 'type': 'code-block', + 'children': [], + 'id': get_random_id(), + 'language': lang or 'plaintext', + 'style':{'white_space': "nowrap"} + } + main_code = code_json['c'][1] + for code in main_code.split('\n'): + sdoc_json['children'].append({ + 'type':'code-line', + 'id': get_random_id(), + 'children':[{'text': code}] + }) + return sdoc_json + + +def parse_paragragh(para_json): + sdoc_json = { + 'type': 'paragraph', + 'children': [], + 'id': get_random_id() + } + for item in para_json['c']: + if item['t'] == 'Str': + sdoc_json['children'].append({'text': item['c'], "id": get_random_id()}) + if item['t'] == 'Space': + sdoc_json['children'].append({'text': ' ', "id": get_random_id()}) + if item['t'] == 'Link': + sdoc_json['children'].append(parse_link(item)) + if item['t'] == 'Strong': + sdoc_json['children'].append(parse_strong(item, {})) + if item['t'] == 'Image': + sdoc_json['children'].append(parse_image(item)) + if item['t'] == 'Emph': + sdoc_json['children'].append(parse_italic(item, {})) + if item['t'] == 'RawInline': + res = parse_raw_inline(item) + if res: + sdoc_json['children'].append(parse_raw_inline(item)) + + return sdoc_json + + +def parse_blockquote(block_json): + sdoc_json = { + 'type': 'blockquote', + 'children': [], + 'id': get_random_id() + } + for item in block_json['c']: + if item['t'] == 'Para': + sdoc_json['children'].append(parse_paragragh(item)) + if item['t'] == 'BulletList': + 
sdoc_json['children'].append(parse_list(item)) + + return sdoc_json + +def parse_table(table_json): + table_sdoc = { + 'type': 'table', + 'id': get_random_id(), + 'children': [], + 'columns': [] + } + table_head = table_json['c'][3] + column_num = len(table_head) + column_length = int (672 / column_num) + for i in range(column_num): + table_sdoc['columns'].append({'width': column_length}) + + table_row_head = { + 'type': 'table-row', + 'id': get_random_id(), + 'children': [], + 'style': {'minHeight': 43} + } + for row in table_head: + table_cell = { + 'id': get_random_id(), + 'children': [], + 'type': 'table-cell' + } + if not row: + row = [{'t': 'Plain', 'c': [{'t': 'Str', 'c': ''}]}] + for c in row[0]['c']: + if c['t'] == 'Str': + table_cell['children'].append({'text': c['c'], 'id': get_random_id(), }) + if c['t'] == 'Space': + table_cell['children'].append({'text': ' ', 'id': get_random_id(), }) + if c['t'] == 'Code': + table_cell['children'].append(parse_inline_code(c)) + if c['t'] == 'Strong': + table_cell['children'].append(parse_strong(c, {})) + if c['t'] == 'Emph': + table_cell['children'].append(parse_italic(c, {})) + if c['t'] == 'Link': + table_cell['children'].append(parse_link(c)) + table_row_head['children'].append(table_cell) + + table_sdoc['children'].append(table_row_head) + + table_body = table_json['c'][4] + + for row in table_body: + table_row_body = { + 'type': 'table-row', + 'id': get_random_id(), + 'children': [], + 'style': {'minHeight': 43} + } + for v in row: + table_cell = { + 'id': get_random_id(), + 'children': [], + 'type': 'table-cell' + } + + if not v: + v = [{'t': 'Plain', 'c': [{'t': 'Str', 'c': ''}]}] + + for c in v[-1]['c']: + if c['t'] == 'Str': + table_cell['children'].append({'text': c['c'], 'id': get_random_id(), }) + if c['t'] == 'Space': + table_cell['children'].append({'text': ' ', 'id': get_random_id(), }) + if c['t'] == 'Strong': + table_cell['children'].append(parse_strong(c, {})) + if c['t'] == 'Emph': + table_cell['children'].append(parse_italic(c, {})) + if c['t'] == 'Link': + table_cell['children'].append(parse_link(c)) + if c['t'] == 'Code': + table_cell['children'].append(parse_inline_code(c)) + + + table_row_body['children'].append(table_cell) + + table_sdoc['children'].append(table_row_body) + return table_sdoc + + +def md2sdoc(md_txt, username=''): + md_ast = pypandoc.convert_text(md_txt, 'json', 'markdown') + json_ast = json.loads(md_ast) + blocks = json_ast['blocks'] + + l = [] + for item in blocks: + if item['t'] == 'Header': + l.append(parse_header(item)) + + if item['t'] == 'Para': + l.append(parse_paragragh(item)) + + if item['t'] == 'CodeBlock': + l.append(parse_codeblock(item)) + + if item['t'] == 'BulletList': + l.append(parse_list(item)) + + if item['t'] == 'BlockQuote': + l.append(parse_blockquote(item)) + + if item['t'] == 'Table': + l.append(parse_table(item)) + + sdoc_json = { + 'cursors': {}, + 'last_modify_user': username, + 'children': l, + 'version': 1 + } + + return sdoc_json diff --git a/file_converter/utils.py b/file_converter/utils.py new file mode 100644 index 00000000..bbe8a143 --- /dev/null +++ b/file_converter/utils.py @@ -0,0 +1,56 @@ +import re +import os +import json +import requests +from seafevents.app.config import FILE_SERVER_ROOT, SEAHUB_SERVER + + +IMAGE_PATTERN = r'' + +def is_url_link(s): + if re.match(r'^http[s]?://', s): + return True + else: + return False + +def trans_img_path_to_url(image_path, doc_uuid): + if is_url_link(image_path): + return image_path + + return 
"%(server_url)s/%(tag)s/%(doc_uuid)s/%(image_path)s" % ({ + 'server_url': SEAHUB_SERVER.rstrip('/'), + 'tag': 'api/v2.1/seadoc/download-image', + 'doc_uuid': doc_uuid, + 'image_path': image_path.strip('/') + }) + + +def gen_file_get_url(token, filename): + from urllib.parse import quote as urlquote + return '%s/files/%s/%s' % (FILE_SERVER_ROOT, token, urlquote(filename)) + + +def gen_file_upload_url(op, token): + return '%s/%s/%s' % (FILE_SERVER_ROOT, op, token) + + +def get_file_by_token(path, token): + filename = os.path.basename(path) + url = gen_file_get_url(token, filename) + content = requests.get(url).content + return content + + +def upload_file_by_token(parent_dir, file_name, token, content): + new_file_name = file_name + upload_link = gen_file_upload_url('upload-api', token) + new_file_path = os.path.join(parent_dir, new_file_name) + + if isinstance(content, dict): + content = json.dumps(content) + + resp = requests.post(upload_link, + data={'target_file': new_file_path, 'parent_dir': parent_dir}, + files={'file': (new_file_name, content.encode())} + ) + return resp diff --git a/main.py b/main.py index 68aaf5fa..7e6e374b 100644 --- a/main.py +++ b/main.py @@ -2,6 +2,8 @@ import os import logging import argparse +from gevent import monkey +monkey.patch_all() from seafevents.db import create_db_tables, prepare_db_tables from seafevents.utils import write_pidfile diff --git a/requirements.txt b/requirements.txt index f8c5442f..37a3e4e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,7 @@ mock pytest pyjwt pymysql +Flask +gevent +pypandoc +html2text