Skip to content

Commit

Permalink
fix: refactoring
Browse files Browse the repository at this point in the history
  • Loading branch information
eggplants committed Feb 7, 2022
1 parent cb6c3ca commit 2013c86
Show file tree
Hide file tree
Showing 4 changed files with 59 additions and 41 deletions.
32 changes: 24 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

[![PyPI version](https://badge.fury.io/py/getjump.svg)](https://badge.fury.io/py/getjump) [![Maintainability](https://api.codeclimate.com/v1/badges/8d8c16d52b49885dad8c/maintainability)](https://codeclimate.com/github/eggplants/getjump/maintainability)

Retrieve and save images from manga distribution sites using [GigaViewer]()
Retrieve and save images from manga distribution sites using [GigaViewer](https://prtimes.jp/main/html/searchrlp/company_id/6510)

*Note: Redistribution of downloaded image data is prohibited. Please keep it to private use.*

Expand Down Expand Up @@ -54,15 +54,31 @@ usage: jget [-h] [-b] [-d DIR] [-f] [-o] url
Get images from jump web viewer

positional arguments:
url target url
url target url

optional arguments:
-h, --help show this help message and exit
-b, --bulk download series in bulk (default: False)
-d DIR, --savedir DIR
directory to save downloaded images (default: .)
-f, --first download only first page (default: False)
-o, --overwrite overwrite (default: False)
-h, --help show this help message and exit
-b, --bulk download series in bulk (default: False)
-d DIR, --savedir DIR directory to save downloaded images (default: .)
-f, --first download only first page (default: False)
-o, --overwrite overwrite (default: False)

available urls:
- https://comic-action.com/episode/***.json
- https://comic-days.com/episode/***.json
- https://comic-gardo.com/episode/***.json
- https://comic-trail.com/episode/***.json
- https://comic-zenon.com/episode/***.json
- https://comicborder.com/episode/***.json
- https://comicbushi-web.com/episode/***.json
- https://feelweb.jp/episode/***.json
- https://kuragebunch.com/episode/***.json
- https://magcomi.com/episode/***.json
- https://pocket.shonenmagazine.com/episode/***.json
- https://shonenjumpplus.com/episode/***.json
- https://www.sunday-webry.com/episode/***.json
- https://tonarinoyj.jp/episode/***.json
- https://viewer.heros-web.com/episode/***.json
$ jget https://shonenjumpplus.com/episode/13932016480028799982.json
get: https://shonenjumpplus.com/episode/13932016480028799982.json
saved: ./阿波連さんははかれない/[1話]阿波連さんははかれない
Expand Down
37 changes: 26 additions & 11 deletions getjump/GetJump.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import sys
import warnings
from typing import Any, Dict, List, Optional, Tuple, cast
from urllib.parse import urlparse

import cv2 # type: ignore
import numpy as np
Expand All @@ -19,6 +20,24 @@
)
}

# Hostnames of GigaViewer-based manga viewers that this tool accepts.
# Consumed by GetJump.is_valid_uri() for URL validation and by
# main.available_list() to render the CLI's "available urls" help text,
# so adding a host here updates both in one place.
VALID_HOSTS = (
    "comic-action.com",
    "comic-days.com",
    "comic-gardo.com",
    "comic-trail.com",
    "comic-zenon.com",
    "comicborder.com",
    "comicbushi-web.com",
    "feelweb.jp",
    "kuragebunch.com",
    "magcomi.com",
    "pocket.shonenmagazine.com",
    "shonenjumpplus.com",
    "www.sunday-webry.com",
    "tonarinoyj.jp",
    "viewer.heros-web.com",
)


class NeedPurchase(Warning):
    """Warning category signalling that (per its name) an episode appears to
    require purchase before its pages can be retrieved — confirm at call sites."""
Expand Down Expand Up @@ -62,21 +81,17 @@ def get(

@staticmethod
def is_valid_uri(url: str) -> bool:
pattern = re.compile(
r"""
^https://(?:pocket\.shonenmagazine\.com
|(?:(?:viewer\.heros\-web|shonenjumpplus|comicbushi\-web
|comic(?:\-(?:action|gardo|trail|zenon)|border)
|kuragebunch|comic\-days|magcomi)\.com
|(?:tonarinoyj|feelweb)\.jp))/episode/\d+.json$
""",
re.X,
o = urlparse(url)
return (
type(url) is str
and o.scheme == "https"
and o.hostname in VALID_HOSTS
and bool(re.match(r"^/episode/[0-9]+\.json$", o.path))
)
return type(url) is str and bool(pattern.match(url))

def __check_url(self, url: str) -> None:
if not self.is_valid_uri(url):
raise ValueError("'{}' is not valid url.".format(url))
raise ValueError(f"'{url}' is not valid url.")

@staticmethod
def __check_content_type(type_: str) -> None:
Expand Down
6 changes: 3 additions & 3 deletions getjump/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Package public API: re-export the downloader class and the host whitelist.
from .GetJump import VALID_HOSTS, GetJump

__version__ = "0.13"
__all__ = ["GetJump", "VALID_HOSTS"]
25 changes: 6 additions & 19 deletions getjump/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,24 +4,7 @@
from shutil import get_terminal_size
from typing import List, Optional

from .GetJump import GetJump

AVAILABLE_URLS = [
"https://comic-action.com/episode/***.json",
"https://comic-days.com/episode/***.json",
"https://comic-gardo.com/episode/***.json",
"https://comic-trail.com/episode/***.json",
"https://comic-zenon.com/episode/***.json",
"https://comicborder.com/episode/***.json",
"https://comicbushi-web.com/episode/***.json",
"https://feelweb.jp/episode/***.json",
"https://kuragebunch.com/episode/***.json",
"https://magcomi.com/episode/***.json",
"https://pocket.shonenmagazine.com/episode/***.json",
"https://shonenjumpplus.com/episode/***.json",
"https://tonarinoyj.jp/episode/***.json",
"https://viewer.heros-web.com/episode/***.json",
]
from .GetJump import VALID_HOSTS, GetJump


class GetJumpFormatter(
Expand All @@ -35,7 +18,11 @@ class HttpConnectionNotFountError(Exception):


def available_list() -> str:
    """Return the CLI help text listing every supported episode-URL pattern.

    Built from VALID_HOSTS so the help stays in sync with the validator;
    each host is rendered as `` - https://<host>/episode/***.json``.
    (Resolves this hunk's diff residue to the post-commit implementation.)
    """
    return "available urls:\n" + "\n".join(
        f" - https://{host}/episode/***.json" for host in VALID_HOSTS
    )


def check_connectivity(url: str = "www.google.com", timeout: int = 3) -> bool:
Expand Down

0 comments on commit 2013c86

Please sign in to comment.