Skip to content

Commit

Permalink
new version
Browse files Browse the repository at this point in the history
  • Loading branch information
adogecheems committed Sep 8, 2024
1 parent 7c750fa commit 523a9f4
Show file tree
Hide file tree
Showing 20 changed files with 79 additions and 273 deletions.
15 changes: 8 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -108,9 +108,9 @@ AniSearch 使用基于元类的插件系统来支持不同的搜索源
非常悲哀的是,以下搜索源都需要代理

- `dmhy`: 动漫花园搜索源(速度较快)
- `comicat`: 漫猫搜索源(实现非常慢,慎用,建议只搜索季度合集)
- `kisssub`: 爱恋搜索源(同上)
- `miobt`:MioBT 搜索源(同上)
- `comicat`: [存档] 漫猫搜索源(实现非常慢,慎用,建议只搜索季度合集)
- `kisssub`: [存档] 爱恋搜索源(同上)
- `miobt`: [存档] MioBT 搜索源(同上)
- `nyaa`: nyaa.si 搜索源(速度超群,不能使用季度合集搜索)
- `acgrip`: acg.rip 搜索源(速度适中,不能使用季度合集搜索,由于站点的自身原因,获取的magnet是种子的下载链接)
- `tokyotosho` : 东京图书馆搜索源(速度适中,不能使用季度合集搜索,绝大部分资源都需要英/日文才能搜到)
Expand All @@ -121,20 +121,21 @@ AniSearch 使用基于元类的插件系统来支持不同的搜索源
```python
# 运行此代码,没有异常说明自定义插件创建成功,已经注册在插件系统中
from anisearch.plugins import BasePlugin
from anisearch.anime.Anime import Anime
from anisearch.plugins.Anime import Anime
from anisearch.plugins._webget import get_html


class Custom(BasePlugin):
    """Template for a custom search-source plugin.

    Setting ``abstract = False`` makes the plugin metaclass register this
    class under its name, so it becomes available to the plugin system.
    """

    # Concrete plugin: the metaclass registers non-abstract subclasses.
    abstract = False

    def __init__(self, parser, verify, timefmt) -> None:
        super().__init__(parser, verify, timefmt)

    def search(self, keyword, collected=True, proxies=None, system_proxy=False, **extra_options):
        html = get_html("<url>", proxies=proxies, system_proxy=system_proxy, verify=self._verify)

        # Implement your search logic here

        # Return a list of Anime objects
        return [Anime("2023/06/01 12:00", "Custom Anime", "1.5GB", "magnet:?xt=urn:btih:..."), ...]
```
Expand Down
16 changes: 9 additions & 7 deletions README_en.md
Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,12 @@ AniSearch uses a metaclass-based plugin system to support different search sourc

- `dmhy`: Anime Garden search source (faster)

- `comicat`: Manmao search source (very slow to implement, use with caution, it is recommended to search only for quarterly collections)
- `comicat`: [archived] Comicat search source (the implementation is very slow; use with caution — it is recommended to search
only for quarterly collections)

- `kisssub`: Love search source (same as above)
- `kisssub`: [archived] KissSub search source (same as above)

- `miobt`: MioBT search source (same as above)
- `miobt`: [archived] MioBT search source (same as above)

- `nyaa`: nyaa.si search source (superb speed; quarterly collection search is not supported)

Expand All @@ -127,20 +128,21 @@ To create a custom plugin, you need to inherit the BasePlugin class and implemen
```python
# Run this code. If there is no exception, it means that the custom plug-in is created successfully and has been registered in the plug-in system
from anisearch.plugins import BasePlugin
from anisearch.anime.Anime import Anime
from anisearch.plugins.Anime import Anime
from anisearch.plugins._webget import get_html


class Custom(BasePlugin):
    """Template for a custom search-source plugin.

    Setting ``abstract = False`` makes the plugin metaclass register this
    class under its name, so it becomes available to the plugin system.
    """

    # Concrete plugin: the metaclass registers non-abstract subclasses.
    abstract = False

    def __init__(self, parser, verify, timefmt) -> None:
        super().__init__(parser, verify, timefmt)

    def search(self, keyword, collected=True, proxies=None, system_proxy=False, **extra_options):
        html = get_html("<url>", proxies=proxies, system_proxy=system_proxy, verify=self._verify)

        # Implement your search logic here

        # Return a list of Anime objects
        return [Anime("2023/06/01 12:00", "Custom Anime", "1.5GB", "magnet:?xt=urn:btih:..."), ...]
```
Expand Down
4 changes: 2 additions & 2 deletions anisearch/search/AniSearch.py → anisearch/AniSearch.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
from typing import List, Optional

from . import log
from .. import plugins
from ..anime.Anime import Anime
from . import plugins
from .plugins.Anime import Anime


class AniSearch:
Expand Down
206 changes: 0 additions & 206 deletions anisearch/README_en.md

This file was deleted.

30 changes: 29 additions & 1 deletion anisearch/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,29 @@
from anisearch.search.AniSearch import AniSearch
import logging

LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s"
LOG_FILE = "search.log"


def setup_logger(name: str = "global", level: int = logging.DEBUG) -> logging.Logger:
    """Return the logger called *name*, configuring it on first use.

    On the first call for a given name the logger is set to *level* and
    receives two handlers: a DEBUG-level file handler writing to LOG_FILE
    (truncated at start-up) and an INFO-level stream handler.  Subsequent
    calls return the already-configured logger untouched.
    """
    logger = logging.getLogger(name)

    # Already configured (handlers attached) — hand it back unchanged.
    if logger.handlers:
        return logger

    logger.setLevel(level)

    # File output is best-effort: an unwritable log file must not stop
    # the application, so failures are only reported on stdout.
    try:
        to_file = logging.FileHandler(LOG_FILE, mode='w')
        to_file.setLevel(logging.DEBUG)
        to_file.setFormatter(logging.Formatter(LOG_FORMAT))
        logger.addHandler(to_file)
    except Exception as err:
        print(f"Failed to set up file handler: {err}")

    to_console = logging.StreamHandler()
    to_console.setLevel(logging.INFO)
    logger.addHandler(to_console)

    return logger


# Configure the package-wide logger before importing AniSearch: the
# submodules (AniSearch.py, plugins, ...) do `from . import log`, so
# `log` must exist first.  Deferring the import to the bottom of this
# module is what breaks that circular dependency.
log = setup_logger()

# BUG FIX: this line used to be eval("from .AniSearch import AniSearch").
# eval() accepts only expressions; passing an import *statement* raises
# SyntaxError at package-import time, so the package could never be
# imported.  A plain import statement works because `log` is already
# defined at this point.
from .AniSearch import AniSearch
Empty file removed anisearch/anime/__init__.py
Empty file.
File renamed without changes.
2 changes: 1 addition & 1 deletion anisearch/anime/Anime.py → anisearch/plugins/Anime.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import re
from typing import Tuple

from ..search import log
from .. import log

# Regular expression patterns
size_pattern = re.compile(r'(\d+(?:\.\d+)?)\s*(\w+)')
Expand Down
7 changes: 4 additions & 3 deletions anisearch/plugins/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import importlib
import logging
from abc import ABCMeta, abstractmethod

from .. import log


class PluginMeta(ABCMeta):
plugins = {}
Expand All @@ -21,7 +22,7 @@ def __init__(self, parser, verify, timefmt):
self._timefmt = timefmt

@abstractmethod
def search(self, keyword, collected, proxies, system_proxy, extra_options):
def search(self, keyword, collected, proxies, system_proxy, **extra_options):
"""
Abstract method to search for a keyword.
Expand All @@ -48,6 +49,6 @@ def get_plugin(name: str):
try:
importlib.import_module(f".{name}", package=__name__)
except ImportError:
logging.info(f"The plugin {name} cannot be automatically imported, please import it manually")
log.info(f"The plugin {name} cannot be automatically imported, please import it manually")

return PluginMeta.plugins.get(name.title())
7 changes: 5 additions & 2 deletions anisearch/plugins/_webget.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import os

import requests
from ..search import log

from .. import log


def get_html(url, proxies=None, system_proxy=False, verify=True):
headers = {
Expand All @@ -26,4 +29,4 @@ def get_html(url, proxies=None, system_proxy=False, verify=True):

except requests.RequestException as e:
log.exception(f"The search was aborted due to network reasons: {e}")
raise
raise
6 changes: 4 additions & 2 deletions anisearch/plugins/_webget_cf.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
import os
import requests

import cloudscraper
from ..search import log
import requests

from .. import log


def get_html(url, proxies=None, system_proxy=False, verify=True):
Expand Down
Loading

0 comments on commit 523a9f4

Please sign in to comment.