Skip to content

Commit

Permalink
Merge pull request #1505 from hlohaus/sort
Browse files Browse the repository at this point in the history
Mish
  • Loading branch information
hlohaus authored Jan 23, 2024
2 parents 2a35052 + b6feec9 commit c10f49d
Show file tree
Hide file tree
Showing 11 changed files with 7,044 additions and 204 deletions.
2 changes: 1 addition & 1 deletion etc/tool/copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ def main():
if not pull:
print(f"No PR number found")
exit()
if pull.get_reviews().totalCount > 0 or pull.get_comments().totalCount > 0:
if pull.get_reviews().totalCount > 0 or pull.get_issue_comments().totalCount > 0:
print(f"Has already a review")
exit()
diff = get_diff(pull.diff_url)
Expand Down
33 changes: 16 additions & 17 deletions g4f/Provider/GptForLove.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,7 @@
from __future__ import annotations

from aiohttp import ClientSession
import json
from Crypto.Cipher import AES
from Crypto.Util import Padding
import base64
import hashlib
import time
import math
import execjs, os, json

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
Expand Down Expand Up @@ -71,13 +65,18 @@ async def create_async_generator(


def get_secret() -> str:
    """Build the provider's per-request secret token.

    Encrypts the current unix timestamp (whole seconds) with AES
    (ECB mode, Pkcs7 padding) using the site's hard-coded key, by
    delegating to the vendored crypto-js library through execjs.

    Returns:
        The base64/OpenSSL-formatted ciphertext string produced by
        crypto-js ``AES.encrypt(...).toString()``.

    Raises:
        execjs.RuntimeUnavailableError: if no JavaScript runtime is
            installed on the host.
    """
    # Path to the crypto-js module bundled under g4f/Provider/npm,
    # resolved relative to this file so it works from any CWD.
    base_dir = os.path.dirname(__file__)
    include = f'{base_dir}/npm/node_modules/crypto-js/crypto-js'
    # JavaScript program run by the system JS runtime. The literal
    # {include} placeholder is substituted below with a JSON-quoted
    # path so it becomes a valid, properly escaped JS string literal.
    source = """
CryptoJS = require({include})
var k = 'fjfsdwiuhfwf'
    , e = Math.floor(new Date().getTime() / 1e3);
var t = CryptoJS.enc.Utf8.parse(e)
    , o = CryptoJS.AES.encrypt(t, k, {
    mode: CryptoJS.mode.ECB,
    padding: CryptoJS.pad.Pkcs7
});
return o.toString()
"""
    source = source.replace('{include}', json.dumps(include))
    # execjs wraps the source in a function; call('') executes it and
    # returns the JS `return` value to Python as a str.
    return execjs.compile(source).call('')
25 changes: 18 additions & 7 deletions g4f/Provider/HuggingChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,23 @@
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt, get_cookies

# Maps legacy/alias model ids accepted from callers to the model name
# HuggingChat currently serves for them.
# NOTE(review): this module-level name shadows the builtin `map`;
# consider renaming (e.g. `model_map`) in a follow-up.
map = {
    "openchat/openchat_3.5": "openchat/openchat-3.5-1210",
}

class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
working = True
model = "meta-llama/Llama-2-70b-chat-hf"
default_model = "meta-llama/Llama-2-70b-chat-hf"
models = [
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"meta-llama/Llama-2-70b-chat-hf",
"NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"codellama/CodeLlama-34b-Instruct-hf",
"mistralai/Mistral-7B-Instruct-v0.2",
"openchat/openchat-3.5-0106"
]
model_map = {
"openchat/openchat_3.5": "openchat/openchat-3.5-1210",
"mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.2"
}

@classmethod
async def create_async_generator(
Expand All @@ -29,9 +38,11 @@ async def create_async_generator(
**kwargs
) -> AsyncResult:
if not model:
model = cls.model
elif model in map:
model = map[model]
model = cls.default_model
elif model in cls.model_map:
model = cls.model_map[model]
elif model not in cls.models:
raise ValueError(f"Model is not supported: {model}")
if not cookies:
cookies = get_cookies(".huggingface.co")

Expand Down
29 changes: 15 additions & 14 deletions g4f/Provider/PerplexityLabs.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,28 @@

import random
import json
from aiohttp import ClientSession, WSMsgType
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

# HTTP endpoint for the Perplexity Labs socket.io handshake.
API_URL = "https://labs-api.perplexity.ai/socket.io/"
# Websocket endpoint used for the streaming conversation transport.
WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
# Model ids the labs API accepts verbatim.
MODELS = ['pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
    'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
    'mistral-medium', 'related']
# Model used when the caller does not specify one ('pplx-70b-online').
DEFAULT_MODEL = MODELS[1]
# Maps Hugging Face-style model ids to the equivalent labs API id.
MODEL_MAP = {
    "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
    "meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
    "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
}

class PerplexityLabs(AsyncGeneratorProvider):
url = "https://labs.perplexity.ai"
working = True
supports_gpt_35_turbo = True
models = ['pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
'mistral-medium', 'related']
default_model = 'pplx-70b-online'
model_map = {
"mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
"meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
"mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
"codellama/CodeLlama-34b-Instruct-hf": "codellama-34b-instruct"
}

@classmethod
async def create_async_generator(
Expand All @@ -33,10 +34,10 @@ async def create_async_generator(
**kwargs
) -> AsyncResult:
if not model:
model = DEFAULT_MODEL
elif model in MODEL_MAP:
model = MODEL_MAP[model]
elif model not in MODELS:
model = cls.default_model
elif model in cls.model_map:
model = cls.model_map[model]
elif model not in cls.models:
raise ValueError(f"Model is not supported: {model}")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0",
Expand Down
12 changes: 12 additions & 0 deletions g4f/Provider/npm/node_modules/.package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit c10f49d

Please sign in to comment.