Skip to content

Commit

Permalink
Update g4f/Provider/Blackbox.py
Browse files Browse the repository at this point in the history
  • Loading branch information
kqlio67 committed Nov 20, 2024
2 parents 4d1dfa8 + 419264f commit 81f694c
Show file tree
Hide file tree
Showing 14 changed files with 230 additions and 136 deletions.
23 changes: 16 additions & 7 deletions g4f/Provider/Copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,9 @@
from ..typing import CreateResult, Messages, ImageType
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from ..providers.helper import format_cookies
from ..requests import get_nodriver
from ..image import to_bytes, is_accepted_format
from ..image import ImageResponse, to_bytes, is_accepted_format
from .. import debug

class Conversation(BaseConversation):
Expand Down Expand Up @@ -70,18 +71,21 @@ def create_completion(
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
else:
access_token = conversation.access_token
websocket_url = f"{websocket_url}&acessToken={quote(access_token)}"
headers = {"Authorization": f"Bearer {access_token}"}
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
debug.log(f"Copilot: Cookies: {';'.join([*cookies])}")
websocket_url = f"{websocket_url}&accessToken={quote(access_token)}"
headers = {"authorization": f"Bearer {access_token}", "cookie": format_cookies(cookies)}

with Session(
timeout=timeout,
proxy=proxy,
impersonate="chrome",
headers=headers,
cookies=cookies
cookies=cookies,
) as session:
response = session.get(f"{cls.url}/")
response = session.get("https://copilot.microsoft.com/c/api/user")
raise_for_status(response)
debug.log(f"Copilot: User: {response.json().get('firstName', 'null')}")
if conversation is None:
response = session.post(cls.conversation_url)
raise_for_status(response)
Expand Down Expand Up @@ -119,6 +123,7 @@ def create_completion(

is_started = False
msg = None
image_prompt: str = None
while True:
try:
msg = wss.recv()[0]
Expand All @@ -128,7 +133,11 @@ def create_completion(
if msg.get("event") == "appendText":
is_started = True
yield msg.get("text")
elif msg.get("event") in ["done", "partCompleted"]:
elif msg.get("event") == "generatingImage":
image_prompt = msg.get("prompt")
elif msg.get("event") == "imageGenerated":
yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
elif msg.get("event") == "done":
break
if not is_started:
raise RuntimeError(f"Last message: {msg}")
Expand All @@ -152,7 +161,7 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
})()
""")
if access_token is None:
asyncio.sleep(1)
await asyncio.sleep(1)
cookies = {}
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
cookies[c.name] = c.value
Expand Down
124 changes: 47 additions & 77 deletions g4f/Provider/RubiksAI.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@

from __future__ import annotations

import asyncio
import aiohttp
import random
import string
import json
Expand All @@ -11,34 +10,24 @@

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

from ..requests.raise_for_status import raise_for_status

class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Rubiks AI"
url = "https://rubiks.ai"
api_endpoint = "https://rubiks.ai/search/api.php"
api_endpoint = "https://rubiks.ai/search/api/"
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True

default_model = 'llama-3.1-70b-versatile'
models = [default_model, 'gpt-4o-mini']
default_model = 'gpt-4o-mini'
models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro']

model_aliases = {
"llama-3.1-70b": "llama-3.1-70b-versatile",
}

@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model

@staticmethod
def generate_mid() -> str:
"""
Expand Down Expand Up @@ -70,7 +59,8 @@ async def create_async_generator(
model: str,
messages: Messages,
proxy: str = None,
websearch: bool = False,
web_search: bool = False,
temperature: float = 0.6,
**kwargs
) -> AsyncResult:
"""
Expand All @@ -80,20 +70,18 @@ async def create_async_generator(
- model (str): The model to use in the request.
- messages (Messages): The messages to send as a prompt.
- proxy (str, optional): Proxy URL, if needed.
- websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
- web_search (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
"""
model = cls.get_model(model)
prompt = format_prompt(messages)
q_value = prompt
mid_value = cls.generate_mid()
referer = cls.create_referer(q=q_value, mid=mid_value, model=model)

url = cls.api_endpoint
params = {
'q': q_value,
'model': model,
'id': '',
'mid': mid_value
referer = cls.create_referer(q=messages[-1]["content"], mid=mid_value, model=model)

data = {
"messages": messages,
"model": model,
"search": web_search,
"stream": True,
"temperature": temperature
}

headers = {
Expand All @@ -111,52 +99,34 @@ async def create_async_generator(
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"'
}

try:
timeout = aiohttp.ClientTimeout(total=None)
async with ClientSession(timeout=timeout) as session:
async with session.get(url, headers=headers, params=params, proxy=proxy) as response:
if response.status != 200:
yield f"Request ended with status code {response.status}"
return

assistant_text = ''
sources = []

async for line in response.content:
decoded_line = line.decode('utf-8').strip()
if not decoded_line.startswith('data: '):
continue
data = decoded_line[6:]
if data in ('[DONE]', '{"done": ""}'):
break
try:
json_data = json.loads(data)
except json.JSONDecodeError:
continue

if 'url' in json_data and 'title' in json_data:
if websearch:
sources.append({'title': json_data['title'], 'url': json_data['url']})

elif 'choices' in json_data:
for choice in json_data['choices']:
delta = choice.get('delta', {})
content = delta.get('content', '')
role = delta.get('role', '')
if role == 'assistant':
continue
assistant_text += content

if websearch and sources:
sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
assistant_text += f"\n\n**Source:**\n{sources_text}"

yield assistant_text

except asyncio.CancelledError:
yield "The request was cancelled."
except aiohttp.ClientError as e:
yield f"An error occurred during the request: {e}"
except Exception as e:
yield f"An unexpected error occurred: {e}"
async with ClientSession() as session:
async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
await raise_for_status(response)

sources = []
async for line in response.content:
decoded_line = line.decode('utf-8').strip()
if not decoded_line.startswith('data: '):
continue
data = decoded_line[6:]
if data in ('[DONE]', '{"done": ""}'):
break
try:
json_data = json.loads(data)
except json.JSONDecodeError:
continue

if 'url' in json_data and 'title' in json_data:
if web_search:
sources.append({'title': json_data['title'], 'url': json_data['url']})

elif 'choices' in json_data:
for choice in json_data['choices']:
delta = choice.get('delta', {})
content = delta.get('content', '')
if content:
yield content

if web_search and sources:
sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)])
yield f"\n\n**Source:**\n{sources_text}"
65 changes: 65 additions & 0 deletions g4f/Provider/needs_auth/Cerebras.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
from __future__ import annotations

import requests
from aiohttp import ClientSession

from .OpenaiAPI import OpenaiAPI
from ...typing import AsyncResult, Messages, Cookies
from ...requests.raise_for_status import raise_for_status
from ...cookies import get_cookies

class Cerebras(OpenaiAPI):
    """Provider for Cerebras Inference, an OpenAI-compatible API.

    Authenticates either with an explicit ``api_key`` or by reusing the
    browser session cookies for ``.cerebras.ai`` to fetch a demo API key.
    """
    label = "Cerebras Inference"
    url = "https://inference.cerebras.ai/"
    working = True
    default_model = "llama3.1-70b"
    # Used when the live model listing cannot be fetched.
    fallback_models = [
        "llama3.1-70b",
        "llama3.1-8b",
    ]
    model_aliases = {"llama-3.1-70b": "llama3.1-70b", "llama-3.1-8b": "llama3.1-8b"}

    @classmethod
    def get_models(cls, api_key: str = None):
        """Return available model names, fetching them once and caching on the class.

        Falls back to ``fallback_models`` on any error (network, auth, schema).
        """
        if not cls.models:
            try:
                headers = {}
                if api_key:
                    # Fix: original wrote f"Bearer ${api_key}" — the "$" is
                    # JavaScript template syntax leaked into a Python f-string,
                    # producing the literal header "Bearer $<key>".
                    headers["authorization"] = f"Bearer {api_key}"
                response = requests.get("https://api.cerebras.ai/v1/models", headers=headers)
                raise_for_status(response)
                data = response.json()
                cls.models = [model.get("model") for model in data.get("models")]
            except Exception:
                # Best-effort: keep the provider usable when the listing fails.
                cls.models = cls.fallback_models
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = "https://api.cerebras.ai/v1",
        api_key: str = None,
        cookies: Cookies = None,
        **kwargs
    ) -> AsyncResult:
        """Stream a completion, resolving a demo API key from cookies if needed.

        Parameters
        ----------
        model: model name (aliases resolved by the base class).
        messages: chat history to send.
        api_base: OpenAI-compatible endpoint base URL.
        api_key: explicit key; when None, cookies are used to obtain one.
        cookies: cookies for .cerebras.ai; auto-loaded when both are None.
        """
        if api_key is None and cookies is None:
            cookies = get_cookies(".cerebras.ai")
        async with ClientSession(cookies=cookies) as session:
            async with session.get("https://inference.cerebras.ai/api/auth/session") as response:
                # Fix: raise_for_status returns a coroutine for aiohttp
                # responses, so it must be awaited to have any effect.
                await raise_for_status(response)
                data = await response.json()
                if data:
                    api_key = data.get("user", {}).get("demoApiKey")
        async for chunk in super().create_async_generator(
            model, messages,
            api_base=api_base,
            impersonate="chrome",
            api_key=api_key,
            headers={
                "User-Agent": "ex/JS 1.5.0",
            },
            **kwargs
        ):
            yield chunk
7 changes: 5 additions & 2 deletions g4f/Provider/needs_auth/CopilotAccount.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
from __future__ import annotations

from ..base_provider import ProviderModelMixin
from ..Copilot import Copilot

class CopilotAccount(Copilot, ProviderModelMixin):
    """Authenticated variant of the Copilot provider.

    Mixing in ProviderModelMixin gives it model-listing support; it exposes
    a single "Copilot" model which also serves as the vision/image model.
    """
    needs_auth = True
    parent = "Copilot"
    default_model = "Copilot"
    default_vision_model = default_model
    models = [default_model]
    image_models = models
28 changes: 28 additions & 0 deletions g4f/Provider/needs_auth/HuggingFace2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from __future__ import annotations

from .OpenaiAPI import OpenaiAPI
from ..HuggingChat import HuggingChat
from ...typing import AsyncResult, Messages

class HuggingFace2(OpenaiAPI):
    """OpenAI-compatible provider backed by the Hugging Face Inference API."""
    label = "HuggingFace (Inference API)"
    url = "https://huggingface.co"
    working = True
    default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
    default_vision_model = default_model
    # Mirror the model list offered by HuggingChat.
    models = list(HuggingChat.models)

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = "https://api-inference.huggingface.co/v1",
        max_tokens: int = 500,
        **kwargs
    ) -> AsyncResult:
        """Delegate to the OpenAI-compatible base with HF endpoint defaults.

        ``api_base`` and ``max_tokens`` are filled with Inference-API
        defaults; everything else passes through unchanged.
        """
        generator = super().create_async_generator(
            model,
            messages,
            api_base=api_base,
            max_tokens=max_tokens,
            **kwargs,
        )
        return generator
4 changes: 3 additions & 1 deletion g4f/Provider/needs_auth/OpenaiAPI.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ async def create_async_generator(
stop: Union[str, list[str]] = None,
stream: bool = False,
headers: dict = None,
impersonate: str = None,
extra_data: dict = {},
**kwargs
) -> AsyncResult:
Expand All @@ -55,7 +56,8 @@ async def create_async_generator(
async with StreamSession(
proxies={"all": proxy},
headers=cls.get_headers(stream, api_key, headers),
timeout=timeout
timeout=timeout,
impersonate=impersonate,
) as session:
data = filter_none(
messages=messages,
Expand Down
2 changes: 2 additions & 0 deletions g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
from .gigachat import *

from .BingCreateImages import BingCreateImages
from .Cerebras import Cerebras
from .CopilotAccount import CopilotAccount
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .GeminiPro import GeminiPro
from .Groq import Groq
from .HuggingFace import HuggingFace
from .HuggingFace2 import HuggingFace2
from .MetaAI import MetaAI
from .MetaAIAccount import MetaAIAccount
from .OpenaiAPI import OpenaiAPI
Expand Down
2 changes: 1 addition & 1 deletion g4f/client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@

from ..providers.base_provider import AsyncGeneratorProvider
from ..image import ImageResponse, to_image, to_data_uri, is_accepted_format, EXTENSIONS_MAP
from ..typing import Messages, Cookies, Image
from ..typing import Messages, Image
from ..providers.types import ProviderType, FinishReason, BaseConversation
from ..errors import NoImageResponseError
from ..providers.retry_provider import IterListProvider
Expand Down
Loading

0 comments on commit 81f694c

Please sign in to comment.