From 46038c6a207a594d32a88e98d5ed6532f0bd17f3 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 25 Nov 2024 13:27:56 +0100 Subject: [PATCH 1/2] Add .har file support for Copilot Update provider in Vision documentation Hide --- docs/async_client.md | 11 +++--- docs/client.md | 5 +-- g4f/Provider/Copilot.py | 53 ++++++++++++++++++++++++++--- g4f/cli.py | 6 ++-- g4f/gui/client/static/css/style.css | 4 +++ 5 files changed, 66 insertions(+), 13 deletions(-) diff --git a/docs/async_client.md b/docs/async_client.md index fe6f46ff16b..e501aefa8a2 100644 --- a/docs/async_client.md +++ b/docs/async_client.md @@ -154,13 +154,14 @@ import asyncio from g4f.client import AsyncClient async def main(): - client = AsyncClient() - + client = AsyncClient( + provider=g4f.Provider.CopilotAccount + ) + image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw - + response = await client.chat.completions.create( model=g4f.models.default, - provider=g4f.Provider.Bing, messages=[ { "role": "user", @@ -169,7 +170,7 @@ async def main(): ], image=image ) - + print(response.choices[0].message.content) asyncio.run(main()) diff --git a/docs/client.md b/docs/client.md index da45d7fdf36..c318bee3612 100644 --- a/docs/client.md +++ b/docs/client.md @@ -265,7 +265,9 @@ from g4f.client import Client image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw # Or: image = open("docs/cat.jpeg", "rb") -client = Client() +client = Client( + provider=CopilotAccount +) response = client.chat.completions.create( model=g4f.models.default, @@ -275,7 +277,6 @@ response = client.chat.completions.create( "content": "What are on this image?" } ], - provider=g4f.Provider.Bing, image=image # Add any other necessary parameters ) diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py index 2f37b1ebf8f..5721f37701e 100644 --- a/g4f/Provider/Copilot.py +++ b/g4f/Provider/Copilot.py @@ -1,5 +1,6 @@ from __future__ import annotations +import os import json import asyncio from http.cookiejar import CookieJar @@ -21,9 +22,11 @@ from ..typing import CreateResult, Messages, ImageType from ..errors import MissingRequirementsError from ..requests.raise_for_status import raise_for_status -from ..providers.helper import format_cookies +from ..providers.asyncio import get_running_loop +from ..Provider.openai.har_file import NoValidHarFileError, get_headers from ..requests import get_nodriver from ..image import ImageResponse, to_bytes, is_accepted_format +from ..cookies import get_cookies_dir from .. 
import debug class Conversation(BaseConversation): @@ -69,7 +72,15 @@ def create_completion( cookies = conversation.cookie_jar if conversation is not None else None if cls.needs_auth or image is not None: if conversation is None or conversation.access_token is None: - access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy)) + try: + access_token, cookies = readHAR() + except NoValidHarFileError as h: + debug.log(f"Copilot: {h}") + try: + get_running_loop(check_nested=True) + access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy)) + except MissingRequirementsError: + raise h else: access_token = conversation.access_token debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}") @@ -159,7 +170,9 @@ async def get_access_token_and_cookies(cls, proxy: str = None): for (var i = 0; i < localStorage.length; i++) { try { item = JSON.parse(localStorage.getItem(localStorage.key(i))); - if (item.credentialType == "AccessToken") { + if (item.credentialType == "AccessToken" + && item.expiresOn > Math.floor(Date.now() / 1000) + && item.target.includes("ChatAI")) { return item.secret; } } catch(e) {} @@ -172,4 +185,36 @@ async def get_access_token_and_cookies(cls, proxy: str = None): for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])): cookies[c.name] = c.value await page.close() - return access_token, cookies \ No newline at end of file + return access_token, cookies + +def readHAR(): + harPath = [] + for root, _, files in os.walk(get_cookies_dir()): + for file in files: + if file.endswith(".har"): + harPath.append(os.path.join(root, file)) + if not harPath: + raise NoValidHarFileError("No .har file found") + api_key = None + cookies = None + for path in harPath: + with open(path, 'rb') as file: + try: + harFile = json.loads(file.read()) + except json.JSONDecodeError: + # Error: not a HAR file! + continue + for v in harFile['log']['entries']: + v_headers = get_headers(v) + if v['request']['url'].startswith(Copilot.url): + try: + if "authorization" in v_headers: + api_key = v_headers["authorization"].split(maxsplit=1).pop() + except Exception as e: + debug.log(f"Error on read headers: {e}") + if v['request']['cookies']: + cookies = {c['name']: c['value'] for c in v['request']['cookies']} + if api_key is None: + raise NoValidHarFileError("No access token found in .har files") + + return api_key, cookies \ No newline at end of file diff --git a/g4f/cli.py b/g4f/cli.py index 90ec37fa6f0..a8a038a2ce2 100644 --- a/g4f/cli.py +++ b/g4f/cli.py @@ -10,8 +10,9 @@ def main(): parser = argparse.ArgumentParser(description="Run gpt4free") subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.") api_parser = subparsers.add_parser("api") - api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.") - api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.") + api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)") + api_parser.add_argument("--port", default=None, help="Change the port of the server.") + api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.") api_parser.add_argument("--gui", "-g", default=False, action="store_true", help="Add gui to the api.") api_parser.add_argument("--model", default=None, help="Default model for chat completion. 
(incompatible with --reload and --workers)") api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], @@ -55,6 +56,7 @@ def run_api_args(args): g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers] run_api( bind=args.bind, + port=args.port, debug=args.debug, workers=args.workers, use_colors=not args.disable_colors, diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index 57b75bae866..856f9fb5878 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -177,6 +177,10 @@ body { filter: blur(calc(0.5 * 70vw)) opacity(var(--opacity)); } +body.white .gradient{ + display: none; +} + .conversations { display: flex; flex-direction: column; From fb831bcc26b632bd45197ec6868991e91a691ca5 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 25 Nov 2024 13:53:05 +0100 Subject: [PATCH 2/2] Add image upload to api --- g4f/api/__init__.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 3b1c540fc00..a899251b0a9 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -39,7 +39,7 @@ class Annotated: from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_provider from g4f.providers.response import BaseConversation from g4f.client.helper import filter_none -from g4f.image import is_accepted_format, images_dir +from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir from g4f.typing import Messages from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError from g4f.cookies import read_cookie_files, get_cookies_dir @@ -93,6 +93,8 @@ class ChatCompletionsConfig(BaseModel): model: str = Field(default="") provider: Optional[str] = None stream: bool = False + image: Optional[str] = None + image_name: Optional[str] = None temperature: Optional[float] = None max_tokens: Optional[int] = None stop: Union[list[str], str, None] = None @@ -263,6 +265,7 @@ async def model_info(model_name: str) -> ModelResponseModel: HTTP_200_OK: {"model": ChatCompletion}, HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel}, HTTP_404_NOT_FOUND: {"model": ErrorResponseModel}, + HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel}, HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel}, }) async def chat_completions( @@ -284,6 +287,12 @@ async def chat_completions( if config.provider in self.conversations[config.conversation_id]: conversation = self.conversations[config.conversation_id][config.provider] + if config.image is not None: + try: + is_data_uri_an_image(config.image) + except ValueError as e: + return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY) + # Create the completion response response = self.client.chat.completions.create( **filter_none( @@ -291,7 +300,7 @@ async def chat_completions( "model": AppConfig.model, "provider": AppConfig.provider, "proxy": AppConfig.proxy, - **config.dict(exclude_none=True), + **config.model_dump(exclude_none=True), **{ "conversation_id": None, "return_conversation": return_conversation,