Skip to content

Commit

Permalink
Merge pull request xtekky#2423 from hlohaus/model
Browse files Browse the repository at this point in the history
Add .har file support for Copilot
  • Loading branch information
hlohaus authored Nov 25, 2024
2 parents a722abb + fb831bc commit a73b3fc
Show file tree
Hide file tree
Showing 6 changed files with 77 additions and 15 deletions.
11 changes: 6 additions & 5 deletions docs/async_client.md
Original file line number Diff line number Diff line change
Expand Up @@ -154,13 +154,14 @@ import asyncio
from g4f.client import AsyncClient

async def main():
client = AsyncClient()

client = AsyncClient(
provider=g4f.Provider.CopilotAccount
)

image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw

response = await client.chat.completions.create(
model=g4f.models.default,
provider=g4f.Provider.Bing,
messages=[
{
"role": "user",
Expand All @@ -169,7 +170,7 @@ async def main():
],
image=image
)

print(response.choices[0].message.content)

asyncio.run(main())
Expand Down
5 changes: 3 additions & 2 deletions docs/client.md
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,9 @@ from g4f.client import Client
image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
# Or: image = open("docs/cat.jpeg", "rb")

client = Client()
client = Client(
provider=CopilotAccount
)

response = client.chat.completions.create(
model=g4f.models.default,
Expand All @@ -275,7 +277,6 @@ response = client.chat.completions.create(
"content": "What are on this image?"
}
],
provider=g4f.Provider.Bing,
image=image
# Add any other necessary parameters
)
Expand Down
53 changes: 49 additions & 4 deletions g4f/Provider/Copilot.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import os
import json
import asyncio
from http.cookiejar import CookieJar
Expand All @@ -21,9 +22,11 @@
from ..typing import CreateResult, Messages, ImageType
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from ..providers.helper import format_cookies
from ..providers.asyncio import get_running_loop
from ..Provider.openai.har_file import NoValidHarFileError, get_headers
from ..requests import get_nodriver
from ..image import ImageResponse, to_bytes, is_accepted_format
from ..cookies import get_cookies_dir
from .. import debug

class Conversation(BaseConversation):
Expand Down Expand Up @@ -69,7 +72,15 @@ def create_completion(
cookies = conversation.cookie_jar if conversation is not None else None
if cls.needs_auth or image is not None:
if conversation is None or conversation.access_token is None:
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
try:
access_token, cookies = readHAR()
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
try:
get_running_loop(check_nested=True)
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
except MissingRequirementsError:
raise h
else:
access_token = conversation.access_token
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
Expand Down Expand Up @@ -159,7 +170,9 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
for (var i = 0; i < localStorage.length; i++) {
try {
item = JSON.parse(localStorage.getItem(localStorage.key(i)));
if (item.credentialType == "AccessToken") {
if (item.credentialType == "AccessToken"
&& item.expiresOn > Math.floor(Date.now() / 1000)
&& item.target.includes("ChatAI")) {
return item.secret;
}
} catch(e) {}
Expand All @@ -172,4 +185,36 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
cookies[c.name] = c.value
await page.close()
return access_token, cookies
return access_token, cookies

def readHAR():
    """Extract a Copilot access token and cookies from captured ``.har`` files.

    Walks the cookies directory for HAR captures, scans every recorded
    request to the Copilot origin, and pulls the bearer token from its
    ``authorization`` header together with that request's cookies.  When
    several files or entries match, the last one scanned wins.

    Returns:
        tuple: ``(api_key, cookies)`` where ``api_key`` is the access token
        string and ``cookies`` is a name -> value dict (or ``None`` if no
        matching request carried cookies).

    Raises:
        NoValidHarFileError: if no ``.har`` file exists under the cookies
            directory, or none of them contains an access token.
    """
    har_paths = [
        os.path.join(root, name)
        for root, _, files in os.walk(get_cookies_dir())
        for name in files
        if name.endswith(".har")
    ]
    if not har_paths:
        raise NoValidHarFileError("No .har file found")
    api_key = None
    cookies = None
    for path in har_paths:
        with open(path, 'rb') as fh:
            try:
                har_file = json.load(fh)
            except json.JSONDecodeError:
                # Not actually a HAR file: skip it instead of failing the scan.
                continue
        for entry in har_file['log']['entries']:
            entry_headers = get_headers(entry)
            # Only credentials sent to the Copilot origin are relevant;
            # scoping the cookie capture here too avoids picking up cookies
            # from unrelated sites recorded in the same capture.
            if entry['request']['url'].startswith(Copilot.url):
                try:
                    if "authorization" in entry_headers:
                        # Header is "Bearer <token>": keep only the token part.
                        api_key = entry_headers["authorization"].split(maxsplit=1).pop()
                except Exception as e:
                    # Best effort: a malformed header must not abort the scan.
                    debug.log(f"Error on read headers: {e}")
                if entry['request']['cookies']:
                    cookies = {c['name']: c['value'] for c in entry['request']['cookies']}
    if api_key is None:
        raise NoValidHarFileError("No access token found in .har files")

    return api_key, cookies
13 changes: 11 additions & 2 deletions g4f/api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class Annotated:
from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_provider
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
from g4f.image import is_accepted_format, images_dir
from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir
from g4f.typing import Messages
from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError
from g4f.cookies import read_cookie_files, get_cookies_dir
Expand Down Expand Up @@ -93,6 +93,8 @@ class ChatCompletionsConfig(BaseModel):
model: str = Field(default="")
provider: Optional[str] = None
stream: bool = False
image: Optional[str] = None
image_name: Optional[str] = None
temperature: Optional[float] = None
max_tokens: Optional[int] = None
stop: Union[list[str], str, None] = None
Expand Down Expand Up @@ -263,6 +265,7 @@ async def model_info(model_name: str) -> ModelResponseModel:
HTTP_200_OK: {"model": ChatCompletion},
HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
})
async def chat_completions(
Expand All @@ -284,14 +287,20 @@ async def chat_completions(
if config.provider in self.conversations[config.conversation_id]:
conversation = self.conversations[config.conversation_id][config.provider]

if config.image is not None:
try:
is_data_uri_an_image(config.image)
except ValueError as e:
return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)

# Create the completion response
response = self.client.chat.completions.create(
**filter_none(
**{
"model": AppConfig.model,
"provider": AppConfig.provider,
"proxy": AppConfig.proxy,
**config.dict(exclude_none=True),
**config.model_dump(exclude_none=True),
**{
"conversation_id": None,
"return_conversation": return_conversation,
Expand Down
6 changes: 4 additions & 2 deletions g4f/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,9 @@ def main():
parser = argparse.ArgumentParser(description="Run gpt4free")
subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
api_parser = subparsers.add_parser("api")
api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.")
api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
api_parser.add_argument("--port", default=None, help="Change the port of the server.")
api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--gui", "-g", default=False, action="store_true", help="Add gui to the api.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
Expand Down Expand Up @@ -55,6 +56,7 @@ def run_api_args(args):
g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers]
run_api(
bind=args.bind,
port=args.port,
debug=args.debug,
workers=args.workers,
use_colors=not args.disable_colors,
Expand Down
4 changes: 4 additions & 0 deletions g4f/gui/client/static/css/style.css
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,10 @@ body {
filter: blur(calc(0.5 * 70vw)) opacity(var(--opacity));
}

/* Light ("white") theme: hide the decorative background gradient. */
body.white .gradient{
    display: none;
}

.conversations {
display: flex;
flex-direction: column;
Expand Down

0 comments on commit a73b3fc

Please sign in to comment.