Skip to content

Commit

Permalink
Merge branch 'xtekky:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
kqlio67 authored Nov 25, 2024
2 parents 326e781 + a73b3fc commit 3811099
Show file tree
Hide file tree
Showing 9 changed files with 98 additions and 33 deletions.
1 change: 1 addition & 0 deletions .github/workflows/publish-workflow.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ jobs:
python -m etc.tool.openapi
- uses: actions/upload-artifact@v4
with:
name: openapi
path: openapi.json
publish:
runs-on: ubuntu-latest
Expand Down
11 changes: 6 additions & 5 deletions docs/async_client.md
Original file line number Diff line number Diff line change
Expand Up @@ -154,13 +154,14 @@ import asyncio
from g4f.client import AsyncClient

async def main():
client = AsyncClient()

client = AsyncClient(
provider=g4f.Provider.CopilotAccount
)

image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw

response = await client.chat.completions.create(
model=g4f.models.default,
provider=g4f.Provider.Bing,
messages=[
{
"role": "user",
Expand All @@ -169,7 +170,7 @@ async def main():
],
image=image
)

print(response.choices[0].message.content)

asyncio.run(main())
Expand Down
5 changes: 3 additions & 2 deletions docs/client.md
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,9 @@ from g4f.client import Client
image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw
# Or: image = open("docs/cat.jpeg", "rb")

client = Client()
client = Client(
provider=CopilotAccount
)

response = client.chat.completions.create(
model=g4f.models.default,
Expand All @@ -275,7 +277,6 @@ response = client.chat.completions.create(
"content": "What are on this image?"
}
],
provider=g4f.Provider.Bing,
image=image
# Add any other necessary parameters
)
Expand Down
53 changes: 49 additions & 4 deletions g4f/Provider/Copilot.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from __future__ import annotations

import os
import json
import asyncio
from http.cookiejar import CookieJar
Expand All @@ -21,9 +22,11 @@
from ..typing import CreateResult, Messages, ImageType
from ..errors import MissingRequirementsError
from ..requests.raise_for_status import raise_for_status
from ..providers.helper import format_cookies
from ..providers.asyncio import get_running_loop
from ..Provider.openai.har_file import NoValidHarFileError, get_headers
from ..requests import get_nodriver
from ..image import ImageResponse, to_bytes, is_accepted_format
from ..cookies import get_cookies_dir
from .. import debug

class Conversation(BaseConversation):
Expand Down Expand Up @@ -69,7 +72,15 @@ def create_completion(
cookies = conversation.cookie_jar if conversation is not None else None
if cls.needs_auth or image is not None:
if conversation is None or conversation.access_token is None:
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
try:
access_token, cookies = readHAR()
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
try:
get_running_loop(check_nested=True)
access_token, cookies = asyncio.run(cls.get_access_token_and_cookies(proxy))
except MissingRequirementsError:
raise h
else:
access_token = conversation.access_token
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
Expand Down Expand Up @@ -159,7 +170,9 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
for (var i = 0; i < localStorage.length; i++) {
try {
item = JSON.parse(localStorage.getItem(localStorage.key(i)));
if (item.credentialType == "AccessToken") {
if (item.credentialType == "AccessToken"
&& item.expiresOn > Math.floor(Date.now() / 1000)
&& item.target.includes("ChatAI")) {
return item.secret;
}
} catch(e) {}
Expand All @@ -172,4 +185,36 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
cookies[c.name] = c.value
await page.close()
return access_token, cookies
return access_token, cookies

def readHAR():
    """Extract a Copilot access token and request cookies from captured .har files.

    Recursively scans the cookies directory for HAR captures. For every entry
    whose request URL targets ``Copilot.url``, the ``authorization`` header is
    parsed for a bearer token; request cookies are collected from any entry.
    Later files/entries overwrite earlier finds, so the most recent capture wins.

    Returns:
        tuple: ``(api_key, cookies)`` where ``api_key`` is the bearer token
        string and ``cookies`` is a name->value dict, or ``None`` if no entry
        carried request cookies.

    Raises:
        NoValidHarFileError: If no ``.har`` file exists under the cookies
            directory, or none contains an authorization header for Copilot.
    """
    har_paths = []
    for root, _, files in os.walk(get_cookies_dir()):
        for file_name in files:
            if file_name.endswith(".har"):
                har_paths.append(os.path.join(root, file_name))
    if not har_paths:
        raise NoValidHarFileError("No .har file found")
    api_key = None
    cookies = None
    for path in har_paths:
        with open(path, 'rb') as har_stream:
            try:
                har_data = json.loads(har_stream.read())
                entries = har_data['log']['entries']
            except (json.JSONDecodeError, KeyError, TypeError):
                # Not a HAR file (invalid JSON, or JSON without the HAR
                # log/entries structure) — skip it instead of aborting the scan.
                continue
        for entry in entries:
            entry_headers = get_headers(entry)
            if entry['request']['url'].startswith(Copilot.url):
                try:
                    if "authorization" in entry_headers:
                        # "Bearer <token>" -> keep only the token part.
                        api_key = entry_headers["authorization"].split(maxsplit=1).pop()
                except Exception as e:
                    debug.log(f"Error on read headers: {e}")
            if entry['request']['cookies']:
                # NOTE(review): cookies are intentionally taken from any entry,
                # not only Copilot-URL ones, matching the original behavior.
                cookies = {c['name']: c['value'] for c in entry['request']['cookies']}
    if api_key is None:
        raise NoValidHarFileError("No access token found in .har files")

    return api_key, cookies
29 changes: 19 additions & 10 deletions g4f/api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class Annotated:
from g4f.client import AsyncClient, ChatCompletion, ImagesResponse, convert_to_provider
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
from g4f.image import is_accepted_format, images_dir
from g4f.image import is_accepted_format, is_data_uri_an_image, images_dir
from g4f.typing import Messages
from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError
from g4f.cookies import read_cookie_files, get_cookies_dir
Expand Down Expand Up @@ -91,15 +91,17 @@ def create_app_debug(g4f_api_key: str = None):
class ChatCompletionsConfig(BaseModel):
messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
model: str = Field(default="")
provider: Optional[str] = Field(examples=[None])
provider: Optional[str] = None
stream: bool = False
temperature: Optional[float] = Field(examples=[None])
max_tokens: Optional[int] = Field(examples=[None])
stop: Union[list[str], str, None] = Field(examples=[None])
api_key: Optional[str] = Field(examples=[None])
web_search: Optional[bool] = Field(examples=[None])
proxy: Optional[str] = Field(examples=[None])
conversation_id: Optional[str] = Field(examples=[None])
image: Optional[str] = None
image_name: Optional[str] = None
temperature: Optional[float] = None
max_tokens: Optional[int] = None
stop: Union[list[str], str, None] = None
api_key: Optional[str] = None
web_search: Optional[bool] = None
proxy: Optional[str] = None
conversation_id: Optional[str] = None

class ImageGenerationConfig(BaseModel):
prompt: str
Expand Down Expand Up @@ -263,6 +265,7 @@ async def model_info(model_name: str) -> ModelResponseModel:
HTTP_200_OK: {"model": ChatCompletion},
HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
})
async def chat_completions(
Expand All @@ -284,14 +287,20 @@ async def chat_completions(
if config.provider in self.conversations[config.conversation_id]:
conversation = self.conversations[config.conversation_id][config.provider]

if config.image is not None:
try:
is_data_uri_an_image(config.image)
except ValueError as e:
return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)

# Create the completion response
response = self.client.chat.completions.create(
**filter_none(
**{
"model": AppConfig.model,
"provider": AppConfig.provider,
"proxy": AppConfig.proxy,
**config.dict(exclude_none=True),
**config.model_dump(exclude_none=True),
**{
"conversation_id": None,
"return_conversation": return_conversation,
Expand Down
6 changes: 4 additions & 2 deletions g4f/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,9 @@ def main():
parser = argparse.ArgumentParser(description="Run gpt4free")
subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
api_parser = subparsers.add_parser("api")
api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.")
api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
api_parser.add_argument("--port", default=None, help="Change the port of the server.")
api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--gui", "-g", default=False, action="store_true", help="Add gui to the api.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
Expand Down Expand Up @@ -55,6 +56,7 @@ def run_api_args(args):
g4f.cookies.browsers = [g4f.cookies[browser] for browser in args.cookie_browsers]
run_api(
bind=args.bind,
port=args.port,
debug=args.debug,
workers=args.workers,
use_colors=not args.disable_colors,
Expand Down
19 changes: 10 additions & 9 deletions g4f/client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,8 +152,7 @@ async def async_iter_response(
content = filter_json(content)
yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
finally:
if hasattr(response, 'aclose'):
await safe_aclose(response)
await safe_aclose(response)

async def async_iter_append_model_and_provider(
response: AsyncChatCompletionResponseType
Expand All @@ -167,8 +166,7 @@ async def async_iter_append_model_and_provider(
chunk.provider = last_provider.get("name")
yield chunk
finally:
if hasattr(response, 'aclose'):
await safe_aclose(response)
await safe_aclose(response)

class Client(BaseClient):
def __init__(
Expand Down Expand Up @@ -292,7 +290,7 @@ async def async_generate(
proxy = self.client.proxy

response = None
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
if hasattr(provider_handler, "create_async_generator"):
messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
async for item in provider_handler.create_async_generator(model, messages, prompt=prompt, **kwargs):
if isinstance(item, ImageResponse):
Expand Down Expand Up @@ -354,7 +352,7 @@ async def async_create_variation(
if proxy is None:
proxy = self.client.proxy

if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
if hasattr(provider, "create_async_generator"):
messages = [{"role": "user", "content": "create a variation of this image"}]
generator = None
try:
Expand All @@ -364,8 +362,7 @@ async def async_create_variation(
response = chunk
break
finally:
if generator and hasattr(generator, 'aclose'):
await safe_aclose(generator)
await safe_aclose(generator)
elif hasattr(provider, 'create_variation'):
if asyncio.iscoroutinefunction(provider.create_variation):
response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
Expand Down Expand Up @@ -454,7 +451,11 @@ def create(
)
stop = [stop] if isinstance(stop, str) else stop

response = provider.create_completion(
if hasattr(provider, "create_async_generator"):
create_handler = provider.create_async_generator
else:
create_handler = provider.create_completion
response = create_handler(
model,
messages,
stream=stream,
Expand Down
3 changes: 2 additions & 1 deletion g4f/client/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,8 @@ def filter_none(**kwargs) -> dict:

async def safe_aclose(generator: AsyncGenerator) -> None:
try:
await generator.aclose()
if generator and hasattr(generator, 'aclose'):
await generator.aclose()
except Exception as e:
logging.warning(f"Error while closing generator: {e}")

Expand Down
4 changes: 4 additions & 0 deletions g4f/gui/client/static/css/style.css
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,10 @@ body {
filter: blur(calc(0.5 * 70vw)) opacity(var(--opacity));
}

body.white .gradient{
display: none;
}

.conversations {
display: flex;
flex-direction: column;
Expand Down

0 comments on commit 3811099

Please sign in to comment.