From 30b9707657c66e280e66b43dbd61a3688260edf3 Mon Sep 17 00:00:00 2001 From: TrueSaiyan Date: Mon, 25 Nov 2024 21:28:50 +0800 Subject: [PATCH 1/3] FIX: Bug fix for flux-pro aka flux-schnell https://api.airforce/imagine2?model=flux-4o https://api.airforce/imagine2?model=flux-schnell and https://api.airforce/imagine2?model=flux-1.1-pro all generate the same images, but "Flux-1.1-Pro" doesn't work --- g4f/Provider/Airforce.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index f5bcfefad2e..c0799a43b32 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -36,7 +36,7 @@ def fetch_imagine_models(cls): default_model = "gpt-4o-mini" default_image_model = "flux" - additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"] + additional_models_imagine = ["stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "flux-1.1-pro"] @classmethod def get_models(cls): @@ -86,7 +86,7 @@ def get_models(cls): ### imagine ### "sdxl": "stable-diffusion-xl-base", "sdxl": "stable-diffusion-xl-lightning", - "flux-pro": "Flux-1.1-Pro", + "flux-pro": "flux-1.1-pro", } @classmethod From 442185eac25da0e06c277478e21bb543f8d697f9 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 25 Nov 2024 15:44:17 +0100 Subject: [PATCH 2/3] Fix provider selection in images generate Improve image generation in Airforce provider --- g4f/Provider/Airforce.py | 18 ++++++------- g4f/client/__init__.py | 55 ++++++++++++++++++++++------------------ 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index f5bcfefad2e..283d561e586 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -7,6 +7,7 @@ import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +from urllib.parse import quote from ..typing 
import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin @@ -95,14 +96,18 @@ def create_async_generator( model: str, messages: Messages, proxy: str = None, + prompt: str = None, seed: int = None, size: str = "1:1", # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1" stream: bool = False, **kwargs ) -> AsyncResult: model = cls.get_model(model) + if model in cls.image_models: - return cls._generate_image(model, messages, proxy, seed, size) + if prompt is None: + prompt = messages[-1]['content'] + return cls._generate_image(model, prompt, proxy, seed, size) else: return cls._generate_text(model, messages, proxy, stream, **kwargs) @@ -110,7 +115,7 @@ def create_async_generator( async def _generate_image( cls, model: str, - messages: Messages, + prompt: str, proxy: str = None, seed: int = None, size: str = "1:1", @@ -125,7 +130,6 @@ async def _generate_image( } if seed is None: seed = random.randint(0, 100000) - prompt = messages[-1]['content'] async with StreamSession(headers=headers, proxy=proxy) as session: params = { @@ -140,12 +144,8 @@ async def _generate_image( if 'application/json' in content_type: raise RuntimeError(await response.json().get("error", {}).get("message")) - elif 'image' in content_type: - image_data = b"" - async for chunk in response.iter_content(): - if chunk: - image_data += chunk - image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={prompt}&size={size}&seed={seed}" + elif content_type.startswith("image/"): + image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={quote(prompt)}&size={size}&seed={seed}" yield ImageResponse(images=image_url, alt=prompt) @classmethod diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index dcd408ab87a..86a810493b9 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -16,7 +16,7 @@ from ..errors import NoImageResponseError, ModelNotFoundError from ..providers.retry_provider import IterListProvider from ..providers.asyncio 
import get_running_loop, to_sync_generator, async_generator_to_list -from ..Provider.needs_auth.BingCreateImages import BingCreateImages +from ..Provider.needs_auth import BingCreateImages, OpenaiAccount from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse from .image_models import ImageModels from .types import IterResponse, ImageProvider, Client as BaseClient @@ -264,28 +264,34 @@ def generate( """ return asyncio.run(self.async_generate(prompt, model, provider, response_format, proxy, **kwargs)) - async def async_generate( - self, - prompt: str, - model: Optional[str] = None, - provider: Optional[ProviderType] = None, - response_format: Optional[str] = "url", - proxy: Optional[str] = None, - **kwargs - ) -> ImagesResponse: + async def get_provider_handler(self, model: Optional[str], provider: Optional[ImageProvider], default: ImageProvider) -> ImageProvider: if provider is None: - provider_handler = self.models.get(model, provider or self.provider or BingCreateImages) + provider_handler = self.provider + if provider_handler is None: + provider_handler = self.models.get(model, default) elif isinstance(provider, str): provider_handler = convert_to_provider(provider) else: provider_handler = provider if provider_handler is None: - raise ModelNotFoundError(f"Unknown model: {model}") + return default if isinstance(provider_handler, IterListProvider): if provider_handler.providers: provider_handler = provider_handler.providers[0] else: raise ModelNotFoundError(f"IterListProvider for model {model} has no providers") + return provider_handler + + async def async_generate( + self, + prompt: str, + model: Optional[str] = None, + provider: Optional[ProviderType] = None, + response_format: Optional[str] = "url", + proxy: Optional[str] = None, + **kwargs + ) -> ImagesResponse: + provider_handler = await self.get_provider_handler(model, provider, BingCreateImages) if proxy is None: proxy = self.client.proxy @@ -311,7 +317,7 @@ async def async_generate( 
response = item break else: - raise ValueError(f"Provider {provider} does not support image generation") + raise ValueError(f"Provider {getattr(provider_handler, '__name__')} does not support image generation") if isinstance(response, ImageResponse): return await self._process_image_response( response, @@ -320,6 +326,8 @@ async def async_generate( model, getattr(provider_handler, "__name__", None) ) + if response is None: + raise NoImageResponseError(f"No image response from {getattr(provider_handler, '__name__')}") raise NoImageResponseError(f"Unexpected response type: {type(response)}") def create_variation( @@ -343,31 +351,26 @@ async def async_create_variation( proxy: Optional[str] = None, **kwargs ) -> ImagesResponse: - if provider is None: - provider = self.models.get(model, provider or self.provider or BingCreateImages) - if provider is None: - raise ModelNotFoundError(f"Unknown model: {model}") - if isinstance(provider, str): - provider = convert_to_provider(provider) + provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount) if proxy is None: proxy = self.client.proxy - if hasattr(provider, "create_async_generator"): + if hasattr(provider_handler, "create_async_generator"): messages = [{"role": "user", "content": "create a variation of this image"}] generator = None try: - generator = provider.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs) + generator = provider_handler.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs) async for chunk in generator: if isinstance(chunk, ImageResponse): response = chunk break finally: await safe_aclose(generator) - elif hasattr(provider, 'create_variation'): - if asyncio.iscoroutinefunction(provider.create_variation): - response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) + elif hasattr(provider_handler, 
'create_variation'): + if asyncio.iscoroutinefunction(provider_handler.create_variation): + response = await provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) else: - response = provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) + response = provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) else: raise NoImageResponseError(f"Provider {provider} does not support image variation") @@ -375,6 +378,8 @@ async def async_create_variation( response = ImageResponse([response]) if isinstance(response, ImageResponse): return self._process_image_response(response, response_format, proxy, model, getattr(provider, "__name__", None)) + if response is None: + raise NoImageResponseError(f"No image response from {getattr(provider, '__name__')}") raise NoImageResponseError(f"Unexpected response type: {type(response)}") async def _process_image_response From 0eb1d3ed500932d4897004529d9dc2ec7a1b2a78 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 25 Nov 2024 16:35:58 +0100 Subject: [PATCH 3/3] Fix start api without port argument --- g4f/api/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index a899251b0a9..01c75daeb6d 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -48,6 +48,8 @@ class Annotated: logger = logging.getLogger(__name__) +DEFAULT_PORT = 1337 + def create_app(g4f_api_key: str = None): app = FastAPI() @@ -493,7 +495,7 @@ def format_exception(e: Union[Exception, str], config: Union[ChatCompletionsConf def run_api( host: str = '0.0.0.0', - port: int = 1337, + port: int = None, bind: str = None, debug: bool = False, workers: int = None, @@ -505,6 +507,8 @@ def run_api( use_colors = debug if bind is not None: host, port = bind.split(":") + if port is None: + port = DEFAULT_PORT uvicorn.run( f"g4f.api:create_app{'_debug' 
if debug else ''}", host=host,