Skip to content

Commit

Permalink
Merge pull request #1267 from hlohaus/any
Browse files Browse the repository at this point in the history
Add AiChatOnline, ChatgptDemoAi, ChatgptNext Providers
  • Loading branch information
xtekky authored Nov 18, 2023
2 parents 9c45b56 + cadc507 commit 2fcb3f9
Show file tree
Hide file tree
Showing 202 changed files with 453 additions and 29,102 deletions.
4 changes: 2 additions & 2 deletions etc/tool/create_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@ def input_command():
class ChatGpt(AsyncGeneratorProvider):
url = "https://chat-gpt.com"
url = "https://chat-gpt.com"
working = True
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
Expand Down
3 changes: 1 addition & 2 deletions g4f/Provider/AItianhu.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from __future__ import annotations

import json
import browser_cookie3

from ..typing import AsyncResult, Messages
from ..requests import StreamSession
Expand All @@ -10,7 +9,7 @@

class AItianhu(AsyncGeneratorProvider):
url = "https://www.aitianhu.com"
working = True
working = False
supports_gpt_35_turbo = True

@classmethod
Expand Down
12 changes: 6 additions & 6 deletions g4f/Provider/AItianhuSpace.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import WebDriver, format_prompt, get_browser
from .helper import WebDriver, format_prompt, get_browser, get_random_string
from .. import debug

class AItianhuSpace(BaseProvider):
Expand All @@ -31,8 +31,7 @@ def create_completion(
if not model:
model = "gpt-3.5-turbo"
if not domain:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
rand = get_random_string(6)
domain = random.choice(cls._domains)
domain = f"{rand}.{domain}"
if debug.logging:
Expand Down Expand Up @@ -65,10 +64,11 @@ def create_completion(
driver.switch_to.window(window_handle)
break

# Wait for page load
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))

try:
# Add hook in XMLHttpRequest
# Register hook in XMLHttpRequest
script = """
const _http_request_open = XMLHttpRequest.prototype.open;
window._last_message = window._message = "";
Expand All @@ -90,11 +90,11 @@ def create_completion(
"""
driver.execute_script(script)

# Input and submit prompt
# Submit prompt
driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el").send_keys(prompt)
driver.find_element(By.CSS_SELECTOR, "button.n-button.n-button--primary-type.n-button--medium-type").click()

# Yield response
# Read response
while True:
chunk = driver.execute_script("""
if (window._message && window._message != window._last_message) {
Expand Down
59 changes: 59 additions & 0 deletions g4f/Provider/AiChatOnline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string

class AiChatOnline(AsyncGeneratorProvider):
    # Streaming GPT-3.5 provider backed by the "mwai" WordPress chat plugin
    # on aichatonline.org; replies arrive as server-sent events.
    url = "https://aichatonline.org"
    working = True
    supports_gpt_35_turbo = True
    supports_message_history = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the assistant reply for *messages* as streamed text chunks.

        Args:
            model: Ignored; the site always serves its default bot.
            messages: Conversation history. The full list is posted, but the
                last entry's content is also sent separately as ``newMessage``.
            proxy: Optional proxy URL forwarded to aiohttp.

        Raises:
            aiohttp.ClientResponseError: If the endpoint returns a non-2xx status.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chatgpt/chat/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "aichatonline.org",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers"
        }
        # Fresh random session/chat identifiers per request.
        payload = {
            "botId": "default",
            "customId": None,
            "session": get_random_string(16),
            "chatId": get_random_string(),
            "contextId": 7,
            "messages": messages,
            "newMessage": messages[-1]["content"],
            "newImageId": None,
            "stream": True
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(
                f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit",
                json=payload,
                proxy=proxy
            ) as response:
                response.raise_for_status()
                async for line in response.content:
                    # SSE lines are prefixed with "data: "; skip anything else.
                    if not line.startswith(b"data: "):
                        continue
                    event = json.loads(line[6:])
                    if event["type"] == "live":
                        yield event["data"]
                    elif event["type"] == "end":
                        break
55 changes: 55 additions & 0 deletions g4f/Provider/ChatgptDemoAi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string

class ChatgptDemoAi(AsyncGeneratorProvider):
    # Streaming GPT-3.5 provider backed by the "mwai" WordPress chat plugin
    # on chat.chatgptdemo.ai; replies arrive as server-sent events.
    url = "https://chat.chatgptdemo.ai"
    working = True
    supports_gpt_35_turbo = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the assistant reply for *messages* as streamed text chunks.

        Args:
            model: Ignored; the site always serves its default bot.
            messages: Conversation history. The full list is posted, and the
                last entry's content is also sent separately as ``newMessage``.
            proxy: Optional proxy URL forwarded to aiohttp.

        Raises:
            aiohttp.ClientResponseError: If the endpoint returns a non-2xx status.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            # Renamed from "data": the original reused one name for both the
            # request payload and each parsed SSE event, shadowing the payload.
            payload = {
                "botId": "default",
                "customId": "8824fe9bdb323a5d585a3223aaa0cb6e",
                "session": "N/A",
                "chatId": get_random_string(12),
                "contextId": 2,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk.startswith(b"data: "):
                        event = json.loads(chunk[6:])
                        if event["type"] == "live":
                            yield event["data"]
                        elif event["type"] == "end":
                            # Stop as soon as the server signals completion,
                            # matching AiChatOnline, instead of waiting for the
                            # connection to close.
                            break
3 changes: 0 additions & 3 deletions g4f/Provider/ChatgptFree.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
#cloudflare block

from __future__ import annotations

import re
from aiohttp import ClientSession

from ..requests import StreamSession
from ..typing import Messages
Expand Down
61 changes: 61 additions & 0 deletions g4f/Provider/ChatgptNext.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ChatgptNext(AsyncGeneratorProvider):
    # OpenAI-compatible streaming endpoint hosted at chat.fstha.com, exposed
    # through chatgpt-free.cc; uses a fixed public bearer token.
    url = "https://www.chatgpt-free.cc"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the assistant reply for *messages* as streamed text chunks.

        Args:
            model: Model name; falls back to ``gpt-3.5-turbo`` when empty.
            messages: Conversation history, posted unmodified.
            proxy: Optional proxy URL forwarded to aiohttp.
            **kwargs: Extra sampling options merged into the request body
                (may override temperature, top_p, etc.).

        Raises:
            aiohttp.ClientResponseError: If the endpoint returns a non-2xx status.
        """
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Referer": "https://chat.fstha.com/",
            "x-requested-with": "XMLHttpRequest",
            "Origin": "https://chat.fstha.com",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Authorization": "Bearer ak-chatgpt-nice",
            "Connection": "keep-alive",
            "Alt-Used": "chat.fstha.com",
        }
        # Standard OpenAI chat-completion body; caller kwargs win on conflict.
        payload = {
            "messages": messages,
            "stream": True,
            "model": model,
            "temperature": 0.5,
            "presence_penalty": 0,
            "frequency_penalty": 0,
            "top_p": 1,
            **kwargs
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(
                "https://chat.fstha.com/api/openai/v1/chat/completions",
                json=payload,
                proxy=proxy
            ) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    if not line.startswith(b"data: "):
                        continue
                    delta = json.loads(line[6:])["choices"][0]["delta"]
                    token = delta.get("content")
                    if token:
                        yield token
1 change: 0 additions & 1 deletion g4f/Provider/ChatgptX.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ async def create_async_generator(
data = {
"user_id": user_id,
"chats_id": chat_id,
"prompt": format_prompt(messages),
"current_model": "gpt3",
"conversions_id": chat["conversions_id"],
"ass_conversions_id": chat["ass_conversions_id"],
Expand Down
11 changes: 4 additions & 7 deletions g4f/Provider/FakeGpt.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
from __future__ import annotations

import uuid, time, random, string, json
import uuid, time, random, json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from .helper import format_prompt, get_random_string


class FakeGpt(AsyncGeneratorProvider):
Expand Down Expand Up @@ -39,7 +39,7 @@ async def create_async_generator(
token_ids = [t["token_id"] for t in list if t["count"] == 0]
data = {
"token_key": random.choice(token_ids),
"session_password": random_string()
"session_password": get_random_string()
}
async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
response.raise_for_status()
Expand Down Expand Up @@ -88,7 +88,4 @@ async def create_async_generator(
except:
continue
if not last_message:
raise RuntimeError("No valid response")

def random_string(length: int = 10):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(length))
raise RuntimeError("No valid response")
2 changes: 0 additions & 2 deletions g4f/Provider/GptChatly.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
# cloudflare block

from __future__ import annotations

from ..requests import StreamSession
Expand Down
5 changes: 4 additions & 1 deletion g4f/Provider/GptGod.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
from __future__ import annotations
import secrets, json

import secrets
import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
Expand Down
4 changes: 2 additions & 2 deletions g4f/Provider/Hashnode.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
from __future__ import annotations

import secrets
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_hex

class SearchTypes():
quick = "quick"
Expand Down Expand Up @@ -55,7 +55,7 @@ async def create_async_generator(
response.raise_for_status()
cls._sources = (await response.json())["result"]
data = {
"chatId": secrets.token_hex(16).zfill(32),
"chatId": get_random_hex(),
"history": messages,
"prompt": prompt,
"searchType": search_type,
Expand Down
13 changes: 3 additions & 10 deletions g4f/Provider/Koala.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,11 @@
from __future__ import annotations

import random
import string
import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import get_random_string

class Koala(AsyncGeneratorProvider):
url = "https://koala.sh"
Expand All @@ -32,7 +31,7 @@ async def create_async_generator(
"Referer": f"{cls.url}/chat",
"Content-Type": "application/json",
"Flag-Real-Time-Data": "false",
"Visitor-ID": random_string(),
"Visitor-ID": get_random_string(20),
"Origin": cls.url,
"Alt-Used": "koala.sh",
"Connection": "keep-alive",
Expand Down Expand Up @@ -62,10 +61,4 @@ async def create_async_generator(
response.raise_for_status()
async for chunk in response.content:
if chunk.startswith(b"data: "):
yield json.loads(chunk[6:])


def random_string(length: int = 20):
return ''.join(random.choice(
string.ascii_letters + string.digits
) for _ in range(length))
yield json.loads(chunk[6:])
6 changes: 6 additions & 0 deletions g4f/Provider/Liaobots.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,12 @@
"maxLength": 24000,
"tokenLimit": 8000,
},
"gpt-4-0613": {
"id": "gpt-4-0613",
"name": "GPT-4",
"maxLength": 32000,
"tokenLimit": 8000,
},
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5",
Expand Down
Loading

0 comments on commit 2fcb3f9

Please sign in to comment.