
Commit 3a15957

Update (g4f/models.py g4f/Provider/airforce/AirforceChat.py docs/providers-and-models.md)
kqlio67 committed Nov 9, 2024
1 parent d2f36d5 commit 3a15957
Showing 3 changed files with 71 additions and 438 deletions.
4 changes: 2 additions & 2 deletions docs/providers-and-models.md
@@ -19,8 +19,8 @@ This document provides an overview of various AI providers and models, including
|----------|-------------|--------------|---------------|--------|--------|------|
|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[api.airforce](https://api.airforce)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`claude-3-haiku, claude-3-sonnet, claude-3-opus, gpt-4, gpt-4-turbo, gpt-4o-mini, gpt-3.5-turbo, llama-3-70b, llama-3-8b, llama-2-13b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llamaguard-2-8b, llamaguard-7b, llama-3.2-90b, llamaguard-3-8b, llama-3.2-11b, llamaguard-3-11b, llama-3.2-3b, llama-3.2-1b, llama-2-7b, mixtral-8x7b, mixtral-8x22b, mythomax-13b, openchat-3.5, qwen-2-72b, qwen-2-5-7b, qwen-2-5-72b, gemma-2b, gemma-2-9b, gemma-2b-27b, gemini-flash, gemini-pro, dbrx-instruct, deepseek-coder, hermes-2-dpo, hermes-2, openhermes-2.5, wizardlm-2-8x22b, phi-2, solar-10-7b, cosmosrp, lfm-40b, german-7b, zephyr-7b`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[aimathgpt.forit.ai](https://aimathgpt.forit.ai)|`g4f.Provider.AiMathGPT`|`llama-3.1-70b`||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4o, gpt-4o-mini, gpt-4-turbo, llama-2-7b, llama-3.1-8b, llama-3.1-70b, hermes-2-pro, hermes-2-dpo, phi-2, deepseek-coder, openchat-3.5, openhermes-2.5, cosmosrp, lfm-40b, german-7b, zephyr-7b, neural-7b`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`||||![Active](https://img.shields.io/badge/Active-brightgreen)||
|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`||`gpt-4-vision`||![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
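For orientation, the model names in this table are the values a caller passes to g4f when selecting the Airforce provider. A minimal usage sketch, assuming the client interface from the project README (g4f.client.Client with a provider argument) also applies to this provider:

from g4f.client import Client
from g4f.Provider import Airforce

# Pick the Airforce provider explicitly and use one of the model names listed in the table above.
client = Client(provider=Airforce)
response = client.chat.completions.create(
    model="llama-3.1-70b",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
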
261 changes: 27 additions & 234 deletions g4f/Provider/airforce/AirforceChat.py
@@ -3,6 +3,7 @@
from aiohttp import ClientSession
import json
from typing import List
import requests

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -51,258 +52,50 @@ class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True

default_model = 'llama-3-70b-chat'
text_models = [
# anthropic
'claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-5-sonnet-20240620',
'claude-3-5-sonnet-20241022',
'claude-3-opus-20240229',

# openai
'chatgpt-4o-latest',
'gpt-4',
'gpt-4-turbo',
'gpt-4o-2024-05-13',
'gpt-4o-mini-2024-07-18',
'gpt-4o-mini',
'gpt-4o-2024-08-06',
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-1106',
'gpt-4o',
'gpt-4-turbo-2024-04-09',
'gpt-4-0125-preview',
'gpt-4-1106-preview',

# meta-llama
default_model,
'llama-3-70b-chat-turbo',
'llama-3-8b-chat',
'llama-3-8b-chat-turbo',
'llama-3-70b-chat-lite',
'llama-3-8b-chat-lite',
'llama-2-13b-chat',
'llama-3.1-405b-turbo',
'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo',
'LlamaGuard-2-8b',
'llamaguard-7b',
'Llama-Vision-Free',
'Llama-Guard-7b',
'Llama-3.2-90B-Vision-Instruct-Turbo',
'Meta-Llama-Guard-3-8B',
'Llama-3.2-11B-Vision-Instruct-Turbo',
'Llama-Guard-3-11B-Vision-Turbo',
'Llama-3.2-3B-Instruct-Turbo',
'Llama-3.2-1B-Instruct-Turbo',
'llama-2-7b-chat-int8',
'llama-2-7b-chat-fp16',
'Llama 3.1 405B Instruct',
'Llama 3.1 70B Instruct',
'Llama 3.1 8B Instruct',

# mistral-ai
'Mixtral-8x7B-Instruct-v0.1',
'Mixtral-8x22B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.2',
'Mistral-7B-Instruct-v0.3',

# Gryphe
'MythoMax-L2-13b-Lite',
'MythoMax-L2-13b',

# openchat
'openchat-3.5-0106',

# qwen
#'Qwen1.5-72B-Chat', # Empty answer
#'Qwen1.5-110B-Chat', # Empty answer
'Qwen2-72B-Instruct',
'Qwen2.5-7B-Instruct-Turbo',
'Qwen2.5-72B-Instruct-Turbo',

# google
'gemma-2b-it',
'gemma-2-9b-it',
'gemma-2-27b-it',

# gemini
'gemini-1.5-flash',
'gemini-1.5-pro',

# databricks
'dbrx-instruct',

# deepseek-ai
'deepseek-coder-6.7b-base',
'deepseek-coder-6.7b-instruct',
'deepseek-math-7b-instruct',

# NousResearch
'deepseek-math-7b-instruct',
'Nous-Hermes-2-Mixtral-8x7B-DPO',
'hermes-2-pro-mistral-7b',

# teknium
'openhermes-2.5-mistral-7b',

# microsoft
'WizardLM-2-8x22B',
'phi-2',

# upstage
'SOLAR-10.7B-Instruct-v1.0',

# pawan
'cosmosrp',

# liquid
'lfm-40b-moe',

# DiscoResearch
'discolm-german-7b-v1',

# tiiuae
'falcon-7b-instruct',

# defog
'sqlcoder-7b-2',

# tinyllama
'tinyllama-1.1b-chat',

# HuggingFaceH4
'zephyr-7b-beta',
]
default_model = 'llama-3.1-70b-chat'
response = requests.get('https://api.airforce/models')
data = response.json()

text_models = [model['id'] for model in data['data']]

models = [*text_models]
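
The lines above replace the hard-coded model list with one fetched from https://api.airforce/models while the class body is being evaluated, so a network failure would surface at import time. A minimal sketch of the same pattern with a fallback, assuming a hypothetical fetch_text_models helper, timeout, and fallback list that are not part of the commit:

import requests

def fetch_text_models(fallback=('llama-3.1-70b-chat',)):
    # Query the provider's model listing; fall back to a static default if the request or parsing fails.
    try:
        response = requests.get('https://api.airforce/models', timeout=10)
        response.raise_for_status()
        data = response.json()
        return [model['id'] for model in data['data']]
    except (requests.RequestException, KeyError, TypeError, ValueError):
        return list(fallback)

text_models = fetch_text_models()
models = [*text_models]

Wrapping the request this way keeps the module importable even when the endpoint is unreachable, which the inline requests.get in the diff does not.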

model_aliases = {
# anthropic
"claude-3-haiku": "claude-3-haiku-20240307",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3-opus": "claude-3-opus-20240229",

# openai
"gpt-4o": "chatgpt-4o-latest",
#"gpt-4": "gpt-4",
#"gpt-4-turbo": "gpt-4-turbo",
"gpt-4o": "gpt-4o-2024-05-13",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
#"gpt-4o-mini": "gpt-4o-mini",
"gpt-4o": "gpt-4o-2024-08-06",
"gpt-3.5-turbo": "gpt-3.5-turbo",
"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106",
#"gpt-4o": "gpt-4o",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4": "gpt-4-0125-preview",
"gpt-4": "gpt-4-1106-preview",

# meta-llama
"llama-3-70b": "llama-3-70b-chat",
"llama-3-8b": "llama-3-8b-chat",
"llama-3-8b": "llama-3-8b-chat-turbo",
"llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat-lite",
"llama-2-13b": "llama-2-13b-chat",
"llama-3.1-405b": "llama-3.1-405b-turbo",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
"llamaguard-2-8b": "LlamaGuard-2-8b",
"llamaguard-7b": "llamaguard-7b",
#"llama_vision_free": "Llama-Vision-Free", # Unknown
"llamaguard-7b": "Llama-Guard-7b",
"llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
"llamaguard-3-8b": "Meta-Llama-Guard-3-8B",
"llama-3.2-11b": "Llama-3.2-11B-Vision-Instruct-Turbo",
"llamaguard-3-11b": "Llama-Guard-3-11B-Vision-Turbo",
"llama-3.2-3b": "Llama-3.2-3B-Instruct-Turbo",
"llama-3.2-1b": "Llama-3.2-1B-Instruct-Turbo",
"llama-2-7b": "llama-2-7b-chat-int8",
"llama-2-7b": "llama-2-7b-chat-fp16",
"llama-3.1-405b": "Llama 3.1 405B Instruct",
"llama-3.1-70b": "Llama 3.1 70B Instruct",
"llama-3.1-8b": "Llama 3.1 8B Instruct",

# mistral-ai
"mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
"mixtral-8x7b": "Mistral-7B-Instruct-v0.1",
"mixtral-8x7b": "Mistral-7B-Instruct-v0.2",
"mixtral-8x7b": "Mistral-7B-Instruct-v0.3",

# Gryphe
"mythomax-13b": "MythoMax-L2-13b-Lite",
"mythomax-13b": "MythoMax-L2-13b",

# openchat
"openchat-3.5": "openchat-3.5-0106",

# qwen
#"qwen-1.5-72b": "Qwen1.5-72B-Chat", # Empty answer
#"qwen-1.5-110b": "Qwen1.5-110B-Chat", # Empty answer
"qwen-2-72b": "Qwen2-72B-Instruct",
"qwen-2-5-7b": "Qwen2.5-7B-Instruct-Turbo",
"qwen-2-5-72b": "Qwen2.5-72B-Instruct-Turbo",

# google
"gemma-2b": "gemma-2b-it",
"gemma-2-9b": "gemma-2-9b-it",
"gemma-2b-27b": "gemma-2-27b-it",

# gemini
"gemini-flash": "gemini-1.5-flash",
"gemini-pro": "gemini-1.5-pro",

# databricks
"dbrx-instruct": "dbrx-instruct",


# deepseek-ai
#"deepseek-coder": "deepseek-coder-6.7b-base",
"deepseek-coder": "deepseek-coder-6.7b-instruct",
#"deepseek-math": "deepseek-math-7b-instruct",


# NousResearch
#"deepseek-math": "deepseek-math-7b-instruct",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"hermes-2": "hermes-2-pro-mistral-7b",

"hermes-2-pro": "hermes-2-pro-mistral-7b",
# teknium
"openhermes-2.5": "openhermes-2.5-mistral-7b",

# microsoft
"wizardlm-2-8x22b": "WizardLM-2-8x22B",
#"phi-2": "phi-2",

# upstage
"solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",

# pawan
#"cosmosrp": "cosmosrp",


# liquid
"lfm-40b": "lfm-40b-moe",

# DiscoResearch
"german-7b": "discolm-german-7b-v1",

# tiiuae
#"falcon-7b": "falcon-7b-instruct",

# defog
#"sqlcoder-7b": "sqlcoder-7b-2",

# tinyllama
#"tinyllama-1b": "tinyllama-1.1b-chat",


# meta-llama
"llama-2-7b": "llama-2-7b-chat-int8",
"llama-2-7b": "llama-2-7b-chat-fp16",
"llama-3.1-70b": "llama-3.1-70b-chat",
"llama-3.1-8b": "llama-3.1-8b-chat",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",

# inferless
"neural-7b": "neural-chat-7b-v3-1",

# HuggingFaceH4
"zephyr-7b": "zephyr-7b-beta",

# llmplayground.net
#"any-uncensored": "any-uncensored",
}
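
model_aliases maps the short names used in docs/providers-and-models.md to the concrete ids returned by the API. Note that a Python dict literal keeps only the last value for a repeated key, so aliases listed more than once (for example "gpt-4o" or "llama-3.1-70b") resolve to whichever concrete id appears last. A rough illustration of how such a mapping can be applied, using a hypothetical resolve_model helper rather than the mixin's actual implementation:

def resolve_model(model: str, text_models: list, model_aliases: dict, default_model: str) -> str:
    # Accept an exact model id, translate a known alias, or fall back to the provider default.
    if not model:
        return default_model
    if model in text_models:
        return model
    return model_aliases.get(model, default_model)

# Illustrative call: resolve_model("llama-3.1-70b", text_models, model_aliases, default_model)
# returns whichever concrete id the final alias mapping assigns to "llama-3.1-70b".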

@classmethod
g4f/models.py: diff not loaded.
