Skip to content

Commit 88a93bd

Browse files
committed
Update weak model document
1 parent 3c31048 commit 88a93bd

File tree

1 file changed

+11
-11
lines changed

1 file changed

+11
-11
lines changed

docs/docs/usage-guide/changing_a_model.md

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ To use a different model than the default (GPT-4), you need to edit in the [conf
55
```
66
[config]
77
model = "..."
8-
model_turbo = "..."
8+
model_weak = "..."
99
fallback_models = ["..."]
1010
```
1111

@@ -28,7 +28,7 @@ and set in your configuration file:
2828
```
2929
[config]
3030
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
31-
model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
31+
model_weak="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
3232
fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
3333
```
3434

@@ -52,7 +52,7 @@ MAX_TOKENS={
5252
5353
[config] # in configuration.toml
5454
model = "ollama/llama2"
55-
model_turbo = "ollama/llama2"
55+
model_weak = "ollama/llama2"
5656
fallback_models=["ollama/llama2"]
5757
5858
[ollama] # in .secrets.toml
@@ -76,7 +76,7 @@ MAX_TOKENS={
7676
}
7777
[config] # in configuration.toml
7878
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
79-
model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
79+
model_weak = "huggingface/meta-llama/Llama-2-7b-chat-hf"
8080
fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
8181
8282
[huggingface] # in .secrets.toml
@@ -91,7 +91,7 @@ To use Llama2 model with Replicate, for example, set:
9191
```
9292
[config] # in configuration.toml
9393
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
94-
model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
94+
model_weak = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
9595
fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
9696
[replicate] # in .secrets.toml
9797
key = ...
@@ -107,7 +107,7 @@ To use Llama3 model with Groq, for example, set:
107107
```
108108
[config] # in configuration.toml
109109
model = "llama3-70b-8192"
110-
model_turbo = "llama3-70b-8192"
110+
model_weak = "llama3-70b-8192"
111111
fallback_models = ["groq/llama3-70b-8192"]
112112
[groq] # in .secrets.toml
113113
key = ... # your Groq api key
@@ -121,7 +121,7 @@ To use Google's Vertex AI platform and its associated models (chat-bison/codecha
121121
```
122122
[config] # in configuration.toml
123123
model = "vertex_ai/codechat-bison"
124-
model_turbo = "vertex_ai/codechat-bison"
124+
model_weak = "vertex_ai/codechat-bison"
125125
fallback_models="vertex_ai/codechat-bison"
126126
127127
[vertexai] # in .secrets.toml
@@ -140,7 +140,7 @@ To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant
140140
```toml
141141
[config] # in configuration.toml
142142
model="google_ai_studio/gemini-1.5-flash"
143-
model_turbo="google_ai_studio/gemini-1.5-flash"
143+
model_weak="google_ai_studio/gemini-1.5-flash"
144144
fallback_models=["google_ai_studio/gemini-1.5-flash"]
145145

146146
[google_ai_studio] # in .secrets.toml
@@ -156,7 +156,7 @@ To use Anthropic models, set the relevant models in the configuration section of
156156
```
157157
[config]
158158
model="anthropic/claude-3-opus-20240229"
159-
model_turbo="anthropic/claude-3-opus-20240229"
159+
model_weak="anthropic/claude-3-opus-20240229"
160160
fallback_models=["anthropic/claude-3-opus-20240229"]
161161
```
162162

@@ -173,7 +173,7 @@ To use Amazon Bedrock and its foundational models, add the below configuration:
173173
```
174174
[config] # in configuration.toml
175175
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
176-
model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
176+
model_weak="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
177177
fallback_models=["bedrock/anthropic.claude-v2:1"]
178178
```
179179

@@ -195,7 +195,7 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
195195
```
196196
[config]
197197
model="custom_model_name"
198-
model_turbo="custom_model_name"
198+
model_weak="custom_model_name"
199199
fallback_models=["custom_model_name"]
200200
```
201201
(2) Set the maximal tokens for the model:

0 commit comments

Comments
 (0)