@@ -5,7 +5,7 @@ To use a different model than the default (GPT-4), you need to edit in the [conf
```
[config]
model = "..."
- model_turbo = "..."
+ model_weak = "..."
fallback_models = ["..."]
```

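Taken together, the renamed trio presumably splits work by cost: `model` serves the main calls, `model_weak` the lighter ones, and `fallback_models` are tried in order if the primary call fails. A minimal filled-in sketch (the model names below are hypothetical placeholders, not values from this diff):

```toml
[config]
model = "gpt-4o"             # primary model for most calls (placeholder)
model_weak = "gpt-4o-mini"   # cheaper model for lighter tasks (placeholder)
fallback_models = ["gpt-4"]  # tried in order when the primary call fails
```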
@@ -28,7 +28,7 @@ and set in your configuration file:
```
[config]
model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
- model_turbo ="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+ model_weak ="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
```

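The Azure hunk covers only the `configuration.toml` side; the matching credentials normally live in `.secrets.toml`. A sketch assuming the usual Azure OpenAI fields (treat the exact key names as assumptions and verify them against the project's secrets template):

```toml
[openai]  # in .secrets.toml (field names assumed)
key = "..."                 # your Azure OpenAI key (placeholder)
api_type = "azure"
api_version = "2023-05-15"  # example API version
api_base = "https://<your-resource>.openai.azure.com"  # placeholder endpoint
deployment_id = "..."       # the deployment name created in Azure (placeholder)
```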
@@ -52,7 +52,7 @@ MAX_TOKENS={

[config] # in configuration.toml
model = "ollama/llama2"
- model_turbo = "ollama/llama2"
+ model_weak = "ollama/llama2"
fallback_models=["ollama/llama2"]

[ollama] # in .secrets.toml
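The `[ollama]` block is cut off by the hunk. For a locally served model, the missing part is presumably just the server address; a sketch assuming Ollama's standard default port (adjust to your setup):

```toml
[ollama]  # in .secrets.toml
api_base = "http://localhost:11434"  # default Ollama endpoint (assumption)
```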
@@ -76,7 +76,7 @@ MAX_TOKENS={
}
[config] # in configuration.toml
model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
- model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+ model_weak = "huggingface/meta-llama/Llama-2-7b-chat-hf"
fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]

[huggingface] # in .secrets.toml
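Likewise, the `[huggingface]` block is truncated here. A sketch of the likely shape, with every value a placeholder and the field names themselves assumptions to verify against the secrets template:

```toml
[huggingface]  # in .secrets.toml (field names assumed)
key = "..."       # your Hugging Face API key (placeholder)
api_base = "..."  # base URL of your inference endpoint (placeholder)
```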
@@ -91,7 +91,7 @@ To use Llama2 model with Replicate, for example, set:
```
[config] # in configuration.toml
model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
- model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+ model_weak = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
[replicate] # in .secrets.toml
key = ...
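The long Replicate identifier follows the pattern `replicate/<model-name>:<version-hash>`, pinning one exact published version of the model. A sketch showing the pattern with the specifics elided (the placeholders below are not real values; copy the full string from the model's Replicate page):

```toml
[config] # in configuration.toml
model = "replicate/<model-name>:<version-hash>"  # pattern only; fill in from the model page
```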
@@ -107,7 +107,7 @@ To use Llama3 model with Groq, for example, set:
```
[config] # in configuration.toml
model = "llama3-70b-8192"
- model_turbo = "llama3-70b-8192"
+ model_weak = "llama3-70b-8192"
fallback_models = ["groq/llama3-70b-8192"]
[groq] # in .secrets.toml
key = ... # your Groq api key
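Note the asymmetry in this hunk: `model` uses the bare id while `fallback_models` carries the `groq/` provider prefix. If your setup routes through the prefixed form, a consistent sketch might look like the following (whether the bare id also resolves is an assumption to verify):

```toml
[config] # in configuration.toml
model = "groq/llama3-70b-8192"
model_weak = "groq/llama3-70b-8192"
fallback_models = ["groq/llama3-70b-8192"]
```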
@@ -121,7 +121,7 @@ To use Google's Vertex AI platform and its associated models (chat-bison/codecha
```
[config] # in configuration.toml
model = "vertex_ai/codechat-bison"
- model_turbo = "vertex_ai/codechat-bison"
+ model_weak = "vertex_ai/codechat-bison"
fallback_models="vertex_ai/codechat-bison"

[vertexai] # in .secrets.toml
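The `[vertexai]` block is cut off by the hunk. With Vertex AI, authentication normally rides on Google application default credentials, so the secrets side presumably only pins the project and region; a sketch with assumed field names and placeholder values:

```toml
[vertexai]  # in .secrets.toml (field names assumed)
vertex_project = "my-gcp-project"  # placeholder GCP project id
vertex_location = "us-central1"    # placeholder region
```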
@@ -140,7 +140,7 @@ To use [Google AI Studio](https://aistudio.google.com/) models, set the relevant
```toml
[config] # in configuration.toml
model = "google_ai_studio/gemini-1.5-flash"
- model_turbo = "google_ai_studio/gemini-1.5-flash"
+ model_weak = "google_ai_studio/gemini-1.5-flash"
fallback_models = ["google_ai_studio/gemini-1.5-flash"]

[google_ai_studio] # in .secrets.toml
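Again the secrets block is truncated by the hunk. It presumably holds only the API key obtained from Google AI Studio; a sketch with the field name as an assumption:

```toml
[google_ai_studio]  # in .secrets.toml
gemini_api_key = "..."  # key from https://aistudio.google.com/ (field name assumed)
```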
@@ -156,7 +156,7 @@ To use Anthropic models, set the relevant models in the configuration section of
```
[config]
model="anthropic/claude-3-opus-20240229"
- model_turbo ="anthropic/claude-3-opus-20240229"
+ model_weak ="anthropic/claude-3-opus-20240229"
fallback_models=["anthropic/claude-3-opus-20240229"]
```

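The Anthropic hunk shows no secrets block, but an API key still has to live somewhere, presumably in `.secrets.toml`. A sketch with both the section and field name as assumptions to verify against the secrets template:

```toml
[anthropic]  # in .secrets.toml (section and field name assumed)
key = "..."  # your Anthropic API key (placeholder)
```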
@@ -173,7 +173,7 @@ To use Amazon Bedrock and its foundational models, add the below configuration:
```
[config] # in configuration.toml
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
- model_turbo ="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+ model_weak ="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
fallback_models=["bedrock/anthropic.claude-v2:1"]
```

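Bedrock ids follow `bedrock/<provider>.<model>-<version>`, and the hunk already mixes generations (Claude 3 Sonnet as primary, Claude v2.1 as fallback). One natural use of the renamed field is pointing `model_weak` at a cheaper family member; a sketch (the Haiku id is illustrative; verify the model is enabled in your AWS region):

```toml
[config] # in configuration.toml
model = "bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
model_weak = "bedrock/anthropic.claude-3-haiku-20240307-v1:0"  # illustrative cheaper model
fallback_models = ["bedrock/anthropic.claude-v2:1"]
```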
@@ -195,7 +195,7 @@ If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agen
```
[config]
model="custom_model_name"
- model_turbo ="custom_model_name"
+ model_weak ="custom_model_name"
fallback_models=["custom_model_name"]
```
(2) Set the maximal tokens for the model:
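The diff cuts off before showing step (2). Judging by step (1), it presumably sets a context-window limit for the otherwise-unrecognized model; a sketch with the field name as an assumption to verify against the configuration reference:

```toml
[config]
custom_model_max_tokens = 4096  # assumed field name; match your model's context window
```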