{
"DetailedErrors": true,
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft.AspNetCore": "Warning"
}
},
"AllowedHosts": "*",
"KernelMemory": {
"Services": {
"AzureOpenAIText": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by the deployed model
// See https://learn.microsoft.com/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 16384,
// "ChatCompletion" or "TextCompletion"
"APIType": "ChatCompletion",
// How many times to retry in case of throttling.
"MaxRetries": 10
},
"AzureOpenAIEmbedding": {
// "ApiKey" or "AzureIdentity"
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>.openai.azure.com/",
"APIKey": "",
"Deployment": "",
// The max number of tokens supported by the deployed model
// See https://learn.microsoft.com/azure/ai-services/openai/concepts/models
"MaxTokenTotal": 8191,
// The number of dimensions output embeddings should have.
// Only supported in "text-embedding-3" and later models developed with
// MRL, see https://arxiv.org/abs/2205.13147
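// e.g. 1536 or 3072; leave null to use the model's default dimensions (illustrative values).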
"EmbeddingDimensions": null,
// How many embeddings to calculate in parallel. The max value depends on
// the model and deployment in use.
// See also https://learn.microsoft.com/azure/ai-services/openai/reference#embeddings
"MaxEmbeddingBatchSize": 10,
// How many times to retry in case of throttling.
"MaxRetries": 10
},
"AzureAIDocIntel": {
// "APIKey" or "AzureIdentity".
// AzureIdentity: use the automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
// Required when Auth == APIKey
"APIKey": "",
"Endpoint": ""
},
"AzureAISearch": {
// "ApiKey" or "AzureIdentity". For other options see <AzureAISearchConfig>.
// AzureIdentity: use automatic AAD authentication mechanism. You can test locally
// using the env vars AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET.
"Auth": "AzureIdentity",
"Endpoint": "https://<...>",
"APIKey": "",
// Hybrid search is not enabled by default. Note that with hybrid search,
// relevance scores differ from, and are usually lower than, those produced by pure vector search.
"UseHybridSearch": false
},
"OpenAI": {
// Name of the model used to generate text (text completion or chat completion)
"TextModel": "gpt-4o-mini",
// The max number of tokens supported by the text model.
"TextModelMaxTokenTotal": 128000,
// Type of text generation; by default, auto-detected from the model name.
// Possible values: "Auto", "TextCompletion", "Chat"
"TextGenerationType": "Auto",
// Name of the model used to generate text embeddings
"EmbeddingModel": "text-embedding-ada-002",
// The max number of tokens supported by the embedding model
// See https://platform.openai.com/docs/guides/embeddings/what-are-embeddings
"EmbeddingModelMaxTokenTotal": 8191,
// OpenAI API Key
"APIKey": "",
// OpenAI Organization ID (usually empty, unless your account belongs to multiple organizations)
"OrgId": "",
// Endpoint to use. By default the system uses 'https://api.openai.com/v1'.
// Change this to use proxies or services compatible with the OpenAI HTTP protocol, such as LM Studio.
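// e.g. "http://localhost:1234/v1" for a local LM Studio server (illustrative value).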
"Endpoint": "",
// How many times to retry in case of throttling
"MaxRetries": 10,
// The number of dimensions output embeddings should have.
// Only supported in "text-embedding-3" and later models developed with
// MRL, see https://arxiv.org/abs/2205.13147
"EmbeddingDimensions": null,
// How many embeddings to calculate in parallel.
// See https://platform.openai.com/docs/api-reference/embeddings/create
"MaxEmbeddingBatchSize": 100
},
"AWSS3": {
"Auth": "AccessKey",
// AccessKey ID, required when using AccessKey auth
// Note: you can use an env var 'KernelMemory__Services__AWSS3__AccessKey' to set this
"AccessKey": "",
// SecretAccessKey, required when using AccessKey auth
// Note: you can use an env var 'KernelMemory__Services__AWSS3__SecretAccessKey' to set this
"SecretAccessKey": "",
// Required: name of the bucket where directories are created and files are uploaded.
// Note: you can use an env var 'KernelMemory__Services__AWSS3__BucketName' to set this
"BucketName": ""
// Allows specifying a custom AWS endpoint or a compatible S3 endpoint
// Examples: "https://s3.amazonaws.com", "https://s3.us-west-2.amazonaws.com", "http://127.0.0.1:9444"
// Note: you can use an env var 'KernelMemory__Services__AWSS3__Endpoint' to set this
// Note: you can test locally using S3 Ninja https://s3ninja.net
// "Endpoint": "https://s3.amazonaws.com"
},
"LlamaSharp": {
"TextModel": {
// Path to the model file, e.g. "llama-2-7b-chat.Q6_K.gguf"
"ModelPath": "",
// Max number of tokens supported by the model
"MaxTokenTotal": 4096
// Optional parameters
// "GpuLayerCount": 32,
},
"EmbeddingModel": {
// Path to the model file, e.g. "nomic-embed-text-v1.5.Q8_0.gguf"
"ModelPath": "",
// Max number of tokens supported by the model
"MaxTokenTotal": 4096
// Optional parameters
// "GpuLayerCount": 32,
}
}
},
"DataIngestion": {
// How many memory DB records to insert at once when extracting memories from
// uploaded documents (used only if the memory DB supports batching).
"MemoryDbUpsertBatchSize": 100
},
"Retrieval": {
"SearchClient": {
// Maximum number of tokens accepted by the LLM used to generate answers.
// The number includes the tokens used for the answer, e.g. when using
// GPT-4 32k, set this number to 32768.
// If the value is not set or less than one, SearchClient will use the
// max amount of tokens supported by the model in use.
"MaxAskPromptSize": -1,
// Maximum number of relevant sources to consider when generating an answer.
// The value is also used as the max number of results returned by SearchAsync
// when passing a limit less than or equal to zero.
"MaxMatchesCount": 200,
// How many tokens to reserve for the answer generated by the LLM.
// E.g. if the LLM supports max 4000 tokens, and AnswerTokens is 300, then
// the prompt sent to the LLM will contain at most 3700 tokens, composed of
// prompt + question + grounding information retrieved from memory.
"AnswerTokens": 300,
// Text to return when the LLM cannot produce an answer.
"EmptyAnswer": "INFO NOT FOUND"
}
}
}
}