Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
84 changes: 76 additions & 8 deletions src/main/presenter/configPresenter/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,12 @@ import {
} from '@shared/presenter'
import { ProviderBatchUpdate } from '@shared/provider-operations'
import { SearchEngineTemplate } from '@shared/chat'
import { ModelType } from '@shared/model'
import {
ModelType,
isNewApiEndpointType,
resolveNewApiCapabilityProviderId,
type NewApiEndpointType
} from '@shared/model'
import {
DEFAULT_MODEL_CAPABILITY_FALLBACKS,
resolveModelContextLength,
Expand Down Expand Up @@ -531,19 +536,70 @@ export class ConfigPresenter implements IConfigPresenter {
return providerDbLoader.refreshIfNeeded(force)
}

/**
 * Resolve the NewAPI endpoint type to use for capability lookups of a model.
 *
 * Resolution order:
 *   1. explicit per-model config override (`endpointType`),
 *   2. endpoint metadata stored on the provider's standard/custom model entry,
 *   3. `'image-generation'` for image models, otherwise `'openai'`.
 *
 * @param modelId    id of the model being resolved
 * @param providerId provider bucket to read config/model metadata from;
 *                   defaults to 'new-api' for backward compatibility, but
 *                   callers should pass the actual provider id so cloned or
 *                   custom NewAPI providers read their own stores
 */
private resolveNewApiCapabilityEndpointType(
  modelId: string,
  providerId: string = 'new-api'
): NewApiEndpointType {
  // 1. Explicit per-model override wins.
  const modelConfig = this.getModelConfig(modelId, providerId)
  if (isNewApiEndpointType(modelConfig.endpointType)) {
    return modelConfig.endpointType
  }

  // 2. Fall back to metadata stored on the provider's model entry
  //    (standard list first, then user-added custom models).
  const storedModel =
    this.getProviderModels(providerId).find((model) => model.id === modelId) ??
    this.getCustomModels(providerId).find((model) => model.id === modelId)

  if (storedModel) {
    if (isNewApiEndpointType(storedModel.endpointType)) {
      return storedModel.endpointType
    }

    const supportedEndpointTypes =
      storedModel.supportedEndpointTypes?.filter(isNewApiEndpointType) ?? []
    // Prefer image-generation when the model is an image model and
    // explicitly advertises that endpoint.
    if (
      storedModel.type === ModelType.ImageGeneration &&
      supportedEndpointTypes.includes('image-generation')
    ) {
      return 'image-generation'
    }
    if (supportedEndpointTypes.length > 0) {
      return supportedEndpointTypes[0]
    }
    if (storedModel.type === ModelType.ImageGeneration) {
      return 'image-generation'
    }
  }

  // 3. Safe default: treat unknown models as OpenAI-compatible chat.
  return 'openai'
}

/**
 * Map a provider id to the provider id used for capability lookups.
 *
 * Non-NewAPI providers are returned unchanged. For NewAPI providers the
 * capability provider id is derived from the model's endpoint type.
 *
 * NOTE: we match on the provider's `apiType`, not the literal id, so that
 * cloned/custom providers configured with apiType 'new-api' are also routed
 * through endpoint-type resolution (and read their own config buckets)
 * instead of falling back to the raw custom id.
 */
private resolveCapabilityProviderId(providerId: string, modelId: string): string {
  const provider = this.getProviderById(providerId)
  // Fall back to the raw id when the provider record is missing so the
  // built-in 'new-api' id keeps working even before the provider is saved.
  const apiType = (provider?.apiType ?? providerId).trim().toLowerCase()
  if (apiType !== 'new-api') {
    return providerId
  }

  return resolveNewApiCapabilityProviderId(
    this.resolveNewApiCapabilityEndpointType(modelId, providerId)
  )
}
Comment on lines +539 to +579
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Avoid hard-coding 'new-api' in capability resolution.

These helpers only activate for a literal provider id of 'new-api' and they also read config/model metadata from the 'new-api' bucket. The renderer already treats any provider whose apiType is 'new-api' as a NewAPI provider, so cloned/custom NewAPI providers will fall back to the raw custom id here and resolve capabilities from the wrong config store. Reasoning/verbosity support will be wrong for those providers.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/main/presenter/configPresenter/index.ts` around lines 539 - 579, The code
currently hard-codes the literal string 'new-api' when deciding NewAPI behavior
in resolveNewApiCapabilityEndpointType and resolveCapabilityProviderId; instead
look up the provider's apiType for the given providerId and use that apiType
when calling getModelConfig/getProviderModels/getCustomModels and when checking
whether to run NewAPI logic. Concretely: in resolveCapabilityProviderId, query
the provider object (e.g., this.getProvider(providerId) or equivalent) to get
providerApiType; return providerId early if providerApiType !== 'new-api';
otherwise call resolveNewApiCapabilityEndpointType with the resolved
providerApiType (or modify resolveNewApiCapabilityEndpointType to fetch
providerApiType internally) and replace all hard-coded 'new-api' bucket
references in resolveNewApiCapabilityEndpointType with the providerApiType
variable so cloned/custom providers with apiType 'new-api' are handled correctly
while preserving fallback behavior and the call to
resolveNewApiCapabilityProviderId.


supportsReasoningCapability(providerId: string, modelId: string): boolean {
return modelCapabilities.supportsReasoning(providerId, modelId)
return modelCapabilities.supportsReasoning(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

getReasoningPortrait(providerId: string, modelId: string): ReasoningPortrait | null {
return modelCapabilities.getReasoningPortrait(providerId, modelId)
return modelCapabilities.getReasoningPortrait(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

getThinkingBudgetRange(
providerId: string,
modelId: string
): { min?: number; max?: number; default?: number } {
return modelCapabilities.getThinkingBudgetRange(providerId, modelId)
return modelCapabilities.getThinkingBudgetRange(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

supportsSearchCapability(providerId: string, modelId: string): boolean {
Expand All @@ -558,22 +614,34 @@ export class ConfigPresenter implements IConfigPresenter {
}

supportsReasoningEffortCapability(providerId: string, modelId: string): boolean {
return modelCapabilities.supportsReasoningEffort(providerId, modelId)
return modelCapabilities.supportsReasoningEffort(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

getReasoningEffortDefault(
providerId: string,
modelId: string
): 'minimal' | 'low' | 'medium' | 'high' | undefined {
return modelCapabilities.getReasoningEffortDefault(providerId, modelId)
return modelCapabilities.getReasoningEffortDefault(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

supportsVerbosityCapability(providerId: string, modelId: string): boolean {
return modelCapabilities.supportsVerbosity(providerId, modelId)
return modelCapabilities.supportsVerbosity(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

getVerbosityDefault(providerId: string, modelId: string): 'low' | 'medium' | 'high' | undefined {
return modelCapabilities.getVerbosityDefault(providerId, modelId)
return modelCapabilities.getVerbosityDefault(
this.resolveCapabilityProviderId(providerId, modelId),
modelId
)
}

private migrateConfigData(oldVersion: string | undefined): void {
Expand Down
6 changes: 5 additions & 1 deletion src/main/presenter/configPresenter/modelConfig.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import { ApiEndpointType, ModelType } from '@shared/model'
import { ApiEndpointType, ModelType, isNewApiEndpointType } from '@shared/model'
import { IModelConfig, ModelConfig, ModelConfigSource } from '@shared/presenter'
import {
DEFAULT_MODEL_CAPABILITY_FALLBACKS,
Expand Down Expand Up @@ -453,6 +453,7 @@ export class ModelConfigHelper {
temperature: 0.6,
type: ModelType.Chat,
apiEndpoint: ApiEndpointType.Chat,
endpointType: undefined,
thinkingBudget: undefined,
forceInterleavedThinkingCompat: undefined,
reasoningEffort: undefined,
Expand All @@ -476,6 +477,9 @@ export class ModelConfigHelper {
maxCompletionTokens: storedConfig.maxCompletionTokens ?? finalConfig.maxCompletionTokens,
conversationId: storedConfig.conversationId ?? finalConfig.conversationId,
apiEndpoint: storedConfig.apiEndpoint ?? finalConfig.apiEndpoint,
endpointType: isNewApiEndpointType(storedConfig.endpointType)
? storedConfig.endpointType
: finalConfig.endpointType,
enableSearch: storedConfig.enableSearch ?? finalConfig.enableSearch,
forcedSearch: storedConfig.forcedSearch ?? finalConfig.forcedSearch,
searchStrategy: storedConfig.searchStrategy ?? finalConfig.searchStrategy,
Expand Down
3 changes: 3 additions & 0 deletions src/main/presenter/configPresenter/providerModelHelper.ts
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ export class ProviderModelHelper {
model.reasoning =
model.reasoning !== undefined ? model.reasoning : config.reasoning || false
model.type = model.type !== undefined ? model.type : config.type || ModelType.Chat
model.endpointType = config.endpointType ?? model.endpointType
} else {
model.vision = model.vision || false
model.functionCall = model.functionCall || false
Expand Down Expand Up @@ -153,10 +154,12 @@ export class ProviderModelHelper {
const store = this.getProviderModelStore(providerId)
const customModels = (store.get('custom_models') || []) as MODEL_META[]
return customModels.map((model) => {
const config = this.getModelConfig(model.id, providerId)
model.vision = model.vision !== undefined ? model.vision : false
model.functionCall = model.functionCall !== undefined ? model.functionCall : false
model.reasoning = model.reasoning !== undefined ? model.reasoning : false
model.type = model.type || ModelType.Chat
model.endpointType = config?.endpointType ?? model.endpointType
return model
})
}
Expand Down
15 changes: 15 additions & 0 deletions src/main/presenter/configPresenter/providers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -202,6 +202,21 @@ export const DEFAULT_PROVIDERS: LLM_PROVIDER_BASE[] = [
defaultBaseUrl: 'https://open.cherryin.ai/v1'
}
},
{
id: 'new-api',
name: 'New API',
apiType: 'new-api',
apiKey: '',
baseUrl: 'https://www.newapi.ai',
enable: false,
websites: {
official: 'https://www.newapi.ai/',
apiKey: 'https://www.newapi.ai/token',
docs: 'https://www.newapi.ai/zh/docs/api',
models: 'https://www.newapi.ai/zh/docs/api',
defaultBaseUrl: 'https://www.newapi.ai'
}
},
{
id: 'openai',
name: 'OpenAI',
Expand Down
4 changes: 4 additions & 0 deletions src/main/presenter/llmProviderPresenter/baseProvider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,10 @@ export abstract class BaseLLMProvider {
return BaseLLMProvider.DEFAULT_MODEL_FETCH_TIMEOUT
}

/**
 * Id under which this provider's model capabilities are looked up.
 *
 * Prefers an explicitly configured `capabilityProviderId`; otherwise falls
 * back to the provider's own id. Uses `||` deliberately so an empty string
 * also falls back (not `??`, which would keep the empty string).
 */
protected getCapabilityProviderId(): string {
  const { capabilityProviderId, id } = this.provider
  return capabilityProviderId || id
}

/**
* Load cached model data from configuration
* Called in constructor to avoid needing to re-fetch model lists every time
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,12 +42,14 @@ export class ModelManager {
model.functionCall = config.functionCall
model.reasoning = config.reasoning
model.type = config.type
model.endpointType = config.endpointType ?? model.endpointType
} else {
model.vision = model.vision !== undefined ? model.vision : config.vision
model.functionCall =
model.functionCall !== undefined ? model.functionCall : config.functionCall
model.reasoning = model.reasoning !== undefined ? model.reasoning : config.reasoning
model.type = model.type || config.type
model.endpointType = model.endpointType ?? config.endpointType
}

return model
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ import { JiekouProvider } from '../providers/jiekouProvider'
import { ZenmuxProvider } from '../providers/zenmuxProvider'
import { O3fanProvider } from '../providers/o3fanProvider'
import { VoiceAIProvider } from '../providers/voiceAIProvider'
import { NewApiProvider } from '../providers/newApiProvider'
import { RateLimitManager } from './rateLimitManager'
import { StreamState } from '../types'
import { AcpSessionPersistence } from '../acp'
Expand Down Expand Up @@ -90,6 +91,7 @@ export class ProviderInstanceManager {
['voiceai', VoiceAIProvider],
['openai-responses', OpenAIResponsesProvider],
['cherryin', CherryInProvider],
['new-api', NewApiProvider],
['lmstudio', LMStudioProvider],
['together', TogetherProvider],
['groq', GroqProvider],
Expand Down Expand Up @@ -124,6 +126,7 @@ export class ProviderInstanceManager {
['voiceai', VoiceAIProvider],
['openai-compatible', OpenAICompatibleProvider],
['openai-responses', OpenAIResponsesProvider],
['new-api', NewApiProvider],
['lmstudio', LMStudioProvider],
['together', TogetherProvider],
['groq', GroqProvider],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,10 @@ export class GeminiProvider extends BaseLLMProvider {
// 判断模型是否支持 thinkingBudget
private supportsThinkingBudget(modelId: string): boolean {
const normalized = modelId.replace(/^models\//i, '')
const range = modelCapabilities.getThinkingBudgetRange(this.provider.id, normalized)
const range = modelCapabilities.getThinkingBudgetRange(
this.getCapabilityProviderId(),
normalized
)
return (
typeof range.default === 'number' ||
typeof range.min === 'number' ||
Expand Down
Loading
Loading