Merge branch 'main' into feat/deepseek-chat-node
nguyenhung10012003 authored Dec 21, 2024
2 parents 0de4ac8 + e8a33e4 commit 098d2a7
Showing 15 changed files with 34,309 additions and 33,900 deletions.
packages/components/credentials/ComposioApi.credential.ts (23 additions, 0 deletions)
@@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class ComposioApi implements INodeCredential {
label: string
name: string
version: number
inputs: INodeParams[]

constructor() {
this.label = 'Composio API'
this.name = 'composioApi'
this.version = 1.0
this.inputs = [
{
label: 'Composio API Key',
name: 'composioApi',
type: 'password'
}
]
}
}

module.exports = { credClass: ComposioApi }
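
For context, a node consumes a credential like this one through the credential helpers that also appear later in this diff. A minimal sketch of hypothetical consumer code, assuming the standard getCredentialData/getCredentialParam helpers from packages/components/src and a node that declares credentialNames: ['composioApi']:

// Hypothetical consumer code, not part of this commit.
// Resolve the credential stored for this node instance.
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
// 'composioApi' matches the input name declared in ComposioApi.credential.ts above.
const composioApiKey = getCredentialParam('composioApi', credentialData, nodeData)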
@@ -23,6 +23,10 @@ class ChatVertexAI extends LcChatVertexAI implements IVisionChatModal {

constructor(id: string, fields?: ChatVertexAIInput) {
// @ts-ignore
if (fields?.model) {
fields.modelName = fields.model
delete fields.model
}
super(fields ?? {})
this.id = id
this.configuredModel = fields?.modelName || ''
@@ -61,7 +65,7 @@ class GoogleVertexAI_ChatModels implements INode {
constructor() {
this.label = 'ChatGoogleVertexAI'
this.name = 'chatGoogleVertexAI'
this.version = 5.0
this.version = 5.1
this.type = 'ChatGoogleVertexAI'
this.icon = 'GoogleVertex.svg'
this.category = 'Chat Models'
@@ -89,6 +93,14 @@
type: 'asyncOptions',
loadMethod: 'listModels'
},
{
label: 'Custom Model Name',
name: 'customModelName',
type: 'string',
placeholder: 'gemini-1.5-pro-exp-0801',
description: 'Custom model name to use. If provided, it will override the model selected',
additionalParams: true
},
{
label: 'Temperature',
name: 'temperature',
@@ -163,7 +175,6 @@
throw new Error(
'Error: More than one component has been inputted. Please use only one of the following: Google Application Credential File Path or Google Credential JSON Object'
)

if (googleApplicationCredentialFilePath && !googleApplicationCredential)
authOptions.keyFile = googleApplicationCredentialFilePath
else if (!googleApplicationCredentialFilePath && googleApplicationCredential)
@@ -174,6 +185,7 @@

const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const customModelName = nodeData.inputs?.customModelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const cache = nodeData.inputs?.cache as BaseCache
@@ -190,11 +202,10 @@

const obj: ChatVertexAIInput = {
temperature: parseFloat(temperature),
model: modelName,
modelName: customModelName || modelName,
streaming: streaming ?? true
}
if (Object.keys(authOptions).length !== 0) obj.authOptions = authOptions

if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache
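Taken together, these Vertex AI changes add a free-text Custom Model Name that takes precedence over the dropdown selection, while the constructor shim remaps a legacy model field onto modelName before calling the LangChain base class. A minimal sketch of the precedence, using hypothetical values (the dropdown value below is illustrative only):

// Hypothetical values, shown only to illustrate the override.
const modelName = 'gemini-1.5-pro'                // selected from the async model list
const customModelName = 'gemini-1.5-pro-exp-0801' // optional free-text override
const obj: ChatVertexAIInput = {
    temperature: 0.7,
    modelName: customModelName || modelName, // the custom name wins when provided
    streaming: true
}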
@@ -27,7 +27,7 @@ class ChatIBMWatsonx_ChatModels implements INode {
constructor() {
this.label = 'ChatIBMWatsonx'
this.name = 'chatIBMWatsonx'
this.version = 1.0
this.version = 2.0
this.type = 'ChatIBMWatsonx'
this.icon = 'ibm.png'
this.category = 'Chat Models'
@@ -75,6 +75,59 @@ class ChatIBMWatsonx_ChatModels implements INode {
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Frequency Penalty',
name: 'frequencyPenalty',
type: 'number',
step: 1,
optional: true,
additionalParams: true,
description:
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
},
{
label: 'Log Probs',
name: 'logprobs',
type: 'boolean',
default: false,
optional: true,
additionalParams: true,
description:
'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.'
},
{
label: 'N',
name: 'n',
type: 'number',
step: 1,
default: 1,
optional: true,
additionalParams: true,
description:
'How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.'
},
{
label: 'Presence Penalty',
name: 'presencePenalty',
type: 'number',
step: 1,
default: 1,
optional: true,
additionalParams: true,
description:
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
},
{
label: 'Top P',
name: 'topP',
type: 'number',
step: 0.1,
default: 0.1,
optional: true,
additionalParams: true,
description:
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.'
}
]
}
@@ -84,6 +137,11 @@ class ChatIBMWatsonx_ChatModels implements INode {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const logprobs = nodeData.inputs?.logprobs as boolean
const n = nodeData.inputs?.n as string
const presencePenalty = nodeData.inputs?.presencePenalty as string
const topP = nodeData.inputs?.topP as string
const streaming = nodeData.inputs?.streaming as boolean

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
@@ -111,6 +169,11 @@
}
if (cache) obj.cache = cache
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (frequencyPenalty) obj.frequencyPenalty = parseInt(frequencyPenalty, 10)
if (logprobs) obj.logprobs = logprobs
if (n) obj.n = parseInt(n, 10)
if (presencePenalty) obj.presencePenalty = parseInt(presencePenalty, 10)
if (topP) obj.topP = parseFloat(topP)

const model = new ChatWatsonx(obj)
return model
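With the new sampling inputs wired through, the options object handed to ChatWatsonx ends up shaped roughly as below. A sketch with hypothetical values; note that topP is parsed as a float while both penalties are parsed as integers, matching their step: 1 inputs:

// Hypothetical values, shown only to illustrate the assembled options.
// Auth and model fields resolved from the credential are omitted here.
const obj = {
    temperature: 0.7,
    maxTokens: 512,
    frequencyPenalty: 1, // parseInt of the input string
    logprobs: true,
    n: 1,                // number of completion choices per input message
    presencePenalty: 1,
    topP: 0.1            // parseFloat of the input string
}
const model = new ChatWatsonx(obj)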
@@ -6,6 +6,7 @@ import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchai
import { BaseMessage } from '@langchain/core/messages'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import {
ConversationHistorySelection,
ICommonObject,
IDatabaseEntity,
INode,
@@ -23,6 +24,7 @@
customGet,
getVM,
transformObjectPropertyToFunction,
filterConversationHistory,
restructureMessages
} from '../commonUtils'
import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
@@ -149,7 +151,7 @@ class ConditionAgent_SeqAgents implements INode {
constructor() {
this.label = 'Condition Agent'
this.name = 'seqConditionAgent'
this.version = 2.0
this.version = 3.0
this.type = 'ConditionAgent'
this.icon = 'condition.svg'
this.category = 'Sequential Agents'
@@ -185,6 +187,42 @@ class ConditionAgent_SeqAgents implements INode {
additionalParams: true,
optional: true
},
{
label: 'Conversation History',
name: 'conversationHistorySelection',
type: 'options',
options: [
{
label: 'User Question',
name: 'user_question',
description: 'Use the user question from the historical conversation messages as input.'
},
{
label: 'Last Conversation Message',
name: 'last_message',
description: 'Use the last conversation message from the historical conversation messages as input.'
},
{
label: 'All Conversation Messages',
name: 'all_messages',
description: 'Use all conversation messages from the historical conversation messages as input.'
},
{
label: 'Empty',
name: 'empty',
description:
'Do not use any messages from the conversation history. ' +
'Ensure to use either System Prompt, Human Prompt, or Messages History.'
}
],
default: 'all_messages',
optional: true,
description:
'Select which messages from the conversation history to include in the prompt. ' +
'The selected messages will be inserted between the System Prompt (if defined) and ' +
'Human Prompt.',
additionalParams: true
},
{
label: 'Human Prompt',
name: 'humanMessagePrompt',
@@ -481,6 +519,9 @@ const runCondition = async (
})
}

const historySelection = (nodeData.inputs?.conversationHistorySelection || 'all_messages') as ConversationHistorySelection
// @ts-ignore
state.messages = filterConversationHistory(historySelection, input, state)
// @ts-ignore
state.messages = restructureMessages(model, state)

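Per the option descriptions above, the new Conversation History selection filters state.messages before restructureMessages prepares them for the model. An illustrative sketch of how each value could narrow the history; the exact return shapes are assumptions based only on the descriptions in this diff:

// Given state.messages = [human1, ai1, human2] and input = 'current question':
filterConversationHistory('user_question', input, state) // -> just the user question as input
filterConversationHistory('last_message', input, state)  // -> [human2]
filterConversationHistory('all_messages', input, state)  // -> [human1, ai1, human2] (default)
filterConversationHistory('empty', input, state)         // -> [] (rely on the prompts instead)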