diff --git a/LICENSE.md b/LICENSE.md
index 0f4afcd1189..80800001864 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -2,22 +2,6 @@
Version 2.0, January 2004
http://www.apache.org/licenses/
-Flowise is governed by the Apache License 2.0, with additional terms and conditions outlined below:
-
-Flowise can be used for commercial purposes for "backend-as-a-service" for your applications or as a development platform for enterprises. However, under specific conditions, you must reach out to the project's administrators to secure a commercial license:
-
-a. Multi-tenant SaaS service: Unless you have explicit written authorization from Flowise, you may not utilize the Flowise source code to operate a multi-tenant SaaS service that closely resembles the Flowise cloud-based services.
-b. Logo and copyright information: While using Flowise in commercial application, you are prohibited from removing or altering the LOGO or copyright information displayed in the Flowise console and UI.
-
-For inquiries regarding licensing matters, please contact hello@flowiseai.com via email.
-
-Contributors are required to consent to the following terms related to their contributed code:
-
-a. The project maintainers have the authority to modify the open-source agreement to be more stringent or lenient.
-b. Contributed code can be used for commercial purposes, including Flowise's cloud-based services.
-
-All other rights and restrictions are in accordance with the Apache License 2.0.
-
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
diff --git a/README-ZH.md b/README-ZH.md
index 2805ef9bcab..8750ebc7fc3 100644
--- a/README-ZH.md
+++ b/README-ZH.md
@@ -145,25 +145,40 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
## 🌐 自托管
-### [Railway](https://docs.flowiseai.com/deployment/railway)
+在您现有的基础设施中部署自托管的 Flowise,我们支持各种[部署](https://docs.flowiseai.com/configuration/deployment)
-[![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+- [AWS](https://docs.flowiseai.com/deployment/aws)
+- [Azure](https://docs.flowiseai.com/deployment/azure)
+- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+- [GCP](https://docs.flowiseai.com/deployment/gcp)
+- 其他
-### [Render](https://docs.flowiseai.com/deployment/render)
+ - [Railway](https://docs.flowiseai.com/deployment/railway)
-[![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
+ [![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+ - [Render](https://docs.flowiseai.com/deployment/render)
-
+ [![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
-### [AWS](https://docs.flowiseai.com/deployment/aws)
+ - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-### [Azure](https://docs.flowiseai.com/deployment/azure)
+
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
+ - [Elestio](https://elest.io/open-source/flowiseai)
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+ [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
+
+ - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+ [![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+ - [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+ [![部署到 RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
+
+
## 💻 云托管
diff --git a/README.md b/README.md
index 25026237f76..3e6b7e5610c 100644
--- a/README.md
+++ b/README.md
@@ -145,29 +145,40 @@ Flowise support different environment variables to configure your instance. You
## 🌐 Self Host
-### [Railway](https://docs.flowiseai.com/deployment/railway)
+Deploy Flowise self-hosted in your existing infrastructure; we support various [deployments](https://docs.flowiseai.com/configuration/deployment)
-[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+- [AWS](https://docs.flowiseai.com/deployment/aws)
+- [Azure](https://docs.flowiseai.com/deployment/azure)
+- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+- [GCP](https://docs.flowiseai.com/deployment/gcp)
+- Others
-### [Render](https://docs.flowiseai.com/deployment/render)
+ - [Railway](https://docs.flowiseai.com/deployment/railway)
-[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
+ [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-### [Elestio](https://elest.io/open-source/flowiseai)
+ - [Render](https://docs.flowiseai.com/deployment/render)
-[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
+ [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+ - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-
+
-### [AWS](https://docs.flowiseai.com/deployment/aws)
+ - [Elestio](https://elest.io/open-source/flowiseai)
-### [Azure](https://docs.flowiseai.com/deployment/azure)
+ [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
+ - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+ [![Deploy on Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+ - [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+ [![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
+
+
## 💻 Cloud Hosted
diff --git a/docker/README.md b/docker/README.md
index d3ad1c19708..11b29cf3827 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,6 +1,6 @@
# Flowise Docker Hub Image
-Starts Flowise from [DockerHub Image](https://hub.docker.com/repository/docker/flowiseai/flowise/general)
+Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise)
## Usage
diff --git a/package.json b/package.json
index 5ecbb59b200..5a9bfcbf3d5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "1.4.8",
+ "version": "1.4.9",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
diff --git a/packages/components/credentials/AstraApi.credential.ts b/packages/components/credentials/AstraApi.credential.ts
new file mode 100644
index 00000000000..a89a259f52f
--- /dev/null
+++ b/packages/components/credentials/AstraApi.credential.ts
@@ -0,0 +1,34 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class AstraDBApi implements INodeCredential {
+ label: string
+ name: string
+ version: number
+ description: string
+ inputs: INodeParams[]
+
+ constructor() {
+ this.label = 'Astra DB API'
+ this.name = 'AstraDBApi'
+ this.version = 1.0
+ this.inputs = [
+ {
+ label: 'Astra DB Collection Name',
+ name: 'collectionName',
+ type: 'string'
+ },
+ {
+ label: 'Astra DB Application Token',
+ name: 'applicationToken',
+ type: 'password'
+ },
+ {
+ label: 'Astra DB Api Endpoint',
+ name: 'dbEndPoint',
+ type: 'string'
+ }
+ ]
+ }
+}
+
+module.exports = { credClass: AstraDBApi }
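
Note: a consuming node resolves these credential fields at runtime via the `getCredentialData` / `getCredentialParam` helpers that appear later in this diff (see OpenAIAssistant.ts). A minimal sketch, assuming it runs inside a node's async `init(nodeData, input, options)`:

```ts
// Sketch only: reading the Astra DB credential from inside a node.
// nodeData and options are the usual INodeData / ICommonObject arguments.
import { getCredentialData, getCredentialParam } from '../../../src/utils'

const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const collectionName = getCredentialParam('collectionName', credentialData, nodeData)
const applicationToken = getCredentialParam('applicationToken', credentialData, nodeData)
const dbEndPoint = getCredentialParam('dbEndPoint', credentialData, nodeData)
```
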
diff --git a/packages/components/credentials/ZapierNLAApi.credential.ts b/packages/components/credentials/LocalAIApi.credential.ts
similarity index 51%
rename from packages/components/credentials/ZapierNLAApi.credential.ts
rename to packages/components/credentials/LocalAIApi.credential.ts
index 72035660eca..4aafe040dc3 100644
--- a/packages/components/credentials/ZapierNLAApi.credential.ts
+++ b/packages/components/credentials/LocalAIApi.credential.ts
@@ -1,24 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'
-class ZapierNLAApi implements INodeCredential {
+class LocalAIApi implements INodeCredential {
label: string
name: string
version: number
- description: string
inputs: INodeParams[]
constructor() {
- this.label = 'Zapier NLA API'
- this.name = 'zapierNLAApi'
+ this.label = 'LocalAI API'
+ this.name = 'localAIApi'
this.version = 1.0
this.inputs = [
{
- label: 'Zapier NLA Api Key',
- name: 'zapierNLAApiKey',
+ label: 'LocalAI Api Key',
+ name: 'localAIApiKey',
type: 'password'
}
]
}
}
-module.exports = { credClass: ZapierNLAApi }
+module.exports = { credClass: LocalAIApi }
diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
index 8a2329b584d..7f857b1caad 100644
--- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts
@@ -1,11 +1,14 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents'
import { Tool } from 'langchain/tools'
-import { BaseChatMemory } from 'langchain/memory'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { BaseChatModel } from 'langchain/chat_models/base'
import { flatten } from 'lodash'
-import { additionalCallbacks } from '../../../src/handler'
+import { AgentStep, BaseMessage, ChainValues, AIMessage, HumanMessage } from 'langchain/schema'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { getBaseClasses } from '../../../src/utils'
+import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { AgentExecutor } from '../../../src/agents'
+import { ChatConversationalAgent } from 'langchain/agents'
+import { renderTemplate } from '@langchain/core/prompts'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@@ -15,6 +18,15 @@ Assistant is constantly learning and improving, and its capabilities are constan
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.`
+const TEMPLATE_TOOL_RESPONSE = `TOOL RESPONSE:
+---------------------
+{observation}
+
+USER'S INPUT
+--------------------
+
+Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.`
+
class ConversationalAgent_Agents implements INode {
label: string
name: string
@@ -25,8 +37,9 @@ class ConversationalAgent_Agents implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ sessionId?: string
- constructor() {
+ constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Agent'
this.name = 'conversationalAgent'
this.version = 2.0
@@ -43,7 +56,7 @@ class ConversationalAgent_Agents implements INode {
list: true
},
{
- label: 'Language Model',
+ label: 'Chat Model',
name: 'model',
type: 'BaseChatModel'
},
@@ -62,52 +75,114 @@ class ConversationalAgent_Agents implements INode {
additionalParams: true
}
]
+ this.sessionId = fields?.sessionId
}
- async init(nodeData: INodeData): Promise<any> {
- const model = nodeData.inputs?.model as BaseChatModel
- let tools = nodeData.inputs?.tools as Tool[]
- tools = flatten(tools)
- const memory = nodeData.inputs?.memory as BaseChatMemory
- const systemMessage = nodeData.inputs?.systemMessage as string
+ async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+ return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+ }
- const obj: InitializeAgentExecutorOptions = {
- agentType: 'chat-conversational-react-description',
- verbose: process.env.DEBUG === 'true' ? true : false
- }
+ async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
- const agentArgs: any = {}
- if (systemMessage) {
- agentArgs.systemMessage = systemMessage
- }
+ const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const callbacks = await additionalCallbacks(nodeData, options)
- if (Object.keys(agentArgs).length) obj.agentArgs = agentArgs
+ let res: ChainValues = {}
- const executor = await initializeAgentExecutorWithOptions(tools, model, obj)
- executor.memory = memory
- return executor
- }
-
- async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
- const executor = nodeData.instance as AgentExecutor
- const memory = nodeData.inputs?.memory as BaseChatMemory
-
- if (options && options.chatHistory) {
- const chatHistoryClassName = memory.chatHistory.constructor.name
- // Only replace when its In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- memory.chatHistory = mapChatHistory(options)
- executor.memory = memory
- }
+ if (options.socketIO && options.socketIOClientId) {
+ const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
+ res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
+ } else {
+ res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
}
- ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
+ await memory.addChatMessages(
+ [
+ {
+ text: input,
+ type: 'userMessage'
+ },
+ {
+ text: res?.output,
+ type: 'apiMessage'
+ }
+ ],
+ this.sessionId
+ )
+
+ return res?.output
+ }
+}
- const callbacks = await additionalCallbacks(nodeData, options)
+const prepareAgent = async (
+ nodeData: INodeData,
+ flowObj: { sessionId?: string; chatId?: string; input?: string },
+ chatHistory: IMessage[] = []
+) => {
+ const model = nodeData.inputs?.model as BaseChatModel
+ let tools = nodeData.inputs?.tools as Tool[]
+ tools = flatten(tools)
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const systemMessage = nodeData.inputs?.systemMessage as string
+ const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
+ const inputKey = memory.inputKey ? memory.inputKey : 'input'
+
+ /** Bind a stop token to the model */
+ const modelWithStop = model.bind({
+ stop: ['\nObservation']
+ })
+
+ const outputParser = ChatConversationalAgent.getDefaultOutputParser({
+ llm: model,
+ toolNames: tools.map((tool) => tool.name)
+ })
+
+ const prompt = ChatConversationalAgent.createPrompt(tools, {
+ systemMessage: systemMessage ? systemMessage : DEFAULT_PREFIX,
+ outputParser
+ })
+
+ const runnableAgent = RunnableSequence.from([
+ {
+ [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
+ agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps),
+ [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
+ const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
+ return messages ?? []
+ }
+ },
+ prompt,
+ modelWithStop,
+ outputParser
+ ])
+
+ const executor = AgentExecutor.fromAgentAndTools({
+ agent: runnableAgent,
+ tools,
+ sessionId: flowObj?.sessionId,
+ chatId: flowObj?.chatId,
+ input: flowObj?.input,
+ verbose: process.env.DEBUG === 'true' ? true : false
+ })
+
+ return executor
+}
- const result = await executor.call({ input }, [...callbacks])
- return result?.output
+const constructScratchPad = async (steps: AgentStep[]): Promise<BaseMessage[]> => {
+ const thoughts: BaseMessage[] = []
+ for (const step of steps) {
+ thoughts.push(new AIMessage(step.action.log))
+ thoughts.push(
+ new HumanMessage(
+ renderTemplate(TEMPLATE_TOOL_RESPONSE, 'f-string', {
+ observation: step.observation
+ })
+ )
+ )
}
+ return thoughts
}
module.exports = { nodeClass: ConversationalAgent_Agents }
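
Note on the scratchpad rebuild above: for each completed tool step, `constructScratchPad` emits the agent's own action log as an `AIMessage`, then a `HumanMessage` rendered from `TEMPLATE_TOOL_RESPONSE`, so the tool observation re-enters the conversation as if the user had pasted it. A sketch, assuming the definitions in this file:

```ts
import { AgentStep } from 'langchain/schema'

// One completed step: the agent asked for a calculator call and got '4' back
const steps: AgentStep[] = [
    {
        action: { tool: 'calculator', toolInput: '2+2', log: '{"action": "calculator", "action_input": "2+2"}' },
        observation: '4'
    }
]
const scratchpad = await constructScratchPad(steps)
// scratchpad[0]: AIMessage carrying the agent's previous JSON action blob
// scratchpad[1]: HumanMessage with TEMPLATE_TOOL_RESPONSE, {observation} rendered to '4'
```
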
diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
index 643c6a658db..406a156ffe6 100644
--- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
+++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
@@ -1,9 +1,14 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
import { flatten } from 'lodash'
-import { BaseChatMemory } from 'langchain/memory'
+import { ChatOpenAI } from 'langchain/chat_models/openai'
+import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
+import { formatToOpenAIFunction } from 'langchain/tools'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
+import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`
@@ -17,8 +22,9 @@ class ConversationalRetrievalAgent_Agents implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ sessionId?: string
- constructor() {
+ constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval Agent'
this.name = 'conversationalRetrievalAgent'
this.version = 3.0
@@ -54,55 +60,96 @@ class ConversationalRetrievalAgent_Agents implements INode {
additionalParams: true
}
]
+ this.sessionId = fields?.sessionId
}
- async init(nodeData: INodeData): Promise<any> {
- const model = nodeData.inputs?.model
- const memory = nodeData.inputs?.memory as BaseChatMemory
- const systemMessage = nodeData.inputs?.systemMessage as string
-
- let tools = nodeData.inputs?.tools
- tools = flatten(tools)
-
- const executor = await initializeAgentExecutorWithOptions(tools, model, {
- agentType: 'openai-functions',
- verbose: process.env.DEBUG === 'true' ? true : false,
- agentArgs: {
- prefix: systemMessage ?? defaultMessage
- },
- returnIntermediateSteps: true
- })
- executor.memory = memory
- return executor
+ async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+ return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
- const executor = nodeData.instance as AgentExecutor
-
- if (executor.memory) {
- ;(executor.memory as any).memoryKey = 'chat_history'
- ;(executor.memory as any).outputKey = 'output'
- ;(executor.memory as any).returnMessages = true
-
- const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
- // Only replace when its In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- ;(executor.memory as any).chatHistory = mapChatHistory(options)
- }
- }
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
+ let res: ChainValues = {}
+
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
- const result = await executor.call({ input }, [loggerHandler, handler, ...callbacks])
- return result?.output
+ res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
} else {
- const result = await executor.call({ input }, [loggerHandler, ...callbacks])
- return result?.output
+ res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
}
+
+ await memory.addChatMessages(
+ [
+ {
+ text: input,
+ type: 'userMessage'
+ },
+ {
+ text: res?.output,
+ type: 'apiMessage'
+ }
+ ],
+ this.sessionId
+ )
+
+ return res?.output
}
}
+const prepareAgent = (
+ nodeData: INodeData,
+ flowObj: { sessionId?: string; chatId?: string; input?: string },
+ chatHistory: IMessage[] = []
+) => {
+ const model = nodeData.inputs?.model as ChatOpenAI
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const systemMessage = nodeData.inputs?.systemMessage as string
+ let tools = nodeData.inputs?.tools
+ tools = flatten(tools)
+ const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
+ const inputKey = memory.inputKey ? memory.inputKey : 'input'
+
+ const prompt = ChatPromptTemplate.fromMessages([
+ ['ai', systemMessage ? systemMessage : defaultMessage],
+ new MessagesPlaceholder(memoryKey),
+ ['human', `{${inputKey}}`],
+ new MessagesPlaceholder('agent_scratchpad')
+ ])
+
+ const modelWithFunctions = model.bind({
+ functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))]
+ })
+
+ const runnableAgent = RunnableSequence.from([
+ {
+ [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
+ agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
+ [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
+ const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
+ return messages ?? []
+ }
+ },
+ prompt,
+ modelWithFunctions,
+ new OpenAIFunctionsAgentOutputParser()
+ ])
+
+ const executor = AgentExecutor.fromAgentAndTools({
+ agent: runnableAgent,
+ tools,
+ sessionId: flowObj?.sessionId,
+ chatId: flowObj?.chatId,
+ input: flowObj?.input,
+ returnIntermediateSteps: true,
+ verbose: process.env.DEBUG === 'true' ? true : false
+ })
+
+ return executor
+}
+
module.exports = { nodeClass: ConversationalRetrievalAgent_Agents }
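
Note: the heart of `prepareAgent` above is the function-calling wiring: each tool is serialized to an OpenAI function schema and bound onto the chat model, and `OpenAIFunctionsAgentOutputParser` maps the returned `function_call` back into an `AgentAction` (or `AgentFinish` when the model answers directly). A standalone sketch of that binding, using LangChain's bundled Calculator as a stand-in tool:

```ts
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { formatToOpenAIFunction } from 'langchain/tools'
import { Calculator } from 'langchain/tools/calculator'

const model = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 })
const tools = [new Calculator()]

// Bind each tool's JSON schema so the model can emit a matching function_call
const modelWithFunctions = model.bind({
    functions: tools.map((tool) => formatToOpenAIFunction(tool))
})
```
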
diff --git a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
index cf69022ba91..62ecec5b03c 100644
--- a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
+++ b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts
@@ -96,45 +96,51 @@ class OpenAIAssistant_Agents implements INode {
return null
}
- //@ts-ignore
- memoryMethods = {
- async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
- const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
- const appDataSource = options.appDataSource as DataSource
- const databaseEntities = options.databaseEntities as IDatabaseEntity
- let sessionId = nodeData.inputs?.sessionId as string
+ async clearChatMessages(nodeData: INodeData, options: ICommonObject, sessionIdObj: { type: string; id: string }): Promise<void> {
+ const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
+ const appDataSource = options.appDataSource as DataSource
+ const databaseEntities = options.databaseEntities as IDatabaseEntity
- const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
- id: selectedAssistantId
- })
+ const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
+ id: selectedAssistantId
+ })
- if (!assistant) {
- options.logger.error(`Assistant ${selectedAssistantId} not found`)
- return
- }
+ if (!assistant) {
+ options.logger.error(`Assistant ${selectedAssistantId} not found`)
+ return
+ }
- if (!sessionId && options.chatId) {
- const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
- chatId: options.chatId
- })
- if (!chatmsg) {
- options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`)
- return
- }
- sessionId = chatmsg.sessionId
- }
+ if (!sessionIdObj) return
- const credentialData = await getCredentialData(assistant.credential ?? '', options)
- const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
- if (!openAIApiKey) {
- options.logger.error(`OpenAI ApiKey not found`)
+ let sessionId = ''
+ if (sessionIdObj.type === 'chatId') {
+ const chatId = sessionIdObj.id
+ const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
+ chatId
+ })
+ if (!chatmsg) {
+ options.logger.error(`Chat Message with Chat Id: ${chatId} not found`)
return
}
+ sessionId = chatmsg.sessionId
+ } else if (sessionIdObj.type === 'threadId') {
+ sessionId = sessionIdObj.id
+ }
- const openai = new OpenAI({ apiKey: openAIApiKey })
- options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+ const credentialData = await getCredentialData(assistant.credential ?? '', options)
+ const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+ if (!openAIApiKey) {
+ options.logger.error(`OpenAI ApiKey not found`)
+ return
+ }
+
+ const openai = new OpenAI({ apiKey: openAIApiKey })
+ options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+ try {
if (sessionId) await openai.beta.threads.del(sessionId)
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
+ } catch (e) {
+ throw new Error(e)
}
}
@@ -297,7 +303,11 @@ class OpenAIAssistant_Agents implements INode {
options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
try {
- const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, threadId)
+ const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
+ sessionId: threadId,
+ chatId: options.chatId,
+ input
+ })
await analyticHandlers.onToolEnd(toolIds, toolOutput)
submitToolOutputs.push({
tool_call_id: actions[i].toolCallId,
@@ -462,6 +472,7 @@ class OpenAIAssistant_Agents implements INode {
const imageRegex = /<img[^>]*\/>/g
let llmOutput = returnVal.replace(imageRegex, '')
llmOutput = llmOutput.replace('<br/>', '')
+
await analyticHandlers.onLLMEnd(llmIds, llmOutput)
await analyticHandlers.onChainEnd(parentIds, messageData, true)
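
Note: per the branch above, the refactored `clearChatMessages` accepts two `sessionIdObj` shapes (`node`, `nodeData`, and `options` assumed here):

```ts
// Resolve the OpenAI thread id from a stored chat message...
await node.clearChatMessages(nodeData, options, { type: 'chatId', id: 'chat-123' })
// ...or delete the OpenAI thread directly by its id
await node.clearChatMessages(nodeData, options, { type: 'threadId', id: 'thread_abc123' })
```
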
diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
index c0095cee16b..c21c887aaf2 100644
--- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
+++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
@@ -1,17 +1,14 @@
-import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { AgentExecutor as LCAgentExecutor, AgentExecutorInput } from 'langchain/agents'
-import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
-import { OutputParserException } from 'langchain/schema/output_parser'
-import { CallbackManagerForChainRun } from 'langchain/callbacks'
-import { formatToOpenAIFunction } from 'langchain/tools'
-import { ToolInputParsingException, Tool } from '@langchain/core/tools'
+import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema'
import { getBaseClasses } from '../../../src/utils'
import { flatten } from 'lodash'
import { RunnableSequence } from 'langchain/schema/runnable'
+import { formatToOpenAIFunction } from 'langchain/tools'
+import { ChatOpenAI } from 'langchain/chat_models/openai'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
-import { ChatOpenAI } from 'langchain/chat_models/openai'
import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser'
+import { AgentExecutor, formatAgentSteps } from '../../../src/agents'
class OpenAIFunctionAgent_Agents implements INode {
label: string
@@ -25,7 +22,7 @@ class OpenAIFunctionAgent_Agents implements INode {
inputs: INodeParams[]
sessionId?: string
- constructor(fields: { sessionId?: string }) {
+ constructor(fields?: { sessionId?: string }) {
this.label = 'OpenAI Function Agent'
this.name = 'openAIFunctionAgent'
this.version = 3.0
@@ -33,7 +30,7 @@ class OpenAIFunctionAgent_Agents implements INode {
this.category = 'Agents'
this.icon = 'function.svg'
this.description = `An agent that uses Function Calling to pick the tool and args to call`
- this.baseClasses = [this.type, ...getBaseClasses(LCAgentExecutor)]
+ this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
this.inputs = [
{
label: 'Allowed Tools',
@@ -63,19 +60,13 @@ class OpenAIFunctionAgent_Agents implements INode {
this.sessionId = fields?.sessionId
}
- async init(nodeData: INodeData): Promise<any> {
- const memory = nodeData.inputs?.memory as FlowiseMemory
-
- const executor = prepareAgent(nodeData, this.sessionId)
- if (memory) executor.memory = memory
-
- return executor
+ async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+ return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
-
- const executor = prepareAgent(nodeData, this.sessionId)
+ const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
@@ -107,17 +98,11 @@ class OpenAIFunctionAgent_Agents implements INode {
}
}
-const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
- steps.flatMap(({ action, observation }) => {
- if ('messageLog' in action && action.messageLog !== undefined) {
- const log = action.messageLog as BaseMessage[]
- return log.concat(new FunctionMessage(observation, action.tool))
- } else {
- return [new AIMessage(action.log)]
- }
- })
-
-const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
+const prepareAgent = (
+ nodeData: INodeData,
+ flowObj: { sessionId?: string; chatId?: string; input?: string },
+ chatHistory: IMessage[] = []
+) => {
const model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const systemMessage = nodeData.inputs?.systemMessage as string
@@ -127,7 +112,7 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
const inputKey = memory.inputKey ? memory.inputKey : 'input'
const prompt = ChatPromptTemplate.fromMessages([
- ['ai', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
+ ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
new MessagesPlaceholder(memoryKey),
['human', `{${inputKey}}`],
new MessagesPlaceholder('agent_scratchpad')
@@ -142,7 +127,7 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps),
[memoryKey]: async (_: { input: string; steps: AgentStep[] }) => {
- const messages = (await memory.getChatMessages(sessionId, true)) as BaseMessage[]
+ const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[]
return messages ?? []
}
},
@@ -154,231 +139,13 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
tools,
- sessionId
+ sessionId: flowObj?.sessionId,
+ chatId: flowObj?.chatId,
+ input: flowObj?.input,
+ verbose: process.env.DEBUG === 'true' ? true : false
})
return executor
}
-type AgentExecutorOutput = ChainValues
-
-class AgentExecutor extends LCAgentExecutor {
- sessionId?: string
-
- static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string }): AgentExecutor {
- const newInstance = new AgentExecutor(fields)
- if (fields.sessionId) newInstance.sessionId = fields.sessionId
- return newInstance
- }
-
- shouldContinueIteration(iterations: number): boolean {
- return this.maxIterations === undefined || iterations < this.maxIterations
- }
-
- async _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<AgentExecutorOutput> {
- const toolsByName = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t]))
-
- const steps: AgentStep[] = []
- let iterations = 0
-
- const getOutput = async (finishStep: AgentFinish): Promise<AgentExecutorOutput> => {
- const { returnValues } = finishStep
- const additional = await this.agent.prepareForOutput(returnValues, steps)
-
- if (this.returnIntermediateSteps) {
- return { ...returnValues, intermediateSteps: steps, ...additional }
- }
- await runManager?.handleAgentEnd(finishStep)
- return { ...returnValues, ...additional }
- }
-
- while (this.shouldContinueIteration(iterations)) {
- let output
- try {
- output = await this.agent.plan(steps, inputs, runManager?.getChild())
- } catch (e) {
- if (e instanceof OutputParserException) {
- let observation
- let text = e.message
- if (this.handleParsingErrors === true) {
- if (e.sendToLLM) {
- observation = e.observation
- text = e.llmOutput ?? ''
- } else {
- observation = 'Invalid or incomplete response'
- }
- } else if (typeof this.handleParsingErrors === 'string') {
- observation = this.handleParsingErrors
- } else if (typeof this.handleParsingErrors === 'function') {
- observation = this.handleParsingErrors(e)
- } else {
- throw e
- }
- output = {
- tool: '_Exception',
- toolInput: observation,
- log: text
- } as AgentAction
- } else {
- throw e
- }
- }
- // Check if the agent has finished
- if ('returnValues' in output) {
- return getOutput(output)
- }
-
- let actions: AgentAction[]
- if (Array.isArray(output)) {
- actions = output as AgentAction[]
- } else {
- actions = [output as AgentAction]
- }
-
- const newSteps = await Promise.all(
- actions.map(async (action) => {
- await runManager?.handleAgentAction(action)
- const tool = action.tool === '_Exception' ? new ExceptionTool() : toolsByName[action.tool?.toLowerCase()]
- let observation
- try {
- // here we need to override Tool call method to include sessionId as parameter
- observation = tool
- ? // @ts-ignore
- await tool.call(action.toolInput, runManager?.getChild(), undefined, this.sessionId)
- : `${action.tool} is not a valid tool, try another one.`
- } catch (e) {
- if (e instanceof ToolInputParsingException) {
- if (this.handleParsingErrors === true) {
- observation = 'Invalid or incomplete tool input. Please try again.'
- } else if (typeof this.handleParsingErrors === 'string') {
- observation = this.handleParsingErrors
- } else if (typeof this.handleParsingErrors === 'function') {
- observation = this.handleParsingErrors(e)
- } else {
- throw e
- }
- observation = await new ExceptionTool().call(observation, runManager?.getChild())
- return { action, observation: observation ?? '' }
- }
- }
- return { action, observation: observation ?? '' }
- })
- )
-
- steps.push(...newSteps)
-
- const lastStep = steps[steps.length - 1]
- const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()]
-
- if (lastTool?.returnDirect) {
- return getOutput({
- returnValues: { [this.agent.returnValues[0]]: lastStep.observation },
- log: ''
- })
- }
-
- iterations += 1
- }
-
- const finish = await this.agent.returnStoppedResponse(this.earlyStoppingMethod, steps, inputs)
-
- return getOutput(finish)
- }
-
- async _takeNextStep(
- nameToolMap: Record<string, Tool>,
- inputs: ChainValues,
- intermediateSteps: AgentStep[],
- runManager?: CallbackManagerForChainRun
- ): Promise<AgentFinish | AgentStep[]> {
- let output
- try {
- output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild())
- } catch (e) {
- if (e instanceof OutputParserException) {
- let observation
- let text = e.message
- if (this.handleParsingErrors === true) {
- if (e.sendToLLM) {
- observation = e.observation
- text = e.llmOutput ?? ''
- } else {
- observation = 'Invalid or incomplete response'
- }
- } else if (typeof this.handleParsingErrors === 'string') {
- observation = this.handleParsingErrors
- } else if (typeof this.handleParsingErrors === 'function') {
- observation = this.handleParsingErrors(e)
- } else {
- throw e
- }
- output = {
- tool: '_Exception',
- toolInput: observation,
- log: text
- } as AgentAction
- } else {
- throw e
- }
- }
-
- if ('returnValues' in output) {
- return output
- }
-
- let actions: AgentAction[]
- if (Array.isArray(output)) {
- actions = output as AgentAction[]
- } else {
- actions = [output as AgentAction]
- }
-
- const result: AgentStep[] = []
- for (const agentAction of actions) {
- let observation = ''
- if (runManager) {
- await runManager?.handleAgentAction(agentAction)
- }
- if (agentAction.tool in nameToolMap) {
- const tool = nameToolMap[agentAction.tool]
- try {
- // here we need to override Tool call method to include sessionId as parameter
- // @ts-ignore
- observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, this.sessionId)
- } catch (e) {
- if (e instanceof ToolInputParsingException) {
- if (this.handleParsingErrors === true) {
- observation = 'Invalid or incomplete tool input. Please try again.'
- } else if (typeof this.handleParsingErrors === 'string') {
- observation = this.handleParsingErrors
- } else if (typeof this.handleParsingErrors === 'function') {
- observation = this.handleParsingErrors(e)
- } else {
- throw e
- }
- observation = await new ExceptionTool().call(observation, runManager?.getChild())
- }
- }
- } else {
- observation = `${agentAction.tool} is not a valid tool, try another available tool: ${Object.keys(nameToolMap).join(', ')}`
- }
- result.push({
- action: agentAction,
- observation
- })
- }
- return result
- }
-}
-
-class ExceptionTool extends Tool {
- name = '_Exception'
-
- description = 'Exception tool'
-
- async _call(query: string) {
- return query
- }
-}
-
module.exports = { nodeClass: OpenAIFunctionAgent_Agents }
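
Note: `formatAgentSteps` and the custom `AgentExecutor` removed above now live in `../../../src/agents`. As the removed lines show, a step whose action carries a `messageLog` is replayed as the logged messages plus a `FunctionMessage` of the observation; otherwise it collapses to a single `AIMessage` of the raw log. A sketch of the function-calling case (the `messageLog` payload is illustrative):

```ts
import { AIMessage, AgentStep } from 'langchain/schema'
import { formatAgentSteps } from '../../../src/agents'

const step: AgentStep = {
    action: {
        tool: 'calculator',
        toolInput: '2+2',
        log: '',
        // messageLog preserves the assistant turn that requested the call
        messageLog: [new AIMessage({ content: '', additional_kwargs: { function_call: { name: 'calculator', arguments: '{"input":"2+2"}' } } })]
    } as any, // messageLog is not on the base AgentAction type
    observation: '4'
}
// Yields [<assistant function_call message>, FunctionMessage('4', 'calculator')],
// exactly the shape the agent_scratchpad placeholder expects.
const scratchpad = formatAgentSteps([step])
```
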
diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
index 54d4252a64f..fcd9921e506 100644
--- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
+++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts
@@ -1,14 +1,16 @@
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConversationChain } from 'langchain/chains'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
+import { getBaseClasses } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
-import { BufferMemory } from 'langchain/memory'
import { BaseChatModel } from 'langchain/chat_models/base'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { flatten } from 'lodash'
import { Document } from 'langchain/document'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { StringOutputParser } from 'langchain/schema/output_parser'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
+const inputKey = 'input'
class ConversationChain_Chains implements INode {
label: string
@@ -20,8 +22,9 @@ class ConversationChain_Chains implements INode {
baseClasses: string[]
description: string
inputs: INodeParams[]
+ sessionId?: string
- constructor() {
+ constructor(fields?: { sessionId?: string }) {
this.label = 'Conversation Chain'
this.name = 'conversationChain'
this.version = 1.0
@@ -32,7 +35,7 @@ class ConversationChain_Chains implements INode {
this.baseClasses = [this.type, ...getBaseClasses(ConversationChain)]
this.inputs = [
{
- label: 'Language Model',
+ label: 'Chat Model',
name: 'model',
type: 'BaseChatModel'
},
@@ -60,76 +63,99 @@ class ConversationChain_Chains implements INode {
placeholder: 'You are a helpful assistant that write codes'
}
]
+ this.sessionId = fields?.sessionId
}
- async init(nodeData: INodeData): Promise<any> {
- const model = nodeData.inputs?.model as BaseChatModel
- const memory = nodeData.inputs?.memory as BufferMemory
- const prompt = nodeData.inputs?.systemMessagePrompt as string
- const docs = nodeData.inputs?.document as Document[]
-
- const flattenDocs = docs && docs.length ? flatten(docs) : []
- const finalDocs = []
- for (let i = 0; i < flattenDocs.length; i += 1) {
- if (flattenDocs[i] && flattenDocs[i].pageContent) {
- finalDocs.push(new Document(flattenDocs[i]))
- }
- }
-
- let finalText = ''
- for (let i = 0; i < finalDocs.length; i += 1) {
- finalText += finalDocs[i].pageContent
- }
-
- const replaceChar: string[] = ['{', '}']
- for (const char of replaceChar) finalText = finalText.replaceAll(char, '')
-
- if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}`
-
- const obj: any = {
- llm: model,
- memory,
- verbose: process.env.DEBUG === 'true' ? true : false
- }
-
- const chatPrompt = ChatPromptTemplate.fromMessages([
- SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
- new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
- HumanMessagePromptTemplate.fromTemplate('{input}')
- ])
- obj.prompt = chatPrompt
-
- const chain = new ConversationChain(obj)
+ async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
- const chain = nodeData.instance as ConversationChain
- const memory = nodeData.inputs?.memory as BufferMemory
- memory.returnMessages = true // Return true for BaseChatModel
-
- if (options && options.chatHistory) {
- const chatHistoryClassName = memory.chatHistory.constructor.name
- // Only replace when its In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- memory.chatHistory = mapChatHistory(options)
- }
- }
-
- chain.memory = memory
+ const memory = nodeData.inputs?.memory
+ const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
+ let res = ''
+
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
- const res = await chain.call({ input }, [loggerHandler, handler, ...callbacks])
- return res?.response
+ res = await chain.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
} else {
- const res = await chain.call({ input }, [loggerHandler, ...callbacks])
- return res?.response
+ res = await chain.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
+ }
+
+ await memory.addChatMessages(
+ [
+ {
+ text: input,
+ type: 'userMessage'
+ },
+ {
+ text: res,
+ type: 'apiMessage'
+ }
+ ],
+ this.sessionId
+ )
+
+ return res
+ }
+}
+
+const prepareChatPrompt = (nodeData: INodeData) => {
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const prompt = nodeData.inputs?.systemMessagePrompt as string
+ const docs = nodeData.inputs?.document as Document[]
+
+ const flattenDocs = docs && docs.length ? flatten(docs) : []
+ const finalDocs = []
+ for (let i = 0; i < flattenDocs.length; i += 1) {
+ if (flattenDocs[i] && flattenDocs[i].pageContent) {
+ finalDocs.push(new Document(flattenDocs[i]))
}
}
+
+ let finalText = ''
+ for (let i = 0; i < finalDocs.length; i += 1) {
+ finalText += finalDocs[i].pageContent
+ }
+
+ const replaceChar: string[] = ['{', '}']
+ for (const char of replaceChar) finalText = finalText.replaceAll(char, '')
+
+ if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}`
+
+ const chatPrompt = ChatPromptTemplate.fromMessages([
+ SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
+ new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
+ HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
+ ])
+
+ return chatPrompt
+}
+
+const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => {
+ const model = nodeData.inputs?.model as BaseChatModel
+ const memory = nodeData.inputs?.memory as FlowiseMemory
+ const memoryKey = memory.memoryKey ?? 'chat_history'
+
+ const conversationChain = RunnableSequence.from([
+ {
+ [inputKey]: (input: { input: string }) => input.input,
+ [memoryKey]: async () => {
+ const history = await memory.getChatMessages(sessionId, true, chatHistory)
+ return history
+ }
+ },
+ prepareChatPrompt(nodeData),
+ model,
+ new StringOutputParser()
+ ])
+
+ return conversationChain
}
module.exports = { nodeClass: ConversationChain_Chains }
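
Note: `prepareChain` now returns a plain LCEL `RunnableSequence`, so the node's `run` path is just `invoke` with callbacks. A minimal usage sketch (`nodeData` assumed to carry a chat model plus a `FlowiseMemory`):

```ts
const chain = prepareChain(nodeData, 'session-1', [])
// The trailing StringOutputParser means invoke() resolves to a plain string
const answer = await chain.invoke({ input: 'What did we talk about earlier?' })
```
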
diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
index 36376e132b3..5f98cba17ae 100644
--- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
+++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts
@@ -1,20 +1,25 @@
import { BaseLanguageModel } from 'langchain/base_language'
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, mapChatHistory } from '../../../src/utils'
-import { ConversationalRetrievalQAChain, QAChainParams } from 'langchain/chains'
+import { ConversationalRetrievalQAChain } from 'langchain/chains'
import { BaseRetriever } from 'langchain/schema/retriever'
-import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
+import { BufferMemoryInput } from 'langchain/memory'
import { PromptTemplate } from 'langchain/prompts'
-import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
-import {
- default_map_reduce_template,
- default_qa_template,
- qa_template,
- map_reduce_template,
- CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT,
- refine_question_template,
- refine_template
-} from './prompts'
+import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts'
+import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from 'langchain/schema/runnable'
+import { BaseMessage, HumanMessage, AIMessage } from 'langchain/schema'
+import { StringOutputParser } from 'langchain/schema/output_parser'
+import type { Document } from 'langchain/document'
+import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'
+import { applyPatch } from 'fast-json-patch'
+import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
+import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
+import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
+
+type RetrievalChainInput = {
+ chat_history: string
+ question: string
+}
+
+const sourceRunnableName = 'FindDocs'
class ConversationalRetrievalQAChain_Chains implements INode {
label: string
@@ -26,11 +31,12 @@ class ConversationalRetrievalQAChain_Chains implements INode {
baseClasses: string[]
description: string
inputs: INodeParams[]
+ sessionId?: string
- constructor() {
+ constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval QA Chain'
this.name = 'conversationalRetrievalQAChain'
- this.version = 1.0
+ this.version = 2.0
this.type = 'ConversationalRetrievalQAChain'
this.icon = 'qa.svg'
this.category = 'Chains'
@@ -38,9 +44,9 @@ class ConversationalRetrievalQAChain_Chains implements INode {
this.baseClasses = [this.type, ...getBaseClasses(ConversationalRetrievalQAChain)]
this.inputs = [
{
- label: 'Language Model',
+ label: 'Chat Model',
name: 'model',
- type: 'BaseLanguageModel'
+ type: 'BaseChatModel'
},
{
label: 'Vector Store Retriever',
@@ -60,6 +66,29 @@ class ConversationalRetrievalQAChain_Chains implements INode {
type: 'boolean',
optional: true
},
+ {
+ label: 'Rephrase Prompt',
+ name: 'rephrasePrompt',
+ type: 'string',
+ description: 'Using previous chat history, rephrase question into a standalone question',
+ warning: 'Prompt must include input variables: {chat_history} and {question}',
+ rows: 4,
+ additionalParams: true,
+ optional: true,
+ default: REPHRASE_TEMPLATE
+ },
+ {
+ label: 'Response Prompt',
+ name: 'responsePrompt',
+ type: 'string',
+ description: 'Taking the rephrased question, search for answer from the provided context',
+ warning: 'Prompt must include input variable: {context}',
+ rows: 4,
+ additionalParams: true,
+ optional: true,
+ default: RESPONSE_TEMPLATE
+ }
+ /** Deprecated
{
label: 'System Message',
name: 'systemMessagePrompt',
@@ -70,6 +99,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
placeholder:
'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
},
+ // TODO: create standalone chains for these 3 modes as they are not compatible with memory
{
label: 'Chain Option',
name: 'chainOption',
@@ -95,124 +125,246 @@ class ConversationalRetrievalQAChain_Chains implements INode {
additionalParams: true,
optional: true
}
+ */
]
+ this.sessionId = fields?.sessionId
}
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model as BaseLanguageModel
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
- const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
- const chainOption = nodeData.inputs?.chainOption as string
+ const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
+ const responsePrompt = nodeData.inputs?.responsePrompt as string
+
+ let customResponsePrompt = responsePrompt
+ // If the deprecated systemMessagePrompt is still exists
+ if (systemMessagePrompt) {
+ customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}`
+ }
+
+ const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
+ return answerChain
+ }
+
+ async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
+ const model = nodeData.inputs?.model as BaseLanguageModel
const externalMemory = nodeData.inputs?.memory
+ const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
+ const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
+ const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
+ const responsePrompt = nodeData.inputs?.responsePrompt as string
+ const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
- const obj: any = {
- verbose: process.env.DEBUG === 'true' ? true : false,
- questionGeneratorChainOptions: {
- template: CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT
- }
+ let customResponsePrompt = responsePrompt
+ // If the deprecated systemMessagePrompt still exists
+ if (systemMessagePrompt) {
+ customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}`
}
- if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments
-
- if (chainOption === 'map_reduce') {
- obj.qaChainOptions = {
- type: 'map_reduce',
- combinePrompt: PromptTemplate.fromTemplate(
- systemMessagePrompt ? `${systemMessagePrompt}\n${map_reduce_template}` : default_map_reduce_template
- )
- } as QAChainParams
- } else if (chainOption === 'refine') {
- const qprompt = new PromptTemplate({
- inputVariables: ['context', 'question'],
- template: refine_question_template(systemMessagePrompt)
- })
- const rprompt = new PromptTemplate({
- inputVariables: ['context', 'question', 'existing_answer'],
- template: refine_template
+ let memory: FlowiseMemory | undefined = externalMemory
+ if (!memory) {
+ memory = new BufferMemory({
+ returnMessages: true,
+ memoryKey: 'chat_history',
+ inputKey: 'input'
})
- obj.qaChainOptions = {
- type: 'refine',
- questionPrompt: qprompt,
- refinePrompt: rprompt
- } as QAChainParams
- } else {
- obj.qaChainOptions = {
- type: 'stuff',
- prompt: PromptTemplate.fromTemplate(systemMessagePrompt ? `${systemMessagePrompt}\n${qa_template}` : default_qa_template)
- } as QAChainParams
}
- if (externalMemory) {
- externalMemory.memoryKey = 'chat_history'
- externalMemory.inputKey = 'question'
- externalMemory.outputKey = 'text'
- externalMemory.returnMessages = true
- if (chainOption === 'refine') externalMemory.outputKey = 'output_text'
- obj.memory = externalMemory
- } else {
- const fields: BufferMemoryInput = {
- memoryKey: 'chat_history',
- inputKey: 'question',
- outputKey: 'text',
- returnMessages: true
+ const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt)
+
+ const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
+
+ const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const callbacks = await additionalCallbacks(nodeData, options)
+
+ const stream = answerChain.streamLog(
+ { question: input, chat_history: history },
+ { callbacks: [loggerHandler, ...callbacks] },
+ {
+ includeNames: [sourceRunnableName]
+ }
+ )
+
+ let streamedResponse: Record<string, any> = {}
+ let sourceDocuments: ICommonObject[] = []
+ let text = ''
+ let isStreamingStarted = false
+ const isStreamingEnabled = options.socketIO && options.socketIOClientId
+
+ for await (const chunk of stream) {
+ streamedResponse = applyPatch(streamedResponse, chunk.ops).newDocument
+
+ if (streamedResponse.final_output) {
+ text = streamedResponse.final_output?.output
+ if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('end')
+ if (Array.isArray(streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output)) {
+ sourceDocuments = streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output
+ if (isStreamingEnabled && returnSourceDocuments)
+ options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments)
+ }
+ }
+
+ if (
+ Array.isArray(streamedResponse?.streamed_output) &&
+ streamedResponse?.streamed_output.length &&
+ !streamedResponse.final_output
+ ) {
+ const token = streamedResponse.streamed_output[streamedResponse.streamed_output.length - 1]
+
+ if (!isStreamingStarted) {
+ isStreamingStarted = true
+ if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('start', token)
+ }
+ if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('token', token)
}
- if (chainOption === 'refine') fields.outputKey = 'output_text'
- obj.memory = new BufferMemory(fields)
}
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
- return chain
+ await memory.addChatMessages(
+ [
+ {
+ text: input,
+ type: 'userMessage'
+ },
+ {
+ text: text,
+ type: 'apiMessage'
+ }
+ ],
+ this.sessionId
+ )
+
+ if (returnSourceDocuments) return { text, sourceDocuments }
+ else return { text }
}
+}
-    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
- const chain = nodeData.instance as ConversationalRetrievalQAChain
- const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
- const chainOption = nodeData.inputs?.chainOption as string
+const createRetrieverChain = (llm: BaseLanguageModel, retriever: Runnable, rephrasePrompt: string) => {
+ // Small speed/accuracy optimization: no need to rephrase the first question
+ // since there shouldn't be any meta-references to prior chat history
+ const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
+ const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, llm, new StringOutputParser()]).withConfig({
+ runName: 'CondenseQuestion'
+ })
- let model = nodeData.inputs?.model
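+    // Predicate for the branch below: true once the conversation has prior turns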
+ const hasHistoryCheckFn = RunnableLambda.from((input: RetrievalChainInput) => input.chat_history.length > 0).withConfig({
+ runName: 'HasChatHistoryCheck'
+ })
- // Temporary fix: https://github.com/hwchase17/langchainjs/issues/754
- model.streaming = false
- chain.questionGeneratorChain.llm = model
+ const conversationChain = condenseQuestionChain.pipe(retriever).withConfig({
+ runName: 'RetrievalChainWithHistory'
+ })
- const obj = { question: input }
+ const basicRetrievalChain = RunnableLambda.from((input: RetrievalChainInput) => input.question)
+ .withConfig({
+ runName: 'Itemgetter:question'
+ })
+ .pipe(retriever)
+ .withConfig({ runName: 'RetrievalChainWithNoHistory' })
- if (options && options.chatHistory && chain.memory) {
- const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
- // Only replace when its In-Memory
- if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
- ;(chain.memory as any).chatHistory = mapChatHistory(options)
- }
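+    // With history: condense the question first, then retrieve; without: retrieve on the raw question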
+ return RunnableBranch.from([[hasHistoryCheckFn, conversationChain], basicRetrievalChain]).withConfig({ runName: sourceRunnableName })
+}
+
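+// Join the retrieved chunks into a single context string for the response prompt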
+const formatDocs = (docs: Document[]) => {
+    return docs.map((doc, i) => `<doc id='${i}'>${doc.pageContent}</doc>`).join('\n')
+}
+
+const formatChatHistoryAsString = (history: BaseMessage[]) => {
+ return history.map((message) => `${message._getType()}: ${message.content}`).join('\n')
+}
+
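+// Convert Flowise IMessage history into LangChain HumanMessage/AIMessage objects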
+const serializeHistory = (input: any) => {
+ const chatHistory: IMessage[] = input.chat_history || []
+ const convertedChatHistory = []
+ for (const message of chatHistory) {
+ if (message.type === 'userMessage') {
+ convertedChatHistory.push(new HumanMessage({ content: message.message }))
+ }
+ if (message.type === 'apiMessage') {
+ convertedChatHistory.push(new AIMessage({ content: message.message }))
}
+ }
+ return convertedChatHistory
+}
- const loggerHandler = new ConsoleCallbackHandler(options.logger)
- const callbacks = await additionalCallbacks(nodeData, options)
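+// Assemble the end-to-end pipeline: serialize history, retrieve context, then synthesize the answer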
+const createChain = (
+ llm: BaseLanguageModel,
+ retriever: Runnable,
+ rephrasePrompt = REPHRASE_TEMPLATE,
+ responsePrompt = RESPONSE_TEMPLATE
+) => {
+ const retrieverChain = createRetrieverChain(llm, retriever, rephrasePrompt)
- if (options.socketIO && options.socketIOClientId) {
- const handler = new CustomChainHandler(
- options.socketIO,
- options.socketIOClientId,
- chainOption === 'refine' ? 4 : undefined,
- returnSourceDocuments
- )
- const res = await chain.call(obj, [loggerHandler, handler, ...callbacks])
- if (chainOption === 'refine') {
- if (res.output_text && res.sourceDocuments) {
- return {
- text: res.output_text,
- sourceDocuments: res.sourceDocuments
- }
- }
- return res?.output_text
- }
- if (res.text && res.sourceDocuments) return res
- return res?.text
- } else {
- const res = await chain.call(obj, [loggerHandler, ...callbacks])
- if (res.text && res.sourceDocuments) return res
- return res?.text
+ const context = RunnableMap.from({
+ context: RunnableSequence.from([
+ ({ question, chat_history }) => ({
+ question,
+ chat_history: formatChatHistoryAsString(chat_history)
+ }),
+ retrieverChain,
+ RunnableLambda.from(formatDocs).withConfig({
+ runName: 'FormatDocumentChunks'
+ })
+ ]),
+ question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({
+ runName: 'Itemgetter:question'
+ }),
+ chat_history: RunnableLambda.from((input: RetrievalChainInput) => input.chat_history).withConfig({
+ runName: 'Itemgetter:chat_history'
+ })
+ }).withConfig({ tags: ['RetrieveDocs'] })
+
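+    // The system message carries the retrieved context; chat history and the
+    // question follow as separate messages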
+ const prompt = ChatPromptTemplate.fromMessages([
+ ['system', responsePrompt],
+ new MessagesPlaceholder('chat_history'),
+ ['human', `{question}`]
+ ])
+
+ const responseSynthesizerChain = RunnableSequence.from([prompt, llm, new StringOutputParser()]).withConfig({
+ tags: ['GenerateResponse']
+ })
+
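+    // Wire the steps together: input mapping -> context retrieval -> response generation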
+ const conversationalQAChain = RunnableSequence.from([
+ {
+ question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({
+ runName: 'Itemgetter:question'
+ }),
+ chat_history: RunnableLambda.from(serializeHistory).withConfig({
+ runName: 'SerializeHistory'
+ })
+ },
+ context,
+ responseSynthesizerChain
+ ])
+
+ return conversationalQAChain
+}
+
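+// In-memory fallback used when no external memory node is connected;
+// the history window is rebuilt from the incoming messages on every call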
+class BufferMemory extends FlowiseMemory implements MemoryMethods {
+ constructor(fields: BufferMemoryInput) {
+ super(fields)
+ }
+
+    async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise<IMessage[] | BaseMessage[]> {
+ await this.chatHistory.clear()
+
+ for (const msg of prevHistory) {
+ if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
+ else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
}
+
+ const memoryResult = await this.loadMemoryVariables({})
+ const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
+ return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages)
+ }
+
+    async addChatMessages(): Promise<void> {
+ // adding chat messages will be done on the fly in getChatMessages()
+ return
+ }
+
+    async clearChatMessages(): Promise<void> {
+ await this.clear()
}
}
diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts
index 132e3a97e7a..dccc73588b4 100644
--- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts
+++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts
@@ -1,64 +1,27 @@
-export const default_qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
-
-{context}
-
-Question: {question}
-Helpful Answer:`
+export const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, answer in the same language as the follow up question. Include it in the standalone question.
-export const qa_template = `Use the following pieces of context to answer the question at the end.
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone question:`
+export const RESPONSE_TEMPLATE = `I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". Using the provided context, answer the user's question to the best of your ability using the resources provided.
+If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure" and stop after that. Refuse to answer any question not about the info. Never break character.
+------------
{context}
+------------
+REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure". Don't try to make up an answer. Never break character.`
-Question: {question}
-Helpful Answer:`
-
-export const default_map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer.
-If you don't know the answer, just say that you don't know. Don't try to make up an answer.
-
-{summaries}
-
-Question: {question}
-Helpful Answer:`
-
-export const map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer.
+export const QA_TEMPLATE = `Use the following pieces of context to answer the question at the end.
-{summaries}
+{context}
Question: {question}
Helpful Answer:`
-export const refine_question_template = (sysPrompt?: string) => {
- let returnPrompt = ''
- if (sysPrompt)
- returnPrompt = `Context information is below.
----------------------
-{context}
----------------------
-Given the context information and not prior knowledge, ${sysPrompt}
-Answer the question: {question}.
-Answer:`
- if (!sysPrompt)
- returnPrompt = `Context information is below.
----------------------
-{context}
----------------------
-Given the context information and not prior knowledge, answer the question: {question}.
-Answer:`
- return returnPrompt
-}
-
-export const refine_template = `The original question is as follows: {question}
-We have provided an existing answer: {existing_answer}
-We have the opportunity to refine the existing answer (only if needed) with some more context below.
-------------
-{context}
-------------
-Given the new context, refine the original answer to better answer the question.
-If you can't find answer from the context, return the original answer.`
-
-export const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, answer in the same language as the follow up question. include it in the standalone question.
+export const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
-Standalone question:`
+Standalone Question:`
diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index 3799d062fe8..7d65c9cd078 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -69,22 +69,23 @@ class VectaraChain_Chains implements INode {
options: [
{
label: 'vectara-summary-ext-v1.2.0 (gpt-3.5-turbo)',
- name: 'vectara-summary-ext-v1.2.0'
+ name: 'vectara-summary-ext-v1.2.0',
+ description: 'base summarizer, available to all Vectara users'
},
{
label: 'vectara-experimental-summary-ext-2023-10-23-small (gpt-3.5-turbo)',
name: 'vectara-experimental-summary-ext-2023-10-23-small',
- description: 'In beta, available to both Growth and Scale Vectara users'
+ description: `In beta, available to both Growth and Scale Vectara users`
},
{
label: 'vectara-summary-ext-v1.3.0 (gpt-4.0)',
name: 'vectara-summary-ext-v1.3.0',
- description: 'Only available to paying Scale Vectara users'
+ description: 'Only available to Scale Vectara users'
},
{
label: 'vectara-experimental-summary-ext-2023-10-23-med (gpt-4.0)',
name: 'vectara-experimental-summary-ext-2023-10-23-med',
- description: 'In beta, only available to paying Scale Vectara users'
+ description: `In beta, only available to Scale Vectara users`
}
],
default: 'vectara-summary-ext-v1.2.0'
@@ -228,7 +229,7 @@ class VectaraChain_Chains implements INode {
async run(nodeData: INodeData, input: string): Promise<string | ICommonObject>