From 75b5b33d8d9b92c4ef9458b81dc7f1e8c949532a Mon Sep 17 00:00:00 2001
From: Henry
Date: Sat, 30 Dec 2023 12:29:00 +0000
Subject: [PATCH 01/51] update langchain version

---
 .../nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts    | 5 ++---
 .../components/nodes/chatmodels/ChatMistral/ChatMistral.ts | 4 +++-
 .../components/nodes/chatmodels/ChatOllama/ChatOllama.ts   | 5 ++---
 .../nodes/embeddings/OllamaEmbedding/OllamaEmbedding.ts    | 2 +-
 packages/components/nodes/llms/Ollama/Ollama.ts            | 3 +--
 packages/components/package.json                           | 6 +++---
 packages/server/src/utils/index.ts                         | 2 +-
 7 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
index 99e151e6c3f..9b7b724a224 100644
--- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
+++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts
@@ -1,7 +1,6 @@
-import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
 import { BaseCache } from 'langchain/schema'
 import { BaseLLMParams } from 'langchain/llms/base'
 
@@ -123,7 +122,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
         const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
 
-        const obj: Partial<OpenAIBaseInput> & BaseLLMParams & Partial<AzureOpenAIInput> = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & Partial<AzureOpenAIInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
diff --git a/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
index f65de85158b..9162457462e 100644
--- a/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
+++ b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
@@ -124,13 +124,15 @@ class ChatMistral_ChatModels implements INode {
         const safeMode = nodeData.inputs?.safeMode as boolean
         const randomSeed = nodeData.inputs?.safeMode as string
         const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
+        const streaming = nodeData.inputs?.streaming as boolean
         // Waiting fix from langchain + mistral to enable streaming - https://github.com/mistralai/client-js/issues/18
 
         const cache = nodeData.inputs?.cache as BaseCache
 
         const obj: ChatMistralAIInput = {
             apiKey: apiKey,
-            modelName: modelName
+            modelName: modelName,
+            streaming: streaming ?? true
         }
 
         if (maxOutputTokens) obj.maxTokens = parseInt(maxOutputTokens, 10)
diff --git a/packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts b/packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts
index ed58589b15d..d445c7e128c 100644
--- a/packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts
+++ b/packages/components/nodes/chatmodels/ChatOllama/ChatOllama.ts
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { ChatOllama } from 'langchain/chat_models/ollama'
+import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'
 
 class ChatOllama_ChatModels implements INode {
@@ -209,7 +208,7 @@ class ChatOllama_ChatModels implements INode {
 
         const cache = nodeData.inputs?.cache as BaseCache
 
-        const obj: OllamaInput & BaseLLMParams = {
+        const obj: ChatOllamaInput & BaseLLMParams = {
             baseUrl,
             temperature: parseFloat(temperature),
             model: modelName
diff --git a/packages/components/nodes/embeddings/OllamaEmbedding/OllamaEmbedding.ts b/packages/components/nodes/embeddings/OllamaEmbedding/OllamaEmbedding.ts
index 698770b3ba3..8892b03f2cd 100644
--- a/packages/components/nodes/embeddings/OllamaEmbedding/OllamaEmbedding.ts
+++ b/packages/components/nodes/embeddings/OllamaEmbedding/OllamaEmbedding.ts
@@ -1,7 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { OllamaInput } from 'langchain/llms/ollama'
 import { OllamaEmbeddings } from 'langchain/embeddings/ollama'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 
 class OllamaEmbedding_Embeddings implements INode {
     label: string
diff --git a/packages/components/nodes/llms/Ollama/Ollama.ts b/packages/components/nodes/llms/Ollama/Ollama.ts
index c7250a0409b..385890c9ad2 100644
--- a/packages/components/nodes/llms/Ollama/Ollama.ts
+++ b/packages/components/nodes/llms/Ollama/Ollama.ts
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { Ollama } from 'langchain/llms/ollama'
+import { Ollama, OllamaInput } from 'langchain/llms/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'
 
 class Ollama_LLMs implements INode {
diff --git a/packages/components/package.json b/packages/components/package.json
index 9cb0bf1e97c..8a145186171 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -26,8 +26,8 @@
         "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
-        "@langchain/google-genai": "^0.0.3",
-        "@langchain/mistralai": "^0.0.3",
+        "@langchain/google-genai": "^0.0.6",
+        "@langchain/mistralai": "^0.0.6",
         "@notionhq/client": "^2.2.8",
         "@opensearch-project/opensearch": "^1.2.0",
         "@pinecone-database/pinecone": "^1.1.1",
@@ -52,7 +52,7 @@
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.196",
+        "langchain": "^0.0.213",
         "langfuse": "^1.2.0",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.49",
diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index 0bc28861727..ce444512257 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -818,7 +818,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
  */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }
 

From 9e7f3587f14881e5192ff4a2ef8f85d83b969fad Mon Sep 17 00:00:00 2001
From: vinodkiran
Date: Sat, 30 Dec 2023 18:22:21 +0530
Subject: [PATCH 02/51] Upgrading of analytic dependencies - langfuse and
 langsmith.

---
 packages/components/package.json   | 6 +++---
 packages/components/src/handler.ts | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/packages/components/package.json b/packages/components/package.json
index 9cb0bf1e97c..ef1f92eb106 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -53,9 +53,9 @@
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
         "langchain": "^0.0.196",
-        "langfuse": "^1.2.0",
-        "langfuse-langchain": "^1.0.31",
-        "langsmith": "^0.0.49",
+        "langfuse": "2.0.2",
+        "langfuse-langchain": "2.0.2",
+        "langsmith": "0.0.53",
         "linkifyjs": "^4.1.1",
         "llmonitor": "^0.5.5",
         "mammoth": "^1.5.1",
diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts
index 29aff3e2f05..ce7a1a1caa7 100644
--- a/packages/components/src/handler.ts
+++ b/packages/components/src/handler.ts
@@ -536,9 +536,10 @@ export class AnalyticHandler {
         if (Object.prototype.hasOwnProperty.call(this.handlers, 'langFuse')) {
             const trace: LangfuseTraceClient | undefined = this.handlers['langFuse'].trace[parentIds['langFuse'].trace]
             if (trace) {
+                trace.id
                 const generation = trace.generation({
                     name,
-                    prompt: input
+                    input: input
                 })
                 this.handlers['langFuse'].generation = { [generation.id]: generation }
                 returnIds['langFuse'].generation = generation.id
@@ -583,7 +584,7 @@ export class AnalyticHandler {
             const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse'].generation]
             if (generation) {
                 generation.end({
-                    completion: output
+                    output: output
                 })
             }
         }
@@ -618,7 +619,7 @@ export class AnalyticHandler {
             const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse'].generation]
             if (generation) {
                 generation.end({
-                    completion: error
+                    output: error
                 })
             }
         }

From 28e32f0ae68a18905b52921be2bc97e4447386a2 Mon Sep 17 00:00:00 2001
From: Darien Kindlund
Date: Sat, 30 Dec 2023 09:46:44 -0500
Subject: [PATCH 03/51] Initial support for Airtable views

---
 .../documentloaders/Airtable/Airtable.ts | 21 ++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/packages/components/nodes/documentloaders/Airtable/Airtable.ts b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
index 70d0c674a27..a2c1eef3fd2 100644
--- a/packages/components/nodes/documentloaders/Airtable/Airtable.ts
+++ b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
@@ -55,6 +55,15 @@ class Airtable_DocumentLoaders implements INode {
                 description:
                     'If your table URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, tblJdmvbrgizbYICO is the table id'
             },
+                {
+                    label: 'View Id',
+                    name: 'viewId',
+                    type: 'string',
+                    placeholder: 'viw9UrP77Id0CE4ee',
+                    description:
+                        'If your view URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, viw9UrP77Id0CE4ee is the view id',
+                    optional: true
+            },
             {
                 label: 'Return All',
                 name: 'returnAll',
@@ -83,6 +92,7 @@ class Airtable_DocumentLoaders implements INode {
     async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const baseId = nodeData.inputs?.baseId as string
         const tableId = nodeData.inputs?.tableId as string
+        const viewId = nodeData.inputs?.viewId as string
         const returnAll = nodeData.inputs?.returnAll as boolean
         const limit = nodeData.inputs?.limit as string
         const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
@@ -94,6 +104,7 @@ class Airtable_DocumentLoaders implements INode {
         const airtableOptions: AirtableLoaderParams = {
             baseId,
             tableId,
+            viewId,
             returnAll,
             accessToken,
             limit: limit ? parseInt(limit, 10) : 100
@@ -133,6 +144,7 @@ interface AirtableLoaderParams {
     baseId: string
     tableId: string
     accessToken: string
+    viewId?: string
    limit?: number
     returnAll?: boolean
 }
@@ -153,16 +165,19 @@ class AirtableLoader extends BaseDocumentLoader {
 
     public readonly tableId: string
 
+    public readonly viewId?: string
+
     public readonly accessToken: string
 
     public readonly limit: number
 
     public readonly returnAll: boolean
 
-    constructor({ baseId, tableId, accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
+    constructor({ baseId, tableId, viewId, accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
         super()
         this.baseId = baseId
         this.tableId = tableId
+        this.viewId = viewId
         this.accessToken = accessToken
         this.limit = limit
         this.returnAll = returnAll
@@ -203,7 +218,7 @@ class AirtableLoader extends BaseDocumentLoader {
     }
 
     private async loadLimit(): Promise<Document[]> {
-        const params = { maxRecords: this.limit }
+        const params = { maxRecords: this.limit, view: this.viewId }
         const data = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}`, params)
         if (data.records.length === 0) {
             return []
@@ -212,7 +227,7 @@ class AirtableLoader extends BaseDocumentLoader {
     }
 
     private async loadAll(): Promise<Document[]> {
-        const params: ICommonObject = { pageSize: 100 }
+        const params: ICommonObject = { pageSize: 100, view: this.viewId }
         let data: AirtableLoaderResponse
         let returnPages: AirtableLoaderPage[] = []
 

From 543c41b5c5c685df09b8682972d5341f2f76371a Mon Sep 17 00:00:00 2001
From: Henry Heng
Date: Sat, 30 Dec 2023 16:07:05 +0000
Subject: [PATCH 04/51] Update ChatMistral.ts

---
 packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts | 2 --
 1 file changed, 2 deletions(-)

diff --git a/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
index 9162457462e..4524db4625a 100644
--- a/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
+++ b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
@@ -125,8 +125,6 @@ class ChatMistral_ChatModels implements INode {
         const randomSeed = nodeData.inputs?.safeMode as string
         const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
         const streaming = nodeData.inputs?.streaming as boolean
-        // Waiting fix from langchain + mistral to enable streaming - https://github.com/mistralai/client-js/issues/18
-
         const cache = nodeData.inputs?.cache as BaseCache
 
         const obj: ChatMistralAIInput = {

From da76a151ff08d346c1091895876f109326b8fc89 Mon Sep 17 00:00:00 2001
From: vinodkiran
Date: Sun, 31 Dec 2023 12:56:35 +0530
Subject: [PATCH 05/51] minor typo fixes...

---
 packages/components/src/handler.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts
index ce7a1a1caa7..1eb05a51746 100644
--- a/packages/components/src/handler.ts
+++ b/packages/components/src/handler.ts
@@ -536,7 +536,6 @@ export class AnalyticHandler {
         if (Object.prototype.hasOwnProperty.call(this.handlers, 'langFuse')) {
             const trace: LangfuseTraceClient | undefined = this.handlers['langFuse'].trace[parentIds['langFuse'].trace]
             if (trace) {
-                trace.id
                 const generation = trace.generation({
                     name,
                     input: input

From 9ba38dcd73fff805a13da61669f5163d28e563ee Mon Sep 17 00:00:00 2001
From: cosark <121065588+cosark@users.noreply.github.com>
Date: Sun, 31 Dec 2023 17:16:53 -0700
Subject: [PATCH 06/51] Added deploy template for RepoCloud.io

Integration with RepoCloud Deploy Template to the Flowise GitHub page,
enabling one-click deployment. This addition simplifies the process for
users to quickly deploy and scale Flowise using RepoCloud's efficient
cloud hosting services.
---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index 25026237f76..a80c53f7c3a 100644
--- a/README.md
+++ b/README.md
@@ -157,6 +157,10 @@ Flowise support different environment variables to configure your instance. You
 
 [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
 
+### [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+[![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
+
 ### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
 
 HuggingFace Spaces

From 467e71ba1f01dcd3ab2b86c79d981987f9667ba2 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 18:36:23 -0800
Subject: [PATCH 07/51] added support for MMR

---
 .../nodes/chains/VectaraChain/VectaraChain.ts | 38 ++++++++++++++-----
 .../nodes/vectorstores/Vectara/Vectara.ts     | 31 +++++++++++++--
 2 files changed, 56 insertions(+), 13 deletions(-)

diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index 3799d062fe8..c80b354f717 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -69,22 +69,23 @@ class VectaraChain_Chains implements INode {
                 options: [
                     {
                         label: 'vectara-summary-ext-v1.2.0 (gpt-3.5-turbo)',
-                        name: 'vectara-summary-ext-v1.2.0'
+                        name: 'vectara-summary-ext-v1.2.0',
+                        description: 'base summarizer, available to all Vectara users'
                     },
                     {
                         label: 'vectara-experimental-summary-ext-2023-10-23-small (gpt-3.5-turbo)',
                         name: 'vectara-experimental-summary-ext-2023-10-23-small',
-                        description: 'In beta, available to both Growth and Scale Vectara users'
+                        description: `In beta, available to both Growth and Scale Vectara users`
                     },
                     {
                         label: 'vectara-summary-ext-v1.3.0 (gpt-4.0)',
                         name: 'vectara-summary-ext-v1.3.0',
-                        description: 'Only available to paying Scale Vectara users'
+                        description: 'Only available to Scale Vectara users'
                     },
                     {
                         label: 'vectara-experimental-summary-ext-2023-10-23-med (gpt-4.0)',
                         name: 'vectara-experimental-summary-ext-2023-10-23-med',
-                        description: 'In beta, only available to paying Scale Vectara users'
+                        description: `In beta, only available to Scale Vectara users`
                     }
                 ],
                 default: 'vectara-summary-ext-v1.2.0'
@@ -228,7 +228,7 @@ class VectaraChain_Chains implements INode {
 
     async run(nodeData: INodeData, input: string): Promise<string | ICommonObject> {
         const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
-        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'auto'
+        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
         const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
         const maxSummarizedResultsStr = nodeData.inputs?.maxSummarizedResults as string
         const maxSummarizedResults = maxSummarizedResultsStr ? parseInt(maxSummarizedResultsStr, 10) : 7
@@ -247,17 +248,28 @@ class VectaraChain_Chains implements INode {
             lexicalInterpolationConfig: { lambda: vectaraFilter?.lambda ?? 0.025 }
         }))
 
+        const mmrRerankerId = 272725718 // Vectara reranker ID for MMR
         const data = {
             query: [
                 {
                     query: input,
                     start: 0,
-                    numResults: topK,
+                    numResults: vectaraFilter?.mmrConfig?.mmrK > 0 ? vectaraFilter?.mmrK : topK,
+                    corpusKey: corpusKeys,
                     contextConfig: {
                         sentencesAfter: vectaraFilter?.contextConfig?.sentencesAfter ?? 2,
                         sentencesBefore: vectaraFilter?.contextConfig?.sentencesBefore ?? 2
                     },
-                    corpusKey: corpusKeys,
+                    ...(vectaraFilter?.mmrConfig?.mmrK > 0
+                        ? {
+                              rerankingConfig: {
+                                  rerankerId: mmrRerankerId,
+                                  mmrConfig: {
+                                      diversityBias: vectaraFilter?.mmrConfig.diversityBias
+                                  }
+                              }
+                          }
+                        : {}),
                     summary: [
                         {
                             summarizerPromptName,
@@ -285,6 +297,14 @@ class VectaraChain_Chains implements INode {
         const documents = result.responseSet[0].document
         let rawSummarizedText = ''
 
+        // remove responses that are not in the topK (in case of MMR)
+        // Note that this does not really matter functionally due to the reorder citations, but it is more efficient
+        const maxResponses = vectaraFilter?.mmrConfig?.mmrK > 0 ? Math.min(responses.length, topK) : responses.length
+        if (responses.length > maxResponses) {
+            responses.splice(0, maxResponses)
+        }
+
+        // Add metadata to each text response given its corresponding document metadata
         for (let i = 0; i < responses.length; i += 1) {
             const responseMetadata = responses[i].metadata
             const documentMetadata = documents[responses[i].documentIndex].metadata
@@ -301,13 +321,13 @@ class VectaraChain_Chains implements INode {
             responses[i].metadata = combinedMetadata
         }
 
+        // Create the summarization response
         const summaryStatus = result.responseSet[0].summary[0].status
         if (summaryStatus.length > 0 && summaryStatus[0].code === 'BAD_REQUEST') {
             throw new Error(
                 `BAD REQUEST: Too much text for the summarizer to summarize. Please try reducing the number of search results to summarize, or the context of each result by adjusting the 'summary_num_sentences', and 'summary_num_results' parameters respectively.`
             )
         }
-
         if (
             summaryStatus.length > 0 &&
             summaryStatus[0].code === 'NOT_FOUND' &&
@@ -316,8 +336,8 @@ class VectaraChain_Chains implements INode {
             throw new Error(`BAD REQUEST: summarizer ${summarizerPromptName} is invalid for this account.`)
         }
 
+        // Reorder citations in summary and create the list of returned source documents
         rawSummarizedText = result.responseSet[0].summary[0]?.text
-
         let summarizedText = reorderCitations(rawSummarizedText)
         let summaryResponses = applyCitationOrder(responses, rawSummarizedText)
 
diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index 7460c5864e7..98acf00c251 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -82,7 +82,9 @@ class Vectara_VectorStores implements INode {
                 label: 'Lambda',
                 name: 'lambda',
                 description:
-                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
+                    'Enable hybrid search to improve retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.' +
+                    'A value of 0.0 means that only neural search is used, while a value of 1.0 means that only keyword-based search is used. Defaults to 0.0 (neural only).',
+                default: 0.0,
                 type: 'number',
                 additionalParams: true,
                 optional: true
@@ -90,8 +92,26 @@ class Vectara_VectorStores implements INode {
             {
                 label: 'Top K',
                 name: 'topK',
-                description: 'Number of top results to fetch. Defaults to 4',
-                placeholder: '4',
+                description: 'Number of top results to fetch. Defaults to 5',
+                placeholder: '5',
+                type: 'number',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'MMR K',
+                name: 'mmrK',
+                description: 'Number of top results to fetch for MMR. Defaults to 50',
+                placeholder: '50',
+                type: 'number',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'MMR diversity bias',
+                name: 'mmrDiversityBias',
+                description: 'The diversity bias to use for MMR. Defaults to 0.3',
+                placeholder: '0.3',
                 type: 'number',
                 additionalParams: true,
                 optional: true
@@ -191,7 +211,9 @@ class Vectara_VectorStores implements INode {
         const lambda = nodeData.inputs?.lambda as number
         const output = nodeData.outputs?.output as string
         const topK = nodeData.inputs?.topK as string
-        const k = topK ? parseFloat(topK) : 4
+        const k = topK ? parseFloat(topK) : 5
+        const mmrK = nodeData.inputs?.mmrK as number
+        const mmrDiversityBias = nodeData.inputs?.mmrDiversityBias as number
 
         const vectaraArgs: VectaraLibArgs = {
             apiKey: apiKey,
@@ -208,6 +230,7 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig
+        if (mmrK) vectaraFilter.mmrConfig = { mmrK: mmrK, diversityBias: mmrDiversityBias }
 
         const vectorStore = new VectaraStore(vectaraArgs)
 

From e5f0ca0c0ac869d1ab49e835e77d8e810199bd3f Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 18:59:49 -0800
Subject: [PATCH 08/51] bug fix

---
 .../nodes/chains/VectaraChain/VectaraChain.ts | 8 +++++---
 .../nodes/vectorstores/Vectara/Vectara.ts     | 6 +++---
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index c80b354f717..16257b69d8a 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -249,18 +249,20 @@ class VectaraChain_Chains implements INode {
         }))
 
         const mmrRerankerId = 272725718 // Vectara reranker ID for MMR
+        const mmrEnabled = vectaraFilter?.mmrConfig?.mmrDiversityBias > 0
+
         const data = {
             query: [
                 {
                     query: input,
                     start: 0,
-                    numResults: vectaraFilter?.mmrConfig?.mmrK > 0 ? vectaraFilter?.mmrK : topK,
+                    numResults: mmrEnabled ? vectaraFilter?.mmrK : topK,
                     corpusKey: corpusKeys,
                     contextConfig: {
                         sentencesAfter: vectaraFilter?.contextConfig?.sentencesAfter ?? 2,
                         sentencesBefore: vectaraFilter?.contextConfig?.sentencesBefore ?? 2
                     },
-                    ...(vectaraFilter?.mmrConfig?.mmrK > 0
+                    ...(mmrEnabled
                         ? {
                               rerankingConfig: {
                                   rerankerId: mmrRerankerId,
@@ -299,7 +301,7 @@ class VectaraChain_Chains implements INode {
 
         // remove responses that are not in the topK (in case of MMR)
         // Note that this does not really matter functionally due to the reorder citations, but it is more efficient
-        const maxResponses = vectaraFilter?.mmrConfig?.mmrK > 0 ? Math.min(responses.length, topK) : responses.length
+        const maxResponses = mmrEnabled ? Math.min(responses.length, topK) : responses.length
         if (responses.length > maxResponses) {
             responses.splice(0, maxResponses)
         }
diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index 98acf00c251..488a8803626 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -110,8 +110,8 @@ class Vectara_VectorStores implements INode {
             {
                 label: 'MMR diversity bias',
                 name: 'mmrDiversityBias',
-                description: 'The diversity bias to use for MMR. Defaults to 0.3',
-                placeholder: '0.3',
+                description: 'The diversity bias to use for MMR. Defaults to 0 (MMR disabled)',
+                placeholder: '0.0',
                 type: 'number',
                 additionalParams: true,
                 optional: true
@@ -230,7 +230,7 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig
-        if (mmrK) vectaraFilter.mmrConfig = { mmrK: mmrK, diversityBias: mmrDiversityBias }
+        vectaraFilter.mmrConfig = { mmrK: mmrK, diversityBias: mmrDiversityBias }
 
         const vectorStore = new VectaraStore(vectaraArgs)
 

From dc3e4fd059b64d153de0e5c11ebb0836b59ca56f Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 19:18:33 -0800
Subject: [PATCH 09/51] bug fix 2

---
 .../nodes/vectorstores/Vectara/Vectara.ts | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index 488a8803626..d83f6cb9e27 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -1,5 +1,12 @@
 import { flatten } from 'lodash'
-import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile } from 'langchain/vectorstores/vectara'
+import {
+    VectaraStore,
+    VectaraLibArgs,
+    VectaraFilter,
+    VectaraContextConfig,
+    VectaraFile,
+    VectaraMMRConfig
+} from 'langchain/vectorstores/vectara'
 import { Document } from 'langchain/document'
 import { Embeddings } from 'langchain/embeddings/base'
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@@ -230,7 +237,10 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig
-        vectaraFilter.mmrConfig = { mmrK: mmrK, diversityBias: mmrDiversityBias }
+        const mmrConfig: VectaraMMRConfig = {}
+        mmrConfig.mmrK = mmrK
+        mmrConfig.diversityBias = mmrDiversityBias
+        vectaraFilter.mmrConfig = mmrConfig
 
         const vectorStore = new VectaraStore(vectaraArgs)
 

From b44a0f1d39f5c7f3c1c00a16900229fb6a3b7e08 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 21:53:48 -0800
Subject: [PATCH 10/51] bugfix

---
 .../nodes/chains/VectaraChain/VectaraChain.ts |  4 ++--
 .../nodes/vectorstores/Vectara/Vectara.ts     | 14 ++++----------
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index 16257b69d8a..986d587aa49 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -249,14 +249,14 @@ class VectaraChain_Chains implements INode {
         }))
 
         const mmrRerankerId = 272725718 // Vectara reranker ID for MMR
-        const mmrEnabled = vectaraFilter?.mmrConfig?.mmrDiversityBias > 0
+        const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
 
         const data = {
             query: [
                 {
                     query: input,
                     start: 0,
-                    numResults: mmrEnabled ? vectaraFilter?.mmrK : topK,
+                    numResults: mmrEnabled ? vectaraFilter?.mmrTopK : topK,
                     corpusKey: corpusKeys,
                     contextConfig: {
                         sentencesAfter: vectaraFilter?.contextConfig?.sentencesAfter ?? 2,
diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index d83f6cb9e27..be63d58228b 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -1,12 +1,5 @@
 import { flatten } from 'lodash'
-import {
-    VectaraStore,
-    VectaraLibArgs,
-    VectaraFilter,
-    VectaraContextConfig,
-    VectaraFile,
-    VectaraMMRConfig
-} from 'langchain/vectorstores/vectara'
+import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile, MMRConfig } from 'langchain/vectorstores/vectara'
 import { Document } from 'langchain/document'
 import { Embeddings } from 'langchain/embeddings/base'
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@@ -237,10 +230,11 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig
-        const mmrConfig: VectaraMMRConfig = {}
-        mmrConfig.mmrK = mmrK
+        const mmrConfig: MMRConfig = {}
+        mmrConfig.enabled = mmrDiversityBias > 0
+        mmrConfig.mmrTopK = mmrK
         mmrConfig.diversityBias = mmrDiversityBias
         vectaraFilter.mmrConfig = mmrConfig
 
         const vectorStore = new VectaraStore(vectaraArgs)
 

From e4ab1df4286bef22db119a6cc57f7c771d42b773 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 23:40:04 -0800
Subject: [PATCH 11/51] na

---
 package.json                     | 2 +-
 packages/components/package.json | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/package.json b/package.json
index 5ecbb59b200..cac3898408d 100644
--- a/package.json
+++ b/package.json
@@ -53,5 +53,5 @@
     },
     "engines": {
         "node": ">=18.15.0"
-    }
+    },
 }
diff --git a/packages/components/package.json b/packages/components/package.json
index cb3448ebba7..72c8f81561d 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -47,12 +47,12 @@
         "express": "^4.17.3",
         "faiss-node": "^0.2.2",
         "form-data": "^4.0.0",
-        "google-auth-library": "^9.0.0",
+        "google-auth-library": "^9.4.0",
         "graphql": "^16.6.0",
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.196",
+        "langchain": "^0.0.213",
         "langfuse": "^1.2.0",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.49",

From 3f835fb50731f8eafede6bfeb59e01f8500b8b91 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sun, 31 Dec 2023 23:41:19 -0800
Subject: [PATCH 12/51] extra comma

---
 package.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/package.json b/package.json
index cac3898408d..5ecbb59b200 100644
--- a/package.json
+++ b/package.json
@@ -53,5 +53,5 @@
     },
     "engines": {
         "node": ">=18.15.0"
-    },
+    }
 }

From 383f612e114050eb71977bb0498ac356c3afbdf3 Mon Sep 17 00:00:00 2001
From: Henry
Date: Wed, 3 Jan 2024 00:35:55 +0000
Subject: [PATCH 13/51] add check for secretkey_path

---
 packages/server/src/utils/index.ts | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index 39bd0854e5c..671873250d8 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -873,7 +873,9 @@ export const getEncryptionKey = async (): Promise<string> => {
         return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8')
     } catch (error) {
         const encryptKey = generateEncryptKey()
-        const defaultLocation = path.join(getUserHome(), '.flowise', 'encryption.key')
+        const defaultLocation = process.env.SECRETKEY_PATH
+            ? path.join(process.env.SECRETKEY_PATH, 'encryption.key')
+            : path.join(getUserHome(), '.flowise', 'encryption.key')
         await fs.promises.writeFile(defaultLocation, encryptKey)
         return encryptKey
     }

From e513b69e3eb50dd0b453338df30937a892d28496 Mon Sep 17 00:00:00 2001
From: Henry
Date: Wed, 3 Jan 2024 00:54:31 +0000
Subject: [PATCH 14/51] =?UTF-8?q?=F0=9F=A5=B3=20flowise@1.4.9=20bugfix=20r?=
 =?UTF-8?q?elease?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 package.json                 | 2 +-
 packages/server/package.json | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/package.json b/package.json
index 5ecbb59b200..5a9bfcbf3d5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.4.8",
+    "version": "1.4.9",
     "private": true,
     "homepage": "https://flowiseai.com",
     "workspaces": [
diff --git a/packages/server/package.json b/packages/server/package.json
index 54409e298e6..f1c0b7f79c6 100644
--- a/packages/server/package.json
+++ b/packages/server/package.json
@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.4.8",
+    "version": "1.4.9",
     "description": "Flowiseai Server",
     "main": "dist/index",
     "types": "dist/index.d.ts",

From 14aa19987880e62f0f623088c3a9fcd800b798e6 Mon Sep 17 00:00:00 2001
From: Henry
Date: Wed, 3 Jan 2024 17:40:39 +0000
Subject: [PATCH 15/51] no message

---
 packages/components/package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/components/package.json b/packages/components/package.json
index 50377e1abba..f3371d26e67 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -52,10 +52,10 @@
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.213",
+        "langchain": "^0.0.214",
         "langfuse": "^1.2.0",
         "langfuse-langchain": "^1.0.31",
-        "langsmith": "^0.0.49",
+        "langsmith": "^0.0.53",
         "linkifyjs": "^4.1.1",
         "llmonitor": "^0.5.5",
         "mammoth": "^1.5.1",

From c035363d6f5558c55e0ccf4308e3c87d80715e6e Mon Sep 17 00:00:00 2001
From: Darien Kindlund
Date: Wed, 3 Jan 2024 13:20:39 -0500
Subject: [PATCH 16/51] Fixing linting issues using 'yarn lint-fix'

---
 .../nodes/documentloaders/Airtable/Airtable.ts | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/packages/components/nodes/documentloaders/Airtable/Airtable.ts b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
index a2c1eef3fd2..a7cd5021e69 100644
--- a/packages/components/nodes/documentloaders/Airtable/Airtable.ts
+++ b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
@@ -55,14 +55,14 @@ class Airtable_DocumentLoaders implements INode {
                 description:
                     'If your table URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, tblJdmvbrgizbYICO is the table id'
             },
-                {
-                    label: 'View Id',
-                    name: 'viewId',
-                    type: 'string',
-                    placeholder: 'viw9UrP77Id0CE4ee',
-                    description:
-                        'If your view URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, viw9UrP77Id0CE4ee is the view id',
-                    optional: true
+            {
+                label: 'View Id',
+                name: 'viewId',
+                type: 'string',
+                placeholder: 'viw9UrP77Id0CE4ee',
+                description:
+                    'If your view URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, viw9UrP77Id0CE4ee is the view id',
+                optional: true
             },
             {
                 label: 'Return All',

From 53bfd07694b6bd6e1783dae08f7ebaa20ffa4f05 Mon Sep 17 00:00:00 2001
From: Darien Kindlund
Date: Wed, 3 Jan 2024 21:23:43 -0500
Subject: [PATCH 17/51] Bumping version to reflect new feature

---
 packages/components/nodes/documentloaders/Airtable/Airtable.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/components/nodes/documentloaders/Airtable/Airtable.ts b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
index a7cd5021e69..9a824ac9a62 100644
--- a/packages/components/nodes/documentloaders/Airtable/Airtable.ts
+++ b/packages/components/nodes/documentloaders/Airtable/Airtable.ts
@@ -20,7 +20,7 @@ class Airtable_DocumentLoaders implements INode {
     constructor() {
         this.label = 'Airtable'
         this.name = 'airtable'
-        this.version = 1.0
+        this.version = 2.0
        this.type = 'Document'
         this.icon = 'airtable.svg'
         this.category = 'Document Loaders'

From efe602970ce2df8ef1b4f186f3ea3f42ff217d6c Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 14:19:46 +0100
Subject: [PATCH 18/51] Added ssl flag for postgres vectorstores

---
 .../nodes/vectorstores/Postgres/Postgres_Exisiting.ts | 11 ++++++++++-
 .../nodes/vectorstores/Postgres/Postgres_Upsert.ts    | 11 ++++++++++-
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts b/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts
index 99794a0def9..da3b1d18f62 100644
--- a/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts
+++ b/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts
@@ -52,6 +52,13 @@ class Postgres_Existing_VectorStores implements INode {
                 name: 'database',
                 type: 'string'
             },
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
             {
                 label: 'Port',
                 name: 'port',
@@ -109,6 +116,7 @@ class Postgres_Existing_VectorStores implements INode {
         const output = nodeData.outputs?.output as string
         const topK = nodeData.inputs?.topK as string
         const k = topK ? parseFloat(topK) : 4
+        const sslConnection = nodeData.inputs?.sslConnection as boolean
 
         let additionalConfiguration = {}
         if (additionalConfig) {
@@ -126,7 +134,8 @@ class Postgres_Existing_VectorStores implements INode {
             port: nodeData.inputs?.port as number,
             username: user,
             password: password,
-            database: nodeData.inputs?.database as string
+            database: nodeData.inputs?.database as string,
+            ssl: sslConnection
         }
 
         const args = {
diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts b/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts
index f706cbe827c..25551517bf4 100644
--- a/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts
+++ b/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts
@@ -59,6 +59,13 @@ class PostgresUpsert_VectorStores implements INode {
                 name: 'database',
                 type: 'string'
             },
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
             {
                 label: 'Port',
                 name: 'port',
@@ -117,6 +124,7 @@ class PostgresUpsert_VectorStores implements INode {
         const output = nodeData.outputs?.output as string
         const topK = nodeData.inputs?.topK as string
         const k = topK ? parseFloat(topK) : 4
+        const sslConnection = nodeData.inputs?.sslConnection as boolean
 
         let additionalConfiguration = {}
         if (additionalConfig) {
@@ -134,7 +142,8 @@ class PostgresUpsert_VectorStores implements INode {
             port: nodeData.inputs?.port as number,
             username: user,
             password: password,
-            database: nodeData.inputs?.database as string
+            database: nodeData.inputs?.database as string,
+            ssl: sslConnection
         }
 
         const args = {

From b833cb80d4f5adeaed612db5159dd8f727935153 Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 14:51:41 +0100
Subject: [PATCH 19/51] Added ssl flag

---
 .../nodes/vectorstores/Postgres/Postgres.ts | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts
index ac4b80c3c1b..75f9669acb7 100644
--- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts
+++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts
@@ -60,6 +60,13 @@ class Postgres_VectorStores implements INode {
                 name: 'database',
                 type: 'string'
             },
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
             {
                 label: 'Port',
                 name: 'port',
@@ -117,6 +124,7 @@ class Postgres_VectorStores implements INode {
         const docs = nodeData.inputs?.document as Document[]
         const embeddings = nodeData.inputs?.embeddings as Embeddings
         const additionalConfig = nodeData.inputs?.additionalConfig as string
+        const sslConnection = nodeData.inputs?.sslConnection as boolean
 
         let additionalConfiguration = {}
         if (additionalConfig) {
@@ -134,7 +142,8 @@ class Postgres_VectorStores implements INode {
             port: nodeData.inputs?.port as number,
             username: user,
             password: password,
-            database: nodeData.inputs?.database as string
+            database: nodeData.inputs?.database as string,
+            ssl: sslConnection
         }
 
         const args = {

From 609ae8703de416c1a62dab565f7e7aa13177b333 Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 15:58:18 +0100
Subject: [PATCH 20/51] Added a function which checks which port to use when
 port is 443 or 80

---
 .../nodes/vectorstores/Qdrant/Qdrant.ts | 29 +++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)

diff --git a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
index 6413f8bf87e..390e7fc9ea5 100644
--- a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
+++ b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
@@ -149,9 +149,12 @@ class Qdrant_VectorStores implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
 
+        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl);
+
         const client = new QdrantClient({
             url: qdrantServerUrl,
-            apiKey: qdrantApiKey
+            apiKey: qdrantApiKey,
+            port: port
         })
 
         const flattenDocs = docs && docs.length ? flatten(docs) : []
@@ -198,9 +201,12 @@ class Qdrant_VectorStores implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
 
+        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl);
+
         const client = new QdrantClient({
             url: qdrantServerUrl,
-            apiKey: qdrantApiKey
+            apiKey: qdrantApiKey,
+            port: port
         })
 
         const dbConfig: QdrantLibArgs = {
@@ -242,6 +248,25 @@ class Qdrant_VectorStores implements INode {
         }
         return vectorStore
     }
+
+    /**
+     * Determine the port number from the given URL.
+     *
+     * The problem is when not doing this the qdrant-client.js will fall back on 6663 when you enter a port 443 and 80.
+     * See: https://stackoverflow.com/questions/59104197/nodejs-new-url-urlhttps-myurl-com80-lists-the-port-as-empty
+     * @param qdrantServerUrl the url to get the port from
+     */
+    static determinePortByUrl(qdrantServerUrl: string) :number {
+        let port = 6333;
+        const parsedUrl = new URL(qdrantServerUrl);
+        if (parsedUrl.protocol === 'https:' && parsedUrl.port === '') {
+            port = 443;
+        }
+        if (parsedUrl.protocol === 'http:' && parsedUrl.port === '') {
+            port = 80;
+        }
+        return port;
+    }
 }
 
 module.exports = { nodeClass: Qdrant_VectorStores }

From 2355cb2ec5c83fe7302c919d89361cd33db35fff Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 16:07:54 +0100
Subject: [PATCH 21/51] Fixed port handling so it returns the correct port and
 not only 6663

---
 packages/components/nodes/vectorstores/Qdrant/Qdrant.ts | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
index 390e7fc9ea5..54b55d34cd2 100644
--- a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
+++ b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
@@ -257,14 +257,18 @@ class Qdrant_VectorStores implements INode {
      * @param qdrantServerUrl the url to get the port from
      */
     static determinePortByUrl(qdrantServerUrl: string) :number {
-        let port = 6333;
         const parsedUrl = new URL(qdrantServerUrl);
+
+        let port = parsedUrl.port ? parseInt(parsedUrl.port) : 6663
+
         if (parsedUrl.protocol === 'https:' && parsedUrl.port === '') {
             port = 443;
         }
         if (parsedUrl.protocol === 'http:' && parsedUrl.port === '') {
             port = 80;
         }
+
+
         return port;
     }
 }

From a046d5296176b0649dcc6abd9d29f8ef8e003a4e Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 16:23:08 +0100
Subject: [PATCH 22/51] Make the linter happy

---
 .../components/nodes/vectorstores/Qdrant/Qdrant.ts | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
index 54b55d34cd2..5e01b030ddc 100644
--- a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
+++ b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
@@ -149,7 +149,7 @@ class Qdrant_VectorStores implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
 
-        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl);
+        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl)
 
         const client = new QdrantClient({
             url: qdrantServerUrl,
@@ -201,7 +201,7 @@ class Qdrant_VectorStores implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
 
-        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl);
+        const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl)
 
         const client = new QdrantClient({
             url: qdrantServerUrl,
@@ -256,20 +256,20 @@ class Qdrant_VectorStores implements INode {
      * See: https://stackoverflow.com/questions/59104197/nodejs-new-url-urlhttps-myurl-com80-lists-the-port-as-empty
      * @param qdrantServerUrl the url to get the port from
      */
-    static determinePortByUrl(qdrantServerUrl: string) :number {
-        const parsedUrl = new URL(qdrantServerUrl);
+    static determinePortByUrl(qdrantServerUrl: string): number {
+        const parsedUrl = new URL(qdrantServerUrl)
 
         let port = parsedUrl.port ? parseInt(parsedUrl.port) : 6663
 
         if (parsedUrl.protocol === 'https:' && parsedUrl.port === '') {
-            port = 443;
+            port = 443
         }
         if (parsedUrl.protocol === 'http:' && parsedUrl.port === '') {
-            port = 80;
+            port = 80
         }
 
-        return port;
+        return port
     }
 }

From e35faa57afb5243fe4b011d408b1d96eee263622 Mon Sep 17 00:00:00 2001
From: tuxBurner
Date: Thu, 4 Jan 2024 16:27:53 +0100
Subject: [PATCH 23/51] One last linting

---
 packages/components/nodes/vectorstores/Qdrant/Qdrant.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
index 5e01b030ddc..e07b728a57f 100644
--- a/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
+++ b/packages/components/nodes/vectorstores/Qdrant/Qdrant.ts
@@ -268,7 +268,6 @@ class Qdrant_VectorStores implements INode {
             port = 80
         }
-
         return port
     }
 }

From d882ebfcb6a12f6fb51c5cda276e70b6fafa2f1b Mon Sep 17 00:00:00 2001
From: Henry
Date: Thu, 4 Jan 2024 23:57:36 +0000
Subject: [PATCH 24/51] remove restrictions

---
 LICENSE.md | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/LICENSE.md b/LICENSE.md
index 0f4afcd1189..80800001864 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -2,22 +2,6 @@
 Version 2.0, January 2004
 http://www.apache.org/licenses/
 
-Flowise is governed by the Apache License 2.0, with additional terms and conditions outlined below:
-
-Flowise can be used for commercial purposes for "backend-as-a-service" for your applications or as a development platform for enterprises. However, under specific conditions, you must reach out to the project's administrators to secure a commercial license:
-
-a. Multi-tenant SaaS service: Unless you have explicit written authorization from Flowise, you may not utilize the Flowise source code to operate a multi-tenant SaaS service that closely resembles the Flowise cloud-based services.
-b. Logo and copyright information: While using Flowise in commercial application, you are prohibited from removing or altering the LOGO or copyright information displayed in the Flowise console and UI.
-
-For inquiries regarding licensing matters, please contact hello@flowiseai.com via email.
-
-Contributors are required to consent to the following terms related to their contributed code:
-
-a. The project maintainers have the authority to modify the open-source agreement to be more stringent or lenient.
-b. Contributed code can be used for commercial purposes, including Flowise's cloud-based services.
-
-All other rights and restrictions are in accordance with the Apache License 2.0.
-
 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
 1. Definitions.
From 8cb939386210a62484cdf4bf5b11a7c455eafaa1 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 5 Jan 2024 02:46:36 +0000 Subject: [PATCH 25/51] add fix for passing json variable --- .../nodes/utilities/CustomFunction/CustomFunction.ts | 2 +- packages/server/src/utils/index.ts | 6 +++++- packages/ui/src/ui-component/dialog/ExpandTextDialog.js | 6 +++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts index b358b24b3ab..37511e476ff 100644 --- a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts +++ b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts @@ -65,7 +65,7 @@ class CustomFunction_Utilities implements INode { inputVars = typeof functionInputVariablesRaw === 'object' ? functionInputVariablesRaw : JSON.parse(functionInputVariablesRaw) } catch (exception) { - throw new Error("Invalid JSON in the PromptTemplate's promptValues: " + exception) + throw new Error('Invalid JSON in the Custom Function Input Variables: ' + exception) } } diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index e7a35c82098..9c2d1d79def 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -561,7 +561,11 @@ export const getVariableValue = ( variablePaths.forEach((path) => { const variableValue = variableDict[path] // Replace all occurrence - returnVal = returnVal.split(path).join(variableValue) + if (typeof variableValue === 'object') { + returnVal = returnVal.split(path).join(JSON.stringify(variableValue).replace(/"/g, '\\"')) + } else { + returnVal = returnVal.split(path).join(variableValue) + } }) return returnVal } diff --git a/packages/ui/src/ui-component/dialog/ExpandTextDialog.js b/packages/ui/src/ui-component/dialog/ExpandTextDialog.js index 0ef70e29e9e..f4fdb9f9eed 100644 --- a/packages/ui/src/ui-component/dialog/ExpandTextDialog.js +++ b/packages/ui/src/ui-component/dialog/ExpandTextDialog.js @@ -67,7 +67,11 @@ const ExpandTextDialog = ({ show, dialogProps, onCancel, onConfirm }) => { useEffect(() => { if (executeCustomFunctionNodeApi.data) { - setCodeExecutedResult(executeCustomFunctionNodeApi.data) + if (typeof executeCustomFunctionNodeApi.data === 'object') { + setCodeExecutedResult(JSON.stringify(executeCustomFunctionNodeApi.data, null, 2)) + } else { + setCodeExecutedResult(executeCustomFunctionNodeApi.data) + } } }, [executeCustomFunctionNodeApi.data]) From 36ce6b7a853f8b8f507cf773373134722f4fe063 Mon Sep 17 00:00:00 2001 From: fanux Date: Fri, 5 Jan 2024 18:17:41 +0800 Subject: [PATCH 26/51] add one-click deploy on sealos --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 25026237f76..6e2ade7ded4 100644 --- a/README.md +++ b/README.md @@ -161,6 +161,10 @@ Flowise support different environment variables to configure your instance. 
You HuggingFace Spaces +### Sealos + +[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + ### [AWS](https://docs.flowiseai.com/deployment/aws) ### [Azure](https://docs.flowiseai.com/deployment/azure) From d8a778e4d989c87b18b24d5d35c52211c53e532a Mon Sep 17 00:00:00 2001 From: tuxBurner Date: Fri, 5 Jan 2024 12:46:47 +0100 Subject: [PATCH 27/51] Bumped version from 1.0 to 2.0 --- packages/components/nodes/vectorstores/Postgres/Postgres.ts | 2 +- .../nodes/vectorstores/Postgres/Postgres_Exisiting.ts | 2 +- .../components/nodes/vectorstores/Postgres/Postgres_Upsert.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 75f9669acb7..4e8bae32ba7 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode { constructor() { this.label = 'Postgres' this.name = 'postgres' - this.version = 1.0 + this.version = 2.0 this.type = 'Postgres' this.icon = 'postgres.svg' this.category = 'Vector Stores' diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts b/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts index da3b1d18f62..3fa8a1078e9 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres_Exisiting.ts @@ -23,7 +23,7 @@ class Postgres_Existing_VectorStores implements INode { constructor() { this.label = 'Postgres Load Existing Index' this.name = 'postgresExistingIndex' - this.version = 1.0 + this.version = 2.0 this.type = 'Postgres' this.icon = 'postgres.svg' this.category = 'Vector Stores' diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts b/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts index 25551517bf4..d26a642ddbf 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres_Upsert.ts @@ -24,7 +24,7 @@ class PostgresUpsert_VectorStores implements INode { constructor() { this.label = 'Postgres Upsert Document' this.name = 'postgresUpsert' - this.version = 1.0 + this.version = 2.0 this.type = 'Postgres' this.icon = 'postgres.svg' this.category = 'Vector Stores' From 595f1ed7f2ec634d25386b29d37febc1816abfb1 Mon Sep 17 00:00:00 2001 From: Keith Kacsh Date: Sat, 6 Jan 2024 17:16:06 -0700 Subject: [PATCH 28/51] Introduce new credential for LocalAI, Pass optional auth to LocalAI, New env var --- .../credentials/LcoalAIApi.credential.ts | 23 +++++++++++++++ .../chatmodels/ChatLocalAI/ChatLocalAI.ts | 29 +++++++++++++++---- packages/server/.env.example | 2 ++ 3 files changed, 48 insertions(+), 6 deletions(-) create mode 100644 packages/components/credentials/LcoalAIApi.credential.ts diff --git a/packages/components/credentials/LcoalAIApi.credential.ts b/packages/components/credentials/LcoalAIApi.credential.ts new file mode 100644 index 00000000000..624e07fa46f --- /dev/null +++ b/packages/components/credentials/LcoalAIApi.credential.ts @@ -0,0 +1,23 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class LocalAIApi implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + 
constructor() { + this.label = 'LocalAI API' + this.name = 'LocalAIApi' + this.version = 1.0 + this.inputs = [ + { + label: 'LocalAI Api Key', + name: 'LocalAIApiKey', + type: 'password' + } + ] + } +} + +module.exports = { credClass: LocalAIApi } diff --git a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts index 18ed409bf7b..258db1f805c 100644 --- a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts +++ b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts @@ -1,5 +1,5 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { OpenAIChat } from 'langchain/llms/openai' import { OpenAIChatInput } from 'langchain/chat_models/openai' import { BaseCache } from 'langchain/schema' @@ -14,6 +14,7 @@ class ChatLocalAI_ChatModels implements INode { category: string description: string baseClasses: string[] + credential: INodeParams inputs: INodeParams[] constructor() { @@ -25,6 +26,16 @@ class ChatLocalAI_ChatModels implements INode { this.category = 'Chat Models' this.description = 'Use local LLMs like llama.cpp, gpt4all using LocalAI' this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(OpenAIChat)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['LocalAIApi'], + optional: true + } + + const modelOptions = JSON.parse(process.env.LOCALAI_CHAT_MODELS || '[]'); + this.inputs = [ { label: 'Cache', @@ -41,8 +52,10 @@ class ChatLocalAI_ChatModels implements INode { { label: 'Model Name', name: 'modelName', - type: 'string', - placeholder: 'gpt4all-lora-quantized.bin' + type: 'options', + options: modelOptions, + default: modelOptions.length > 0 ? modelOptions[0].name : '', + optional: true }, { label: 'Temperature', @@ -79,19 +92,23 @@ class ChatLocalAI_ChatModels implements INode { ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const temperature = nodeData.inputs?.temperature as string const modelName = nodeData.inputs?.modelName as string const maxTokens = nodeData.inputs?.maxTokens as string const topP = nodeData.inputs?.topP as string const timeout = nodeData.inputs?.timeout as string const basePath = nodeData.inputs?.basePath as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData) + const cache = nodeData.inputs?.cache as BaseCache const obj: Partial & BaseLLMParams & { openAIApiKey?: string } = { temperature: parseFloat(temperature), modelName, - openAIApiKey: 'sk-' + openAIApiKey } if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) diff --git a/packages/server/.env.example b/packages/server/.env.example index 6e746a4df4e..9b7be0ff899 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -26,3 +26,5 @@ PORT=3000 # LANGCHAIN_ENDPOINT=https://api.smith.langchain.com # LANGCHAIN_API_KEY=your_api_key # LANGCHAIN_PROJECT=your_project + +# LOCALAI_CHAT_MODELS='[{"label": "model1", "name": "model1"}, {"label": "model2", "name": "model2"}]' From accea214d22356f780428f0d40a00b9d3353904f Mon Sep 17 00:00:00 2001 From: Keith Kacsh Date: Sat, 6 Jan 2024 17:33:41 -0700 Subject: [PATCH 29/51] Updating docs --- CONTRIBUTING.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 04cb80b4d61..2c91906c9c5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -141,6 +141,7 @@ Flowise support different environment variables to configure your instance. You | DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | | SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | | FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | +| LOCALAI_CHAT_MODELS | JSON-encoded string representing an array of chat models for LocalAI. Each object in the array should have a 'label' and 'name' property. | String | '[]' (Empty Array) | You can also specify the env variables when using `npx`. 
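A plausible invocation, assuming CLI flags map onto env variables the way the documented `--PORT` flag does: `npx flowise start --PORT=3000 --LOCALAI_CHAT_MODELS='[{"label": "model1", "name": "model1"}]'`.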
For example: From 02482f1b3862779ea14e8a335f4299c76030a21e Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 8 Jan 2024 13:02:56 +0000 Subject: [PATCH 30/51] change agent/chain with memory to use runnable --- .../ConversationalAgent.ts | 159 +++-- .../ConversationalRetrievalAgent.ts | 127 ++-- .../agents/OpenAIAssistant/OpenAIAssistant.ts | 73 ++- .../OpenAIFunctionAgent.ts | 273 +------- .../ConversationChain/ConversationChain.ts | 146 +++-- .../ConversationalRetrievalQAChain.ts | 370 +++++++---- .../ConversationalRetrievalQAChain/prompts.ts | 65 +- .../nodes/memory/BufferMemory/BufferMemory.ts | 33 +- .../BufferWindowMemory/BufferWindowMemory.ts | 34 +- .../ConversationSummaryMemory.ts | 42 +- .../nodes/memory/DynamoDb/DynamoDb.ts | 49 +- .../memory/MongoDBMemory/MongoDBMemory.ts | 49 +- .../memory/MotorheadMemory/MotorheadMemory.ts | 92 ++- .../RedisBackedChatMemory.ts | 70 +- .../UpstashRedisBackedChatMemory.ts | 49 +- .../nodes/memory/ZepMemory/ZepMemory.ts | 46 +- .../nodes/tools/CustomTool/CustomTool.ts | 8 +- .../components/nodes/tools/CustomTool/core.ts | 17 +- packages/components/package.json | 1 + packages/components/src/Interface.ts | 28 +- packages/components/src/agents.ts | 615 ++++++++++++++++++ .../marketplaces/chatflows/API Agent.json | 2 +- .../chatflows/Chat with a Podcast.json | 56 +- .../marketplaces/chatflows/Claude LLM.json | 2 +- .../chatflows/Conversational Agent.json | 2 +- .../Conversational Retrieval QA Chain.json | 62 +- .../chatflows/Flowise Docs QnA.json | 61 +- .../marketplaces/chatflows/Local QnA.json | 61 +- .../chatflows/Long Term Memory.json | 63 +- .../chatflows/Metadata Filter.json | 61 +- .../chatflows/Multiple VectorDB.json | 2 +- .../chatflows/Simple Conversation Chain.json | 2 +- .../chatflows/Vectara LLM Chain Upload.json | 55 +- .../marketplaces/chatflows/WebBrowser.json | 2 +- .../marketplaces/chatflows/WebPage QnA.json | 63 +- packages/server/src/index.ts | 98 ++- packages/server/src/utils/index.ts | 175 ++--- .../ui/src/views/canvas/NodeInputHandler.js | 3 +- 38 files changed, 1737 insertions(+), 1379 deletions(-) create mode 100644 packages/components/src/agents.ts diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 8a2329b584d..7f857b1caad 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -1,11 +1,14 @@ -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents' import { Tool } from 'langchain/tools' -import { BaseChatMemory } from 'langchain/memory' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' import { BaseChatModel } from 'langchain/chat_models/base' import { flatten } from 'lodash' -import { additionalCallbacks } from '../../../src/handler' +import { AgentStep, BaseMessage, ChainValues, AIMessage, HumanMessage } from 'langchain/schema' +import { RunnableSequence } from 'langchain/schema/runnable' +import { getBaseClasses } from '../../../src/utils' +import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { AgentExecutor } from '../../../src/agents' +import { 
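    // ChatConversationalAgent supplies createPrompt() and getDefaultOutputParser() for the runnable agent below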
ChatConversationalAgent } from 'langchain/agents' +import { renderTemplate } from '@langchain/core/prompts' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -15,6 +18,15 @@ Assistant is constantly learning and improving, and its capabilities are constan Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.` +const TEMPLATE_TOOL_RESPONSE = `TOOL RESPONSE: +--------------------- +{observation} + +USER'S INPUT +-------------------- + +Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.` + class ConversationalAgent_Agents implements INode { label: string name: string @@ -25,8 +37,9 @@ class ConversationalAgent_Agents implements INode { category: string baseClasses: string[] inputs: INodeParams[] + sessionId?: string - constructor() { + constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Agent' this.name = 'conversationalAgent' this.version = 2.0 @@ -43,7 +56,7 @@ class ConversationalAgent_Agents implements INode { list: true }, { - label: 'Language Model', + label: 'Chat Model', name: 'model', type: 'BaseChatModel' }, @@ -62,52 +75,114 @@ class ConversationalAgent_Agents implements INode { additionalParams: true } ] + this.sessionId = fields?.sessionId } - async init(nodeData: INodeData): Promise { - const model = nodeData.inputs?.model as BaseChatModel - let tools = nodeData.inputs?.tools as Tool[] - tools = flatten(tools) - const memory = nodeData.inputs?.memory as BaseChatMemory - const systemMessage = nodeData.inputs?.systemMessage as string + async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { + return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + } - const obj: InitializeAgentExecutorOptions = { - agentType: 'chat-conversational-react-description', - verbose: process.env.DEBUG === 'true' ? 
true : false - } + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const memory = nodeData.inputs?.memory as FlowiseMemory + const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) - const agentArgs: any = {} - if (systemMessage) { - agentArgs.systemMessage = systemMessage - } + const loggerHandler = new ConsoleCallbackHandler(options.logger) + const callbacks = await additionalCallbacks(nodeData, options) - if (Object.keys(agentArgs).length) obj.agentArgs = agentArgs + let res: ChainValues = {} - const executor = await initializeAgentExecutorWithOptions(tools, model, obj) - executor.memory = memory - return executor - } - - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { - const executor = nodeData.instance as AgentExecutor - const memory = nodeData.inputs?.memory as BaseChatMemory - - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - executor.memory = memory - } + if (options.socketIO && options.socketIOClientId) { + const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) + res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) + } else { + res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] }) } - ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: res?.output, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return res?.output + } +} - const callbacks = await additionalCallbacks(nodeData, options) +const prepareAgent = async ( + nodeData: INodeData, + flowObj: { sessionId?: string; chatId?: string; input?: string }, + chatHistory: IMessage[] = [] +) => { + const model = nodeData.inputs?.model as BaseChatModel + let tools = nodeData.inputs?.tools as Tool[] + tools = flatten(tools) + const memory = nodeData.inputs?.memory as FlowiseMemory + const systemMessage = nodeData.inputs?.systemMessage as string + const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history' + const inputKey = memory.inputKey ? memory.inputKey : 'input' + + /** Bind a stop token to the model */ + const modelWithStop = model.bind({ + stop: ['\nObservation'] + }) + + const outputParser = ChatConversationalAgent.getDefaultOutputParser({ + llm: model, + toolNames: tools.map((tool) => tool.name) + }) + + const prompt = ChatConversationalAgent.createPrompt(tools, { + systemMessage: systemMessage ? systemMessage : DEFAULT_PREFIX, + outputParser + }) + + const runnableAgent = RunnableSequence.from([ + { + [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input, + agent_scratchpad: async (i: { input: string; steps: AgentStep[] }) => await constructScratchPad(i.steps), + [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => { + const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[] + return messages ?? [] + } + }, + prompt, + modelWithStop, + outputParser + ]) + + const executor = AgentExecutor.fromAgentAndTools({ + agent: runnableAgent, + tools, + sessionId: flowObj?.sessionId, + chatId: flowObj?.chatId, + input: flowObj?.input, + verbose: process.env.DEBUG === 'true' ? 
true : false + }) + + return executor +} - const result = await executor.call({ input }, [...callbacks]) - return result?.output +const constructScratchPad = async (steps: AgentStep[]): Promise => { + const thoughts: BaseMessage[] = [] + for (const step of steps) { + thoughts.push(new AIMessage(step.action.log)) + thoughts.push( + new HumanMessage( + renderTemplate(TEMPLATE_TOOL_RESPONSE, 'f-string', { + observation: step.observation + }) + ) + ) } + return thoughts } module.exports = { nodeClass: ConversationalAgent_Agents } diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index 643c6a658db..406a156ffe6 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -1,9 +1,14 @@ -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema' import { flatten } from 'lodash' -import { BaseChatMemory } from 'langchain/memory' +import { ChatOpenAI } from 'langchain/chat_models/openai' +import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts' +import { formatToOpenAIFunction } from 'langchain/tools' +import { RunnableSequence } from 'langchain/schema/runnable' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser' +import { AgentExecutor, formatAgentSteps } from '../../../src/agents' const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.` @@ -17,8 +22,9 @@ class ConversationalRetrievalAgent_Agents implements INode { category: string baseClasses: string[] inputs: INodeParams[] + sessionId?: string - constructor() { + constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Retrieval Agent' this.name = 'conversationalRetrievalAgent' this.version = 3.0 @@ -54,55 +60,96 @@ class ConversationalRetrievalAgent_Agents implements INode { additionalParams: true } ] + this.sessionId = fields?.sessionId } - async init(nodeData: INodeData): Promise { - const model = nodeData.inputs?.model - const memory = nodeData.inputs?.memory as BaseChatMemory - const systemMessage = nodeData.inputs?.systemMessage as string - - let tools = nodeData.inputs?.tools - tools = flatten(tools) - - const executor = await initializeAgentExecutorWithOptions(tools, model, { - agentType: 'openai-functions', - verbose: process.env.DEBUG === 'true' ? true : false, - agentArgs: { - prefix: systemMessage ?? 
defaultMessage - }, - returnIntermediateSteps: true - }) - executor.memory = memory - return executor + async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { + return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { - const executor = nodeData.instance as AgentExecutor - - if (executor.memory) { - ;(executor.memory as any).memoryKey = 'chat_history' - ;(executor.memory as any).outputKey = 'output' - ;(executor.memory as any).returnMessages = true - - const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - ;(executor.memory as any).chatHistory = mapChatHistory(options) - } - } + const memory = nodeData.inputs?.memory as FlowiseMemory + const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) + let res: ChainValues = {} + if (options.socketIO && options.socketIOClientId) { const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) - const result = await executor.call({ input }, [loggerHandler, handler, ...callbacks]) - return result?.output + res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) } else { - const result = await executor.call({ input }, [loggerHandler, ...callbacks]) - return result?.output + res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] }) } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: res?.output, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return res?.output } } +const prepareAgent = ( + nodeData: INodeData, + flowObj: { sessionId?: string; chatId?: string; input?: string }, + chatHistory: IMessage[] = [] +) => { + const model = nodeData.inputs?.model as ChatOpenAI + const memory = nodeData.inputs?.memory as FlowiseMemory + const systemMessage = nodeData.inputs?.systemMessage as string + let tools = nodeData.inputs?.tools + tools = flatten(tools) + const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history' + const inputKey = memory.inputKey ? memory.inputKey : 'input' + + const prompt = ChatPromptTemplate.fromMessages([ + ['ai', systemMessage ? systemMessage : defaultMessage], + new MessagesPlaceholder(memoryKey), + ['human', `{${inputKey}}`], + new MessagesPlaceholder('agent_scratchpad') + ]) + + const modelWithFunctions = model.bind({ + functions: [...tools.map((tool: any) => formatToOpenAIFunction(tool))] + }) + + const runnableAgent = RunnableSequence.from([ + { + [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input, + agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps), + [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => { + const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[] + return messages ?? 
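                // a fresh session has no stored messages yet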
[] + } + }, + prompt, + modelWithFunctions, + new OpenAIFunctionsAgentOutputParser() + ]) + + const executor = AgentExecutor.fromAgentAndTools({ + agent: runnableAgent, + tools, + sessionId: flowObj?.sessionId, + chatId: flowObj?.chatId, + input: flowObj?.input, + returnIntermediateSteps: true, + verbose: process.env.DEBUG === 'true' ? true : false + }) + + return executor +} + module.exports = { nodeClass: ConversationalRetrievalAgent_Agents } diff --git a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts index cf69022ba91..62ecec5b03c 100644 --- a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts +++ b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts @@ -96,45 +96,51 @@ class OpenAIAssistant_Agents implements INode { return null } - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const selectedAssistantId = nodeData.inputs?.selectedAssistant as string - const appDataSource = options.appDataSource as DataSource - const databaseEntities = options.databaseEntities as IDatabaseEntity - let sessionId = nodeData.inputs?.sessionId as string + async clearChatMessages(nodeData: INodeData, options: ICommonObject, sessionIdObj: { type: string; id: string }): Promise { + const selectedAssistantId = nodeData.inputs?.selectedAssistant as string + const appDataSource = options.appDataSource as DataSource + const databaseEntities = options.databaseEntities as IDatabaseEntity - const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({ - id: selectedAssistantId - }) + const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({ + id: selectedAssistantId + }) - if (!assistant) { - options.logger.error(`Assistant ${selectedAssistantId} not found`) - return - } + if (!assistant) { + options.logger.error(`Assistant ${selectedAssistantId} not found`) + return + } - if (!sessionId && options.chatId) { - const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({ - chatId: options.chatId - }) - if (!chatmsg) { - options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`) - return - } - sessionId = chatmsg.sessionId - } + if (!sessionIdObj) return - const credentialData = await getCredentialData(assistant.credential ?? '', options) - const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) - if (!openAIApiKey) { - options.logger.error(`OpenAI ApiKey not found`) + let sessionId = '' + if (sessionIdObj.type === 'chatId') { + const chatId = sessionIdObj.id + const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({ + chatId + }) + if (!chatmsg) { + options.logger.error(`Chat Message with Chat Id: ${chatId} not found`) return } + sessionId = chatmsg.sessionId + } else if (sessionIdObj.type === 'threadId') { + sessionId = sessionIdObj.id + } - const openai = new OpenAI({ apiKey: openAIApiKey }) - options.logger.info(`Clearing OpenAI Thread ${sessionId}`) + const credentialData = await getCredentialData(assistant.credential ?? 
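        // fall back to an empty credential id when the assistant has none attached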
'', options)
+        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+        if (!openAIApiKey) {
+            options.logger.error(`OpenAI ApiKey not found`)
+            return
+        }
+
+        const openai = new OpenAI({ apiKey: openAIApiKey })
+        options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+        try {
             if (sessionId) await openai.beta.threads.del(sessionId)
             options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
+        } catch (e) {
+            throw new Error(e)
+        }
     }
 
@@ -297,7 +303,11 @@ class OpenAIAssistant_Agents implements INode {
                         options.socketIO.to(options.socketIOClientId).emit('tool', tool.name)
 
                     try {
-                        const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, threadId)
+                        const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
+                            sessionId: threadId,
+                            chatId: options.chatId,
+                            input
+                        })
                         await analyticHandlers.onToolEnd(toolIds, toolOutput)
                         submitToolOutputs.push({
                             tool_call_id: actions[i].toolCallId,
@@ -462,6 +472,7 @@ class OpenAIAssistant_Agents implements INode {
         const imageRegex = /<img[^>]*\/>/g
         let llmOutput = returnVal.replace(imageRegex, '')
         llmOutput = llmOutput.replace('<br/>
', '') + await analyticHandlers.onLLMEnd(llmIds, llmOutput) await analyticHandlers.onChainEnd(parentIds, messageData, true) diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts index c0095cee16b..135121d25c4 100644 --- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts +++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts @@ -1,17 +1,14 @@ -import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { AgentExecutor as LCAgentExecutor, AgentExecutorInput } from 'langchain/agents' -import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema' -import { OutputParserException } from 'langchain/schema/output_parser' -import { CallbackManagerForChainRun } from 'langchain/callbacks' -import { formatToOpenAIFunction } from 'langchain/tools' -import { ToolInputParsingException, Tool } from '@langchain/core/tools' +import { ChainValues, AgentStep, BaseMessage } from 'langchain/schema' import { getBaseClasses } from '../../../src/utils' import { flatten } from 'lodash' import { RunnableSequence } from 'langchain/schema/runnable' +import { formatToOpenAIFunction } from 'langchain/tools' +import { ChatOpenAI } from 'langchain/chat_models/openai' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts' -import { ChatOpenAI } from 'langchain/chat_models/openai' import { OpenAIFunctionsAgentOutputParser } from 'langchain/agents/openai/output_parser' +import { AgentExecutor, formatAgentSteps } from '../../../src/agents' class OpenAIFunctionAgent_Agents implements INode { label: string @@ -25,7 +22,7 @@ class OpenAIFunctionAgent_Agents implements INode { inputs: INodeParams[] sessionId?: string - constructor(fields: { sessionId?: string }) { + constructor(fields?: { sessionId?: string }) { this.label = 'OpenAI Function Agent' this.name = 'openAIFunctionAgent' this.version = 3.0 @@ -33,7 +30,7 @@ class OpenAIFunctionAgent_Agents implements INode { this.category = 'Agents' this.icon = 'function.svg' this.description = `An agent that uses Function Calling to pick the tool and args to call` - this.baseClasses = [this.type, ...getBaseClasses(LCAgentExecutor)] + this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)] this.inputs = [ { label: 'Allowed Tools', @@ -63,19 +60,13 @@ class OpenAIFunctionAgent_Agents implements INode { this.sessionId = fields?.sessionId } - async init(nodeData: INodeData): Promise { - const memory = nodeData.inputs?.memory as FlowiseMemory - - const executor = prepareAgent(nodeData, this.sessionId) - if (memory) executor.memory = memory - - return executor + async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { + return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory - - const executor = prepareAgent(nodeData, this.sessionId) + const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) 
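+        // the executor is rebuilt on every run so the current sessionId, chatId and
+        // prior chatHistory are threaded into the runnable agent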
const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) @@ -107,17 +98,11 @@ class OpenAIFunctionAgent_Agents implements INode { } } -const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] => - steps.flatMap(({ action, observation }) => { - if ('messageLog' in action && action.messageLog !== undefined) { - const log = action.messageLog as BaseMessage[] - return log.concat(new FunctionMessage(observation, action.tool)) - } else { - return [new AIMessage(action.log)] - } - }) - -const prepareAgent = (nodeData: INodeData, sessionId?: string) => { +const prepareAgent = ( + nodeData: INodeData, + flowObj: { sessionId?: string; chatId?: string; input?: string }, + chatHistory: IMessage[] = [] +) => { const model = nodeData.inputs?.model as ChatOpenAI const memory = nodeData.inputs?.memory as FlowiseMemory const systemMessage = nodeData.inputs?.systemMessage as string @@ -142,7 +127,7 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => { [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input, agent_scratchpad: (i: { input: string; steps: AgentStep[] }) => formatAgentSteps(i.steps), [memoryKey]: async (_: { input: string; steps: AgentStep[] }) => { - const messages = (await memory.getChatMessages(sessionId, true)) as BaseMessage[] + const messages = (await memory.getChatMessages(flowObj?.sessionId, true, chatHistory)) as BaseMessage[] return messages ?? [] } }, @@ -154,231 +139,13 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => { const executor = AgentExecutor.fromAgentAndTools({ agent: runnableAgent, tools, - sessionId + sessionId: flowObj?.sessionId, + chatId: flowObj?.chatId, + input: flowObj?.input, + verbose: process.env.DEBUG === 'true' ? true : false }) return executor } -type AgentExecutorOutput = ChainValues - -class AgentExecutor extends LCAgentExecutor { - sessionId?: string - - static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string }): AgentExecutor { - const newInstance = new AgentExecutor(fields) - if (fields.sessionId) newInstance.sessionId = fields.sessionId - return newInstance - } - - shouldContinueIteration(iterations: number): boolean { - return this.maxIterations === undefined || iterations < this.maxIterations - } - - async _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise { - const toolsByName = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t])) - - const steps: AgentStep[] = [] - let iterations = 0 - - const getOutput = async (finishStep: AgentFinish): Promise => { - const { returnValues } = finishStep - const additional = await this.agent.prepareForOutput(returnValues, steps) - - if (this.returnIntermediateSteps) { - return { ...returnValues, intermediateSteps: steps, ...additional } - } - await runManager?.handleAgentEnd(finishStep) - return { ...returnValues, ...additional } - } - - while (this.shouldContinueIteration(iterations)) { - let output - try { - output = await this.agent.plan(steps, inputs, runManager?.getChild()) - } catch (e) { - if (e instanceof OutputParserException) { - let observation - let text = e.message - if (this.handleParsingErrors === true) { - if (e.sendToLLM) { - observation = e.observation - text = e.llmOutput ?? 
'' - } else { - observation = 'Invalid or incomplete response' - } - } else if (typeof this.handleParsingErrors === 'string') { - observation = this.handleParsingErrors - } else if (typeof this.handleParsingErrors === 'function') { - observation = this.handleParsingErrors(e) - } else { - throw e - } - output = { - tool: '_Exception', - toolInput: observation, - log: text - } as AgentAction - } else { - throw e - } - } - // Check if the agent has finished - if ('returnValues' in output) { - return getOutput(output) - } - - let actions: AgentAction[] - if (Array.isArray(output)) { - actions = output as AgentAction[] - } else { - actions = [output as AgentAction] - } - - const newSteps = await Promise.all( - actions.map(async (action) => { - await runManager?.handleAgentAction(action) - const tool = action.tool === '_Exception' ? new ExceptionTool() : toolsByName[action.tool?.toLowerCase()] - let observation - try { - // here we need to override Tool call method to include sessionId as parameter - observation = tool - ? // @ts-ignore - await tool.call(action.toolInput, runManager?.getChild(), undefined, this.sessionId) - : `${action.tool} is not a valid tool, try another one.` - } catch (e) { - if (e instanceof ToolInputParsingException) { - if (this.handleParsingErrors === true) { - observation = 'Invalid or incomplete tool input. Please try again.' - } else if (typeof this.handleParsingErrors === 'string') { - observation = this.handleParsingErrors - } else if (typeof this.handleParsingErrors === 'function') { - observation = this.handleParsingErrors(e) - } else { - throw e - } - observation = await new ExceptionTool().call(observation, runManager?.getChild()) - return { action, observation: observation ?? '' } - } - } - return { action, observation: observation ?? '' } - }) - ) - - steps.push(...newSteps) - - const lastStep = steps[steps.length - 1] - const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()] - - if (lastTool?.returnDirect) { - return getOutput({ - returnValues: { [this.agent.returnValues[0]]: lastStep.observation }, - log: '' - }) - } - - iterations += 1 - } - - const finish = await this.agent.returnStoppedResponse(this.earlyStoppingMethod, steps, inputs) - - return getOutput(finish) - } - - async _takeNextStep( - nameToolMap: Record, - inputs: ChainValues, - intermediateSteps: AgentStep[], - runManager?: CallbackManagerForChainRun - ): Promise { - let output - try { - output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild()) - } catch (e) { - if (e instanceof OutputParserException) { - let observation - let text = e.message - if (this.handleParsingErrors === true) { - if (e.sendToLLM) { - observation = e.observation - text = e.llmOutput ?? 
'' - } else { - observation = 'Invalid or incomplete response' - } - } else if (typeof this.handleParsingErrors === 'string') { - observation = this.handleParsingErrors - } else if (typeof this.handleParsingErrors === 'function') { - observation = this.handleParsingErrors(e) - } else { - throw e - } - output = { - tool: '_Exception', - toolInput: observation, - log: text - } as AgentAction - } else { - throw e - } - } - - if ('returnValues' in output) { - return output - } - - let actions: AgentAction[] - if (Array.isArray(output)) { - actions = output as AgentAction[] - } else { - actions = [output as AgentAction] - } - - const result: AgentStep[] = [] - for (const agentAction of actions) { - let observation = '' - if (runManager) { - await runManager?.handleAgentAction(agentAction) - } - if (agentAction.tool in nameToolMap) { - const tool = nameToolMap[agentAction.tool] - try { - // here we need to override Tool call method to include sessionId as parameter - // @ts-ignore - observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, this.sessionId) - } catch (e) { - if (e instanceof ToolInputParsingException) { - if (this.handleParsingErrors === true) { - observation = 'Invalid or incomplete tool input. Please try again.' - } else if (typeof this.handleParsingErrors === 'string') { - observation = this.handleParsingErrors - } else if (typeof this.handleParsingErrors === 'function') { - observation = this.handleParsingErrors(e) - } else { - throw e - } - observation = await new ExceptionTool().call(observation, runManager?.getChild()) - } - } - } else { - observation = `${agentAction.tool} is not a valid tool, try another available tool: ${Object.keys(nameToolMap).join(', ')}` - } - result.push({ - action: agentAction, - observation - }) - } - return result - } -} - -class ExceptionTool extends Tool { - name = '_Exception' - - description = 'Exception tool' - - async _call(query: string) { - return query - } -} - module.exports = { nodeClass: OpenAIFunctionAgent_Agents } diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 54d4252a64f..fcd9921e506 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,14 +1,16 @@ -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConversationChain } from 'langchain/chains' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts' -import { BufferMemory } from 'langchain/memory' import { BaseChatModel } from 'langchain/chat_models/base' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { flatten } from 'lodash' import { Document } from 'langchain/document' +import { RunnableSequence } from 'langchain/schema/runnable' +import { StringOutputParser } from 'langchain/schema/output_parser' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.` +const inputKey = 'input' class ConversationChain_Chains implements INode { label: string @@ -20,8 +22,9 @@ class ConversationChain_Chains implements INode { baseClasses: string[] description: string inputs: INodeParams[] + sessionId?: string - constructor() { + constructor(fields?: { sessionId?: string }) { this.label = 'Conversation Chain' this.name = 'conversationChain' this.version = 1.0 @@ -32,7 +35,7 @@ class ConversationChain_Chains implements INode { this.baseClasses = [this.type, ...getBaseClasses(ConversationChain)] this.inputs = [ { - label: 'Language Model', + label: 'Chat Model', name: 'model', type: 'BaseChatModel' }, @@ -60,76 +63,99 @@ class ConversationChain_Chains implements INode { placeholder: 'You are a helpful assistant that write codes' } ] + this.sessionId = fields?.sessionId } - async init(nodeData: INodeData): Promise { - const model = nodeData.inputs?.model as BaseChatModel - const memory = nodeData.inputs?.memory as BufferMemory - const prompt = nodeData.inputs?.systemMessagePrompt as string - const docs = nodeData.inputs?.document as Document[] - - const flattenDocs = docs && docs.length ? flatten(docs) : [] - const finalDocs = [] - for (let i = 0; i < flattenDocs.length; i += 1) { - if (flattenDocs[i] && flattenDocs[i].pageContent) { - finalDocs.push(new Document(flattenDocs[i])) - } - } - - let finalText = '' - for (let i = 0; i < finalDocs.length; i += 1) { - finalText += finalDocs[i].pageContent - } - - const replaceChar: string[] = ['{', '}'] - for (const char of replaceChar) finalText = finalText.replaceAll(char, '') - - if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}` - - const obj: any = { - llm: model, - memory, - verbose: process.env.DEBUG === 'true' ? true : false - } - - const chatPrompt = ChatPromptTemplate.fromMessages([ - SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage), - new MessagesPlaceholder(memory.memoryKey ?? 
'chat_history'), - HumanMessagePromptTemplate.fromTemplate('{input}') - ]) - obj.prompt = chatPrompt - - const chain = new ConversationChain(obj) + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const chain = prepareChain(nodeData, this.sessionId, options.chatHistory) return chain } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { - const chain = nodeData.instance as ConversationChain - const memory = nodeData.inputs?.memory as BufferMemory - memory.returnMessages = true // Return true for BaseChatModel - - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - } - } - - chain.memory = memory + const memory = nodeData.inputs?.memory + const chain = prepareChain(nodeData, this.sessionId, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) + let res = '' + if (options.socketIO && options.socketIOClientId) { const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) - const res = await chain.call({ input }, [loggerHandler, handler, ...callbacks]) - return res?.response + res = await chain.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) } else { - const res = await chain.call({ input }, [loggerHandler, ...callbacks]) - return res?.response + res = await chain.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] }) + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: res, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return res + } +} + +const prepareChatPrompt = (nodeData: INodeData) => { + const memory = nodeData.inputs?.memory as FlowiseMemory + const prompt = nodeData.inputs?.systemMessagePrompt as string + const docs = nodeData.inputs?.document as Document[] + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new Document(flattenDocs[i])) } } + + let finalText = '' + for (let i = 0; i < finalDocs.length; i += 1) { + finalText += finalDocs[i].pageContent + } + + const replaceChar: string[] = ['{', '}'] + for (const char of replaceChar) finalText = finalText.replaceAll(char, '') + + if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}` + + const chatPrompt = ChatPromptTemplate.fromMessages([ + SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage), + new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), + HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) + ]) + + return chatPrompt +} + +const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => { + const model = nodeData.inputs?.model as BaseChatModel + const memory = nodeData.inputs?.memory as FlowiseMemory + const memoryKey = memory.memoryKey ?? 
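    // same default key the memory nodes fall back to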
'chat_history' + + const conversationChain = RunnableSequence.from([ + { + [inputKey]: (input: { input: string }) => input.input, + [memoryKey]: async () => { + const history = await memory.getChatMessages(sessionId, true, chatHistory) + return history + } + }, + prepareChatPrompt(nodeData), + model, + new StringOutputParser() + ]) + + return conversationChain } module.exports = { nodeClass: ConversationChain_Chains } diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts index 36376e132b3..5f98cba17ae 100644 --- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts +++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts @@ -1,20 +1,25 @@ import { BaseLanguageModel } from 'langchain/base_language' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' -import { ConversationalRetrievalQAChain, QAChainParams } from 'langchain/chains' +import { ConversationalRetrievalQAChain } from 'langchain/chains' import { BaseRetriever } from 'langchain/schema/retriever' -import { BufferMemory, BufferMemoryInput } from 'langchain/memory' +import { BufferMemoryInput } from 'langchain/memory' import { PromptTemplate } from 'langchain/prompts' -import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' -import { - default_map_reduce_template, - default_qa_template, - qa_template, - map_reduce_template, - CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT, - refine_question_template, - refine_template -} from './prompts' +import { QA_TEMPLATE, REPHRASE_TEMPLATE, RESPONSE_TEMPLATE } from './prompts' +import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from 'langchain/schema/runnable' +import { BaseMessage, HumanMessage, AIMessage } from 'langchain/schema' +import { StringOutputParser } from 'langchain/schema/output_parser' +import type { Document } from 'langchain/document' +import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts' +import { applyPatch } from 'fast-json-patch' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' +import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface' + +type RetrievalChainInput = { + chat_history: string + question: string +} + +const sourceRunnableName = 'FindDocs' class ConversationalRetrievalQAChain_Chains implements INode { label: string @@ -26,11 +31,12 @@ class ConversationalRetrievalQAChain_Chains implements INode { baseClasses: string[] description: string inputs: INodeParams[] + sessionId?: string - constructor() { + constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Retrieval QA Chain' this.name = 'conversationalRetrievalQAChain' - this.version = 1.0 + this.version = 2.0 this.type = 'ConversationalRetrievalQAChain' this.icon = 'qa.svg' this.category = 'Chains' @@ -38,9 +44,9 @@ class ConversationalRetrievalQAChain_Chains implements INode { this.baseClasses = [this.type, ...getBaseClasses(ConversationalRetrievalQAChain)] this.inputs = [ { - label: 'Language Model', + label: 'Chat Model', name: 'model', - type: 'BaseLanguageModel' + type: 
'BaseChatModel' }, { label: 'Vector Store Retriever', @@ -60,6 +66,29 @@ class ConversationalRetrievalQAChain_Chains implements INode { type: 'boolean', optional: true }, + { + label: 'Rephrase Prompt', + name: 'rephrasePrompt', + type: 'string', + description: 'Using previous chat history, rephrase question into a standalone question', + warning: 'Prompt must include input variables: {chat_history} and {question}', + rows: 4, + additionalParams: true, + optional: true, + default: REPHRASE_TEMPLATE + }, + { + label: 'Response Prompt', + name: 'responsePrompt', + type: 'string', + description: 'Taking the rephrased question, search for answer from the provided context', + warning: 'Prompt must include input variable: {context}', + rows: 4, + additionalParams: true, + optional: true, + default: RESPONSE_TEMPLATE + } + /** Deprecated { label: 'System Message', name: 'systemMessagePrompt', @@ -70,6 +99,7 @@ class ConversationalRetrievalQAChain_Chains implements INode { placeholder: 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' }, + // TODO: create standalone chains for these 3 modes as they are not compatible with memory { label: 'Chain Option', name: 'chainOption', @@ -95,124 +125,246 @@ class ConversationalRetrievalQAChain_Chains implements INode { additionalParams: true, optional: true } + */ ] + this.sessionId = fields?.sessionId } async init(nodeData: INodeData): Promise { const model = nodeData.inputs?.model as BaseLanguageModel const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string - const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean - const chainOption = nodeData.inputs?.chainOption as string + const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string + const responsePrompt = nodeData.inputs?.responsePrompt as string + + let customResponsePrompt = responsePrompt + // If the deprecated systemMessagePrompt is still exists + if (systemMessagePrompt) { + customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}` + } + + const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt) + return answerChain + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as BaseLanguageModel const externalMemory = nodeData.inputs?.memory + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const rephrasePrompt = nodeData.inputs?.rephrasePrompt as string + const responsePrompt = nodeData.inputs?.responsePrompt as string + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean - const obj: any = { - verbose: process.env.DEBUG === 'true' ? 
true : false, - questionGeneratorChainOptions: { - template: CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT - } + let customResponsePrompt = responsePrompt + // If the deprecated systemMessagePrompt is still exists + if (systemMessagePrompt) { + customResponsePrompt = `${systemMessagePrompt}\n${QA_TEMPLATE}` } - if (returnSourceDocuments) obj.returnSourceDocuments = returnSourceDocuments - - if (chainOption === 'map_reduce') { - obj.qaChainOptions = { - type: 'map_reduce', - combinePrompt: PromptTemplate.fromTemplate( - systemMessagePrompt ? `${systemMessagePrompt}\n${map_reduce_template}` : default_map_reduce_template - ) - } as QAChainParams - } else if (chainOption === 'refine') { - const qprompt = new PromptTemplate({ - inputVariables: ['context', 'question'], - template: refine_question_template(systemMessagePrompt) - }) - const rprompt = new PromptTemplate({ - inputVariables: ['context', 'question', 'existing_answer'], - template: refine_template + let memory: FlowiseMemory | undefined = externalMemory + if (!memory) { + memory = new BufferMemory({ + returnMessages: true, + memoryKey: 'chat_history', + inputKey: 'input' }) - obj.qaChainOptions = { - type: 'refine', - questionPrompt: qprompt, - refinePrompt: rprompt - } as QAChainParams - } else { - obj.qaChainOptions = { - type: 'stuff', - prompt: PromptTemplate.fromTemplate(systemMessagePrompt ? `${systemMessagePrompt}\n${qa_template}` : default_qa_template) - } as QAChainParams } - if (externalMemory) { - externalMemory.memoryKey = 'chat_history' - externalMemory.inputKey = 'question' - externalMemory.outputKey = 'text' - externalMemory.returnMessages = true - if (chainOption === 'refine') externalMemory.outputKey = 'output_text' - obj.memory = externalMemory - } else { - const fields: BufferMemoryInput = { - memoryKey: 'chat_history', - inputKey: 'question', - outputKey: 'text', - returnMessages: true + const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt) + + const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? 
[] + + const loggerHandler = new ConsoleCallbackHandler(options.logger) + const callbacks = await additionalCallbacks(nodeData, options) + + const stream = answerChain.streamLog( + { question: input, chat_history: history }, + { callbacks: [loggerHandler, ...callbacks] }, + { + includeNames: [sourceRunnableName] + } + ) + + let streamedResponse: Record = {} + let sourceDocuments: ICommonObject[] = [] + let text = '' + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + for await (const chunk of stream) { + streamedResponse = applyPatch(streamedResponse, chunk.ops).newDocument + + if (streamedResponse.final_output) { + text = streamedResponse.final_output?.output + if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('end') + if (Array.isArray(streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output)) { + sourceDocuments = streamedResponse?.logs?.[sourceRunnableName]?.final_output?.output + if (isStreamingEnabled && returnSourceDocuments) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } + + if ( + Array.isArray(streamedResponse?.streamed_output) && + streamedResponse?.streamed_output.length && + !streamedResponse.final_output + ) { + const token = streamedResponse.streamed_output[streamedResponse.streamed_output.length - 1] + + if (!isStreamingStarted) { + isStreamingStarted = true + if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('start', token) + } + if (isStreamingEnabled) options.socketIO.to(options.socketIOClientId).emit('token', token) } - if (chainOption === 'refine') fields.outputKey = 'output_text' - obj.memory = new BufferMemory(fields) } - const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj) - return chain + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: text, + type: 'apiMessage' + } + ], + this.sessionId + ) + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } } +} - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { - const chain = nodeData.instance as ConversationalRetrievalQAChain - const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean - const chainOption = nodeData.inputs?.chainOption as string +const createRetrieverChain = (llm: BaseLanguageModel, retriever: Runnable, rephrasePrompt: string) => { + // Small speed/accuracy optimization: no need to rephrase the first question + // since there shouldn't be any meta-references to prior chat history + const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt) + const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, llm, new StringOutputParser()]).withConfig({ + runName: 'CondenseQuestion' + }) - let model = nodeData.inputs?.model + const hasHistoryCheckFn = RunnableLambda.from((input: RetrievalChainInput) => input.chat_history.length > 0).withConfig({ + runName: 'HasChatHistoryCheck' + }) - // Temporary fix: https://github.com/hwchase17/langchainjs/issues/754 - model.streaming = false - chain.questionGeneratorChain.llm = model + const conversationChain = condenseQuestionChain.pipe(retriever).withConfig({ + runName: 'RetrievalChainWithHistory' + }) - const obj = { question: input } + const basicRetrievalChain = RunnableLambda.from((input: RetrievalChainInput) => input.question) + .withConfig({ + runName: 'Itemgetter:question' + }) + .pipe(retriever) + 
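        // no-history branch: the raw question is piped straight to the retriever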
.withConfig({ runName: 'RetrievalChainWithNoHistory' }) - if (options && options.chatHistory && chain.memory) { - const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - ;(chain.memory as any).chatHistory = mapChatHistory(options) - } + return RunnableBranch.from([[hasHistoryCheckFn, conversationChain], basicRetrievalChain]).withConfig({ runName: sourceRunnableName }) +} + +const formatDocs = (docs: Document[]) => { + return docs.map((doc, i) => `${doc.pageContent}`).join('\n') +} + +const formatChatHistoryAsString = (history: BaseMessage[]) => { + return history.map((message) => `${message._getType()}: ${message.content}`).join('\n') +} + +const serializeHistory = (input: any) => { + const chatHistory: IMessage[] = input.chat_history || [] + const convertedChatHistory = [] + for (const message of chatHistory) { + if (message.type === 'userMessage') { + convertedChatHistory.push(new HumanMessage({ content: message.message })) + } + if (message.type === 'apiMessage') { + convertedChatHistory.push(new AIMessage({ content: message.message })) } + } + return convertedChatHistory +} - const loggerHandler = new ConsoleCallbackHandler(options.logger) - const callbacks = await additionalCallbacks(nodeData, options) +const createChain = ( + llm: BaseLanguageModel, + retriever: Runnable, + rephrasePrompt = REPHRASE_TEMPLATE, + responsePrompt = RESPONSE_TEMPLATE +) => { + const retrieverChain = createRetrieverChain(llm, retriever, rephrasePrompt) - if (options.socketIO && options.socketIOClientId) { - const handler = new CustomChainHandler( - options.socketIO, - options.socketIOClientId, - chainOption === 'refine' ? 4 : undefined, - returnSourceDocuments - ) - const res = await chain.call(obj, [loggerHandler, handler, ...callbacks]) - if (chainOption === 'refine') { - if (res.output_text && res.sourceDocuments) { - return { - text: res.output_text, - sourceDocuments: res.sourceDocuments - } - } - return res?.output_text - } - if (res.text && res.sourceDocuments) return res - return res?.text - } else { - const res = await chain.call(obj, [loggerHandler, ...callbacks]) - if (res.text && res.sourceDocuments) return res - return res?.text + const context = RunnableMap.from({ + context: RunnableSequence.from([ + ({ question, chat_history }) => ({ + question, + chat_history: formatChatHistoryAsString(chat_history) + }), + retrieverChain, + RunnableLambda.from(formatDocs).withConfig({ + runName: 'FormatDocumentChunks' + }) + ]), + question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({ + runName: 'Itemgetter:question' + }), + chat_history: RunnableLambda.from((input: RetrievalChainInput) => input.chat_history).withConfig({ + runName: 'Itemgetter:chat_history' + }) + }).withConfig({ tags: ['RetrieveDocs'] }) + + const prompt = ChatPromptTemplate.fromMessages([ + ['system', responsePrompt], + new MessagesPlaceholder('chat_history'), + ['human', `{question}`] + ]) + + const responseSynthesizerChain = RunnableSequence.from([prompt, llm, new StringOutputParser()]).withConfig({ + tags: ['GenerateResponse'] + }) + + const conversationalQAChain = RunnableSequence.from([ + { + question: RunnableLambda.from((input: RetrievalChainInput) => input.question).withConfig({ + runName: 'Itemgetter:question' + }), + chat_history: RunnableLambda.from(serializeHistory).withConfig({ + runName: 'SerializeHistory' + }) + }, + context, + 
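        // the assembled { context, question, chat_history } map then feeds the response prompt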
responseSynthesizerChain + ]) + + return conversationalQAChain +} + +class BufferMemory extends FlowiseMemory implements MemoryMethods { + constructor(fields: BufferMemoryInput) { + super(fields) + } + + async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise { + await this.chatHistory.clear() + + for (const msg of prevHistory) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) } + + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] + return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(): Promise { + // adding chat messages will be done on the fly in getChatMessages() + return + } + + async clearChatMessages(): Promise { + await this.clear() } } diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts index 132e3a97e7a..dccc73588b4 100644 --- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts +++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/prompts.ts @@ -1,64 +1,27 @@ -export const default_qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. - -{context} - -Question: {question} -Helpful Answer:` +export const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, answer in the same language as the follow up question. include it in the standalone question. -export const qa_template = `Use the following pieces of context to answer the question at the end. +Chat History: +{chat_history} +Follow Up Input: {question} +Standalone question:` +export const RESPONSE_TEMPLATE = `I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". Using the provided context, answer the user's question to the best of your ability using the resources provided. +If there is nothing in the context relevant to the question at hand, just say "Hmm, I'm not sure" and stop after that. Refuse to answer any question not about the info. Never break character. +------------ {context} +------------ +REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm not sure". Don't try to make up an answer. Never break character.` -Question: {question} -Helpful Answer:` - -export const default_map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer. -If you don't know the answer, just say that you don't know. Don't try to make up an answer. - -{summaries} - -Question: {question} -Helpful Answer:` - -export const map_reduce_template = `Given the following extracted parts of a long document and a question, create a final answer. +export const QA_TEMPLATE = `Use the following pieces of context to answer the question at the end. -{summaries} +{context} Question: {question} Helpful Answer:` -export const refine_question_template = (sysPrompt?: string) => { - let returnPrompt = '' - if (sysPrompt) - returnPrompt = `Context information is below. 
---------------------- -{context} ---------------------- -Given the context information and not prior knowledge, ${sysPrompt} -Answer the question: {question}. -Answer:` - if (!sysPrompt) - returnPrompt = `Context information is below. ---------------------- -{context} ---------------------- -Given the context information and not prior knowledge, answer the question: {question}. -Answer:` - return returnPrompt -} - -export const refine_template = `The original question is as follows: {question} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer (only if needed) with some more context below. ------------- -{context} ------------- -Given the new context, refine the original answer to better answer the question. -If you can't find answer from the context, return the original answer.` - -export const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, answer in the same language as the follow up question. include it in the standalone question. +export const REPHRASE_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} -Standalone question:` +Standalone Question:` diff --git a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts index 0ad8adec9d6..4a6252b5f50 100644 --- a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts +++ b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts @@ -1,4 +1,4 @@ -import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' +import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface' import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { BaseMessage } from 'langchain/schema' @@ -55,36 +55,27 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { super(fields) } - async getChatMessages(_?: string, returnBaseMessages = false): Promise { + async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise { + await this.chatHistory.clear() + + for (const msg of prevHistory) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + const memoryResult = await this.loadMemoryVariables({}) const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) } - async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { - const input = msgArray.find((msg) => msg.type === 'userMessage') - const output = msgArray.find((msg) => msg.type === 'apiMessage') - - const inputValues = { [this.inputKey ?? 
'input']: input?.text } - const outputValues = { output: output?.text } - - await this.saveContext(inputValues, outputValues) + async addChatMessages(): Promise { + // adding chat messages will be done on the fly in getChatMessages() + return } async clearChatMessages(): Promise { await this.clear() } - - async resumeMessages(messages: IMessage[]): Promise { - // Clear existing chatHistory to avoid duplication - if (messages.length) await this.clear() - - // Insert into chatHistory - for (const msg of messages) { - if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) - else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) - } - } } module.exports = { nodeClass: BufferMemory_Memory } diff --git a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts index ca8d0ddfdbe..c21405a4791 100644 --- a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts +++ b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts @@ -1,4 +1,4 @@ -import { FlowiseWindowMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' +import { FlowiseWindowMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface' import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { BufferWindowMemory, BufferWindowMemoryInput } from 'langchain/memory' import { BaseMessage } from 'langchain/schema' @@ -67,36 +67,28 @@ class BufferWindowMemoryExtended extends FlowiseWindowMemory implements MemoryMe super(fields) } - async getChatMessages(_?: string, returnBaseMessages = false): Promise { + async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise { + await this.chatHistory.clear() + + // Insert into chatHistory + for (const msg of prevHistory) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + const memoryResult = await this.loadMemoryVariables({}) const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) } - async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { - const input = msgArray.find((msg) => msg.type === 'userMessage') - const output = msgArray.find((msg) => msg.type === 'apiMessage') - - const inputValues = { [this.inputKey ?? 
'input']: input?.text } - const outputValues = { output: output?.text } - - await this.saveContext(inputValues, outputValues) + async addChatMessages(): Promise { + // adding chat messages will be done on the fly in getChatMessages() + return } async clearChatMessages(): Promise { await this.clear() } - - async resumeMessages(messages: IMessage[]): Promise { - // Clear existing chatHistory to avoid duplication - if (messages.length) await this.clear() - - // Insert into chatHistory - for (const msg of messages) { - if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) - else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) - } - } } module.exports = { nodeClass: BufferWindowMemory_Memory } diff --git a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts index 107ab7db9b7..45d393269e6 100644 --- a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts +++ b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts @@ -1,4 +1,4 @@ -import { FlowiseSummaryMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' +import { FlowiseSummaryMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface' import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory' import { BaseLanguageModel } from 'langchain/base_language' @@ -66,40 +66,32 @@ class ConversationSummaryMemoryExtended extends FlowiseSummaryMemory implements super(fields) } - async getChatMessages(_?: string, returnBaseMessages = false): Promise { + async getChatMessages(_?: string, returnBaseMessages = false, prevHistory: IMessage[] = []): Promise { + await this.chatHistory.clear() + this.buffer = '' + + for (const msg of prevHistory) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + + // Get summary + const chatMessages = await this.chatHistory.getMessages() + this.buffer = chatMessages.length ? await this.predictNewSummary(chatMessages.slice(-2), this.buffer) : '' + const memoryResult = await this.loadMemoryVariables({}) const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) } - async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { - const input = msgArray.find((msg) => msg.type === 'userMessage') - const output = msgArray.find((msg) => msg.type === 'apiMessage') - - const inputValues = { [this.inputKey ?? 
'input']: input?.text } - const outputValues = { output: output?.text } - - await this.saveContext(inputValues, outputValues) + async addChatMessages(): Promise { + // adding chat messages will be done on the fly in getChatMessages() + return } async clearChatMessages(): Promise { await this.clear() } - - async resumeMessages(messages: IMessage[]): Promise { - // Clear existing chatHistory to avoid duplication - if (messages.length) await this.clear() - - // Insert into chatHistory - for (const msg of messages) { - if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) - else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) - } - - // Replace buffer - const chatMessages = await this.chatHistory.getMessages() - this.buffer = await this.predictNewSummary(chatMessages.slice(-2), this.buffer) - } } module.exports = { nodeClass: ConversationSummaryMemory_Memory } diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 872ec0b51cf..91c1d369349 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -12,13 +12,7 @@ import { import { DynamoDBChatMessageHistory } from 'langchain/stores/message/dynamodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage, BaseMessage } from 'langchain/schema' -import { - convertBaseMessagetoIMessage, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src/utils' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' class DynamoDb_Memory implements INode { @@ -70,7 +64,8 @@ class DynamoDb_Memory implements INode { label: 'Session ID', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -88,25 +83,6 @@ class DynamoDb_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeDynamoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing DynamoDb memory session ${sessionId ? sessionId : chatId}`) - await dynamodbMemory.clear() - options.logger.info(`Successfully cleared DynamoDb memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await dynamodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { @@ -114,17 +90,7 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P const partitionKey = nodeData.inputs?.partitionKey as string const region = nodeData.inputs?.region as string const memoryKey = nodeData.inputs?.memoryKey as string - const chatId = options.chatId - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } + const sessionId = nodeData.inputs?.sessionId as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const accessKeyId = getCredentialParam('accessKey', credentialData, nodeData) @@ -150,7 +116,6 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: dynamoDb, - isSessionIdUsingChatMessageId, sessionId, dynamodbClient: client }) @@ -158,7 +123,6 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P } interface BufferMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean dynamodbClient: DynamoDBClient sessionId: string } @@ -178,7 +142,6 @@ interface DynamoDBSerializedChatMessage { } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { - isSessionIdUsingChatMessageId = false sessionId = '' dynamodbClient: DynamoDBClient @@ -306,10 +269,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { await this.dynamodbClient.send(new DeleteItemCommand(params)) await this.clear() } - - async resumeMessages(): Promise { - return - } } module.exports = { nodeClass: DynamoDb_Memory } diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index b422921e6f9..c593c20d61c 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -2,13 +2,7 @@ import { MongoClient, Collection, Document } from 'mongodb' import { MongoDBChatMessageHistory } from 'langchain/stores/message/mongodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, BaseMessage } from 'langchain/schema' -import { - convertBaseMessagetoIMessage, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src/utils' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' class MongoDB_Memory implements INode { @@ -55,7 +49,8 @@ class MongoDB_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. 
Learn more', default: '', additionalParams: true, optional: true @@ -73,42 +68,13 @@ class MongoDB_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initializeMongoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const mongodbMemory = await initializeMongoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing MongoDB memory session ${sessionId ? sessionId : chatId}`) - await mongodbMemory.clear() - options.logger.info(`Successfully cleared MongoDB memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const mongodbMemory = await initializeMongoDB(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await mongodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { const databaseName = nodeData.inputs?.databaseName as string const collectionName = nodeData.inputs?.collectionName as string const memoryKey = nodeData.inputs?.memoryKey as string - const chatId = options?.chatId as string - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } + const sessionId = nodeData.inputs?.sessionId as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData) @@ -149,14 +115,12 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): P return new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: mongoDBChatMessageHistory, - isSessionIdUsingChatMessageId, sessionId, collection }) } interface BufferMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean collection: Collection sessionId: string } @@ -164,7 +128,6 @@ interface BufferMemoryExtendedInput { class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { sessionId = '' collection: Collection - isSessionIdUsingChatMessageId? 
= false constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) @@ -221,10 +184,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { await this.collection.deleteOne({ sessionId: id }) await this.clear() } - - async resumeMessages(): Promise { - return - } } module.exports = { nodeClass: MongoDB_Memory } diff --git a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts index 938cc87317b..19506fc1e4e 100644 --- a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts +++ b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts @@ -1,9 +1,14 @@ import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ICommonObject } from '../../../src' -import { MotorheadMemory, MotorheadMemoryInput, InputValues, MemoryVariables, OutputValues, getBufferString } from 'langchain/memory' +import { MotorheadMemory, MotorheadMemoryInput, InputValues, OutputValues } from 'langchain/memory' import fetch from 'node-fetch' -import { BaseMessage } from 'langchain/schema' +import { AIMessage, BaseMessage, ChatMessage, HumanMessage } from 'langchain/schema' + +type MotorheadMessage = { + content: string + role: 'Human' | 'AI' +} class MotorMemory_Memory implements INode { label: string @@ -46,7 +51,8 @@ class MotorMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -64,49 +70,19 @@ class MotorMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeMotorhead(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const motorhead = await initalizeMotorhead(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Motorhead memory session ${sessionId ? sessionId : chatId}`) - await motorhead.clear() - options.logger.info(`Successfully cleared Motorhead memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const motorhead = await initalizeMotorhead(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await motorhead.loadMemoryVariables({}) - return getBufferString(memoryResult[key]) - } - } } const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): Promise => { const memoryKey = nodeData.inputs?.memoryKey as string const baseURL = nodeData.inputs?.baseURL as string - const chatId = options?.chatId as string - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } + const sessionId = nodeData.inputs?.sessionId as string const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) const clientId = getCredentialParam('clientId', credentialData, nodeData) - let obj: MotorheadMemoryInput & MotorheadMemoryExtendedInput = { + let obj: MotorheadMemoryInput = { returnMessages: true, - isSessionIdUsingChatMessageId, sessionId, memoryKey } @@ -132,23 +108,9 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): return motorheadMemory } -interface MotorheadMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean -} - class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { - isSessionIdUsingChatMessageId? = false - - constructor(fields: MotorheadMemoryInput & MotorheadMemoryExtendedInput) { + constructor(fields: MotorheadMemoryInput) { super(fields) - this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId - } - - async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise { - if (overrideSessionId) { - this.sessionId = overrideSessionId - } - return super.loadMemoryVariables({ values }) } async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise { @@ -180,9 +142,33 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { const id = overrideSessionId ?? this.sessionId - const memoryVariables = await this.loadMemoryVariables({}, id) - const baseMessages = memoryVariables[this.memoryKey] - return returnBaseMessages ? baseMessages : convertBaseMessagetoIMessage(baseMessages) + try { + const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, { + //@ts-ignore + signal: this.timeout ? AbortSignal.timeout(this.timeout) : undefined, + headers: this._getHeaders() as ICommonObject, + method: 'GET' + }) + const data = await resp.json() + const rawStoredMessages: MotorheadMessage[] = data?.data?.messages ?? [] + + const baseMessages = rawStoredMessages.reverse().map((message) => { + const { content, role } = message + if (role === 'Human') { + return new HumanMessage(content) + } else if (role === 'AI') { + return new AIMessage(content) + } else { + // default to generic ChatMessage + return new ChatMessage(content, role) + } + }) + + return returnBaseMessages ? 
baseMessages : convertBaseMessagetoIMessage(baseMessages) + } catch (error) { + console.error('Error getting session: ', error) + return [] + } } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index a02df3ea21c..baf4ea6bb3e 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -1,15 +1,9 @@ -import { INode, INodeData, INodeParams, ICommonObject, IMessage, MessageType, FlowiseMemory, MemoryMethods } from '../../../src/Interface' -import { - convertBaseMessagetoIMessage, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src/utils' +import { Redis } from 'ioredis' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis' import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema' -import { Redis } from 'ioredis' +import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage, MemoryMethods, FlowiseMemory } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' class RedisBackedChatMemory_Memory implements INode { label: string @@ -44,7 +38,8 @@ class RedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -78,47 +73,19 @@ class RedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return await initalizeRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Redis memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const redis = await initalizeRedis(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { const sessionTTL = nodeData.inputs?.sessionTTL as number const memoryKey = nodeData.inputs?.memoryKey as string + const sessionId = nodeData.inputs?.sessionId as string const windowSize = nodeData.inputs?.windowSize as number - const chatId = options?.chatId as string - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const redisUrl = getCredentialParam('redisUrl', credentialData, nodeData) let client: Redis + if (!redisUrl || redisUrl === '') { const username = getCredentialParam('redisCacheUser', credentialData, nodeData) const password = getCredentialParam('redisCachePwd', credentialData, nodeData) @@ -153,7 +120,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom const redisChatMessageHistory = new RedisChatMessageHistory(obj) - redisChatMessageHistory.getMessages = async (): Promise => { + /*redisChatMessageHistory.getMessages = async (): Promise => { const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, windowSize ? -windowSize : 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) return orderedMessages.map(mapStoredMessageToChatMessage) @@ -169,44 +136,45 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom redisChatMessageHistory.clear = async (): Promise => { await client.del((redisChatMessageHistory as any).sessionId) - } + }*/ const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId, sessionId, + windowSize, redisClient: client }) + return memory } interface BufferMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean redisClient: Redis sessionId: string + windowSize?: number } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { - isSessionIdUsingChatMessageId? = false sessionId = '' redisClient: Redis + windowSize?: number constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) - this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId this.sessionId = fields.sessionId this.redisClient = fields.redisClient + this.windowSize = fields.windowSize } - async getChatMessages(overrideSessionId = '', returnBaseMessage = false): Promise { + async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.redisClient) return [] const id = overrideSessionId ?? this.sessionId - const rawStoredMessages = await this.redisClient.lrange(id, 0, -1) + const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? this.windowSize * -1 : 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage) - return returnBaseMessage ? baseMessages : convertBaseMessagetoIMessage(baseMessages) + return returnBaseMessages ? 
baseMessages : convertBaseMessagetoIMessage(baseMessages) } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { @@ -236,10 +204,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { await this.redisClient.del(id) await this.clear() } - - async resumeMessages(): Promise { - return - } } module.exports = { nodeClass: RedisBackedChatMemory_Memory } diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts index c3f9712316c..3d7f6dbfc0c 100644 --- a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts @@ -3,13 +3,7 @@ import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis' import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage, BaseMessage } from 'langchain/schema' import { FlowiseMemory, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' -import { - convertBaseMessagetoIMessage, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src/utils' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ICommonObject } from '../../../src/Interface' class UpstashRedisBackedChatMemory_Memory implements INode { @@ -51,7 +45,8 @@ class UpstashRedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -70,40 +65,12 @@ class UpstashRedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeUpstashRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? 
sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const key = 'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { const baseURL = nodeData.inputs?.baseURL as string const sessionTTL = nodeData.inputs?.sessionTTL as string - const chatId = options?.chatId as string - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } + const sessionId = nodeData.inputs?.sessionId as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData) @@ -122,7 +89,6 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject const memory = new BufferMemoryExtended({ memoryKey: 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId, sessionId, redisClient: client }) @@ -131,19 +97,16 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject } interface BufferMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean redisClient: Redis sessionId: string } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { - isSessionIdUsingChatMessageId? = false sessionId = '' redisClient: Redis constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) - this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId this.sessionId = fields.sessionId this.redisClient = fields.redisClient } @@ -186,10 +149,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { await this.redisClient.del(id) await this.clear() } - - async resumeMessages(): Promise { - return - } } module.exports = { nodeClass: UpstashRedisBackedChatMemory_Memory } diff --git a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts index 4dda76df141..597eee8a094 100644 --- a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts +++ b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts @@ -2,7 +2,7 @@ import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } f import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep' import { ICommonObject } from '../../../src' -import { InputValues, MemoryVariables, OutputValues, getBufferString } from 'langchain/memory' +import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory' import { BaseMessage } from 'langchain/schema' class ZepMemory_Memory implements INode { @@ -55,10 +55,9 @@ class ZepMemory_Memory implements INode { label: 'Size', name: 'k', type: 'number', - placeholder: '10', + default: '10', description: 'Window of size k to surface the last k back-and-forth to use as memory.', - additionalParams: true, - optional: true + additionalParams: true }, { label: 'AI Prefix', @@ -101,27 +100,6 @@ class ZepMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return await 
initalizeZep(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const zep = await initalizeZep(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Zep memory session ${sessionId ? sessionId : chatId}`) - await zep.clear() - options.logger.info(`Successfully cleared Zep memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const aiPrefix = nodeData.inputs?.aiPrefix as string - const humanPrefix = nodeData.inputs?.humanPrefix as string - const zep = await initalizeZep(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await zep.loadMemoryVariables({}) - return getBufferString(memoryResult[key], humanPrefix, aiPrefix) - } - } } const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promise => { @@ -131,30 +109,19 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promis const memoryKey = nodeData.inputs?.memoryKey as string const inputKey = nodeData.inputs?.inputKey as string const k = nodeData.inputs?.k as string - const chatId = options?.chatId as string - - let isSessionIdUsingChatMessageId = false - let sessionId = '' - - if (!nodeData.inputs?.sessionId && chatId) { - isSessionIdUsingChatMessageId = true - sessionId = chatId - } else { - sessionId = nodeData.inputs?.sessionId - } + const sessionId = nodeData.inputs?.sessionId as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) const obj: ZepMemoryInput & ZepMemoryExtendedInput = { baseURL, - sessionId, aiPrefix, humanPrefix, returnMessages: true, memoryKey, inputKey, - isSessionIdUsingChatMessageId, + sessionId, k: k ? parseInt(k, 10) : undefined } if (apiKey) obj.apiKey = apiKey @@ -163,17 +130,14 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promis } interface ZepMemoryExtendedInput { - isSessionIdUsingChatMessageId: boolean k?: number } class ZepMemoryExtended extends ZepMemory implements MemoryMethods { - isSessionIdUsingChatMessageId? 
= false
     lastN?: number
 
     constructor(fields: ZepMemoryInput & ZepMemoryExtendedInput) {
         super(fields)
-        this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
         this.lastN = fields.k
     }
 
diff --git a/packages/components/nodes/tools/CustomTool/CustomTool.ts b/packages/components/nodes/tools/CustomTool/CustomTool.ts
index 6ffcc0e2171..a983d0d90e7 100644
--- a/packages/components/nodes/tools/CustomTool/CustomTool.ts
+++ b/packages/components/nodes/tools/CustomTool/CustomTool.ts
@@ -60,7 +60,7 @@ class CustomTool_Tools implements INode {
         }
     }
 
-    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const selectedToolId = nodeData.inputs?.selectedTool as string
         const customToolFunc = nodeData.inputs?.customToolFunc as string
 
@@ -99,11 +99,7 @@ class CustomTool_Tools implements INode {
             }
         }
 
-        const flow = {
-            chatId: options.chatId, // id is uppercase (I)
-            chatflowId: options.chatflowid, // id is lowercase (i)
-            input
-        }
+        const flow = { chatflowId: options.chatflowid }
 
         let dynamicStructuredTool = new DynamicStructuredTool(obj)
         dynamicStructuredTool.setVariables(variables)
diff --git a/packages/components/nodes/tools/CustomTool/core.ts b/packages/components/nodes/tools/CustomTool/core.ts
index 338b0ae9a69..b543aefa9b1 100644
--- a/packages/components/nodes/tools/CustomTool/core.ts
+++ b/packages/components/nodes/tools/CustomTool/core.ts
@@ -55,7 +55,12 @@ export class DynamicStructuredTool<
         this.schema = fields.schema
     }
 
-    async call(arg: z.output<T>, configArg?: RunnableConfig | Callbacks, tags?: string[], overrideSessionId?: string): Promise<string> {
+    async call(
+        arg: z.output<T>,
+        configArg?: RunnableConfig | Callbacks,
+        tags?: string[],
+        flowConfig?: { sessionId?: string; chatId?: string; input?: string }
+    ): Promise<string> {
         const config = parseCallbackConfigArg(configArg)
         if (config.runName === undefined) {
             config.runName = this.name
@@ -86,7 +91,7 @@ export class DynamicStructuredTool<
         )
         let result
         try {
-            result = await this._call(parsed, runManager, overrideSessionId)
+            result = await this._call(parsed, runManager, flowConfig)
         } catch (e) {
             await runManager?.handleToolError(e)
             throw e
@@ -95,7 +100,11 @@ export class DynamicStructuredTool<
         return result
     }
 
-    protected async _call(arg: z.output<T>, _?: CallbackManagerForToolRun, overrideSessionId?: string): Promise<string> {
+    protected async _call(
+        arg: z.output<T>,
+        _?: CallbackManagerForToolRun,
+        flowConfig?: { sessionId?: string; chatId?: string; input?: string }
+    ): Promise<string> {
         let sandbox: any = {}
         if (typeof arg === 'object' && Object.keys(arg).length) {
             for (const item in arg) {
@@ -126,7 +135,7 @@ export class DynamicStructuredTool<
         // inject flow properties
         if (this.flowObj) {
-            sandbox['$flow'] = { ...this.flowObj, sessionId: overrideSessionId }
+            sandbox['$flow'] = { ...this.flowObj, ...flowConfig }
         }
 
         const defaultAllowBuiltInDep = [
diff --git a/packages/components/package.json b/packages/components/package.json
index a2565430bd4..a77d91e4b88 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -46,6 +46,7 @@
         "dotenv": "^16.0.0",
         "express": "^4.17.3",
         "faiss-node": "^0.2.2",
+        "fast-json-patch": "^3.1.1",
         "form-data": "^4.0.0",
         "google-auth-library": "^9.0.0",
         "graphql": "^16.6.0",
diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts
index 2a625ff6a0b..676618e574b 100644
--- a/packages/components/src/Interface.ts
+++ b/packages/components/src/Interface.ts
@@ 
-108,10 +108,6 @@ export interface INode extends INodeProperties {
         search: (nodeData: INodeData, options?: ICommonObject) => Promise<any>
         delete: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
     }
-    memoryMethods?: {
-        clearSessionMemory: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
-        getChatMessages: (nodeData: INodeData, options?: ICommonObject) => Promise<string>
-    }
     init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<any>
     run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<string | ICommonObject>
 }
@@ -204,29 +200,37 @@ import { BaseMessage } from 'langchain/schema'
 import { BufferMemory, BufferWindowMemory, ConversationSummaryMemory } from 'langchain/memory'
 
 export interface MemoryMethods {
-    getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]>
+    getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean, prevHistory?: IMessage[]): Promise<IMessage[] | BaseMessage[]>
     addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
     clearChatMessages(overrideSessionId?: string): Promise<void>
-    resumeMessages?(messages: IMessage[]): Promise<void>
 }
 
 export abstract class FlowiseMemory extends BufferMemory implements MemoryMethods {
-    abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]>
+    abstract getChatMessages(
+        overrideSessionId?: string,
+        returnBaseMessages?: boolean,
+        prevHistory?: IMessage[]
+    ): Promise<IMessage[] | BaseMessage[]>
     abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
     abstract clearChatMessages(overrideSessionId?: string): Promise<void>
-    abstract resumeMessages(messages: IMessage[]): Promise<void>
 }
 
 export abstract class FlowiseWindowMemory extends BufferWindowMemory implements MemoryMethods {
-    abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]>
+    abstract getChatMessages(
+        overrideSessionId?: string,
+        returnBaseMessages?: boolean,
+        prevHistory?: IMessage[]
+    ): Promise<IMessage[] | BaseMessage[]>
     abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
     abstract clearChatMessages(overrideSessionId?: string): Promise<void>
-    abstract resumeMessages(messages: IMessage[]): Promise<void>
 }
 
 export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory implements MemoryMethods {
-    abstract getChatMessages(overrideSessionId?: string, returnBaseMessages?: boolean): Promise<IMessage[] | BaseMessage[]>
+    abstract getChatMessages(
+        overrideSessionId?: string,
+        returnBaseMessages?: boolean,
+        prevHistory?: IMessage[]
+    ): Promise<IMessage[] | BaseMessage[]>
     abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
     abstract clearChatMessages(overrideSessionId?: string): Promise<void>
-    abstract resumeMessages(messages: IMessage[]): Promise<void>
 }
diff --git a/packages/components/src/agents.ts b/packages/components/src/agents.ts
new file mode 100644
index 00000000000..e30a0c43ae6
--- /dev/null
+++ b/packages/components/src/agents.ts
@@ -0,0 +1,615 @@
+import { AgentExecutorInput, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, StoppingMethod } from 'langchain/agents'
+import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
+import { OutputParserException } from 'langchain/schema/output_parser'
+import { CallbackManager, CallbackManagerForChainRun, Callbacks } from 'langchain/callbacks'
+import { ToolInputParsingException, Tool } from '@langchain/core/tools'
+import { Runnable } from 'langchain/schema/runnable'
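+// This file is a local re-implementation of LangChain's AgentExecutor. It exists so the
+// executor can thread Flowise flow context through tool calls: tool.call() is invoked with
+// an extra flowConfig argument ({ sessionId, chatId, input }), matching the overridden
+// DynamicStructuredTool.call() in nodes/tools/CustomTool/core.ts.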
+import { BaseChain, SerializedLLMChain } from 'langchain/chains' +import { Serializable } from '@langchain/core/load/serializable' + +type AgentExecutorOutput = ChainValues + +interface AgentExecutorIteratorInput { + agentExecutor: AgentExecutor + inputs: Record + callbacks?: Callbacks + tags?: string[] + metadata?: Record + runName?: string + runManager?: CallbackManagerForChainRun +} + +//TODO: stream tools back +export class AgentExecutorIterator extends Serializable implements AgentExecutorIteratorInput { + lc_namespace = ['langchain', 'agents', 'executor_iterator'] + + agentExecutor: AgentExecutor + + inputs: Record + + callbacks: Callbacks + + tags: string[] | undefined + + metadata: Record | undefined + + runName: string | undefined + + private _finalOutputs: Record | undefined + + get finalOutputs(): Record | undefined { + return this._finalOutputs + } + + /** Intended to be used as a setter method, needs to be async. */ + async setFinalOutputs(value: Record | undefined) { + this._finalOutputs = undefined + if (value) { + const preparedOutputs: Record = await this.agentExecutor.prepOutputs(this.inputs, value, true) + this._finalOutputs = preparedOutputs + } + } + + runManager: CallbackManagerForChainRun | undefined + + intermediateSteps: AgentStep[] = [] + + iterations = 0 + + get nameToToolMap(): Record { + const toolMap = this.agentExecutor.tools.map((tool) => ({ + [tool.name]: tool + })) + return Object.assign({}, ...toolMap) + } + + constructor(fields: AgentExecutorIteratorInput) { + super(fields) + this.agentExecutor = fields.agentExecutor + this.inputs = fields.inputs + this.tags = fields.tags + this.metadata = fields.metadata + this.runName = fields.runName + this.runManager = fields.runManager + } + + /** + * Reset the iterator to its initial state, clearing intermediate steps, + * iterations, and the final output. + */ + reset(): void { + this.intermediateSteps = [] + this.iterations = 0 + this._finalOutputs = undefined + } + + updateIterations(): void { + this.iterations += 1 + } + + async *streamIterator() { + this.reset() + + // Loop to handle iteration + while (true) { + try { + if (this.iterations === 0) { + await this.onFirstStep() + } + + const result = await this._callNext() + yield result + } catch (e: any) { + if ('message' in e && e.message.startsWith('Final outputs already reached: ')) { + if (!this.finalOutputs) { + throw e + } + return this.finalOutputs + } + if (this.runManager) { + await this.runManager.handleChainError(e) + } + throw e + } + } + } + + /** + * Perform any necessary setup for the first step + * of the asynchronous iterator. + */ + async onFirstStep(): Promise { + if (this.iterations === 0) { + const callbackManager = await CallbackManager.configure( + this.callbacks, + this.agentExecutor.callbacks, + this.tags, + this.agentExecutor.tags, + this.metadata, + this.agentExecutor.metadata, + { + verbose: this.agentExecutor.verbose + } + ) + this.runManager = await callbackManager?.handleChainStart( + this.agentExecutor.toJSON(), + this.inputs, + undefined, + undefined, + this.tags, + this.metadata, + this.runName + ) + } + } + + /** + * Execute the next step in the chain using the + * AgentExecutor's _takeNextStep method. + */ + async _executeNextStep(runManager?: CallbackManagerForChainRun): Promise { + return this.agentExecutor._takeNextStep(this.nameToToolMap, this.inputs, this.intermediateSteps, runManager) + } + + /** + * Process the output of the next step, + * handling AgentFinish and tool return cases. 
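+     * An AgentFinish ends the run and records the final outputs; otherwise the new steps
+     * are appended to intermediateSteps and checked for a direct tool return.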
+     */
+    async _processNextStepOutput(
+        nextStepOutput: AgentFinish | AgentStep[],
+        runManager?: CallbackManagerForChainRun
+    ): Promise<Record<string, unknown>> {
+        if ('returnValues' in nextStepOutput) {
+            const output = await this.agentExecutor._return(nextStepOutput as AgentFinish, this.intermediateSteps, runManager)
+            if (this.runManager) {
+                await this.runManager.handleChainEnd(output)
+            }
+            await this.setFinalOutputs(output)
+            return output
+        }
+
+        this.intermediateSteps = this.intermediateSteps.concat(nextStepOutput as AgentStep[])
+
+        let output: Record<string, unknown> = {}
+        if (Array.isArray(nextStepOutput) && nextStepOutput.length === 1) {
+            const nextStep = nextStepOutput[0]
+            const toolReturn = await this.agentExecutor._getToolReturn(nextStep)
+            if (toolReturn) {
+                output = await this.agentExecutor._return(toolReturn, this.intermediateSteps, runManager)
+                if (this.runManager) {
+                    await this.runManager.handleChainEnd(output)
+                }
+                await this.setFinalOutputs(output)
+            }
+        }
+        output = { intermediateSteps: nextStepOutput as AgentStep[] }
+        return output
+    }
+
+    async _stop(): Promise<Record<string, unknown>> {
+        const output = await this.agentExecutor.agent.returnStoppedResponse(
+            this.agentExecutor.earlyStoppingMethod,
+            this.intermediateSteps,
+            this.inputs
+        )
+        const returnedOutput = await this.agentExecutor._return(output, this.intermediateSteps, this.runManager)
+        await this.setFinalOutputs(returnedOutput)
+        return returnedOutput
+    }
+
+    async _callNext(): Promise<Record<string, unknown>> {
+        // final output already reached: stopiteration (final output)
+        if (this.finalOutputs) {
+            throw new Error(`Final outputs already reached: ${JSON.stringify(this.finalOutputs, null, 2)}`)
+        }
+        // timeout/max iterations: stopiteration (stopped response)
+        if (!this.agentExecutor.shouldContinueGetter(this.iterations)) {
+            return this._stop()
+        }
+        const nextStepOutput = await this._executeNextStep(this.runManager)
+        const output = await this._processNextStepOutput(nextStepOutput, this.runManager)
+        this.updateIterations()
+        return output
+    }
+}
+
+export class AgentExecutor extends BaseChain {
+    static lc_name() {
+        return 'AgentExecutor'
+    }
+
+    get lc_namespace() {
+        return ['langchain', 'agents', 'executor']
+    }
+
+    agent: BaseSingleActionAgent | BaseMultiActionAgent
+
+    tools: this['agent']['ToolType'][]
+
+    returnIntermediateSteps = false
+
+    maxIterations?: number = 15
+
+    earlyStoppingMethod: StoppingMethod = 'force'
+
+    sessionId?: string
+
+    chatId?: string
+
+    input?: string
+
+    /**
+     * How to handle errors raised by the agent's output parser.
+     Defaults to `False`, which raises the error.
+
+     If `true`, the error will be sent back to the LLM as an observation.
+     If a string, the string itself will be sent to the LLM as an observation.
+     If a callable function, the function will be called with the exception
+     as an argument, and the result of that function will be passed to the agent
+     as an observation.
+     */
+    handleParsingErrors: boolean | string | ((e: OutputParserException | ToolInputParsingException) => string) = false
+
+    get inputKeys() {
+        return this.agent.inputKeys
+    }
+
+    get outputKeys() {
+        return this.agent.returnValues
+    }
+
+    constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }) {
+        let agent: BaseSingleActionAgent | BaseMultiActionAgent
+        if (Runnable.isRunnable(input.agent)) {
+            agent = new RunnableAgent({ runnable: input.agent })
+        } else {
+            agent = input.agent
+        }
+
+        super(input)
+        this.agent = agent
+        this.tools = input.tools
+        this.handleParsingErrors = input.handleParsingErrors ?? 
this.handleParsingErrors + /* Getting rid of this because RunnableAgent doesnt allow return direct + if (this.agent._agentActionType() === "multi") { + for (const tool of this.tools) { + if (tool.returnDirect) { + throw new Error( + `Tool with return direct ${tool.name} not supported for multi-action agent.` + ); + } + } + }*/ + this.returnIntermediateSteps = input.returnIntermediateSteps ?? this.returnIntermediateSteps + this.maxIterations = input.maxIterations ?? this.maxIterations + this.earlyStoppingMethod = input.earlyStoppingMethod ?? this.earlyStoppingMethod + this.sessionId = input.sessionId + this.chatId = input.chatId + this.input = input.input + } + + static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }): AgentExecutor { + const newInstance = new AgentExecutor(fields) + if (fields.sessionId) newInstance.sessionId = fields.sessionId + if (fields.chatId) newInstance.chatId = fields.chatId + if (fields.input) newInstance.input = fields.input + return newInstance + } + + get shouldContinueGetter() { + return this.shouldContinue.bind(this) + } + + /** + * Method that checks if the agent execution should continue based on the + * number of iterations. + * @param iterations The current number of iterations. + * @returns A boolean indicating whether the agent execution should continue. + */ + private shouldContinue(iterations: number): boolean { + return this.maxIterations === undefined || iterations < this.maxIterations + } + + async _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise { + const toolsByName = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t])) + + const steps: AgentStep[] = [] + let iterations = 0 + + const getOutput = async (finishStep: AgentFinish): Promise => { + const { returnValues } = finishStep + const additional = await this.agent.prepareForOutput(returnValues, steps) + + if (this.returnIntermediateSteps) { + return { ...returnValues, intermediateSteps: steps, ...additional } + } + await runManager?.handleAgentEnd(finishStep) + return { ...returnValues, ...additional } + } + + while (this.shouldContinue(iterations)) { + let output + try { + output = await this.agent.plan(steps, inputs, runManager?.getChild()) + } catch (e) { + if (e instanceof OutputParserException) { + let observation + let text = e.message + if (this.handleParsingErrors === true) { + if (e.sendToLLM) { + observation = e.observation + text = e.llmOutput ?? '' + } else { + observation = 'Invalid or incomplete response' + } + } else if (typeof this.handleParsingErrors === 'string') { + observation = this.handleParsingErrors + } else if (typeof this.handleParsingErrors === 'function') { + observation = this.handleParsingErrors(e) + } else { + throw e + } + output = { + tool: '_Exception', + toolInput: observation, + log: text + } as AgentAction + } else { + throw e + } + } + // Check if the agent has finished + if ('returnValues' in output) { + return getOutput(output) + } + + let actions: AgentAction[] + if (Array.isArray(output)) { + actions = output as AgentAction[] + } else { + actions = [output as AgentAction] + } + + const newSteps = await Promise.all( + actions.map(async (action) => { + await runManager?.handleAgentAction(action) + const tool = action.tool === '_Exception' ? 
new ExceptionTool() : toolsByName[action.tool?.toLowerCase()] + let observation + try { + /* Here we need to override Tool call method to include sessionId, chatId, input as parameter + * Tool Call Parameters: + * - arg: z.output + * - configArg?: RunnableConfig | Callbacks + * - tags?: string[] + * - flowConfig?: { sessionId?: string, chatId?: string, input?: string } + */ + observation = tool + ? // @ts-ignore + await tool.call(action.toolInput, runManager?.getChild(), undefined, { + sessionId: this.sessionId, + chatId: this.chatId, + input: this.input + }) + : `${action.tool} is not a valid tool, try another one.` + } catch (e) { + if (e instanceof ToolInputParsingException) { + if (this.handleParsingErrors === true) { + observation = 'Invalid or incomplete tool input. Please try again.' + } else if (typeof this.handleParsingErrors === 'string') { + observation = this.handleParsingErrors + } else if (typeof this.handleParsingErrors === 'function') { + observation = this.handleParsingErrors(e) + } else { + throw e + } + observation = await new ExceptionTool().call(observation, runManager?.getChild()) + return { action, observation: observation ?? '' } + } + } + return { action, observation: observation ?? '' } + }) + ) + + steps.push(...newSteps) + + const lastStep = steps[steps.length - 1] + const lastTool = toolsByName[lastStep.action.tool?.toLowerCase()] + + if (lastTool?.returnDirect) { + return getOutput({ + returnValues: { [this.agent.returnValues[0]]: lastStep.observation }, + log: '' + }) + } + + iterations += 1 + } + + const finish = await this.agent.returnStoppedResponse(this.earlyStoppingMethod, steps, inputs) + + return getOutput(finish) + } + + async _takeNextStep( + nameToolMap: Record, + inputs: ChainValues, + intermediateSteps: AgentStep[], + runManager?: CallbackManagerForChainRun + ): Promise { + let output + try { + output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild()) + } catch (e) { + if (e instanceof OutputParserException) { + let observation + let text = e.message + if (this.handleParsingErrors === true) { + if (e.sendToLLM) { + observation = e.observation + text = e.llmOutput ?? 
+    async _takeNextStep(
+        nameToolMap: Record<string, Tool>,
+        inputs: ChainValues,
+        intermediateSteps: AgentStep[],
+        runManager?: CallbackManagerForChainRun
+    ): Promise<AgentStep[] | AgentFinish> {
+        let output
+        try {
+            output = await this.agent.plan(intermediateSteps, inputs, runManager?.getChild())
+        } catch (e) {
+            if (e instanceof OutputParserException) {
+                let observation
+                let text = e.message
+                if (this.handleParsingErrors === true) {
+                    if (e.sendToLLM) {
+                        observation = e.observation
+                        text = e.llmOutput ?? ''
+                    } else {
+                        observation = 'Invalid or incomplete response'
+                    }
+                } else if (typeof this.handleParsingErrors === 'string') {
+                    observation = this.handleParsingErrors
+                } else if (typeof this.handleParsingErrors === 'function') {
+                    observation = this.handleParsingErrors(e)
+                } else {
+                    throw e
+                }
+                output = {
+                    tool: '_Exception',
+                    toolInput: observation,
+                    log: text
+                } as AgentAction
+            } else {
+                throw e
+            }
+        }
+
+        if ('returnValues' in output) {
+            return output
+        }
+
+        let actions: AgentAction[]
+        if (Array.isArray(output)) {
+            actions = output as AgentAction[]
+        } else {
+            actions = [output as AgentAction]
+        }
+
+        const result: AgentStep[] = []
+        for (const agentAction of actions) {
+            let observation = ''
+            if (runManager) {
+                await runManager?.handleAgentAction(agentAction)
+            }
+            if (agentAction.tool in nameToolMap) {
+                const tool = nameToolMap[agentAction.tool]
+                try {
+                    /* Here we need to override Tool call method to include sessionId, chatId, input as parameter
+                     * Tool Call Parameters:
+                     * - arg: z.output<T>
+                     * - configArg?: RunnableConfig | Callbacks
+                     * - tags?: string[]
+                     * - flowConfig?: { sessionId?: string, chatId?: string, input?: string }
+                     */
+                    // @ts-ignore
+                    observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, {
+                        sessionId: this.sessionId,
+                        chatId: this.chatId,
+                        input: this.input
+                    })
+                } catch (e) {
+                    if (e instanceof ToolInputParsingException) {
+                        if (this.handleParsingErrors === true) {
+                            observation = 'Invalid or incomplete tool input. Please try again.'
+                        } else if (typeof this.handleParsingErrors === 'string') {
+                            observation = this.handleParsingErrors
+                        } else if (typeof this.handleParsingErrors === 'function') {
+                            observation = this.handleParsingErrors(e)
+                        } else {
+                            throw e
+                        }
+                        observation = await new ExceptionTool().call(observation, runManager?.getChild())
+                    }
+                }
+            } else {
+                observation = `${agentAction.tool} is not a valid tool, try another available tool: ${Object.keys(nameToolMap).join(', ')}`
+            }
+            result.push({
+                action: agentAction,
+                observation
+            })
+        }
+        return result
+    }
+
+    async _return(
+        output: AgentFinish,
+        intermediateSteps: AgentStep[],
+        runManager?: CallbackManagerForChainRun
+    ): Promise<ChainValues> {
+        if (runManager) {
+            await runManager.handleAgentEnd(output)
+        }
+        const finalOutput: Record<string, any> = output.returnValues
+        if (this.returnIntermediateSteps) {
+            finalOutput.intermediateSteps = intermediateSteps
+        }
+        return finalOutput
+    }
+
+    async _getToolReturn(nextStepOutput: AgentStep): Promise<AgentFinish | null> {
+        const { action, observation } = nextStepOutput
+        const nameToolMap = Object.fromEntries(this.tools.map((t) => [t.name.toLowerCase(), t]))
+        const [returnValueKey = 'output'] = this.agent.returnValues
+        // Invalid tools won't be in the map, so we return null.
+        if (action.tool in nameToolMap) {
+            if (nameToolMap[action.tool].returnDirect) {
+                return {
+                    returnValues: { [returnValueKey]: observation },
+                    log: ''
+                }
+            }
+        }
+        return null
+    }
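
As the branching in `_call` and `_takeNextStep` shows, `handleParsingErrors` accepts three shapes (inherited from LangChain's `AgentExecutorInput`). A brief sketch of each, with `agent` and `tools` assumed to exist:

    // true: substitute a generic observation (optionally echoing e.llmOutput back to the LLM)
    new AgentExecutor({ agent, tools, handleParsingErrors: true })

    // string: always feed back a fixed observation
    new AgentExecutor({ agent, tools, handleParsingErrors: 'Reformat your answer and try again.' })

    // function: derive the observation from the exception itself
    new AgentExecutor({
        agent,
        tools,
        handleParsingErrors: (e) => `Could not parse output: ${e.message}`
    })
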
+    _returnStoppedResponse(earlyStoppingMethod: StoppingMethod) {
+        if (earlyStoppingMethod === 'force') {
+            return {
+                returnValues: {
+                    output: 'Agent stopped due to iteration limit or time limit.'
+                },
+                log: ''
+            } as AgentFinish
+        }
+        throw new Error(`Got unsupported early_stopping_method: ${earlyStoppingMethod}`)
+    }
+
+    async *_streamIterator(inputs: Record<string, any>): AsyncGenerator<ChainValues> {
+        const agentExecutorIterator = new AgentExecutorIterator({
+            inputs,
+            agentExecutor: this,
+            metadata: this.metadata,
+            tags: this.tags,
+            callbacks: this.callbacks
+        })
+        const iterator = agentExecutorIterator.streamIterator()
+        for await (const step of iterator) {
+            if (!step) {
+                continue
+            }
+            yield step
+        }
+    }
+
+    _chainType() {
+        return 'agent_executor' as const
+    }
+
+    serialize(): SerializedLLMChain {
+        throw new Error('Cannot serialize an AgentExecutor')
+    }
+}
+
+class ExceptionTool extends Tool {
+    name = '_Exception'
+
+    description = 'Exception tool'
+
+    async _call(query: string) {
+        return query
+    }
+}
+
+export const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
+    steps.flatMap(({ action, observation }) => {
+        if ('messageLog' in action && action.messageLog !== undefined) {
+            const log = action.messageLog as BaseMessage[]
+            return log.concat(new FunctionMessage(observation, action.tool))
+        } else {
+            return [new AIMessage(action.log)]
+        }
+    })
diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json
index 9327084837b..eabc8f2ed70 100644
--- a/packages/server/marketplaces/chatflows/API Agent.json
+++ b/packages/server/marketplaces/chatflows/API Agent.json
@@ -936,7 +936,7 @@
                     "id": "conversationalAgent_0-input-tools-Tool"
                 },
                 {
-                    "label": "Language Model",
+                    "label": "Chat Model",
                     "name": "model",
                     "type": "BaseChatModel",
                     "id": "conversationalAgent_0-input-model-BaseChatModel"
diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json
index 2a9e05b54bb..0a5d4ac6e50 100644
--- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json
+++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json
@@ -13,7 +13,7 @@
         "data": {
             "id": "conversationalRetrievalQAChain_0",
             "label": "Conversational Retrieval QA Chain",
-            "version": 1,
+            "version": 2,
             "name": "conversationalRetrievalQAChain",
             "type": "ConversationalRetrievalQAChain",
             "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
@@ -28,47 +28,36 @@
                     "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
                 },
                 {
-                    "label": "System Message",
-                    "name": "systemMessagePrompt",
+                    "label": "Rephrase Prompt",
+                    "name": "rephrasePrompt",
                     "type": "string",
+                    "description": "Using previous chat history, rephrase question into a standalone question",
+                    "warning": "Prompt must include input variables: {chat_history} and {question}",
                     "rows": 4,
                     "additionalParams": true,
                     "optional": true,
-                    "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info.
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -89,9 +78,8 @@ "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", - "returnSourceDocuments": "", - "systemMessagePrompt": "", - "chainOption": "" + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { @@ -625,9 +613,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index 0ead3dd82b5..39d4d4007db 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -90,7 +90,7 @@ ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", "type": "BaseChatModel", "id": "conversationChain_0-input-model-BaseChatModel" diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index 8994594a169..b27d38863f4 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -354,7 +354,7 @@ "id": "conversationalAgent_0-input-tools-Tool" }, { - "label": "Language Model", + "label": "Chat Model", "name": "model", "type": "BaseChatModel", "id": "conversationalAgent_0-input-model-BaseChatModel" diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index 5c55d8332ee..e2fd64210e5 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -249,10 +249,10 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -264,47 +264,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -325,16 +314,15 @@ "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "", - "returnSourceDocuments": "", - "systemMessagePrompt": "", - "chainOption": "" + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { - "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain", + "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable", "name": "conversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain", - "type": "ConversationalRetrievalQAChain | BaseChain" + "type": "ConversationalRetrievalQAChain | BaseChain | Runnable" } ], "outputs": {}, @@ -704,9 +692,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index ac84cf56490..16f708015db 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -156,9 +156,9 @@ "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", "name": "conversationalRetrievalQAChain", - "version": 1, + "version": 2, "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -170,47 +170,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -232,15 +221,15 @@ "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", "returnSourceDocuments": true, - "systemMessagePrompt": "", - "chainOption": "" + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { - "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain", + "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable", "name": "conversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain", - "type": "ConversationalRetrievalQAChain | BaseChain" + "type": "ConversationalRetrievalQAChain | BaseChain | Runnable" } ], "outputs": {}, @@ -668,9 +657,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index e24ad7cafec..6f78cb05698 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -83,10 +83,10 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -98,47 +98,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -158,14 +147,16 @@ "inputs": { "model": "{{chatOllama_0.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}", - "memory": "" + "memory": "", + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { - "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", + "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable", "name": "conversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain", - "type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" + "type": "ConversationalRetrievalQAChain | BaseChain | Runnable" } ], "outputs": {}, @@ -649,9 +640,9 @@ "source": "chatOllama_0", "sourceHandle": "chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOllama_0-chatOllama_0-output-chatOllama-ChatOllama|SimpleChatModel|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index c39f746a27d..cf0fa4d4416 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -13,10 +13,10 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -28,47 +28,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -89,14 +78,16 @@ "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{qdrant_0.data.instance}}", "memory": "{{ZepMemory_0.data.instance}}", - "returnSourceDocuments": true + "returnSourceDocuments": true, + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { - "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", + "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable", "name": "conversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain", - "type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" + "type": "ConversationalRetrievalQAChain | BaseChain | Runnable" } ], "outputs": {}, @@ -232,7 +223,7 @@ "label": "Session Id", "name": "sessionId", "type": "string", - "description": "if empty, chatId will be used automatically", + "description": "If not specified, a random id will be used. Learn more", "default": "", "additionalParams": true, "optional": true, @@ -709,9 +700,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index 9865ae70173..abd85d3661c 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -249,10 +249,10 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "BaseLangChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -264,47 +264,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -323,14 +312,16 @@ ], "inputs": { "model": "{{chatOpenAI_0.data.instance}}", - "vectorStoreRetriever": "{{pinecone_0.data.instance}}" + "vectorStoreRetriever": "{{pinecone_0.data.instance}}", + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { - "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|BaseLangChain", + "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable", "name": "conversationalRetrievalQAChain", "label": "ConversationalRetrievalQAChain", - "type": "ConversationalRetrievalQAChain | BaseChain | BaseLangChain" + "type": "ConversationalRetrievalQAChain | BaseChain | Runnable" } ], "outputs": {}, @@ -763,9 +754,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index a2a807cdc76..d53cb55ee75 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -1567,7 +1567,7 @@ "id": "conversationalAgent_0-input-tools-Tool" }, { - "label": "Language Model", + "label": "Chat Model", "name": "model", "type": "BaseChatModel", "id": "conversationalAgent_0-input-model-BaseChatModel" diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json index 2dac382345e..2322136c91e 100644 --- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json +++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json @@ -262,7 +262,7 @@ ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", "type": "BaseChatModel", "id": "conversationChain_0-input-model-BaseChatModel" diff --git a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json index d9f9fb49c34..6f0edeea9a0 100644 --- a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json +++ b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json @@ -190,7 +190,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -205,47 +205,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - 
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -267,8 +256,8 @@ "vectorStoreRetriever": "{{vectara_0.data.instance}}", "memory": "", "returnSourceDocuments": true, - "systemMessagePrompt": "", - "chainOption": "" + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
}, "outputAnchors": [ { @@ -427,9 +416,9 @@ "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "target": "conversationalRetrievalQAChain_0", - "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index 0547366a6da..d905b54b01c 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -578,7 +578,7 @@ "id": "conversationalAgent_0-input-tools-Tool" }, { - "label": "Language Model", + "label": "Chat Model", "name": "model", "type": "BaseChatModel", "id": "conversationalAgent_0-input-model-BaseChatModel" diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 9b1119b9015..1b1d8de6611 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -162,10 +162,10 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 1, + "version": 2, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", - "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"], + "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Document QA - built on RetrievalQAChain to provide a chat history component", "inputParams": [ @@ -177,47 +177,36 @@ "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean" }, { - "label": "System Message", - "name": "systemMessagePrompt", + "label": "Rephrase Prompt", + "name": "rephrasePrompt", "type": "string", + "description": "Using previous chat history, rephrase question into a standalone question", + "warning": "Prompt must include input variables: {chat_history} and {question}", "rows": 4, "additionalParams": true, "optional": true, - "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. 
Never break character.", - "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string" + "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string" }, { - "label": "Chain Option", - "name": "chainOption", - "type": "options", - "options": [ - { - "label": "MapReduceDocumentsChain", - "name": "map_reduce", - "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time" - }, - { - "label": "RefineDocumentsChain", - "name": "refine", - "description": "Suitable for QA tasks over a large number of documents." - }, - { - "label": "StuffDocumentsChain", - "name": "stuff", - "description": "Suitable for QA tasks over a small number of documents." - } - ], + "label": "Response Prompt", + "name": "responsePrompt", + "type": "string", + "description": "Taking the rephrased question, search for answer from the provided context", + "warning": "Prompt must include input variable: {context}", + "rows": 4, "additionalParams": true, "optional": true, - "id": "conversationalRetrievalQAChain_0-input-chainOption-options" + "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.", + "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel" + "type": "BaseChatModel", + "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel" }, { "label": "Vector Store Retriever", @@ -239,15 +228,15 @@ "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}", "returnSourceDocuments": true, - "systemMessagePrompt": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given context. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Do not make up any information that is not in the context. Refuse to answer any question not about the info. Never break character.", - "chainOption": "" + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer." 
},
            "outputAnchors": [
                {
-                    "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain",
+                    "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
                     "name": "conversationalRetrievalQAChain",
                     "label": "ConversationalRetrievalQAChain",
-                    "type": "ConversationalRetrievalQAChain | BaseChain"
+                    "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
                }
            ],
            "outputs": {},
@@ -589,7 +578,7 @@
                     "label": "Session Id",
                     "name": "sessionId",
                     "type": "string",
-                    "description": "If not specified, the first CHAT_MESSAGE_ID will be used as sessionId",
+                    "description": "If not specified, a random id will be used. Learn more",
                     "default": "",
                     "additionalParams": true,
                     "optional": true,
@@ -772,9 +761,9 @@
         "source": "chatOpenAI_0",
         "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
         "target": "conversationalRetrievalQAChain_0",
-        "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+        "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
         "type": "buttonedge",
-        "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
+        "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
         "data": {
             "label": ""
         }
diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts
index 1ad4c7955e3..8f5ab5dbd1e 100644
--- a/packages/server/src/index.ts
+++ b/packages/server/src/index.ts
@@ -20,7 +20,6 @@ import {
     ICredentialReturnResponse,
     chatType,
     IChatMessage,
-    IReactFlowEdge,
     IDepthQueue,
     INodeDirectedGraph
 } from './Interface'
@@ -39,14 +38,14 @@ import {
     databaseEntities,
     transformToCredentialEntity,
     decryptCredentialData,
-    clearAllSessionMemory,
     replaceInputsWithConfig,
     getEncryptionKey,
-    checkMemorySessionId,
-    clearSessionMemoryFromViewMessageDialog,
+    getMemorySessionId,
     getUserHome,
-    replaceChatHistory,
-    getAllConnectedNodes
+    getSessionChatHistory,
+    getAllConnectedNodes,
+    clearSessionMemory,
+    findMemoryNode
 } from './utils'
 import { cloneDeep, omit, uniqWith, isEqual } from 'lodash'
 import { getDataSource } from './DataSource'
@@ -532,17 +531,18 @@ export class App {
             const parsedFlowData: IReactFlowObject = JSON.parse(flowData)
             const nodes = parsedFlowData.nodes

-            if (isClearFromViewMessageDialog) {
-                await clearSessionMemoryFromViewMessageDialog(
+            try {
+                await clearSessionMemory(
                     nodes,
                     this.nodesPool.componentNodes,
                     chatId,
                     this.AppDataSource,
                     sessionId,
-                    memoryType
+                    memoryType,
+                    isClearFromViewMessageDialog
                 )
-            } else {
-                await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId)
+            } catch (e) {
+                return res.status(500).send('Error clearing chat messages')
             }

             const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid, chatId }
@@ -1397,26 +1397,6 @@ export class App {
         return await this.AppDataSource.getRepository(ChatMessage).save(chatmessage)
     }

-    /**
-     * Method that find memory label that is connected within chatflow
-     * In a chatflow, there should only be 1 memory node
-     * @param {IReactFlowNode[]} nodes
-     * @param {IReactFlowEdge[]} edges
-     * @returns {string | undefined}
-     */
-    findMemoryLabel(nodes: IReactFlowNode[], edges:
IReactFlowEdge[]): IReactFlowNode | undefined { - const memoryNodes = nodes.filter((node) => node.data.category === 'Memory') - const memoryNodeIds = memoryNodes.map((mem) => mem.data.id) - - for (const edge of edges) { - if (memoryNodeIds.includes(edge.source)) { - const memoryNode = nodes.find((node) => node.data.id === edge.source) - return memoryNode - } - } - return undefined - } - async upsertVector(req: Request, res: Response, isInternal: boolean = false) { try { const chatflowid = req.params.id @@ -1585,7 +1565,6 @@ export class App { * - Still in sync (i.e the flow has not been modified since) * - Existing overrideConfig and new overrideConfig are the same * - Flow doesn't start with/contain nodes that depend on incomingInput.question - * - Its not an Upsert request * TODO: convert overrideConfig to hash when we no longer store base64 string but filepath ***/ const isFlowReusable = () => { @@ -1639,22 +1618,28 @@ export class App { isStreamValid = isFlowValidForStream(nodes, endingNodeData) } - let chatHistory: IMessage[] | string = incomingInput.history + let chatHistory: IMessage[] = incomingInput.history ?? [] - // When {{chat_history}} is used in Prompt Template, fetch the chat conversations from memory + // When {{chat_history}} is used in Prompt Template, fetch the chat conversations from memory node for (const endingNode of endingNodes) { const endingNodeData = endingNode.data + if (!endingNodeData.inputs?.memory) continue - if ( - endingNodeData.inputs?.memory && - !incomingInput.history && - (incomingInput.chatId || incomingInput.overrideConfig?.sessionId) - ) { - const memoryNodeId = endingNodeData.inputs?.memory.split('.')[0].replace('{{', '') - const memoryNode = nodes.find((node) => node.data.id === memoryNodeId) - if (memoryNode) { - chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger) - } + + const memoryNodeId = endingNodeData.inputs?.memory.split('.')[0].replace('{{', '') + const memoryNode = nodes.find((node) => node.data.id === memoryNodeId) + + if (!memoryNode) continue + + if (!chatHistory.length && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) { + chatHistory = await getSessionChatHistory( + memoryNode, + this.nodesPool.componentNodes, + incomingInput, + this.AppDataSource, + databaseEntities, + logger + ) } } @@ -1713,16 +1698,11 @@ export class App { logger.debug(`[server]: Running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`) - let sessionId = undefined - if (nodeToExecuteData.instance) sessionId = checkMemorySessionId(nodeToExecuteData.instance, chatId) - - const memoryNode = this.findMemoryLabel(nodes, edges) + const memoryNode = findMemoryNode(nodes, edges) const memoryType = memoryNode?.data.label - let chatHistory: IMessage[] | string = incomingInput.history - if (memoryNode && !incomingInput.history && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) { - chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger) - } + let sessionId = undefined + if (memoryNode) sessionId = getMemorySessionId(memoryNode, incomingInput, chatId, isInternal) const nodeInstanceFilePath = this.nodesPool.componentNodes[nodeToExecuteData.name].filePath as string const nodeModule = await import(nodeInstanceFilePath) @@ -1730,24 +1710,24 @@ export class App { let result = isStreamValid ? 
await nodeInstance.run(nodeToExecuteData, incomingInput.question, { + chatId, chatflowid, - chatHistory, - socketIO, - socketIOClientId: incomingInput.socketIOClientId, + chatHistory: incomingInput.history, logger, appDataSource: this.AppDataSource, databaseEntities, analytic: chatflow.analytic, - chatId + socketIO, + socketIOClientId: incomingInput.socketIOClientId }) : await nodeInstance.run(nodeToExecuteData, incomingInput.question, { + chatId, chatflowid, - chatHistory, + chatHistory: incomingInput.history, logger, appDataSource: this.AppDataSource, databaseEntities, - analytic: chatflow.analytic, - chatId + analytic: chatflow.analytic }) result = typeof result === 'string' ? { text: result } : result diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index e7a35c82098..7569d54142e 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -26,7 +26,8 @@ import { getEncryptionKeyPath, ICommonObject, IDatabaseEntity, - IMessage + IMessage, + FlowiseMemory } from 'flowise-components' import { randomBytes } from 'crypto' import { AES, enc } from 'crypto-js' @@ -270,7 +271,7 @@ export const buildLangchain = async ( depthQueue: IDepthQueue, componentNodes: IComponentNodes, question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], chatId: string, chatflowid: string, appDataSource: DataSource, @@ -317,9 +318,10 @@ export const buildLangchain = async ( await newNodeInstance.vectorStoreMethods!['upsert']!.call(newNodeInstance, reactFlowNodeData, { chatId, chatflowid, + chatHistory, + logger, appDataSource, databaseEntities, - logger, cachePool, dynamicVariables }) @@ -330,9 +332,10 @@ export const buildLangchain = async ( let outputResult = await newNodeInstance.init(reactFlowNodeData, question, { chatId, chatflowid, + chatHistory, + logger, appDataSource, databaseEntities, - logger, cachePool, dynamicVariables }) @@ -424,66 +427,52 @@ export const buildLangchain = async ( } /** - * Clear all session memories on the canvas - * @param {IReactFlowNode[]} reactFlowNodes - * @param {IComponentNodes} componentNodes - * @param {string} chatId - * @param {DataSource} appDataSource - * @param {string} sessionId - */ -export const clearAllSessionMemory = async ( - reactFlowNodes: IReactFlowNode[], - componentNodes: IComponentNodes, - chatId: string, - appDataSource: DataSource, - sessionId?: string -) => { - for (const node of reactFlowNodes) { - if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue - const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string - const nodeModule = await import(nodeInstanceFilePath) - const newNodeInstance = new nodeModule.nodeClass() - - if (sessionId && node.data.inputs) { - node.data.inputs.sessionId = sessionId - } - - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - } - } -} - -/** - * Clear specific session memory from View Message Dialog UI + * Clear session memories * @param {IReactFlowNode[]} reactFlowNodes * @param {IComponentNodes} componentNodes * @param {string} chatId * @param {DataSource} appDataSource * @param {string} sessionId * @param {string} memoryType + * @param {string} isClearFromViewMessageDialog */ -export const clearSessionMemoryFromViewMessageDialog = async ( +export const clearSessionMemory = async ( reactFlowNodes: IReactFlowNode[], 
componentNodes: IComponentNodes, chatId: string, appDataSource: DataSource, sessionId?: string, - memoryType?: string + memoryType?: string, + isClearFromViewMessageDialog?: string ) => { - if (!sessionId) return for (const node of reactFlowNodes) { if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue - if (memoryType && node.data.label !== memoryType) continue + + // Only clear specific session memory from View Message Dialog UI + if (isClearFromViewMessageDialog && memoryType && node.data.label !== memoryType) continue + const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() + const options: ICommonObject = { chatId, appDataSource, databaseEntities, logger } - if (sessionId && node.data.inputs) node.data.inputs.sessionId = sessionId - - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - return + // SessionId always take priority first because it is the sessionId used for 3rd party memory node + if (sessionId && node.data.inputs) { + if (node.data.type === 'OpenAIAssistant') { + await newNodeInstance.clearChatMessages(node.data, options, { type: 'threadId', id: sessionId }) + } else { + node.data.inputs.sessionId = sessionId + const initializedInstance: FlowiseMemory = await newNodeInstance.init(node.data, '', options) + await initializedInstance.clearChatMessages(sessionId) + } + } else if (chatId && node.data.inputs) { + if (node.data.type === 'OpenAIAssistant') { + await newNodeInstance.clearChatMessages(node.data, options, { type: 'chatId', id: chatId }) + } else { + node.data.inputs.sessionId = chatId + const initializedInstance: FlowiseMemory = await newNodeInstance.init(node.data, '', options) + await initializedInstance.clearChatMessages(chatId) + } } } } @@ -500,7 +489,7 @@ export const getVariableValue = ( paramValue: string, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], isAcceptVariable = false ) => { let returnVal = paramValue @@ -533,10 +522,7 @@ export const getVariableValue = ( } if (isAcceptVariable && variableFullPath === CHAT_HISTORY_VAR_PREFIX) { - variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters( - typeof chatHistory === 'string' ? chatHistory : convertChatHistoryToText(chatHistory), - false - ) + variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false) } // Split by first occurrence of '.' 
to get just nodeId @@ -579,7 +565,7 @@ export const resolveVariables = ( reactFlowNodeData: INodeData, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string + chatHistory: IMessage[] ): INodeData => { let flowNodeData = cloneDeep(reactFlowNodeData) const types = 'inputs' @@ -966,21 +952,43 @@ export const redactCredentialWithPasswordType = ( } /** - * Replace sessionId with new chatId - * Ex: after clear chat history, use the new chatId as sessionId + * Get sessionId + * Hierarchy of sessionId (top down) + * API/Embed: + * (1) Provided in API body - incomingInput.overrideConfig: { sessionId: 'abc' } + * (2) Provided in API body - incomingInput.chatId + * + * API/Embed + UI: + * (3) Hard-coded sessionId in UI + * (4) Not specified on UI nor API, default to chatId * @param {any} instance + * @param {IncomingInput} incomingInput * @param {string} chatId */ -export const checkMemorySessionId = (instance: any, chatId: string): string | undefined => { - if (instance.memory && instance.memory.isSessionIdUsingChatMessageId && chatId) { - instance.memory.sessionId = chatId - instance.memory.chatHistory.sessionId = chatId +export const getMemorySessionId = ( + memoryNode: IReactFlowNode, + incomingInput: IncomingInput, + chatId: string, + isInternal: boolean +): string | undefined => { + if (!isInternal) { + // Provided in API body - incomingInput.overrideConfig: { sessionId: 'abc' } + if (incomingInput.overrideConfig?.sessionId) { + return incomingInput.overrideConfig?.sessionId + } + // Provided in API body - incomingInput.chatId + if (incomingInput.chatId) { + return incomingInput.chatId + } } - if (instance.memory && instance.memory.sessionId) return instance.memory.sessionId - else if (instance.memory && instance.memory.chatHistory && instance.memory.chatHistory.sessionId) - return instance.memory.chatHistory.sessionId - return undefined + // Hard-coded sessionId in UI + if (memoryNode.data.inputs?.sessionId) { + return memoryNode.data.inputs.sessionId + } + + // Default chatId + return chatId } /** @@ -992,31 +1000,52 @@ export const checkMemorySessionId = (instance: any, chatId: string): string | un * @param {any} logger * @returns {string} */ -export const replaceChatHistory = async ( +export const getSessionChatHistory = async ( memoryNode: IReactFlowNode, + componentNodes: IComponentNodes, incomingInput: IncomingInput, appDataSource: DataSource, databaseEntities: IDatabaseEntity, logger: any -): Promise => { - const nodeInstanceFilePath = memoryNode.data.filePath as string +): Promise => { + const nodeInstanceFilePath = componentNodes[memoryNode.data.name].filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() + // Replace memory's sessionId/chatId if (incomingInput.overrideConfig?.sessionId && memoryNode.data.inputs) { memoryNode.data.inputs.sessionId = incomingInput.overrideConfig.sessionId + } else if (incomingInput.chatId && memoryNode.data.inputs) { + memoryNode.data.inputs.sessionId = incomingInput.chatId } - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.getChatMessages) { - return await newNodeInstance.memoryMethods.getChatMessages(memoryNode.data, { - chatId: incomingInput.chatId, - appDataSource, - databaseEntities, - logger - }) - } + const initializedInstance: FlowiseMemory = await newNodeInstance.init(memoryNode.data, '', { + appDataSource, + databaseEntities, + logger + }) - return '' + return (await initializedInstance.getChatMessages()) as IMessage[] +} 
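// A rough usage sketch for getMemorySessionId above, with hypothetical values (illustration only,
// not part of the patch); it mirrors the hierarchy documented in the JSDoc:
//   getMemorySessionId(memoryNode, { overrideConfig: { sessionId: 'abc' } } as IncomingInput, 'chat-1', false) // -> 'abc'
//   getMemorySessionId(memoryNode, { chatId: 'chat-2' } as IncomingInput, 'chat-1', false) // -> 'chat-2'
//   getMemorySessionId(memoryNode, {} as IncomingInput, 'chat-1', true) // -> memoryNode.data.inputs?.sessionId, else 'chat-1'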
+
+/**
+ * Method that finds the memory node that is connected within the chatflow
+ * In a chatflow, there should only be 1 memory node
+ * @param {IReactFlowNode[]} nodes
+ * @param {IReactFlowEdge[]} edges
+ * @returns {IReactFlowNode | undefined}
+ */
+export const findMemoryNode = (nodes: IReactFlowNode[], edges: IReactFlowEdge[]): IReactFlowNode | undefined => {
+    const memoryNodes = nodes.filter((node) => node.data.category === 'Memory')
+    const memoryNodeIds = memoryNodes.map((mem) => mem.data.id)
+
+    for (const edge of edges) {
+        if (memoryNodeIds.includes(edge.source)) {
+            const memoryNode = nodes.find((node) => node.data.id === edge.source)
+            return memoryNode
+        }
+    }
+    return undefined
 }

 /**
diff --git a/packages/ui/src/views/canvas/NodeInputHandler.js b/packages/ui/src/views/canvas/NodeInputHandler.js
index 617d1066cd6..a673d6b74cc 100644
--- a/packages/ui/src/views/canvas/NodeInputHandler.js
+++ b/packages/ui/src/views/canvas/NodeInputHandler.js
@@ -280,6 +280,7 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
                             style={{
                                 display: 'flex',
                                 flexDirection: 'row',
+                                alignItems: 'center',
                                 borderRadius: 10,
                                 background: 'rgb(254,252,191)',
                                 padding: 10,
@@ -287,7 +288,7 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA
                                 marginBottom: 10
                             }}
                         >
-
+
                             {inputParam.warning}

                     )}

From 244093923d77f0b2a09d3ffbecb30d74777b5102 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Mon, 8 Jan 2024 07:25:54 -0800
Subject: [PATCH 31/51] updates per PR comments

---
 .../components/nodes/chains/VectaraChain/VectaraChain.ts | 3 ++-
 packages/components/nodes/vectorstores/Vectara/Vectara.ts | 5 ++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
index 986d587aa49..7d65c9cd078 100644
--- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
+++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts
@@ -248,7 +248,8 @@ class VectaraChain_Chains implements INode {
             lexicalInterpolationConfig: { lambda: vectaraFilter?.lambda ?? 0.025 }
         }))

-        const mmrRerankerId = 272725718 // Vectara reranker ID for MMR
+        // Vectara reranker ID for MMR (https://docs.vectara.com/docs/api-reference/search-apis/reranking#maximal-marginal-relevance-mmr-reranker)
+        const mmrRerankerId = 272725718
         const mmrEnabled = vectaraFilter?.mmrConfig?.enabled

         const data = {
diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index be63d58228b..df709e0b617 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -110,7 +110,10 @@ class Vectara_VectorStores implements INode {
             {
                 label: 'MMR diversity bias',
                 name: 'mmrDiversityBias',
-                description: 'The diversity bias to use for MMR. Defaults to 0 (MMR disabled)',
+                description:
+                    'The diversity bias to use for MMR. This is a value between 0.0 and 1.0. ' +
+                    'Values closer to 1.0 optimize for the most diverse results. '
+ + 'Defaults to 0 (MMR disabled)', placeholder: '0.0', type: 'number', additionalParams: true, From bb77e3f591b6bb03b00ca7fc3901984ed9d8241f Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 8 Jan 2024 17:13:07 +0000 Subject: [PATCH 32/51] fix chatbot config --- packages/server/src/index.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 1ad4c7955e3..b6f5919178e 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -362,7 +362,8 @@ export class App { const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({ id: req.params.id }) - if (chatflow && chatflow.chatbotConfig) { + if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`) + if (chatflow.chatbotConfig) { try { const parsedConfig = JSON.parse(chatflow.chatbotConfig) return res.json(parsedConfig) @@ -370,7 +371,7 @@ export class App { return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`) } } - return res.status(404).send(`Chatbot Config for Chatflow ${req.params.id} not found`) + return res.status(200).send('OK') }) // Save chatflow From 78a6926ca3f049007bd9777316f6083aee733220 Mon Sep 17 00:00:00 2001 From: Ofer Mendelevitch Date: Mon, 8 Jan 2024 10:26:17 -0800 Subject: [PATCH 33/51] added step to diversityBias --- packages/components/nodes/vectorstores/Vectara/Vectara.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts index df709e0b617..45825b4f927 100644 --- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts +++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts @@ -110,6 +110,7 @@ class Vectara_VectorStores implements INode { { label: 'MMR diversity bias', name: 'mmrDiversityBias', + step: 0.1, description: 'The diversity bias to use for MMR. This is a value between 0.0 and 1.0' + 'Values closer to 1.0 optimize for the most diverse results.' + From 4622fd8a02a39517f264af63acc411f0b282d89c Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 8 Jan 2024 18:39:36 +0000 Subject: [PATCH 34/51] update self-host readme --- README.md | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 82e92832bc7..3e6b7e5610c 100644 --- a/README.md +++ b/README.md @@ -145,37 +145,40 @@ Flowise support different environment variables to configure your instance. You ## 🌐 Self Host -### [Railway](https://docs.flowiseai.com/deployment/railway) +Deploy Flowise self-hosted in your existing infrastructure, we support various [deployments](https://docs.flowiseai.com/configuration/deployment) -[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9) +- [AWS](https://docs.flowiseai.com/deployment/aws) +- [Azure](https://docs.flowiseai.com/deployment/azure) +- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean) +- [GCP](https://docs.flowiseai.com/deployment/gcp) +-
+ Others -### [Render](https://docs.flowiseai.com/deployment/render) + - [Railway](https://docs.flowiseai.com/deployment/railway) -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render) + [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9) -### [Elestio](https://elest.io/open-source/flowiseai) + - [Render](https://docs.flowiseai.com/deployment/render) -[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai) + [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render) -### [RepoCloud](https://repocloud.io/details/?app_id=29) + - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face) -[![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29) + HuggingFace Spaces -### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face) + - [Elestio](https://elest.io/open-source/flowiseai) -HuggingFace Spaces + [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai) -### Sealos + - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) -[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + [![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) -### [AWS](https://docs.flowiseai.com/deployment/aws) + - [RepoCloud](https://repocloud.io/details/?app_id=29) -### [Azure](https://docs.flowiseai.com/deployment/azure) + [![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29) -### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean) - -### [GCP](https://docs.flowiseai.com/deployment/gcp) +
## 💻 Cloud Hosted

From 07411a78a3378e1eac112c337c131fb11a44b9d6 Mon Sep 17 00:00:00 2001
From: Henry
Date: Mon, 8 Jan 2024 23:34:53 +0000
Subject: [PATCH 35/51] removed zapier nla credential

---
 .../credentials/ZapierNLAApi.credential.ts | 24 -------------------
 1 file changed, 24 deletions(-)
 delete mode 100644 packages/components/credentials/ZapierNLAApi.credential.ts

diff --git a/packages/components/credentials/ZapierNLAApi.credential.ts b/packages/components/credentials/ZapierNLAApi.credential.ts
deleted file mode 100644
index 72035660eca..00000000000
--- a/packages/components/credentials/ZapierNLAApi.credential.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import { INodeParams, INodeCredential } from '../src/Interface'
-
-class ZapierNLAApi implements INodeCredential {
-    label: string
-    name: string
-    version: number
-    description: string
-    inputs: INodeParams[]
-
-    constructor() {
-        this.label = 'Zapier NLA API'
-        this.name = 'zapierNLAApi'
-        this.version = 1.0
-        this.inputs = [
-            {
-                label: 'Zapier NLA Api Key',
-                name: 'zapierNLAApiKey',
-                type: 'password'
-            }
-        ]
-    }
-}
-
-module.exports = { credClass: ZapierNLAApi }

From a26167ac5afff1df527f19d60e3b94c4ca539ac7 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Mon, 8 Jan 2024 16:09:50 -0800
Subject: [PATCH 36/51] updated component to V2.0

Updated marketplace "Chain Upload" JSON file

---
 .../nodes/vectorstores/Vectara/Vectara.ts | 2 +-
 .../chatflows/Vectara LLM Chain Upload.json | 31 +++++++++++++++++--
 2 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/packages/components/nodes/vectorstores/Vectara/Vectara.ts b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
index 45825b4f927..939a4ac3d0e 100644
--- a/packages/components/nodes/vectorstores/Vectara/Vectara.ts
+++ b/packages/components/nodes/vectorstores/Vectara/Vectara.ts
@@ -22,7 +22,7 @@ class Vectara_VectorStores implements INode {
     constructor() {
         this.label = 'Vectara'
         this.name = 'vectara'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'Vectara'
         this.icon = 'vectara.png'
         this.category = 'Vector Stores'
diff --git a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
index d9f9fb49c34..3f6fcda504c 100644
--- a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
+++ b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json
@@ -361,13 +361,36 @@
                 {
                     "label": "Top K",
                     "name": "topK",
-                    "description": "Number of top results to fetch. Defaults to 4",
-                    "placeholder": "4",
+                    "description": "Number of top results to fetch. Defaults to 5",
+                    "placeholder": "5",
                     "type": "number",
                     "additionalParams": true,
                     "optional": true,
                     "id": "vectara_0-input-topK-number"
+                },
+                {
+                    "label": "MMR K",
+                    "name": "mmrK",
+                    "description": "The number of results to rerank if MMR is enabled.",
+                    "placeholder": "50",
+                    "type": "number",
+                    "additionalParams": true,
+                    "optional": true,
+                    "id": "vectara_0-input-mmrK-number"
+                },
+                {
+                    "label": "MMR Diversity Bias",
+                    "name": "mmrDiversityBias",
+                    "step": 0.1,
+                    "description": "Diversity Bias parameter for MMR, if enabled. 0.0 means no diversity bias, 1.0 means maximum diversity bias. 
Defaults to 0.0 (MMR disabled).", + "placeholder": "0.0", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "vectara_0-input-mmrDiversityBias-number" } + + ], "inputAnchors": [ { @@ -385,7 +408,9 @@ "sentencesBefore": "", "sentencesAfter": "", "lambda": "", - "topK": "" + "topK": "", + "mmrK": "", + "mmrDiversityBias": "" }, "outputAnchors": [ { From b5bcfc0d5c32f0ed91b950479555fb93a0c5d087 Mon Sep 17 00:00:00 2001 From: Ofer Mendelevitch Date: Mon, 8 Jan 2024 16:46:36 -0800 Subject: [PATCH 37/51] after yarn lint-fix --- .../marketplaces/chatflows/Vectara LLM Chain Upload.json | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json index 3f6fcda504c..33b93578004 100644 --- a/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json +++ b/packages/server/marketplaces/chatflows/Vectara LLM Chain Upload.json @@ -377,7 +377,7 @@ "additionalParams": true, "optional": true, "id": "vectara_0-input-mmrK-number" - }, + }, { "label": "MMR Diversity Bias", "name": "mmrDiversityBias", @@ -389,8 +389,6 @@ "optional": true, "id": "vectara_0-input-mmrDiversityBias-number" } - - ], "inputAnchors": [ { From 6ec1c9249b58e1a05c9bcb70c67aa8417bd6155a Mon Sep 17 00:00:00 2001 From: Keith Kacsh Date: Mon, 8 Jan 2024 17:53:18 -0700 Subject: [PATCH 38/51] Revert model var to string, refactor for case without a key and just override if so --- CONTRIBUTING.md | 1 - .../nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts | 17 ++++++----------- packages/server/.env.example | 2 -- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2c91906c9c5..04cb80b4d61 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -141,7 +141,6 @@ Flowise support different environment variables to configure your instance. You | DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | | SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | | FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | -| LOCALAI_CHAT_MODELS | JSON-encoded string representing an array of chat models for LocalAI. Each object in the array should have a 'label' and 'name' property. | String | '[]' (Empty Array) | You can also specify the env variables when using `npx`. For example: diff --git a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts index 258db1f805c..c44f03ce192 100644 --- a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts +++ b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts @@ -33,9 +33,6 @@ class ChatLocalAI_ChatModels implements INode { credentialNames: ['LocalAIApi'], optional: true } - - const modelOptions = JSON.parse(process.env.LOCALAI_CHAT_MODELS || '[]'); - this.inputs = [ { label: 'Cache', @@ -52,10 +49,8 @@ class ChatLocalAI_ChatModels implements INode { { label: 'Model Name', name: 'modelName', - type: 'options', - options: modelOptions, - default: modelOptions.length > 0 ? 
modelOptions[0].name : '',
-                optional: true
+                type: 'string',
+                placeholder: 'gpt4all-lora-quantized.bin'
             },
             {
                 label: 'Temperature',
@@ -99,22 +94,22 @@ class ChatLocalAI_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string
-
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
-        const openAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)
+        const localAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { localAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
-            openAIApiKey
+            openAIApiKey: 'sk-'
         }

         if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey

         const model = new OpenAIChat(obj, { basePath })

diff --git a/packages/server/.env.example b/packages/server/.env.example
index 9b7be0ff899..6e746a4df4e 100644
--- a/packages/server/.env.example
+++ b/packages/server/.env.example
@@ -26,5 +26,3 @@ PORT=3000
 # LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
 # LANGCHAIN_API_KEY=your_api_key
 # LANGCHAIN_PROJECT=your_project
-
-# LOCALAI_CHAT_MODELS='[{"label": "model1", "name": "model1"}, {"label": "model2", "name": "model2"}]'

From 6a114b3717206e742187593a0ddb0ba5f964ae58 Mon Sep 17 00:00:00 2001
From: Carson Yang
Date: Tue, 9 Jan 2024 13:58:40 +0800
Subject: [PATCH 39/51] Update README-ZH.md

---
 README-ZH.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README-ZH.md b/README-ZH.md
index 2805ef9bcab..208eee92da1 100644
--- a/README-ZH.md
+++ b/README-ZH.md
@@ -153,6 +153,10 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package

 [![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)

+### [Sealos](https://docs.flowiseai.com/configuration/deployment/sealos)
+
+[![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://template.cloud.sealos.io/deploy?templateName=flowise)
+
 ### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)

 HuggingFace Spaces

From 1dc96906966394cd999a8b69676076274c9a38d3 Mon Sep 17 00:00:00 2001
From: YISH
Date: Wed, 10 Jan 2024 17:32:46 +0800
Subject: [PATCH 40/51] Add milvusTextField configuration for Milvus

LangChain Python uses `text`
(https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/vectorstores/milvus.py#L119)
while LangChain JS uses `langchain`
(https://github.com/langchain-ai/langchainjs/blob/main/libs/langchain-community/src/vectorstores/milvus.ts#L61),
so it is necessary to add a milvusTextField configuration for Milvus.
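For illustration, a minimal sketch of what the new input enables, assuming the
`langchain/vectorstores/milvus` entrypoint this package already uses; the address, collection
name and query are placeholders, not taken from this patch:

    import { Milvus, MilvusLibArgs } from 'langchain/vectorstores/milvus'
    import { OpenAIEmbeddings } from 'langchain/embeddings/openai'

    const readPythonCollection = async () => {
        // Point the JS vector store at a collection written by LangChain Python,
        // where the page content lives in the `text` field.
        const milvusArgs: MilvusLibArgs = {
            url: 'localhost:19530', // placeholder address
            collectionName: 'my_python_collection', // placeholder collection
            textField: 'text' // the value the new milvusTextField input feeds into
        }
        const vectorStore = await Milvus.fromExistingCollection(new OpenAIEmbeddings(), milvusArgs)
        return vectorStore.similaritySearch('hello', 4)
    }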
---
 .../components/nodes/vectorstores/Milvus/Milvus.ts | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/packages/components/nodes/vectorstores/Milvus/Milvus.ts b/packages/components/nodes/vectorstores/Milvus/Milvus.ts
index 090f35f7497..7566f8a8a90 100644
--- a/packages/components/nodes/vectorstores/Milvus/Milvus.ts
+++ b/packages/components/nodes/vectorstores/Milvus/Milvus.ts
@@ -65,6 +65,14 @@ class Milvus_VectorStores implements INode {
             name: 'milvusCollection',
             type: 'string'
         },
+        {
+            label: 'Milvus Text Field',
+            name: 'milvusTextField',
+            type: 'string',
+            placeholder: 'langchain_text',
+            optional: true,
+            additionalParams: true
+        },
         {
             label: 'Milvus Filter',
             name: 'milvusFilter',
@@ -150,6 +158,7 @@ class Milvus_VectorStores implements INode {
         const address = nodeData.inputs?.milvusServerUrl as string
         const collectionName = nodeData.inputs?.milvusCollection as string
         const milvusFilter = nodeData.inputs?.milvusFilter as string
+        const textField = nodeData.inputs?.milvusTextField as string

         // embeddings
         const embeddings = nodeData.inputs?.embeddings as Embeddings
@@ -169,7 +178,8 @@ class Milvus_VectorStores implements INode {
         // init MilvusLibArgs
         const milVusArgs: MilvusLibArgs = {
             url: address,
-            collectionName: collectionName
+            collectionName: collectionName,
+            textField: textField
         }

         if (milvusUser) milVusArgs.username = milvusUser

From be0fff4d9d9cc2f910fad491b7af07549f52e82a Mon Sep 17 00:00:00 2001
From: YISH
Date: Wed, 10 Jan 2024 17:41:53 +0800
Subject: [PATCH 41/51] Fix OpenAIFunctionAgent when a function does not
 return a string result

Refer to
https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/agents/format_scratchpad/openai_functions.py#L29.
Also fix the role of systemMessage from `ai` to `system`.

---
 .../OpenAIFunctionAgent/OpenAIFunctionAgent.ts | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
index c0095cee16b..ac000c1ee4f 100644
--- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
+++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
@@ -109,9 +109,18 @@ class OpenAIFunctionAgent_Agents implements INode {

 const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
     steps.flatMap(({ action, observation }) => {
+        const create_function_message = (observation: string, action: AgentAction) => {
+            let content: string
+            if (typeof observation !== 'string') {
+                content = JSON.stringify(observation)
+            } else {
+                content = observation
+            }
+            return new FunctionMessage(content, action.tool)
+        }
         if ('messageLog' in action && action.messageLog !== undefined) {
             const log = action.messageLog as BaseMessage[]
-            return log.concat(new FunctionMessage(observation, action.tool))
+            return log.concat(create_function_message(observation, action))
         } else {
             return [new AIMessage(action.log)]
         }
@@ -127,7 +136,7 @@ const prepareAgent = (nodeData: INodeData, sessionId?: string) => {
     const inputKey = memory.inputKey ? memory.inputKey : 'input'

     const prompt = ChatPromptTemplate.fromMessages([
-        ['ai', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
+        ['system', systemMessage ? 
systemMessage : `You are a helpful AI assistant.`], new MessagesPlaceholder(memoryKey), ['human', `{${inputKey}}`], new MessagesPlaceholder('agent_scratchpad') From 3bc52426bace3a79613e4b4587ad7961ac551cd4 Mon Sep 17 00:00:00 2001 From: Joshua Carter Date: Wed, 10 Jan 2024 12:30:01 -0800 Subject: [PATCH 42/51] Correct DockerHub link in docs from private to public repo page --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index d3ad1c19708..11b29cf3827 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,6 +1,6 @@ # Flowise Docker Hub Image -Starts Flowise from [DockerHub Image](https://hub.docker.com/repository/docker/flowiseai/flowise/general) +Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise) ## Usage From e634a6b584b9d01381b5d31eeab68445a78763dd Mon Sep 17 00:00:00 2001 From: hakeemsyd Date: Fri, 12 Jan 2024 00:31:21 +0500 Subject: [PATCH 43/51] feature: Integrate Astra Vectorstore --- .../credentials/AstraApi.credential.ts | 34 ++++ .../nodes/vectorstores/Astra/Astra.ts | 190 ++++++++++++++++++ .../nodes/vectorstores/Astra/astra.svg | 1 + packages/components/package.json | 2 + 4 files changed, 227 insertions(+) create mode 100644 packages/components/credentials/AstraApi.credential.ts create mode 100644 packages/components/nodes/vectorstores/Astra/Astra.ts create mode 100644 packages/components/nodes/vectorstores/Astra/astra.svg diff --git a/packages/components/credentials/AstraApi.credential.ts b/packages/components/credentials/AstraApi.credential.ts new file mode 100644 index 00000000000..ad4c65a811c --- /dev/null +++ b/packages/components/credentials/AstraApi.credential.ts @@ -0,0 +1,34 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class AstraApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Astra API' + this.name = 'AstraApi' + this.version = 1.0 + this.inputs = [ + { + label: 'Colection Name', + name: 'collectionName', + type: 'string' + }, + { + label: 'Astra DB Application Token', + name: 'applicationToken', + type: 'password' + }, + { + label: 'Astra DB Api Endpoint', + name: 'dbEndPoint', + type: 'string' + } + ] + } +} + +module.exports = { credClass: AstraApi } diff --git a/packages/components/nodes/vectorstores/Astra/Astra.ts b/packages/components/nodes/vectorstores/Astra/Astra.ts new file mode 100644 index 00000000000..648a8b49108 --- /dev/null +++ b/packages/components/nodes/vectorstores/Astra/Astra.ts @@ -0,0 +1,190 @@ +import { flatten } from 'lodash' +import { Embeddings } from 'langchain/embeddings/base' +import { Document } from 'langchain/document' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData } from '../../../src/utils' +import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb' + +class Astra_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + badge: string + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Astra' + this.name = 'Astra' + this.version = 1.0 + this.type = 'Astra' + this.icon = 'astra.svg' + this.category = 'Vector Stores' + this.description = `Upsert embedded data and perform 
similarity search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads` + this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever'] + this.badge = 'NEW' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['AstraApi'] + } + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'Embeddings' + }, + { + label: 'Vector Dimension', + name: 'vectorDimension', + type: 'number', + placeholder: '1536', + optional: true, + description: 'Dimension used for storing vector embedding' + }, + { + label: 'Similarity Metric', + name: 'similarityMetric', + type: 'string', + placeholder: 'cosine', + optional: true, + description: 'cosine | euclidean | dot_product' + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + } + ] + this.outputs = [ + { + label: 'Astra Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Astra Vector Store', + name: 'vectorStore', + baseClasses: [this.type, ...getBaseClasses(AstraDBVectorStore)] + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData, options: ICommonObject): Promise { + const docs = nodeData.inputs?.document as Document[] + const embeddings = nodeData.inputs?.embeddings as Embeddings + const vectorDimension = nodeData.inputs?.vectorDimension as number + const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + + const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product'] + if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) { + throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`) + } + + const clientConfig = { + token: credentialData?.applicationToken ?? 'dummy', + endpoint: credentialData?.dbEndPoint ?? 'dummy' + } + + const astraConfig: AstraLibArgs = { + ...clientConfig, + collection: credentialData.collectionName ?? 'flowise_test', + collectionOptions: { + vector: { + dimension: vectorDimension ?? 1536, + metric: similarityMetric ?? 'cosine' + } + } + } + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new Document(flattenDocs[i])) + } + } + + try { + await AstraDBVectorStore.fromDocuments(finalDocs, embeddings, astraConfig) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const docs = nodeData.inputs?.document as Document[] + const embeddings = nodeData.inputs?.embeddings as Embeddings + const vectorDimension = nodeData.inputs?.vectorDimension as number + const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined + const output = nodeData.outputs?.output as string + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + + const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product'] + if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) { + throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`) + } + + const clientConfig = { + token: credentialData?.applicationToken ?? 'dummy', + endpoint: credentialData?.dbEndPoint ?? 'dummy' + } + + const astraConfig: AstraLibArgs = { + ...clientConfig, + collection: credentialData.collectionName ?? 'flowise_test', + collectionOptions: { + vector: { + dimension: vectorDimension ?? 1536, + metric: similarityMetric ?? 'cosine' + } + } + } + + const flattenDocs = docs && docs.length ? flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new Document(flattenDocs[i])) + } + } + + const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig) + + if (output === 'retriever') { + const retriever = vectorStore.asRetriever(k) + return retriever + } else if (output === 'vectorStore') { + ;(vectorStore as any).k = k + return vectorStore + } + return vectorStore + } +} + +module.exports = { nodeClass: Astra_VectorStores } diff --git a/packages/components/nodes/vectorstores/Astra/astra.svg b/packages/components/nodes/vectorstores/Astra/astra.svg new file mode 100644 index 00000000000..59c2fc3f652 --- /dev/null +++ b/packages/components/nodes/vectorstores/Astra/astra.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/package.json b/packages/components/package.json index a2565430bd4..07b2c3dfb50 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -19,6 +19,7 @@ "@aws-sdk/client-bedrock-runtime": "3.422.0", "@aws-sdk/client-dynamodb": "^3.360.0", "@aws-sdk/client-s3": "^3.427.0", + "@datastax/astra-db-ts": "^0.1.2", "@dqbd/tiktoken": "^1.0.7", "@elastic/elasticsearch": "^8.9.0", "@getzep/zep-js": "^0.9.0", @@ -26,6 +27,7 @@ "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^0.2.1", "@huggingface/inference": "^2.6.1", + "@langchain/community": "^0.0.16", "@langchain/google-genai": "^0.0.6", "@langchain/mistralai": "^0.0.6", "@notionhq/client": "^2.2.8", From 8a470a85b77499c2ad4ff5773b508b7d9e99aefe Mon Sep 17 00:00:00 2001 From: hakeemsyd Date: Fri, 12 Jan 2024 01:36:57 +0500 Subject: [PATCH 44/51] chore: refactoring (naming convention) --- packages/components/credentials/AstraApi.credential.ts | 10 +++++----- packages/components/nodes/vectorstores/Astra/Astra.ts | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/components/credentials/AstraApi.credential.ts b/packages/components/credentials/AstraApi.credential.ts index ad4c65a811c..3bec1a49e56 100644 --- a/packages/components/credentials/AstraApi.credential.ts +++ b/packages/components/credentials/AstraApi.credential.ts @@ -1,6 +1,6 @@ import { INodeParams, INodeCredential } from '../src/Interface' -class AstraApi implements INodeCredential { +class AstraDBApi implements INodeCredential { label: string name: string version: number @@ -8,12 +8,12 @@ class AstraApi implements INodeCredential { inputs: INodeParams[] constructor() { - this.label = 'Astra API' - this.name = 'AstraApi' + this.label = 'Astra DB API' + this.name = 'AstraDBApi' this.version = 1.0 this.inputs = [ { - label: 'Colection Name', + label: 'Collection Name', name: 'collectionName', type: 'string' }, @@ -31,4 +31,4 @@ class AstraApi 
implements INodeCredential { } } -module.exports = { credClass: AstraApi } +module.exports = { credClass: AstraDBApi } diff --git a/packages/components/nodes/vectorstores/Astra/Astra.ts b/packages/components/nodes/vectorstores/Astra/Astra.ts index 648a8b49108..e3377cb5fcd 100644 --- a/packages/components/nodes/vectorstores/Astra/Astra.ts +++ b/packages/components/nodes/vectorstores/Astra/Astra.ts @@ -33,7 +33,7 @@ class Astra_VectorStores implements INode { label: 'Connect Credential', name: 'credential', type: 'credential', - credentialNames: ['AstraApi'] + credentialNames: ['AstraDBApi'] } this.inputs = [ { From e2365ff22000b1942bf1523ac58136afc46873af Mon Sep 17 00:00:00 2001 From: hakeemsyd Date: Fri, 12 Jan 2024 01:43:22 +0500 Subject: [PATCH 45/51] Update AstraApi.credential.ts --- packages/components/credentials/AstraApi.credential.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/credentials/AstraApi.credential.ts b/packages/components/credentials/AstraApi.credential.ts index 3bec1a49e56..a89a259f52f 100644 --- a/packages/components/credentials/AstraApi.credential.ts +++ b/packages/components/credentials/AstraApi.credential.ts @@ -13,7 +13,7 @@ class AstraDBApi implements INodeCredential { this.version = 1.0 this.inputs = [ { - label: 'Collection Name', + label: 'Astra DB Collection Name', name: 'collectionName', type: 'string' }, From 9aaa4313cbddac3bf66a977ba4e7ee11d1b22dbd Mon Sep 17 00:00:00 2001 From: hakeemsyd Date: Fri, 12 Jan 2024 02:43:43 +0500 Subject: [PATCH 46/51] svg added and refactored again --- .../components/nodes/vectorstores/Astra/Astra.ts | 8 ++++---- .../components/nodes/vectorstores/Astra/astra.svg | 13 ++++++++++++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/packages/components/nodes/vectorstores/Astra/Astra.ts b/packages/components/nodes/vectorstores/Astra/Astra.ts index e3377cb5fcd..865f10446e5 100644 --- a/packages/components/nodes/vectorstores/Astra/Astra.ts +++ b/packages/components/nodes/vectorstores/Astra/Astra.ts @@ -103,8 +103,8 @@ class Astra_VectorStores implements INode { } const clientConfig = { - token: credentialData?.applicationToken ?? 'dummy', - endpoint: credentialData?.dbEndPoint ?? 'dummy' + token: credentialData?.applicationToken, + endpoint: credentialData?.dbEndPoint } const astraConfig: AstraLibArgs = { @@ -151,8 +151,8 @@ class Astra_VectorStores implements INode { } const clientConfig = { - token: credentialData?.applicationToken ?? 'dummy', - endpoint: credentialData?.dbEndPoint ?? 
'dummy' + token: credentialData?.applicationToken, + endpoint: credentialData?.dbEndPoint } const astraConfig: AstraLibArgs = { diff --git a/packages/components/nodes/vectorstores/Astra/astra.svg b/packages/components/nodes/vectorstores/Astra/astra.svg index 59c2fc3f652..de58397d9be 100644 --- a/packages/components/nodes/vectorstores/Astra/astra.svg +++ b/packages/components/nodes/vectorstores/Astra/astra.svg @@ -1 +1,12 @@ - \ No newline at end of file + + + + + + + + + + + + From 8a4b6a72247d6dfb36a088b861cbc0efd3cde5e4 Mon Sep 17 00:00:00 2001 From: Keith Kacsh Date: Sat, 13 Jan 2024 19:14:45 -0700 Subject: [PATCH 47/51] Fixing naming, handling embeddings for LocalAI also --- ....credential.ts => LocalAIApi.credential.ts} | 4 ++-- .../chatmodels/ChatLocalAI/ChatLocalAI.ts | 6 +++--- .../LocalAIEmbedding/LocalAIEmbedding.ts | 18 ++++++++++++++++-- 3 files changed, 21 insertions(+), 7 deletions(-) rename packages/components/credentials/{LcoalAIApi.credential.ts => LocalAIApi.credential.ts} (86%) diff --git a/packages/components/credentials/LcoalAIApi.credential.ts b/packages/components/credentials/LocalAIApi.credential.ts similarity index 86% rename from packages/components/credentials/LcoalAIApi.credential.ts rename to packages/components/credentials/LocalAIApi.credential.ts index 624e07fa46f..4aafe040dc3 100644 --- a/packages/components/credentials/LcoalAIApi.credential.ts +++ b/packages/components/credentials/LocalAIApi.credential.ts @@ -8,12 +8,12 @@ class LocalAIApi implements INodeCredential { constructor() { this.label = 'LocalAI API' - this.name = 'LocalAIApi' + this.name = 'localAIApi' this.version = 1.0 this.inputs = [ { label: 'LocalAI Api Key', - name: 'LocalAIApiKey', + name: 'localAIApiKey', type: 'password' } ] diff --git a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts index c44f03ce192..f2825d0d3fe 100644 --- a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts +++ b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts @@ -30,7 +30,7 @@ class ChatLocalAI_ChatModels implements INode { label: 'Connect Credential', name: 'credential', type: 'credential', - credentialNames: ['LocalAIApi'], + credentialNames: ['localAIApi'], optional: true } this.inputs = [ @@ -95,11 +95,11 @@ class ChatLocalAI_ChatModels implements INode { const timeout = nodeData.inputs?.timeout as string const basePath = nodeData.inputs?.basePath as string const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) - const localAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData) + const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData) const cache = nodeData.inputs?.cache as BaseCache - const obj: Partial & BaseLLMParams & { localAIApiKey?: string } = { + const obj: Partial & BaseLLMParams & { openAIApiKey?: string } = { temperature: parseFloat(temperature), modelName, openAIApiKey: 'sk-' diff --git a/packages/components/nodes/embeddings/LocalAIEmbedding/LocalAIEmbedding.ts b/packages/components/nodes/embeddings/LocalAIEmbedding/LocalAIEmbedding.ts index 557e35d68fa..24efaf8c6cc 100644 --- a/packages/components/nodes/embeddings/LocalAIEmbedding/LocalAIEmbedding.ts +++ b/packages/components/nodes/embeddings/LocalAIEmbedding/LocalAIEmbedding.ts @@ -1,4 +1,5 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getCredentialData, getCredentialParam } from '../../../src/utils' import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from 'langchain/embeddings/openai' class LocalAIEmbedding_Embeddings implements INode { @@ -10,6 +11,7 @@ class LocalAIEmbedding_Embeddings implements INode { category: string description: string baseClasses: string[] + credential: INodeParams inputs: INodeParams[] constructor() { @@ -21,6 +23,13 @@ class LocalAIEmbedding_Embeddings implements INode { this.category = 'Embeddings' this.description = 'Use local embeddings models like llama.cpp' this.baseClasses = [this.type, 'Embeddings'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['localAIApi'], + optional: true + } this.inputs = [ { label: 'Base Path', @@ -37,15 +46,20 @@ class LocalAIEmbedding_Embeddings implements INode { ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const modelName = nodeData.inputs?.modelName as string const basePath = nodeData.inputs?.basePath as string + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData) + const obj: Partial & { openAIApiKey?: string } = { modelName, openAIApiKey: 'sk-' } + if (localAIApiKey) obj.openAIApiKey = localAIApiKey + const model = new OpenAIEmbeddings(obj, { basePath }) return model From ec50493851332e49b9e896713cdcc33a24b23241 Mon Sep 17 00:00:00 2001 From: Henry Date: Sun, 14 Jan 2024 11:57:53 +0000 Subject: [PATCH 48/51] delete message API --- packages/server/src/index.ts | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index b6f5919178e..cdb3dc3e73c 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -523,7 +523,7 @@ export class App { res.status(404).send(`Chatflow ${chatflowid} not found`) return } - const chatId = (req.query?.chatId as string) ?? 
(await getChatId(chatflowid)) + const chatId = req.query?.chatId as string const memoryType = req.query?.memoryType as string | undefined const sessionId = req.query?.sessionId as string | undefined const chatType = req.query?.chatType as string | undefined @@ -546,7 +546,8 @@ export class App { await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId) } - const deleteOptions: FindOptionsWhere = { chatflowid, chatId } + const deleteOptions: FindOptionsWhere = { chatflowid } + if (chatId) deleteOptions.chatId = chatId if (memoryType) deleteOptions.memoryType = memoryType if (sessionId) deleteOptions.sessionId = sessionId if (chatType) deleteOptions.chatType = chatType @@ -634,7 +635,7 @@ export class App { return res.json(result) }) - // Delete all chatmessages from chatflowid + // Delete all credentials from chatflowid this.app.delete('/api/v1/credentials/:id', async (req: Request, res: Response) => { const results = await this.AppDataSource.getRepository(Credential).delete({ id: req.params.id }) return res.json(results) @@ -1811,23 +1812,6 @@ export class App { } } -/** - * Get first chat message id - * @param {string} chatflowid - * @returns {string} - */ -export async function getChatId(chatflowid: string): Promise { - // first chatmessage id as the unique chat id - const firstChatMessage = await getDataSource() - .getRepository(ChatMessage) - .createQueryBuilder('cm') - .select('cm.id') - .where('chatflowid = :chatflowid', { chatflowid }) - .orderBy('cm.createdDate', 'ASC') - .getOne() - return firstChatMessage ? firstChatMessage.id : '' -} - let serverApp: App | undefined export async function getAllChatFlow(): Promise { From 04ef695b3f9b2ab0ff7d42f65a5d4f19178d8664 Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 15 Jan 2024 02:46:21 +0000 Subject: [PATCH 49/51] update README-ZH md --- README-ZH.md | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/README-ZH.md b/README-ZH.md index 208eee92da1..8750ebc7fc3 100644 --- a/README-ZH.md +++ b/README-ZH.md @@ -145,29 +145,40 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package ## 🌐 自托管 -### [Railway](https://docs.flowiseai.com/deployment/railway) +在您现有的基础设施中部署自托管的 Flowise,我们支持各种[部署](https://docs.flowiseai.com/configuration/deployment) -[![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9) +- [AWS](https://docs.flowiseai.com/deployment/aws) +- [Azure](https://docs.flowiseai.com/deployment/azure) +- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean) +- [GCP](https://docs.flowiseai.com/deployment/gcp) +-
+ 其他 -### [Render](https://docs.flowiseai.com/deployment/render) + - [Railway](https://docs.flowiseai.com/deployment/railway) -[![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render) + [![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9) -### [Sealos](https://docs.flowiseai.com/configuration/deployment/sealos) + - [Render](https://docs.flowiseai.com/deployment/render) -[![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://template.cloud.sealos.io/deploy?templateName=flowise) + [![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render) -### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face) + - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face) -HuggingFace Spaces + HuggingFace Spaces -### [AWS](https://docs.flowiseai.com/deployment/aws) + - [Elestio](https://elest.io/open-source/flowiseai) -### [Azure](https://docs.flowiseai.com/deployment/azure) + [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai) -### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean) + - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) -### [GCP](https://docs.flowiseai.com/deployment/gcp) + [![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + + - [RepoCloud](https://repocloud.io/details/?app_id=29) + + [![部署到 RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29) + +
 ## 💻 云托管

From 4ef9c1f5118bbf4f910215e1cd56e1f6e0762891 Mon Sep 17 00:00:00 2001
From: Henry
Date: Mon, 15 Jan 2024 18:28:46 +0000
Subject: [PATCH 50/51] add sessionId tracking

---
 packages/components/package.json   |  2 +-
 packages/components/src/handler.ts | 42 +++++++++++++++++++++---------
 2 files changed, 30 insertions(+), 14 deletions(-)

diff --git a/packages/components/package.json b/packages/components/package.json
index a2565430bd4..86405c2b464 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -54,7 +54,7 @@
         "ioredis": "^5.3.2",
         "langchain": "^0.0.214",
         "langfuse": "2.0.2",
-        "langfuse-langchain": "2.0.2",
+        "langfuse-langchain": "2.3.3",
         "langsmith": "0.0.53",
         "linkifyjs": "^4.1.1",
         "llmonitor": "^0.5.5",

diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts
index 1eb05a51746..df72a685f79 100644
--- a/packages/components/src/handler.ts
+++ b/packages/components/src/handler.ts
@@ -1,13 +1,13 @@
-import { BaseTracer, Run, BaseCallbackHandler } from 'langchain/callbacks'
+import { BaseTracer, Run, BaseCallbackHandler, LangChainTracer } from 'langchain/callbacks'
 import { AgentAction, ChainValues } from 'langchain/schema'
 import { Logger } from 'winston'
 import { Server } from 'socket.io'
 import { Client } from 'langsmith'
-import { LangChainTracer } from 'langchain/callbacks'
-import { LLMonitorHandler } from 'langchain/callbacks/handlers/llmonitor'
+import { LLMonitorHandler, LLMonitorHandlerFields } from 'langchain/callbacks/handlers/llmonitor'
 import { getCredentialData, getCredentialParam } from './utils'
 import { ICommonObject, INodeData } from './Interface'
 import CallbackHandler from 'langfuse-langchain'
+import { LangChainTracerFields } from '@langchain/core/tracers/tracer_langchain'
 import { RunTree, RunTreeConfig, Client as LangsmithClient } from 'langsmith'
 import { Langfuse, LangfuseTraceClient, LangfuseSpanClient, LangfuseGenerationClient } from 'langfuse'
 import monitor from 'llmonitor'
@@ -235,11 +235,16 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO
                 apiKey: langSmithApiKey
             })
 
-            const tracer = new LangChainTracer({
+            let langSmithField: LangChainTracerFields = {
                 projectName: langSmithProject ?? 'default',
-                //@ts-ignore
                 client
-            })
+            }
+
+            if (nodeData?.inputs?.analytics?.langSmith) {
+                langSmithField = { ...langSmithField, ...nodeData?.inputs?.analytics?.langSmith }
+            }
+
+            const tracer = new LangChainTracer(langSmithField)
             callbacks.push(tracer)
         } else if (provider === 'langFuse') {
             const release = analytic[provider].release as string
 
             const langFuseSecretKey = getCredentialParam('langFuseSecretKey', credentialData, nodeData)
             const langFusePublicKey = getCredentialParam('langFusePublicKey', credentialData, nodeData)
             const langFuseEndpoint = getCredentialParam('langFuseEndpoint', credentialData, nodeData)
 
-            const langFuseOptions: any = {
+            let langFuseOptions: any = {
                 secretKey: langFuseSecretKey,
                 publicKey: langFusePublicKey,
                 baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com'
             }
 
             if (release) langFuseOptions.release = release
-            if (options.chatId) langFuseOptions.userId = options.chatId
+            if (options.chatId) langFuseOptions.sessionId = options.chatId
+
+            if (nodeData?.inputs?.analytics?.langFuse) {
+                langFuseOptions = { ...langFuseOptions, ...nodeData?.inputs?.analytics?.langFuse }
+            }
 
             const handler = new CallbackHandler(langFuseOptions)
             callbacks.push(handler)
         } else if (provider === 'llmonitor') {
             const llmonitorAppId = getCredentialParam('llmonitorAppId', credentialData, nodeData)
             const llmonitorEndpoint = getCredentialParam('llmonitorEndpoint', credentialData, nodeData)
 
-            const llmonitorFields: ICommonObject = {
+            let llmonitorFields: LLMonitorHandlerFields = {
                 appId: llmonitorAppId,
                 apiUrl: llmonitorEndpoint ?? 'https://app.llmonitor.com'
             }
 
+            if (nodeData?.inputs?.analytics?.llmonitor) {
+                llmonitorFields = { ...llmonitorFields, ...nodeData?.inputs?.analytics?.llmonitor }
+            }
+
             const handler = new LLMonitorHandler(llmonitorFields)
             callbacks.push(handler)
         }
@@ -360,7 +373,8 @@ export class AnalyticHandler {
                 },
                 serialized: {},
                 project_name: this.handlers['langSmith'].langSmithProject,
-                client: this.handlers['langSmith'].client
+                client: this.handlers['langSmith'].client,
+                ...this.nodeData?.inputs?.analytics?.langSmith
             }
             const parentRun = new RunTree(parentRunConfig)
             await parentRun.postRun()
@@ -390,8 +404,9 @@ export class AnalyticHandler {
                 const langfuse: Langfuse = this.handlers['langFuse'].client
                 langfuseTraceClient = langfuse.trace({
                     name,
-                    userId: this.options.chatId,
-                    metadata: { tags: ['openai-assistant'] }
+                    sessionId: this.options.chatId,
+                    metadata: { tags: ['openai-assistant'] },
+                    ...this.nodeData?.inputs?.analytics?.langFuse
                 })
             } else {
                 langfuseTraceClient = this.handlers['langFuse'].trace[parentIds['langFuse']]
@@ -420,7 +435,8 @@ export class AnalyticHandler {
                     runId,
                     name,
                     userId: this.options.chatId,
-                    input
+                    input,
+                    ...this.nodeData?.inputs?.analytics?.llmonitor
                 })
                 this.handlers['llmonitor'].chainEvent = { [runId]: runId }
                 returnIds['llmonitor'].chainEvent = runId
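The substance of this patch is the rename from `userId` to `sessionId`: the Flowise `chatId` now groups all traces of one conversation as a Langfuse session, and any flow-level analytics config is spread in last so it overrides the credential defaults. A hedged sketch of that wiring; the env-var names, the hard-coded `chatId`, and the `release` override are illustrative assumptions, not Flowise source:

```typescript
import CallbackHandler from 'langfuse-langchain'

// Credential values; these env-var names are placeholders for the sketch.
let langFuseOptions: any = {
    secretKey: process.env.LANGFUSE_SECRET_KEY,
    publicKey: process.env.LANGFUSE_PUBLIC_KEY,
    baseUrl: process.env.LANGFUSE_ENDPOINT ?? 'https://cloud.langfuse.com'
}

// The chat id doubles as the Langfuse session id, so every trace of one
// conversation lands in the same session (previously it was sent as userId).
const chatId: string | undefined = 'chat-1234' // stand-in for options.chatId
if (chatId) langFuseOptions.sessionId = chatId

// Flow-level overrides are merged last and therefore win, as in the patch.
const flowOverrides = { release: 'v1.4.0' } // stand-in for nodeData.inputs.analytics.langFuse
langFuseOptions = { ...langFuseOptions, ...flowOverrides }

const handler = new CallbackHandler(langFuseOptions)
// Attached per call, e.g.: await chain.invoke(input, { callbacks: [handler] })
```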
From b3ab8527f5c03d4dda4ab22ded7a648cb6cc44d0 Mon Sep 17 00:00:00 2001
From: Henry
Date: Mon, 15 Jan 2024 18:36:31 +0000
Subject: [PATCH 51/51] add ts-ignore

---
 packages/components/src/handler.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts
index df72a685f79..5d2b53f64f7 100644
--- a/packages/components/src/handler.ts
+++ b/packages/components/src/handler.ts
@@ -237,6 +237,7 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO
 
             let langSmithField: LangChainTracerFields = {
                 projectName: langSmithProject ?? 'default',
+                //@ts-ignore
                 client
             }
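The series ends by walking back part of PATCH 50: `LangChainTracerFields` at this langchain version does not appear to declare `client`, even though the tracer accepts one at runtime, so the suppression comes back. A minimal sketch of the final construction; the endpoint, key, and project env-vars are placeholders, not Flowise configuration:

```typescript
import { Client } from 'langsmith'
import { LangChainTracer } from 'langchain/callbacks'
import { LangChainTracerFields } from '@langchain/core/tracers/tracer_langchain'

// A LangSmith client bound to an explicit endpoint and API key (placeholders).
const client = new Client({
    apiUrl: process.env.LANGSMITH_ENDPOINT ?? 'https://api.smith.langchain.com',
    apiKey: process.env.LANGSMITH_API_KEY
})

const langSmithField: LangChainTracerFields = {
    projectName: process.env.LANGSMITH_PROJECT ?? 'default',
    // Accepted at runtime but absent from the Fields type at this version,
    // hence the suppression this patch re-adds.
    //@ts-ignore
    client
}

const tracer = new LangChainTracer(langSmithField)
// The tracer can now be pushed onto a callbacks array for any chain or LLM call.
```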