Skip to content

fix: chat bugs #3308

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 19, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions apps/application/chat_pipeline/step/chat_step/i_chat_step.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ class InstanceSerializer(serializers.Serializer):
no_references_setting = NoReferencesSetting(required=True,
label=_("No reference segment settings"))

user_id = serializers.UUIDField(required=True, label=_("User ID"))
workspace_id = serializers.CharField(required=True, label=_("Workspace ID"))

model_setting = serializers.DictField(required=True, allow_null=True,
label=_("Model settings"))
Expand All @@ -102,7 +102,7 @@ def execute(self, message_list: List[BaseMessage],
chat_id, problem_text,
post_response_handler: PostResponseHandler,
model_id: str = None,
user_id: str = None,
workspace_id: str = None,
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None, stream: bool = True, chat_user_id=None, chat_user_type=None,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from application.chat_pipeline.step.chat_step.i_chat_step import IChatStep, PostResponseHandler
from application.flow.tools import Reasoning
from application.models import ApplicationChatUserStats, ChatUserType
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


def add_access_num(chat_user_id=None, chat_user_type=None, application_id=None):
Expand Down Expand Up @@ -157,7 +157,7 @@ def execute(self, message_list: List[BaseMessage],
problem_text,
post_response_handler: PostResponseHandler,
model_id: str = None,
user_id: str = None,
workspace_id: str = None,
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None,
Expand All @@ -167,8 +167,8 @@ def execute(self, message_list: List[BaseMessage],
model_params_setting=None,
model_setting=None,
**kwargs):
chat_model = get_model_instance_by_model_user_id(model_id, user_id,
**model_params_setting) if model_id is not None else None
chat_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
**model_params_setting) if model_id is not None else None
if stream:
return self.execute_stream(message_list, chat_id, problem_text, post_response_handler, chat_model,
paragraph_list,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ class InstanceSerializer(serializers.Serializer):
label=_("History Questions"))
# 大语言模型
model_id = serializers.UUIDField(required=False, allow_null=True, label=_("Model id"))
user_id = serializers.UUIDField(required=True, label=_("User ID"))
workspace_id = serializers.CharField(required=True, label=_("Workspace ID"))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
label=_("Question completion prompt"))

Expand All @@ -50,6 +50,6 @@ def _run(self, manage: PipelineManage):
@abstractmethod
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
problem_optimization_prompt=None,
user_id=None,
workspace_id=None,
**kwargs):
pass
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from application.chat_pipeline.step.reset_problem_step.i_reset_problem_step import IResetProblemStep
from application.models import ChatRecord
from common.utils.split_model import flat_map
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id

prompt = _(
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")
Expand All @@ -23,9 +23,9 @@
class BaseResetProblemStep(IResetProblemStep):
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
problem_optimization_prompt=None,
user_id=None,
workspace_id=None,
**kwargs) -> str:
chat_model = get_model_instance_by_model_user_id(model_id, user_id) if model_id is not None else None
chat_model = get_model_instance_by_model_workspace_id(model_id, workspace_id) if model_id is not None else None
if chat_model is None:
return problem_text
start_index = len(history_chat_record) - 3
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class InstanceSerializer(serializers.Serializer):
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
message=_("The type only supports embedding|keywords|blend"), code=500)
], label=_("Retrieval Mode"))
user_id = serializers.UUIDField(required=True, label=_("User ID"))
workspace_id = serializers.CharField(required=True, label=_("Workspace ID"))

def get_step_serializer(self, manage: PipelineManage) -> Type[InstanceSerializer]:
return self.InstanceSerializer
Expand All @@ -58,19 +58,19 @@ def _run(self, manage: PipelineManage):
def execute(self, problem_text: str, knowledge_id_list: list[str], exclude_document_id_list: list[str],
exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None,
search_mode: str = None,
user_id=None,
workspace_id=None,
**kwargs) -> List[ParagraphPipelineModel]:
"""
关于 用户和补全问题 说明: 补全问题如果有就使用补全问题去查询 反之就用用户原始问题查询
:param similarity: 相关性
:param top_n: 查询多少条
:param problem_text: 用户问题
:param knowledge_id_list: 需要查询的数据集id列表
:param knowledge_id_list: 需要查询的数据集id列表
:param exclude_document_id_list: 需要排除的文档id
:param exclude_paragraph_id_list: 需要排除段落id
:param padding_problem_text 补全问题
:param search_mode 检索模式
:param user_id 用户id
:param workspace_id 工作空间id
:return: 段落列表
"""
pass
Original file line number Diff line number Diff line change
Expand Up @@ -25,13 +25,13 @@
from models_provider.tools import get_model


def get_model_by_id(_id, user_id):
model = QuerySet(Model).filter(id=_id).first()
def get_model_by_id(_id, workspace_id):
model = QuerySet(Model).filter(id=_id, model_type="EMBEDDING").first()
if model is None:
raise Exception(_("Model does not exist"))
if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id):
message = lazy_format(_('No permission to use this model {model_name}'), model_name=model.name)
raise Exception(message)
if model.workspace_id is not None:
if model.workspace_id != workspace_id:
raise Exception(_("Model does not exist"))
return model


Expand All @@ -50,13 +50,13 @@ class BaseSearchDatasetStep(ISearchDatasetStep):
def execute(self, problem_text: str, knowledge_id_list: list[str], exclude_document_id_list: list[str],
exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None,
search_mode: str = None,
user_id=None,
workspace_id=None,
**kwargs) -> List[ParagraphPipelineModel]:
if len(knowledge_id_list) == 0:
return []
exec_problem_text = padding_problem_text if padding_problem_text is not None else problem_text
model_id = get_embedding_id(knowledge_id_list)
model = get_model_by_id(model_id, user_id)
model = get_model_by_id(model_id, workspace_id)
self.context['model_name'] = model.name
embedding_model = ModelManage.get_model(model_id, lambda _id: get_model(model))
embedding_value = embedding_model.embed_query(exec_problem_text)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
import re
import time
from functools import reduce
from types import AsyncGeneratorType
from typing import List, Dict

from django.db.models import QuerySet
Expand All @@ -24,7 +23,7 @@
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
from application.flow.tools import Reasoning
from models_provider.models import Model
from models_provider.tools import get_model_credential, get_model_instance_by_model_user_id
from models_provider.tools import get_model_credential, get_model_instance_by_model_workspace_id

tool_message_template = """
<details>
Expand Down Expand Up @@ -206,8 +205,9 @@ def execute(self, model_id, system, prompt, dialogue_number, history_chat_record
model_setting = {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
'reasoning_content_start': '<think>'}
self.context['model_setting'] = model_setting
chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
workspace_id = self.workflow_manage.get_body().get('workspace_id')
chat_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type,
self.runtime_node_id)
self.context['history_message'] = history_message
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode
from common.utils.common import bytes_to_uploaded_file
from oss.serializers.file import FileSerializer
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


class BaseImageGenerateNode(IImageGenerateNode):
Expand All @@ -25,8 +25,9 @@ def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_t
**kwargs) -> NodeResult:
print(model_params_setting)
application = self.workflow_manage.work_flow_post_handler.chat_info.application
tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
workspace_id = self.workflow_manage.get_body().get('workspace_id')
tti_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode
from knowledge.models import File
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
Expand Down Expand Up @@ -79,9 +79,9 @@ def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, hist
# 处理不正确的参数
if image is None or not isinstance(image, list):
image = []
print(model_params_setting)
image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
workspace_id = self.workflow_manage.get_body().get('workspace_id')
image_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
**model_params_setting)
# 执行详情中的历史消息不需要图片内容
history_message = self.get_history_message_for_details(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.question_node.i_question_node import IQuestionNode
from models_provider.models import Model
from models_provider.tools import get_model_instance_by_model_user_id, get_model_credential
from models_provider.tools import get_model_instance_by_model_workspace_id, get_model_credential


def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
Expand Down Expand Up @@ -87,8 +87,9 @@ def execute(self, model_id, system, prompt, dialogue_number, history_chat_record
**kwargs) -> NodeResult:
if model_params_setting is None:
model_params_setting = get_default_model_params_setting(model_id)
chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
workspace_id = self.workflow_manage.get_body().get('workspace_id')
chat_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@

from application.flow.i_step_node import NodeResult
from application.flow.step_node.reranker_node.i_reranker_node import IRerankerNode
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


def merge_reranker_list(reranker_list, result=None):
Expand Down Expand Up @@ -78,8 +78,9 @@ def execute(self, question, reranker_setting, reranker_list, reranker_model_id,
self.context['document_list'] = [{'page_content': document.page_content, 'metadata': document.metadata} for
document in documents]
self.context['question'] = question
reranker_model = get_model_instance_by_model_user_id(reranker_model_id,
self.flow_params_serializer.data.get('user_id'),
workspace_id = self.workflow_manage.get_body().get('workspace_id')
reranker_model = get_model_instance_by_model_workspace_id(reranker_model_id,
workspace_id,
top_n=top_n)
result = reranker_model.compress_documents(
documents,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from common.utils.common import get_file_content
from knowledge.models import Document, Paragraph, Knowledge, SearchMode
from maxkb.conf import PROJECT_DIR
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


def get_embedding_id(dataset_id_list):
Expand Down Expand Up @@ -67,7 +67,8 @@ def execute(self, dataset_id_list, dataset_setting, question,
if len(dataset_id_list) == 0:
return get_none_result(question)
model_id = get_embedding_id(dataset_id_list)
embedding_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'))
workspace_id = self.workflow_manage.get_body().get('workspace_id')
embedding_model = get_model_instance_by_model_workspace_id(model_id, workspace_id)
embedding_value = embedding_model.embed_query(question)
vector = VectorStore.get_embedding_vector()
exclude_document_id_list = [str(document.id) for document in
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from application.flow.step_node.speech_to_text_step_node.i_speech_to_text_node import ISpeechToTextNode
from common.utils.common import split_and_transcribe, any_to_mp3
from knowledge.models import File
from models_provider.tools import get_model_instance_by_model_user_id
from models_provider.tools import get_model_instance_by_model_workspace_id


class BaseSpeechToTextNode(ISpeechToTextNode):
Expand All @@ -20,7 +20,8 @@ def save_context(self, details, workflow_manage):
self.answer_text = details.get('answer')

def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))
workspace_id = self.workflow_manage.get_body().get('workspace_id')
stt_model = get_model_instance_by_model_workspace_id(stt_model_id, workspace_id)
audio_list = audio
self.context['audio_list'] = audio

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@

from application.flow.i_step_node import NodeResult
from application.flow.step_node.text_to_speech_step_node.i_text_to_speech_node import ITextToSpeechNode
from models_provider.tools import get_model_instance_by_model_workspace_id
from oss.serializers.file import FileSerializer
from models_provider.tools import get_model_instance_by_model_user_id


def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
Expand Down Expand Up @@ -42,8 +42,9 @@ def execute(self, tts_model_id, chat_id,
content, model_params_setting=None,
**kwargs) -> NodeResult:
self.context['content'] = content
model = get_model_instance_by_model_user_id(tts_model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
workspace_id = self.workflow_manage.get_body().get('workspace_id')
model = get_model_instance_by_model_workspace_id(tts_model_id, workspace_id,
**model_params_setting)
audio_byte = model.text_to_speech(content)
# 需要把这个音频文件存储到数据库中
file_name = 'generated_audio.mp3'
Expand Down
Loading
Loading