streamlit_chat_client.py
import streamlit as st
import os
from dotenv import load_dotenv, find_dotenv
import openai
from langchain.chains import (create_history_aware_retriever, create_retrieval_chain)
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import (ChatOpenAI, OpenAIEmbeddings)
from langchain_chroma import Chroma
from langchain_core.messages import AIMessage, HumanMessage
# Load the OpenAI API key from a local .env file
_ = load_dotenv(find_dotenv())
openai.api_key = os.environ['OPENAI_API_KEY']
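# Assumes the .env file defines the key, e.g.:
#   OPENAI_API_KEY=sk-...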
# Create a document retriever from a persisted Chroma vector store
embedding = OpenAIEmbeddings()
vectordb = Chroma(embedding_function=embedding, persist_directory="./chromadb")
retriever = vectordb.as_retriever()
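# NOTE: this client only reads the index; "./chromadb" is assumed to have been
# populated beforehand by a separate ingestion step, along the lines of this
# hypothetical sketch (docs being a list of LangChain Document objects):
#   Chroma.from_documents(docs, OpenAIEmbeddings(), persist_directory="./chromadb")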
# Create the chat model (temperature=0 for deterministic answers)
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
# Create a retriever that takes the chat history into account
contextualize_q_system_prompt = (
    "Given a chat history and the latest user question "
    "which might reference context in the chat history, "
    "formulate a standalone question which can be understood "
    "without the chat history. Do NOT answer the question, just "
    "reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)
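# create_history_aware_retriever chains the prompt, the LLM and the base
# retriever: when chat_history is non-empty, the question is first rewritten
# into a standalone query, which is then used for the similarity search.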
# Create the RAG chain
system_prompt = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer "
    "the question. If you don't know the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise."
    "\n\n"
    "{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
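# rag_chain.invoke() returns a dict containing "context" and "answer";
# rag_chain.stream() yields partial dicts instead, so the "answer" key is
# only present in some chunks (hence the filter in the streaming code below).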
# Interface
st.title("Ask about Marvinpac policies")
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("Your question here!"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        # Keep only the chunks that carry a piece of the answer and stream them
        stream = map(
            lambda chunk: chunk["answer"],
            filter(
                lambda chunk: "answer" in chunk,
                rag_chain.stream({"input": prompt, "chat_history": st.session_state.messages}),
            ),
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})
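# To launch the app (assuming streamlit and the langchain packages used in
# the imports above are installed):
#   streamlit run streamlit_chat_client.py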