forked from definitive-io/conversational-chatbot-groq
-
Notifications
You must be signed in to change notification settings - Fork 2
/
app.py
119 lines (93 loc) · 4.28 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
import streamlit as st
import os
from groq import Groq
import random
import time
from langchain.chains import ConversationChain, LLMChain
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
from streamlit_autorefresh import st_autorefresh
load_dotenv()
def typing_effect(text):
    """Render *text* into a Streamlit placeholder one character at a time.

    Simulates a typewriter effect by re-rendering the growing prefix of the
    message on every iteration.  Returns the fully typed text when done.
    """
    slot = st.empty()
    shown = ""
    for idx in range(len(text)):
        shown = text[: idx + 1]
        slot.markdown(f"**Accounting Bot:** {shown}")
        time.sleep(0.0005)  # per-character delay controls the typing speed
    return shown
def main():
    """Entry point of the Streamlit application.

    Sets up the Groq-backed LangChain conversation chain, renders the chat
    UI, and replays the stored chat history into a fresh window memory on
    every rerun (Streamlit re-executes the whole script per interaction).
    """
    # Fail with a readable UI message instead of a raw KeyError when the
    # environment variable is missing.
    groq_api_key = os.environ.get('GROQ_API_KEY')
    if not groq_api_key:
        st.error("GROQ_API_KEY environment variable is not set.")
        st.stop()

    # Display the Groq logo in the rightmost column.
    _, col = st.columns([5, 1])
    with col:
        st.image('groqcloud_darkmode.png')

    # The title and greeting message of the Streamlit application.
    st.title("Chat with Accounting Bot!")
    st.write("Hello! I'm your friendly Accounting chatbot. I can help answer your questions, provide information, or just chat. I'm also super fast! Let's start our conversation!")

    # Sidebar customization: system prompt, memory window size, and
    # sampling temperature.
    st.sidebar.title('Customization')
    system_prompt = st.sidebar.text_input("System prompt:", value="You are Accounting Bot, an accounting chatbot that specializes on journal entry, financial analysis and budgeting.")
    model = 'llama3-70b-8192'
    conversational_memory_length = st.sidebar.slider('Conversational memory length:', 1, 10, value=5)
    temperature = st.sidebar.slider('Response Temperature:', 0.0, 1.0, value=0.7)

    # A fresh window memory is built each rerun; persisted history is
    # replayed into it below.
    memory = ConversationBufferWindowMemory(k=conversational_memory_length, memory_key="chat_history", return_messages=True)

    user_question = st.text_input("Ask a question:")

    # Persist the chat history across Streamlit reruns via session state.
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    else:
        for message in st.session_state.chat_history:
            memory.save_context(
                {'input': message['human']},
                {'output': message['AI']}
            )

    # Initialize the Groq LangChain chat object.  The sidebar temperature
    # is now passed through so the slider actually affects responses
    # (it was previously collected but never used).
    groq_chat = ChatGroq(
        groq_api_key=groq_api_key,
        model_name=model,
        temperature=temperature,
    )

    # If the user has asked a question, build the prompt and answer it.
    if user_question:
        # Prompt = persistent system message + chat-history placeholder
        # + the current human input.
        prompt = ChatPromptTemplate.from_messages(
            [
                SystemMessage(
                    content=system_prompt
                ),  # Persistent system prompt, always first in the chat.
                MessagesPlaceholder(
                    variable_name="chat_history"
                ),  # Replaced by the actual chat history to keep context.
                HumanMessagePromptTemplate.from_template(
                    "{human_input}"
                ),  # The user's current input is injected here.
            ]
        )

        # Conversation chain combining the LLM, prompt, and memory.
        conversation = LLMChain(
            llm=groq_chat,
            prompt=prompt,
            verbose=True,  # verbose output is useful for debugging prompts
            memory=memory,
        )

        # Send the full prompt to the Groq API and record the exchange.
        response = conversation.predict(human_input=user_question)
        st.session_state.chat_history.append({'human': user_question, 'AI': response})
        typing_effect(response)


if __name__ == "__main__":
    main()