almstream_app.py
import streamlit as st
from streamlit_monaco import st_monaco
from code_editor import code_editor
from openai import OpenAI
from dotenv import load_dotenv
import os
import uuid
import time
import hmac
load_dotenv()
# Constants
ENV_VARS = ["WEAVIATE_URL", "WEAVIATE_API_KEY", "OPENAI_KEY"]
NUM_IMAGES_PER_ROW = 3
# Initialize the client with local server settings
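# This assumes LM Studio's local server is running on port 1234; it speaks the OpenAI
# chat-completions API, and the api_key value here is only a placeholder for the client.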
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
model_system_request = "From now on, act as a tech professional. Pay close attention to user questions and provide the outputs a user would expect for the given input."
# Functions
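# check_password() reads credentials from Streamlit secrets, so a [passwords] table is
# expected in .streamlit/secrets.toml, mapping usernames to passwords, e.g. (illustrative
# values only):
#   [passwords]
#   alice = "example-password"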
def check_password():
    """Returns `True` if the user had a correct password."""

    def login_form():
        """Form with widgets to collect user information"""
        with st.form("Credentials"):
            st.title("Welcome to AlmStream!")  # Title of the form.
            st.text_input("Username", key="username")
            st.text_input("Password", type="password", key="password")
            st.form_submit_button("Log in", on_click=password_entered)

    def password_entered():
        """Checks whether a password entered by the user is correct."""
        if st.session_state["username"] in st.secrets[
            "passwords"
        ] and hmac.compare_digest(
            st.session_state["password"],
            st.secrets.passwords[st.session_state["username"]],
        ):
            st.session_state["password_correct"] = True
            del st.session_state["password"]  # Don't store the username or password.
            del st.session_state["username"]
        else:
            st.session_state["password_correct"] = False

    # Return True if the username + password is validated.
    if st.session_state.get("password_correct", False):
        return True

    # Show inputs for username + password.
    login_form()
    if "password_correct" in st.session_state:
        st.error("😕 User not known or password incorrect")
    return False

def stream_data(content: str):
    for word in content.split(" "):
        yield word + " "
        time.sleep(0.02)

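# get_openai_response_by_model() sends the user prompt plus the system preamble to the
# selected model and returns a pair: a short status string for the sidebar chat, and the
# raw completion text that is rendered in the main body. max_tokens=-1 is LM Studio's
# convention for letting the model generate until it stops on its own.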
def get_openai_response_by_model(prompt: str, model_option: str) -> tuple[str, str]:
    print("User Prompt: " + prompt)
    completion = client.chat.completions.create(
        model=model_option,
        messages=[
            {"role": "system", "content": model_system_request},
            {"role": "user", "content": prompt},
        ],
        temperature=0.7,
        max_tokens=-1,
    )
    # print(completion.choices[0].message.content)
    response_content = "DONE."
    return response_content, completion.choices[0].message.content
    # print("Response Content: " + tmp)
    # return extract_dockerfile_content(tmp)
    # return tmp

def get_env_vars(env_vars: list) -> dict:
    """Retrieve environment variables
    @parameter env_vars : list - List containing keys of environment variables
    @returns dict - A dictionary of environment variables
    """
    env_vars_dict = {}
    for var in env_vars:
        value = os.environ.get(var, "")
        env_vars_dict[var] = value
    return env_vars_dict

def display_chat_messages() -> None:
    """Print the latest question and response in the sidebar
    @returns None
    """
    if len(st.session_state.messages) >= 2:
        user_message = st.session_state.messages[-2]
        assistant_message = st.session_state.messages[-1]
        with st.sidebar:
            with st.chat_message(user_message["role"]):
                st.markdown(user_message["content"])
            with st.chat_message(assistant_message["role"]):
                st.markdown(assistant_message["content"])
                if "images" in assistant_message:
                    for i in range(0, len(assistant_message["images"]), NUM_IMAGES_PER_ROW):
                        cols = st.columns(NUM_IMAGES_PER_ROW)
                        for j in range(NUM_IMAGES_PER_ROW):
                            if i + j < len(assistant_message["images"]):
                                cols[j].image(assistant_message["images"][i + j], width=200)

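# display_main_body_messages() renders each assistant reply as an HTML "block" joined by
# "line" connectors (styled by the CSS injected below), so the main body reads as a
# version-by-version flowchart of the generated output.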
def display_main_body_messages() -> None:
    """Print message history in the main body
    @returns None
    """
    st.write("<div class='flowchart'>", unsafe_allow_html=True)
    version = 0
    for message in st.session_state.messages:
        if message["role"] == "assistant":
            # Generate a unique key using uuid
            unique_key = str(uuid.uuid4())
            # Display the assistant message as a block in the main-body flowchart
            st.write(f"Version {version} -- UUID: {unique_key}")
            st.write(f"<div class='block assistant' style='background-color:black'><strong>Assistant:</strong> {message['content']}</div>", unsafe_allow_html=True)
            st.write("<div class='line'></div>", unsafe_allow_html=True)
            version += 1
    st.write("</div>", unsafe_allow_html=True)

# Environment variables
env_vars = get_env_vars(ENV_VARS)
# Custom CSS to adjust the sidebar width and add flowchart styles
st.markdown(
    """
    <style>
    [data-testid="stSidebar"] {
        width: 50% !important;
        min-width: 50% !important;
    }
    [data-testid="stSidebarNav"] {
        width: 50% !important;
        min-width: 50% !important;
    }
    .css-1d391kg {
        width: 50% !important;
        min-width: 50% !important;
    }
    .flowchart {
        position: relative;
        width: 100%;
    }
    .block {
        position: relative;
        padding: 10px;
        margin: 10px;
        border: 1px solid #ccc;
        border-radius: 5px;
        background: #f9f9f9;
    }
    .user {
        background: #e0f7fa;
    }
    .assistant {
        background: #ffe0b2;
    }
    .line {
        width: 2px;
        height: 20px;
        background: #ccc;
        margin: 0 auto;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
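# Note: .css-1d391kg is an auto-generated Streamlit class name and may change between
# Streamlit releases; the data-testid selectors above are the more stable hooks.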
# =======================================
# Stop the app until the user has logged in with valid credentials
if not check_password():
    st.stop()
# =======================================
# Title
st.title("AImStream")
with st.sidebar:
    st.title("💡 AImStream")
    st.subheader("Speed up Building!")
    st.markdown(
        """AImStream is a stunning tool using AI to speed up your Docker image building process!"""
    )
    model_option = st.selectbox(
        "Which Model would you like to be used?",
        ("lmstudio-ai/gemma-2b-it-GGUF", "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", "ChatGPT"),
        index=None,
        placeholder="Please Select Your Model ...",
    )
    st.header("Workplace: ")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.greetings = False

# Display the latest chat messages in the sidebar
display_chat_messages()

# Display all chat messages in the main body
st.header("Chain of Messages:")
display_main_body_messages()
# Greet user
with st.sidebar:
    if not st.session_state.greetings:
        with st.chat_message("assistant"):
            intro = "Hey, Docker dev! Time to kick your builds into high gear with AImStream! It's like having a jetpack strapped to your Docker images – they'll be soaring to completion before you can say 'dockerize me, Captain!'"
            st.markdown(intro)
            # Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": intro})
            st.session_state.greetings = True
# Example prompts
with st.sidebar:
    example_prompts = [
        "Set up an Alpine Linux Docker image with Nginx web server.",
        "Build a CentOS 7 image with basic tools like Git, net-tools, and curl.",
        "Create an Ubuntu 18.04 Docker image with OpenJDK 11 and Maven.",
    ]
    button_cols = st.columns(3)
    button_pressed = ""
    if button_cols[0].button(example_prompts[0]):
        button_pressed = example_prompts[0]
    elif button_cols[1].button(example_prompts[1]):
        button_pressed = example_prompts[1]
    elif button_cols[2].button(example_prompts[2]):
        button_pressed = example_prompts[2]
    st.divider()
# Handle prompt input and responses
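# The prompt comes either from the sidebar chat input or from a pressed example button.
# The user message is appended to the history, sent to the model, echoed in the sidebar,
# appended to the main-body chain, and then st.rerun() redraws the whole page.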
if prompt := (st.sidebar.chat_input("What type of IMAGE would you like to create?") or button_pressed):
    # Display user message in chat message container
    with st.sidebar:
        with st.chat_message("user"):
            st.markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

    prompt = prompt.replace('"', "").replace("'", "")
    images = []
    if prompt != "":
        query = prompt.strip().lower()
        # Here you can handle the prompt processing and response generation
        # response_content = "response content (using AI-generated result to change this)"
        response_content, code = get_openai_response_by_model(prompt=prompt, model_option=model_option)

        with st.sidebar:
            with st.chat_message("assistant"):
                st.markdown(response_content)
        st.session_state.messages.append(
            {"role": "assistant", "content": code, "images": images}
        )

        # Display response in the main body
        st.write(f"<div class='block user'><strong>User:</strong> {prompt}</div>", unsafe_allow_html=True)
        st.write(f"<div class='block assistant'><strong>Assistant:</strong> {response_content}</div>", unsafe_allow_html=True)
        st.write("<div class='line'></div>", unsafe_allow_html=True)

        # Rerun to keep the state
        st.rerun()
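# To run locally (assuming LM Studio is serving one of the models listed above on
# localhost:1234):
#   streamlit run almstream_app.py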