-
Notifications
You must be signed in to change notification settings - Fork 0
/
backend_functions.py
131 lines (98 loc) · 3.66 KB
/
backend_functions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import streamlit as st
from key_generator.key_generator import generate
from openai import OpenAI
import numpy as np
import pandas as pd
import time
import yaml
from dotenv import load_dotenv
from helper_functions import refresh
import os
# --- Module-level setup -----------------------------------------------------
# Prompt templates and app settings for the interview flows.
# NOTE(review): the file handle from open() is never closed — consider a
# `with` block; left as-is here.
config = yaml.load(open('./configs/config.star.yaml', 'r'),
                   Loader=yaml.FullLoader)
# Pull OPENAI_API_KEY from the environment; load_dotenv() lets a local .env
# file supply it during development.
load_dotenv()
API_KEY = os.getenv('OPENAI_API_KEY')
client = OpenAI(
    api_key=API_KEY
)
# Question banks read once at import time:
#   df   — behavioural (STAR) questions, CSV with a 'Question' column
#   tech — technical questions, Excel with 'Category' and 'Question' columns
df = pd.read_csv('./data/star_questions.csv')
tech = pd.read_excel('./data/Data_analyst_question.xlsx')
def get_evaluation(content: str) -> None:
    """
    Start a streamed STAR-methodology evaluation of a candidate answer.

    Appends a new, empty assistant message to ``st.session_state.messages``
    (tagged with a freshly generated key) and stores the *streaming* OpenAI
    response object in ``st.session_state.response`` — the chunks are
    consumed later by ``messageFromChatBot``.

    :param content: string of the form
        ``{'question': the question, 'answer': the answer}``
    :return: None — nothing is returned; all output goes into session state.
        The model is asked (via ``response_format``) to emit a JSON object
        shaped like ``{"eval": "...detailed evaluation..."}``.
    """
    key = generate()
    # Placeholder message the streaming handler will fill in chunk by chunk.
    st.session_state.messages.append(
        {"role": "assistant", "content": "", "key": "assistant-" + key.get_key()})
    # stream=True: the create() call returns an iterator of deltas, not a
    # finished completion; temperature=0 keeps evaluations deterministic.
    st.session_state.response = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        temperature=0,
        response_format={"type": "json_object"},
        messages=[
            {"role": "system",
             "content": config['prompts']['evaluation_prompt']},
            {"role": "user", "content": content}
        ],
        stream=True
    )
    # Flags read by the UI: disable answer rating and mark the bot as busy.
    st.session_state.rateAnswer = False
    st.session_state.responding = True
    refresh("chatcontainer")
def get_random_question():
    """
    Pick a random behavioural (STAR) question from the loaded CSV.

    :return: the 'Question' value of a uniformly random row of ``df``
    """
    # Use the actual frame length rather than a hard-coded 60 so the
    # function stays correct when the question bank grows or shrinks
    # (the old constant could miss new rows or index past the end).
    random_question_idx = np.random.randint(0, len(df))
    data = df.iloc[random_question_idx]
    return data['Question']
def messageFromChatBot():
    """
    Drain the streamed OpenAI response into the last chat message.

    Each content chunk is appended to ``st.session_state.messages[-1]`` and
    the chat container is refreshed, except for the chunks that make up the
    JSON scaffolding ``{"eval":"`` at the start (diverted into
    ``st.session_state.skip``) and the closing ``"}`` — the final two
    characters are trimmed off after the loop.

    :return: nothing — mutates session state only
    """
    for chunk in st.session_state.response:
        if chunk.choices[0].delta.content is not None:
            # NOTE(review): this is a *substring* membership test — any chunk
            # that happens to be a substring of '{"eval":"' (e.g. a lone '"')
            # is diverted to `skip`, even mid-answer. Fragile; relies on the
            # model's chunking — confirm before relying on it.
            if chunk.choices[0].delta.content not in '{\"eval\":\"':
                st.session_state.messages[-1]["content"] += chunk.choices[0].delta.content
                # Small delay gives the UI a visible typing effect.
                time.sleep(0.01)
                refresh("chatcontainer")
            else:
                # Accumulate the skipped JSON-wrapper tokens for bookkeeping.
                st.session_state.skip += chunk.choices[0].delta.content
    # Strip the trailing two characters (the closing '"}' of the JSON object).
    st.session_state.messages[-1]["content"] = st.session_state.messages[-1]["content"][:-1]
    st.session_state.messages[-1]["content"] = st.session_state.messages[-1]["content"][:-1]
    # st.session_state.skip=""
def get_technichal_question(selected_category):
    """
    Pick a random technical question for the given category.

    :param selected_category: value matched against the 'Category' column of
        the technical-question workbook (``tech``)
    :return: the 'Question' text of a randomly chosen row in that category
    :raises ValueError: if the category has no questions (empty selection)
    """
    category_data = tech[tech["Category"] == selected_category]
    # Sample a single row directly instead of shuffling the entire frame
    # with sample(frac=1) and re-indexing just to read row 0 — same
    # uniform-random result, far less work on large banks.
    return category_data.sample(n=1)["Question"].iloc[0]
def get_technichal_evaluation(content: str) -> None:
    """
    Start a streamed evaluation of an answer to a *technical* question.

    Same flow as ``get_evaluation`` but uses the technical evaluation system
    prompt: appends a new, empty assistant message to
    ``st.session_state.messages`` and stores the streaming OpenAI response in
    ``st.session_state.response`` for ``messageFromChatBot`` to consume.

    :param content: string of the form
        ``{'question': the question, 'answer': the answer}``
    :return: None — nothing is returned; all output goes into session state.
        The model is asked (via ``response_format``) to emit a JSON object
        shaped like ``{"eval": "...detailed evaluation..."}``.
    """
    key = generate()
    # Placeholder message the streaming handler will fill in chunk by chunk.
    st.session_state.messages.append(
        {"role": "assistant", "content": "", "key": "assistant-" + key.get_key()})
    st.session_state.response = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        temperature=0,
        response_format={"type": "json_object"},
        messages=[
            {"role": "system",
             "content": config['prompts']['tech_evaluation_prompt']},
            {"role": "user", "content": content}
        ],
        stream=True
    )
    # Flags read by the UI: disable answer rating and mark the bot as busy.
    st.session_state.rateAnswer = False
    st.session_state.responding = True
    refresh("chatcontainer")