Merge pull request #7 from sbslee/0.8.0-dev
0.8.0 dev
sbslee authored Jun 26, 2023
2 parents 40e027f + 6ec177f commit d1ae859
Showing 12 changed files with 251 additions and 11 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,8 @@
# CHANGELOG

## 0.8.0 (2023-06-26)
* Enable ChatGPT to answer questions by making calls to external tools, APIs, or databases.

## 0.7.0 (2023-06-19)
* Implement token counter and price monitor in chat window.
* Enable the Enter key for sending messages in chat window.
12 changes: 12 additions & 0 deletions README.md
@@ -8,6 +8,7 @@ There are currently three chatbots available in KANU:

- [ChatGPT](#chatgpt) harnesses the power of ChatGPT, bringing it directly to your local computer
- [DocGPT](#docgpt) allows you to effortlessly interact with your documents and ask questions about them
- [FuncGPT](#funcgpt) can answer your questions by making calls to external tools, APIs, or databases

Other features of KANU include:

@@ -63,6 +64,17 @@ unstructured # Optional. Only required for .doc and .docx documents.
tabulate # Optional. Only required for .doc and .docx documents.
```

<a id="funcgpt"></a>
### FuncGPT

![FuncGPT demo](https://raw.githubusercontent.com/sbslee/kanu/main/images/funcgpt.gif)

The following packages are required to run FuncGPT:

```
openai # Required.
```
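
Like ChatGPT and DocGPT, FuncGPT can be configured either by uploading a configuration file (the in-app Template button writes one with `model`, `temperature`, `prompt`, `openai_key`, and `function_script` fields) or by filling in the options manually. The `function_script` field points to a Python file that defines a `functions` dictionary pairing each callable with its JSON schema. The sketch below mirrors the weather-lookup sample shipped with this release (the in-app Example button writes the same script); the returned weather data is a placeholder, not a real API call:

```
import json

def get_current_weather(location, unit="fahrenheit"):
    # Placeholder implementation; a real script would query an external service here.
    weather_info = {
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"],
    }
    return json.dumps(weather_info)

# JSON schema describing the function, passed to the OpenAI API as-is.
get_current_weather_json = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    },
}

# FuncGPT looks for this dictionary; each entry pairs a callable with its schema.
functions = {
    "get_current_weather": {
        "function": get_current_weather,
        "json": get_current_weather_json,
    }
}
```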

## Changelog

See the [CHANGELOG.md](https://github.com/sbslee/kanu/blob/main/CHANGELOG.md) file for details.
Binary file modified images/chatgpt.gif
Binary file modified images/docgpt.gif
Binary file added images/funcgpt.gif
134 changes: 133 additions & 1 deletion kanu/__main__.py
@@ -7,14 +7,57 @@
from .version import __version__
from .gui import Tooltip

GPT_MODELS = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"]
GPT_MODELS = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0613",
]
CHATGPT_PROMPT = """You are a helpful assistant."""
DOCGPT_PROMPT = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
FUNCGPT_PROMPT = """You are a helpful assistant."""
FUNCGPT_EXAMPLE = """import json
def get_current_weather(location, unit="fahrenheit"):
weather_info = {
"location": location,
"temperature": "72",
"unit": unit,
"forecast": ["sunny", "windy"],
}
return json.dumps(weather_info)
get_current_weather_json = {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
functions = {
"get_current_weather": {
"function": get_current_weather,
"json": get_current_weather_json,
}
}"""

class KANU:
def __init__(self, root):
@@ -35,6 +78,8 @@ def homepage(self):
b.pack()
b = tk.Button(self.container, text="DocGPT", command=lambda: self.config_docgpt())
b.pack()
b = tk.Button(self.container, text="FuncGPT", command=lambda: self.config_funcgpt())
b.pack()

def config_chatgpt(self):
self.container.pack_forget()
@@ -183,6 +228,89 @@ def template_docgpt_config(self):
with open(file_path, "w") as f:
config.write(f)

def config_funcgpt(self):
self.container.pack_forget()
self.container = tk.Frame(self.root)
self.container.pack()
l = tk.Label(self.container, text="FuncGPT")
l.grid(row=0, column=0, columnspan=2)
l = tk.Label(self.container, text="Required packages:")
l.grid(row=1, column=0, columnspan=2)
self.display_required_dependency(2, "openai")
m = tk.Message(self.container, width=300, text="Option 1. Upload a configuration file")
m.grid(row=3, column=0, columnspan=2)
b = tk.Button(self.container, text="Browse", command=self.parse_funcgpt_config)
b.grid(row=4, column=0)
b = tk.Button(self.container, text="Template", command=self.template_funcgpt_config)
b.grid(row=4, column=1)
m = tk.Message(self.container, width=300, text="Option 2. Configure manually")
m.grid(row=5, column=0, columnspan=2)
l = tk.Label(self.container, text="Model:")
l.grid(row=6, column=0, columnspan=2)
self.model = tk.StringVar(self.container, value="gpt-3.5-turbo-0613")
om = ttk.OptionMenu(self.container, self.model, "gpt-3.5-turbo-0613", *GPT_MODELS)
om.grid(row=7, column=0, columnspan=2)
l = tk.Label(self.container, text="System message ⓘ:")
Tooltip(l, "The system message helps set the behavior of the chatbot.")
l.grid(row=8, column=0, columnspan=2)
self.prompt = tk.Text(self.container, height=9, width=42)
sb = tk.Scrollbar(self.container, command=self.prompt.yview)
self.prompt.insert("1.0", FUNCGPT_PROMPT)
self.prompt.grid(row=9, column=0, columnspan=2, sticky="nsew")
sb.grid(row=9, column=2, sticky="ns")
self.prompt["yscrollcommand"] = sb.set
l = tk.Label(self.container, text="Function script:")
l.grid(row=10, column=0, columnspan=2)
b = tk.Button(self.container, text="Browse", command=self.get_function_script)
b.grid(row=11, column=0)
b = tk.Button(self.container, text="Example", command=self.example_function_script)
b.grid(row=11, column=1)
l = tk.Label(self.container, text="Temperature ⓘ:")
Tooltip(l, "The randomness in generating responses, which ranges between 0 and 1, with 0 indicating almost deterministic behavior.")
l.grid(row=12, column=0, columnspan=2)
self.temperature = tk.DoubleVar(self.container, value=0.5)
e = tk.Entry(self.container, textvariable=self.temperature)
e.grid(row=13, column=0, columnspan=2)
l = tk.Label(self.container, text="OpenAI API key:")
l.grid(row=14, column=0, columnspan=2)
e = tk.Entry(self.container)
e.grid(row=15, column=0, columnspan=2)
b = tk.Button(self.container, text="Submit", command=lambda: self.deploy_agent("FuncGPT", e.get(), self.model.get(), self.temperature.get(), self.prompt.get("1.0", "end-1c")))
b.grid(row=16, column=0)
b = tk.Button(self.container, text="Go back", command=lambda: self.homepage())
b.grid(row=16, column=1)

def parse_funcgpt_config(self):
config = configparser.ConfigParser()
file_path = filedialog.askopenfilename()
if not file_path:
return
config.read(file_path)
self.deploy_agent("FuncGPT", config["USER"]["openai_key"], config["DEFAULT"]["model"], float(config["DEFAULT"]["temperature"]), config["DEFAULT"]["prompt"], config["USER"]["function_script"])

def template_funcgpt_config(self):
file_path = filedialog.asksaveasfilename()
if not file_path:
return
config = configparser.ConfigParser()
config["DEFAULT"] = {"model": "gpt-3.5-turbo-0613", "temperature": "0.5", "prompt": FUNCGPT_PROMPT}
config["USER"] = {"openai_key": "", "function_script": ""}
with open(file_path, "w") as f:
config.write(f)

def get_function_script(self):
file_path = filedialog.askopenfilename()
if not file_path:
return
self.script = file_path

def example_function_script(self):
file_path = filedialog.asksaveasfilename()
if not file_path:
return
with open(file_path, "w") as f:
f.write(FUNCGPT_EXAMPLE)

def deploy_agent(self, agent, *args, **kwargs):
if agent == "ChatGPT":
from .chatgpt import ChatGPT
@@ -192,6 +320,10 @@ def deploy_agent(self, agent, *args, **kwargs):
from .docgpt import DocGPT
docgpt = DocGPT(self, *args, **kwargs)
docgpt.run()
elif agent == "FuncGPT":
from .funcgpt import FuncGPT
funcgpt = FuncGPT(self, *args, **kwargs)
funcgpt.run()
else:
raise ValueError(f"Unknown agent {agent}")

5 changes: 2 additions & 3 deletions kanu/chatgpt.py
@@ -33,8 +33,7 @@ def send_message(self):
self.messages += [{"role": "assistant", "content": response}]
self.session.insert(tk.END, "You: " + self.user_input.get() + "\n", "user")
self.session.insert(tk.END, f"Bot: " + response + "\n", "bot")
usage = self.calculate_usage(bot_response)
self.system.insert(tk.END, f"{usage}\n", "system")
self.calculate_usage(bot_response)
self.chatbox.delete(0, tk.END)

def calculate_usage(self, response):
@@ -46,7 +45,7 @@ def calculate_usage(self, response):
self.price += prompt_price + completion_price
self.tokens += total_tokens
message = f"System: Used {prompt_tokens:,} prompt + {completion_tokens:,} completion = {total_tokens:,} tokens (total: {self.tokens:,} or ${self.price:.6f})."
return message
self.system.insert(tk.END, f"{message}\n", "system")

def clear_session(self):
self.tokens = self.price = 0
9 changes: 4 additions & 5 deletions kanu/docgpt.py
@@ -25,7 +25,7 @@
".pdf": (PDFMinerLoader, {}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".csv": (CSVLoader, {}),
".csv": (CSVLoader, {"encoding": "utf8"}),
}

class DocGPT:
@@ -138,9 +138,8 @@ def send_message(self):
self.session.insert(tk.END, "You: " + self.user_input.get() + "\n", "user")
with get_openai_callback() as cb:
response = self.qa(self.user_input.get())
usage = self.calculate_usage(cb)
self.calculate_usage(cb)
self.session.insert(tk.END, "Bot: " + response["answer"] + "\n", "bot")
self.system.insert(tk.END, f"{usage}\n", "system")
self.chatbox.delete(0, tk.END)

def calculate_usage(self, cb):
@@ -149,7 +148,7 @@ def calculate_usage(self, cb):
self.price += prompt_price + completion_price
self.tokens += cb.total_tokens
message = f"System: Used {cb.prompt_tokens:,} prompt + {cb.completion_tokens:,} completion = {cb.total_tokens:,} tokens (total: {self.tokens:,} or ${self.price:.6f})."
return message
self.system.insert(tk.END, f"{message}\n", "system")

def go_with_option1(self):
self.database_directory = self.new_database_directory
@@ -168,7 +167,7 @@ def go_with_option1(self):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=self.chunk_size.get(), chunk_overlap=self.chunk_overlap.get())
texts = text_splitter.split_documents(documents)
for text in texts:
self.tokens += text2tokens("text-embedding-ada-002", text.page_content)
self.tokens += 2 * text2tokens("text-embedding-ada-002", text.page_content)
self.price = tokens2price("text-embedding-ada-002", "embedding", self.tokens)
db = Chroma.from_documents(texts, OpenAIEmbeddings(model="text-embedding-ada-002"), persist_directory=self.database_directory)
db.add_documents(texts)
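# Note: the texts are embedded twice here (once by Chroma.from_documents and once by
# db.add_documents), which is why the token estimate above is doubled.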
89 changes: 89 additions & 0 deletions kanu/funcgpt.py
@@ -0,0 +1,89 @@
import tkinter as tk
import importlib.machinery
import json

import openai

from .gui import Settings, Conversation
from .utils import tokens2price

class FuncGPT:
def __init__(
self,
kanu,
openai_key,
model,
temperature,
prompt,
function_script
):
self.kanu = kanu
self.model = model
self.temperature = temperature
self.prompt = prompt
self.function_script = function_script
openai.api_key = openai_key
self.settings = Settings(self)
self.conversation = Conversation(self)
self.tokens = 0
self.price = 0
self.module = importlib.machinery.SourceFileLoader("", self.function_script).load_module()

def run(self):
self.conversation.page()

def send_message(self):
if not self.messages:
self.messages.append({"role": "system", "content": self.prompt})
self.messages += [{"role": "user", "content": self.user_input.get()}]
bot_response = openai.ChatCompletion.create(
model=self.model,
messages=self.messages,
temperature=self.temperature,
functions=[x["json"] for x in self.module.functions.values()],
function_call="auto",
)
message = bot_response["choices"][0]["message"]
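# If the model chose to call one of the registered functions, run it locally and send the
# result back in a follow-up request so the model can compose the final answer.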
if message.get("function_call"):
function_name = message["function_call"]["name"]
function_args = json.loads(message["function_call"]["arguments"])
function_response = self.module.functions[function_name]["function"](**function_args)
second_response = openai.ChatCompletion.create(
model=self.model,
messages=[
{"role": "user", "content": self.user_input.get()},
message,
{
"role": "function",
"name": function_name,
"content": function_response,
},
],
)
self.calculate_usage(second_response, function=function_name)
response = second_response["choices"][0]["message"]["content"]
else:
response = bot_response["choices"][0]["message"]["content"]
self.messages += [{"role": "assistant", "content": response}]
self.session.insert(tk.END, "You: " + self.user_input.get() + "\n", "user")
self.session.insert(tk.END, f"Bot: " + response + "\n", "bot")
self.calculate_usage(bot_response)
self.chatbox.delete(0, tk.END)

def calculate_usage(self, response, function=None):
total_tokens = response["usage"]["total_tokens"]
prompt_tokens = response["usage"]["prompt_tokens"]
completion_tokens = response["usage"]["completion_tokens"]
prompt_price = tokens2price(self.model, "prompt", prompt_tokens)
completion_price = tokens2price(self.model, "completion", completion_tokens)
self.price += prompt_price + completion_price
self.tokens += total_tokens
if function is None:
message = f"System: Used {prompt_tokens:,} prompt + {completion_tokens:,} completion = {total_tokens:,} tokens (total: {self.tokens:,} or ${self.price:.6f})."
else:
message = f"System: Used {prompt_tokens:,} prompt + {completion_tokens:,} completion = {total_tokens:,} tokens (total: {self.tokens:,} or ${self.price:.6f}) (called function: {function})."
self.system.insert(tk.END, f"{message}\n", "system")

def clear_session(self):
self.tokens = self.price = 0
self.run()
4 changes: 3 additions & 1 deletion kanu/gui.py
@@ -7,6 +7,8 @@ def __init__(self, agent):
self.name = self.agent.__class__.__name__
if self.name == "ChatGPT":
self.go_back = self.agent.kanu.config_chatgpt
elif self.name == "FuncGPT":
self.go_back = self.agent.kanu.config_funcgpt
else:
self.go_back = self.agent.run

@@ -61,7 +63,7 @@ def save(self):
data += self.agent.system.get("1.0", tk.END).rstrip()
data += "\n\n[Session]\n"
data += self.agent.session.get("1.0", tk.END).rstrip()
with open(file_path, 'w') as f:
with open(file_path, 'w', encoding="utf-8") as f:
f.write(data)

class Settings:
Expand Down
4 changes: 4 additions & 0 deletions kanu/utils.py
@@ -5,9 +5,13 @@
def tokens2price(model, task, tokens):
models = {
"gpt-3.5-turbo" : {"prompt": 0.0015, "completion": 0.002},
"gpt-3.5-turbo-0613" : {"prompt": 0.0015, "completion": 0.002},
"gpt-3.5-turbo-16k" : {"prompt": 0.003, "completion": 0.004},
"gpt-3.5-turbo-16k-0613" : {"prompt": 0.003, "completion": 0.004},
"gpt-4" : {"prompt": 0.03, "completion": 0.06},
"gpt-4-0613" : {"prompt": 0.03, "completion": 0.06},
"gpt-4-32k" : {"prompt": 0.06, "completion": 0.12},
"gpt-4-32k-0613" : {"prompt": 0.06, "completion": 0.12},
"text-embedding-ada-002" : {"embedding": 0.0001},
}
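# Prices are in USD per 1,000 tokens; e.g. 1,500 prompt tokens with gpt-3.5-turbo
# cost 0.0015 / 1000 * 1500 = $0.00225.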
return models[model][task] / 1000 * tokens
2 changes: 1 addition & 1 deletion kanu/version.py
@@ -1 +1 @@
__version__ = "0.7.0"
__version__ = "0.8.0"
