Rewriting Wingman to command pattern + ensuring that the request to Wingman is non-blocking.
dvorka committed Jan 23, 2024
1 parent 63809df commit 0ce8f8a
Showing 9 changed files with 148 additions and 202 deletions.
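The core of the rewrite is replacing Wingman's eight-argument chat() signature with a single command object that travels through the call chain and is filled in by the LLM provider. The CommandWingmanChat declaration itself (in wingman.h) is not among the hunks shown below; judging from the brace initializer and the member accesses in the diff, it presumably aggregates the request and response roughly like this sketch:

    // Sketch only: inferred from the initializer order and the member
    // accesses below, not the verbatim declaration from wingman.h.
    struct CommandWingmanChat {
        std::string prompt;          // in: user prompt from the chat dialog
        std::string httpResponse;    // out: raw HTTP response body
        WingmanStatusCode status;    // out: OK or ERROR
        std::string errorMessage;    // out: set when the request fails
        std::string answerLlmModel;  // out: model name reported by the provider
        int promptTokens;            // out: prompt token count
        int answerTokens;            // out: completion token count
        std::string answerHtml;      // out: answer formatted as HTML
    };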
116 changes: 71 additions & 45 deletions app/src/qt/main_window_presenter.cpp
@@ -2148,6 +2148,8 @@ void MainWindowPresenter::handleActionWingman()

void MainWindowPresenter::slotRunWingmanFromDialog()
{
bool runAsynchronously = true;

// pull the prompt from the dialog and prepare it
string prompt = this->wingmanDialog->getPrompt();

@@ -2163,67 +2165,91 @@ void MainWindowPresenter::slotRunWingmanFromDialog()
// RUN Wingman
statusBar->showInfo(QString(tr("Wingman is talking to GPT provider...")));

string httpResponse{};
WingmanStatusCode status{
WingmanStatusCode::WINGMAN_STATUS_CODE_OK
// run
CommandWingmanChat commandWingmanChat{
prompt,
"",
WingmanStatusCode::WINGMAN_STATUS_CODE_OK,
"",
"",
0,
0,
""
};
string errorMessage{};
string answerLlmModel{};
int promptTokens{};
int answerTokens{};
string answerHtml{};
// chat
// - create a queue of tasks, store the task for the distributor there, and let it run :)
if(wingmanProgressDialog == nullptr) {
wingmanProgressDialog = new QProgressDialog(
tr("Wingman is talking to GPT provider..."),
tr("Cancel"),
0,
100,
&view);
} else {
wingmanProgressDialog->reset();
}
wingmanProgressDialog->setWindowModality(Qt::WindowModal);
wingmanProgressDialog->show();
wingmanProgressDialog->setValue(5);
// force processing of all events and refresh
QCoreApplication::processEvents();
// measure time
auto start = std::chrono::high_resolution_clock::now();
// run
// TODO let AsyncTaskNotificationsDistributor run it so that progress gets events
mind->wingmanChat(
prompt,
httpResponse,
status,
errorMessage,
answerLlmModel,
promptTokens,
answerTokens,
answerHtml
);
if(runAsynchronously) {
if(wingmanProgressDialog == nullptr) {
wingmanProgressDialog = new QProgressDialog(
tr("Wingman is talking to GPT provider..."),
tr("Cancel"),
0,
100,
&view);
} else {
wingmanProgressDialog->reset();
}
wingmanProgressDialog->setWindowModality(Qt::WindowModal);

int limit = 100*10*15; // 15s

wingmanProgressDialog->setMinimum(0);
wingmanProgressDialog->setMaximum(limit);
wingmanProgressDialog->show();
wingmanProgressDialog->setValue(5);

QFuture<CommandWingmanChat> future = QtConcurrent::run(
this->mind,
&Mind::wingmanChat,
commandWingmanChat);

// wait for the future to finish
QFutureWatcher<void> futureWatcher{};
futureWatcher.setFuture(future);
// blocking wait: futureWatcher.waitForFinished();

// event loop driven, non-blocking wait
while(!futureWatcher.isFinished() && limit > 0) {
MF_DEBUG("Wingman is talking to GPT provider..." << endl);
QApplication::processEvents();
std::this_thread::sleep_for(std::chrono::milliseconds(100));
limit-=100;
wingmanProgressDialog->setValue(limit);
}
// TODO if limit < 0, then raise error & show critical dialog

commandWingmanChat = future.result();

// HIDE progress dialog
wingmanProgressDialog->hide();

// check the result
if (future.isFinished()) {
statusBar->showInfo(QString(tr("Wingman got an answer from the GPT provider")));
} else {
// TODO show critical dialog
statusBar->showError(QString(tr("Wingman call to GPT provider failed")));
}
} else {
mind->wingmanChat(commandWingmanChat);
}
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::seconds>(end - start);
// wingmanProgressDialog->hide();
string answerDescriptor{
"[model: " + answerLlmModel +
"[model: " + commandWingmanChat.answerLlmModel +
", tokens (prompt/answer): " +
std::to_string(promptTokens) + "/" + std::to_string(answerTokens) +
std::to_string(commandWingmanChat.promptTokens) + "/" + std::to_string(commandWingmanChat.answerTokens) +
", time: " +
std::to_string(duration.count()) +
"s, status: " +
(status==WingmanStatusCode::WINGMAN_STATUS_CODE_OK?"OK":"ERROR") +
(commandWingmanChat.status==WingmanStatusCode::WINGMAN_STATUS_CODE_OK?"OK":"ERROR") +
"]"
};

// HIDE progress dialog
wingmanProgressDialog->setValue(100);
wingmanProgressDialog->hide();

// PUSH answer to the chat dialog
this->wingmanDialog->appendAnswerToChat(
answerHtml,
commandWingmanChat.answerHtml,
answerDescriptor,
this->wingmanDialog->getContextType()
);
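The non-blocking behavior above rests on a simple pattern: run the blocking provider call on a worker thread via QtConcurrent::run(), then pump the GUI event loop while polling the future instead of calling the blocking futureWatcher.waitForFinished(). Because QtConcurrent::run() copies its argument, the worker cannot mutate the caller's commandWingmanChat in place, which is why the filled-in command is read back with future.result(). A minimal standalone sketch of the same pattern (illustrative names, and a free function instead of the Mind member used in the commit):

    #include <QApplication>
    #include <QFuture>
    #include <QtConcurrent/QtConcurrent>
    #include <chrono>
    #include <thread>

    // Illustrative stand-in for the blocking LLM call (not MindForger code).
    int slowWork(int input) {
        std::this_thread::sleep_for(std::chrono::seconds(2));
        return input * 2;
    }

    int main(int argc, char** argv) {
        QApplication app(argc, argv);

        // Run the blocking call on a thread-pool worker thread.
        QFuture<int> future = QtConcurrent::run(slowWork, 21);

        // Event-loop friendly wait: keep the UI responsive by pumping
        // events while polling the future, with a timeout as in the commit.
        int remainingMs = 15000; // 15s
        while(!future.isFinished() && remainingMs > 0) {
            QApplication::processEvents();
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
            remainingMs -= 100;
        }

        return future.isFinished() && future.result() == 42 ? 0 : 1;
    }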
1 change: 1 addition & 0 deletions lib/src/config/configuration.h
@@ -434,6 +434,7 @@ class Configuration {
bool isAutolinkingCaseInsensitive() const { return autolinkingCaseInsensitive; }
void setAutolinkingCaseInsensitive(bool autolinkingCaseInsensitive) { this->autolinkingCaseInsensitive=autolinkingCaseInsensitive; }
bool isWingman();
WingmanLlmProviders getWingmanLlmProvider() const { return wingmanProvider; }
std::string getWingmanApiKey() const { return wingmanApiKey; }
std::string getWingmanLlmModel() const { return wingmanLlmModel; }
unsigned int getMd2HtmlOptions() const { return md2HtmlOptions; }
33 changes: 12 additions & 21 deletions lib/src/mind/ai/llm/mock_wingman.cpp
@@ -32,27 +32,18 @@ MockWingman::~MockWingman()
{
}

void MockWingman::chat(
const string& prompt,
string& httpResponse,
WingmanStatusCode& status,
string& errorMessage,
string& answerLlmModel,
int& promptTokens,
int& answerTokens,
string& answerHtml
) {
MF_DEBUG("MockWingman::chat() prompt:" << prompt << endl);

httpResponse.clear();
status=WingmanStatusCode::WINGMAN_STATUS_CODE_OK;
errorMessage.clear();
answerLlmModel.assign(this->llmModel);
promptTokens=42;
answerTokens=42198;
answerHtml.assign("chat(MOCK, '"+prompt+"')");

MF_DEBUG("MockWingman::chat() answer:" << answerHtml << endl);
void MockWingman::chat(CommandWingmanChat& command) {
MF_DEBUG("MockWingman::chat() prompt:" << command.prompt << endl);

command.httpResponse.clear();
command.status=WingmanStatusCode::WINGMAN_STATUS_CODE_OK;
command.errorMessage.clear();
command.answerLlmModel.assign(this->llmModel);
command.promptTokens=42;
command.answerTokens=42198;
command.answerHtml.assign("chat(MOCK, '"+command.prompt+"')");

MF_DEBUG("MockWingman::chat() answer:" << command.answerHtml << endl);
}

} // m8r namespace
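With the command pattern in place, exercising an implementation no longer means threading eight arguments through every call; the caller populates one struct and inspects it afterwards. Illustrative usage against the mock, assuming the struct sketched earlier and a constructor taking the mock's model name (not shown in this diff):

    CommandWingmanChat command{
        "What is MindForger?",                      // prompt
        "",                                         // httpResponse
        WingmanStatusCode::WINGMAN_STATUS_CODE_OK,  // status
        "",                                         // errorMessage
        "",                                         // answerLlmModel
        0,                                          // promptTokens
        0,                                          // answerTokens
        ""                                          // answerHtml
    };

    MockWingman wingman{"mock-llm-model"};
    wingman.chat(command);
    // command.answerHtml is now: chat(MOCK, 'What is MindForger?')
    // command.promptTokens == 42, command.answerTokens == 42198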
11 changes: 1 addition & 10 deletions lib/src/mind/ai/llm/mock_wingman.h
@@ -40,16 +40,7 @@ class MockWingman: Wingman

std::string getWingmanLlmModel() const { return llmModel; }

virtual void chat(
const std::string& prompt,
std::string& httpResponse,
WingmanStatusCode& status,
std::string& errorMessage,
std::string& answerLlmModel,
int& promptTokens,
int& answerTokens,
std::string& answerHtml
) override;
virtual void chat(CommandWingmanChat& command) override;
};

}
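The mock here and the OpenAI implementation in the next file both override a single-argument chat(), which implies the pure virtual in the Wingman base class (wingman.h, not among the files shown in this view) was rewritten to match, presumably along these lines:

    // Assumed shape of the base interface after this commit; a sketch,
    // since wingman.h is not part of the visible hunks.
    class Wingman {
    public:
        virtual ~Wingman() {}
        virtual void chat(CommandWingmanChat& command) = 0;
    };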
99 changes: 35 additions & 64 deletions lib/src/mind/ai/llm/openai_wingman.cpp
@@ -61,20 +61,10 @@ OpenAiWingman::~OpenAiWingman()
* @see https://github.com/nlohmann/json?tab=readme-ov-file
* @see https://json.nlohmann.me/
*/
void OpenAiWingman::curlGet(
const string& prompt,
const string& llmModel,
string& httpResponse,
WingmanStatusCode& status,
string& errorMessage,
string& answerLlmModel,
int& promptTokens,
int& answerTokens,
string& answerHtml
) {
void OpenAiWingman::curlGet(CommandWingmanChat& command) {
CURL* curl = curl_easy_init();
if (curl) {
string escapedPrompt{prompt};
string escapedPrompt{command.prompt};
replaceAll("\n", " ", escapedPrompt);
replaceAll("\"", "\\\"", escapedPrompt);

@@ -110,7 +100,7 @@ void OpenAiWingman::curlGet(
<< endl);

// set up cURL options
httpResponse.clear();
command.httpResponse.clear();
curl_easy_setopt(
curl, CURLOPT_URL,
"https://api.openai.com/v1/chat/completions");
@@ -122,7 +112,7 @@
openaiCurlWriteCallback);
curl_easy_setopt(
curl, CURLOPT_WRITEDATA,
&httpResponse);
&command.httpResponse);

struct curl_slist* headers = NULL;
headers = curl_slist_append(headers, ("Authorization: Bearer " + apiKey).c_str());
@@ -137,14 +127,14 @@
curl_slist_free_all(headers);

if (res != CURLE_OK) {
status = WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
errorMessage = curl_easy_strerror(res);
std::cerr << "Error: Wingman OpenAI cURL request failed: " << errorMessage << std::endl;
command.status = WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.errorMessage = curl_easy_strerror(res);
std::cerr << "Error: Wingman OpenAI cURL request failed: " << command.errorMessage << std::endl;

httpResponse.clear();
answerHtml.clear();
answerTokens = 0;
answerLlmModel = llmModel;
command.httpResponse.clear();
command.answerHtml.clear();
command.answerTokens = 0;
command.answerLlmModel = llmModel;

return;
}
@@ -176,7 +166,7 @@
"system_fingerprint": null
}
*/
auto httpResponseJSon = nlohmann::json::parse(httpResponse);
auto httpResponseJSon = nlohmann::json::parse(command.httpResponse);

MF_DEBUG(
"OpenAiWingman::curlGet() parsed response:" << endl
@@ -187,17 +177,17 @@

MF_DEBUG("OpenAiWingman::curlGet() fields:" << endl);
if(httpResponseJSon.contains("model")) {
httpResponseJSon["model"].get_to(answerLlmModel);
MF_DEBUG(" model: " << answerLlmModel << endl);
httpResponseJSon["model"].get_to(command.answerLlmModel);
MF_DEBUG(" model: " << command.answerLlmModel << endl);
}
if(httpResponseJSon.contains("usage")) {
if(httpResponseJSon["usage"].contains("prompt_tokens")) {
httpResponseJSon["usage"]["prompt_tokens"].get_to(promptTokens);
MF_DEBUG(" prompt_tokens: " << promptTokens << endl);
httpResponseJSon["usage"]["prompt_tokens"].get_to(command.promptTokens);
MF_DEBUG(" prompt_tokens: " << command.promptTokens << endl);
}
if(httpResponseJSon["usage"].contains("completion_tokens")) {
httpResponseJSon["usage"]["completion_tokens"].get_to(answerTokens);
MF_DEBUG(" answer_tokens: " << answerTokens << endl);
httpResponseJSon["usage"]["completion_tokens"].get_to(command.answerTokens);
MF_DEBUG(" answer_tokens: " << command.answerTokens << endl);
}
}
if(httpResponseJSon.contains("choices")
@@ -208,64 +198,45 @@
if(choice.contains("message")
&& choice["message"].contains("content")
) {
choice["message"]["content"].get_to(answerHtml);
choice["message"]["content"].get_to(command.answerHtml);
// TODO ask GPT for HTML formatted response
m8r::replaceAll(
"\n",
"<br/>",
answerHtml);
MF_DEBUG(" answer (HTML): " << answerHtml << endl);
command.answerHtml);
MF_DEBUG(" answer (HTML): " << command.answerHtml << endl);
}
if(choice.contains("finish_reason")) {
string statusStr{};
choice["finish_reason"].get_to(statusStr);
if(statusStr == "stop") {
status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_OK;
command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_OK;
} else {
status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
errorMessage.assign(
command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.errorMessage.assign(
"OpenAI API HTTP required failed with finish_reason: "
+ statusStr);
}
MF_DEBUG(" status: " << status << endl);
MF_DEBUG(" status: " << command.status << endl);
}
} else {
status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
errorMessage.assign(
command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.errorMessage.assign(
"No choices in the OpenAI API HTTP response");
}
} else {
status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
errorMessage.assign(
command.status = m8r::WingmanStatusCode::WINGMAN_STATUS_CODE_ERROR;
command.errorMessage.assign(
"OpenAI API HTTP request failed: unable to initialize cURL");
}
}

void OpenAiWingman::chat(
const string& prompt,
string& httpResponse,
WingmanStatusCode& status,
string& errorMessage,
string& answerLlmModel,
int& promptTokens,
int& answerTokens,
string& answerHtml
) {
MF_DEBUG("OpenAiWingman::chat() prompt:" << endl << prompt << endl);

curlGet(
prompt,
this->llmModel,
httpResponse,
status,
errorMessage,
answerLlmModel,
promptTokens,
answerTokens,
answerHtml
);

MF_DEBUG("OpenAiWingman::chat() answer:" << endl << answerHtml << endl);
void OpenAiWingman::chat(CommandWingmanChat& command) {
MF_DEBUG("OpenAiWingman::chat() prompt:" << endl << command.prompt << endl);

curlGet(command);

MF_DEBUG("OpenAiWingman::chat() answer:" << endl << command.answerHtml << endl);
}

} // m8r namespace
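curlGet() registers openaiCurlWriteCallback with CURLOPT_WRITEDATA pointing at command.httpResponse, so the callback's job is to append each chunk cURL delivers into that string. The callback body sits outside the shown hunks; a conventional implementation of such a libcurl write callback looks like this (a sketch, not necessarily the project's exact code):

    #include <cstddef>
    #include <string>

    // libcurl write callback contract: append the delivered chunk to the
    // std::string passed via CURLOPT_WRITEDATA and return the number of
    // bytes consumed (any other value makes curl abort the transfer).
    static size_t openaiCurlWriteCallback(
        void* contents, size_t size, size_t nmemb, void* userp
    ) {
        size_t realSize = size * nmemb;
        static_cast<std::string*>(userp)->append(
            static_cast<char*>(contents), realSize);
        return realSize;
    }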
