
Commit bdd664b

Merge pull request #10 from sean1832/patch-only-davinci-model-works
Patch #9.
2 parents e03756f + aaea6a8

4 files changed: +17 additions, −24 deletions


Seanium_Brain.py

Lines changed: 7 additions & 9 deletions
@@ -1,5 +1,4 @@
 import os
-import time
 
 import streamlit as st
 import streamlit_toggle as st_toggle
@@ -60,11 +59,16 @@
                            help=_('What sampling temperature to use, between 0 and 1. Higher values like 0.8 will make the '
                                   'output more random, while lower values like 0.2 will make it more focused and '
                                   'deterministic. \n\nIt is generally recommend altering this or `top_p` but not both.'))
-    max_tokens = st.slider(_('Max Tokens'), 850, 4096, value=util.read_json_at(INFO.BRAIN_MEMO, 'max_tokens', 1000),
+    max_tokens = st.slider(_('Max Tokens'), 10, 4096, value=util.read_json_at(INFO.BRAIN_MEMO, 'max_tokens', 1000),
                            help=_("The maximum number of tokens to generate in the completion.\n\nThe token count of "
                                   "your prompt plus `max_tokens` cannot exceed the model's context length. Most "
                                   "models have a context length of 2048 tokens (except for the newest models, "
                                   "which support 4096)."))
+    chunk_size = st.slider(_('Chunk size'), 1500, 4500,
+                           value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_size', 4000),
+                           help=_("The number of tokens to consider at each step. The larger this is, the more "
+                                  "context the model has to work with, but the slower generation and expensive "
+                                  "will it be."))
 
     with st.expander(label=_('Advanced Options')):
         top_p = st.slider(_('Top_P'), 0.0, 1.0, value=util.read_json_at(INFO.BRAIN_MEMO, 'top_p', 1.0),
@@ -84,15 +88,9 @@
                                        "new tokens based on their existing frequency in the text so far."
                                        "\n\n[See more information about frequency and presence penalties.]"
                                        "(https://platform.openai.com/docs/api-reference/parameter-details)"))
-
-    chunk_size = st.slider(_('Chunk size'), 1500, 4500,
-                           value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_size', 4000),
-                           help=_("The number of tokens to consider at each step. The larger this is, the more "
-                                  "context the model has to work with, but the slower generation and expensive "
-                                  "will it be."))
 
     enable_stream = st_toggle.st_toggle_switch(_('Stream (experimental)'),
                                                default_value=util.read_json_at(INFO.BRAIN_MEMO, 'enable_stream',
-                                                                               True))
+                                                                               False))
 
     if not enable_stream:
         chunk_count = st.slider(_('Answer count'), 1, 5, value=util.read_json_at(INFO.BRAIN_MEMO, 'chunk_count', 1),
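Note on the pattern above: every slider reads its default back from a JSON memo via util.read_json_at, so the chosen values persist across Streamlit reruns. Below is a minimal, self-contained sketch of that idea; MEMO_PATH and this read_json_at are illustrative stand-ins for the project's INFO.BRAIN_MEMO and modules.utilities helper, not the actual implementation.

import json
import os

import streamlit as st

MEMO_PATH = '.user/brain-memo.json'  # hypothetical memo location


def read_json_at(path, key, default=None):
    # Return json[key] from the memo file, falling back to `default`
    # when the file or the key is missing.
    if os.path.exists(path):
        with open(path, encoding='utf-8') as f:
            return json.load(f).get(key, default)
    return default


# The commit lowers the Max Tokens floor from 850 to 10 and promotes the
# Chunk size slider out of 'Advanced Options'; both remember their last
# saved value through the memo file.
max_tokens = st.slider('Max Tokens', 10, 4096,
                       value=read_json_at(MEMO_PATH, 'max_tokens', 1000))
chunk_size = st.slider('Chunk size', 1500, 4500,
                       value=read_json_at(MEMO_PATH, 'chunk_size', 4000))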

modules/check_update.py

Lines changed: 8 additions & 1 deletion
@@ -1,6 +1,7 @@
 import os
 import time
 import modules.utilities as util
+import modules as mod
 
 file_path = r'.user\input.txt'
 temp_file = r'.user\input_last-run.temp'
@@ -10,7 +11,7 @@ def compare_time(t1, t2):
     return t1 == t2
 
 
-def isUpdated():
+def is_input_updated():
     if os.path.exists(file_path):
         # get modification time of the file
         mod_time = os.path.getmtime(file_path)
@@ -35,3 +36,9 @@ def isUpdated():
         return True
     else:
         raise FileNotFoundError(f'File: {file_path} does not exist.')
+
+
+def is_param_updated(param_val, param_infile_key):
+    infile_val = util.read_json_at(mod.INFO.BRAIN_MEMO, param_infile_key)
+    if infile_val != param_val:
+        return True
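Note on the new helper: when the stored value matches the live one, is_param_updated falls off the end of the function and implicitly returns None. None is falsy, so the `or` chain in streamlit_toolkit/tools.py below still works, but an explicit boolean return would be clearer. A sketch of an equivalent explicit version, assuming the same util.read_json_at semantics as in the hunk above:

def is_param_updated(param_val, param_infile_key):
    # Compare the live parameter against the value persisted in the memo;
    # returning the comparison directly avoids the implicit-None path.
    infile_val = util.read_json_at(mod.INFO.BRAIN_MEMO, param_infile_key)
    return infile_val != param_val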

pages/1_Configs.py

Lines changed: 0 additions & 4 deletions
@@ -14,10 +14,6 @@
 
 _ = language.set_language()
 
-# st.set_page_config(
-#     page_title='Configs'
-# )
-
 body = st.container()
 

streamlit_toolkit/tools.py

Lines changed: 2 additions & 10 deletions
@@ -62,15 +62,7 @@ def save(content, path, page='', json_value: dict = None):
     if page == '💽Brain Memory':
         for key, value in json_value.items():
             util.update_json(INFO.BRAIN_MEMO, key, value)
-        #
-        #
-        # util.update_json(INFO.BRAIN_MEMO, 'delimiter', json_value['delimiter'])
-        # util.update_json(INFO.BRAIN_MEMO, 'append_mode', json_value['append_mode'])
-        # util.update_json(INFO.BRAIN_MEMO, 'force_mode', json_value['force_mode'])
-        # util.update_json(INFO.BRAIN_MEMO, 'advanced_mode', json_value['advanced_mode'])
-        # util.update_json(INFO.BRAIN_MEMO, 'filter_info', json_value['filter_info'])
-        # util.update_json(INFO.BRAIN_MEMO, 'filter_row_count', json_value['filter_row_count'])
-        # util.update_json(INFO.BRAIN_MEMO, 'exclude_dir', json_value['exclude_dir'])
+
     time.sleep(1)
     # refresh page
     st.experimental_rerun()
@@ -270,7 +262,7 @@ def execute_brain(q, params: GPT.model.param,
     # log question
     log(f'\n\n\n\n[{str(time.ctime())}] - QUESTION: {q}')
 
-    if mod.check_update.isUpdated():
+    if mod.check_update.is_input_updated() or mod.check_update.is_param_updated(params.chunk_size, 'chunk_size'):
         msg = st.warning(_('Updating Brain...'), icon="⏳")
         progress_bar = st.progress(0)
         for idx, chunk_num in GPT.query.build(params.chunk_size):
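Note on the first hunk: the seven commented-out, hard-coded update_json calls are gone in favor of a single loop over whatever key/value pairs the caller passes, so a new persisted setting no longer requires touching save(). A self-contained sketch of that pattern follows; this update_json is a hypothetical stand-in for the project's util.update_json, not its actual implementation.

import json
import os


def update_json(path, key, value):
    # Read-modify-write a single key in a JSON file, creating it if absent.
    data = {}
    if os.path.exists(path):
        with open(path, encoding='utf-8') as f:
            data = json.load(f)
    data[key] = value
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2)


def save_settings(memo_path, json_value: dict):
    # Persist every collected setting generically instead of one
    # hard-coded update_json call per key.
    for key, value in json_value.items():
        update_json(memo_path, key, value)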
