From 7c199bf05fb7a6c0e3ab66988f66c48fb2d06b0c Mon Sep 17 00:00:00 2001
From: corradodebari
Date: Fri, 15 Nov 2024 12:50:55 +0100
Subject: [PATCH] top_p/check_hybrid

---
 app/src/modules/metadata.py  | 14 +++++++-------
 app/src/modules/st_common.py |  8 +++-----
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/app/src/modules/metadata.py b/app/src/modules/metadata.py
index 4abfd12..1132121 100644
--- a/app/src/modules/metadata.py
+++ b/app/src/modules/metadata.py
@@ -95,7 +95,7 @@ def ll_models():
             "openai_compat": False,
             "context_length": 127072,
             "temperature": [0.3, 0.3, 0.0, 2.0],
-            "top_p": [0.75, 0.75, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [100, 100, 1, 4096],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -108,7 +108,7 @@ def ll_models():
             "openai_compat": True,
             "context_length": 4191,
             "temperature": [1.0, 1.0, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 4096],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -121,7 +121,7 @@ def ll_models():
             "openai_compat": True,
             "context_length": 127072,
             "temperature": [1.0, 1.0, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 4096],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -134,7 +134,7 @@ def ll_models():
             "openai_compat": True,
             "context_length": 127072,
             "temperature": [1.0, 1.0, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 8191],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -147,7 +147,7 @@ def ll_models():
             "openai_compat": True,
             "context_length": 127072,
             "temperature": [1.0, 1.0, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 4095],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -160,7 +160,7 @@ def ll_models():
             "openai_compat": False,
             "context_length": 127072,
             "temperature": [0.2, 0.2, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 28000],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
@@ -173,7 +173,7 @@ def ll_models():
             "openai_compat": False,
             "context_length": 127072,
             "temperature": [0.2, 0.2, 0.0, 2.0],
-            "top_p": [0.9, 0.9, 0.0, 1.0],
+            "top_p": [1.0, 1.0, 0.0, 1.0],
             "max_tokens": [256, 256, 1, 28000],
             "frequency_penalty": [0.0, 0.0, -1.0, 1.0],
             "presence_penalty": [0.0, 0.0, -2.0, 2.0],
diff --git a/app/src/modules/st_common.py b/app/src/modules/st_common.py
index fbc3fc8..c3540bc 100644
--- a/app/src/modules/st_common.py
+++ b/app/src/modules/st_common.py
@@ -436,11 +436,9 @@ def create_zip(state_dict_filt, provider):
 
 # Check if the conf is full ollama or openai, currently supported for springai export
 def check_hybrid_conf(session_state_json):
-    embedding_models = meta.embedding_models()
-    chat_models = meta.ll_models()
-
-    embModel = embedding_models.get(session_state_json["rag_params"].get("model"))
-    chatModel = chat_models.get(session_state_json["ll_model"])
+    chatModel = state.ll_model_config.get(session_state_json["ll_model"])
+    embModel = state.embed_model_config.get(state.rag_params["model"])
+
     logger.info("Model: %s",session_state_json["ll_model"])
     logger.info("Embedding Model embModel: %s",embModel)
     logger.info("Chat Model: %s",chatModel)
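
Note (not part of the patch): after this change, check_hybrid_conf resolves both
model entries from the session-state config dictionaries instead of re-reading
them through meta.*. The sketch below illustrates how a "full ollama or openai"
decision over those two entries might look. It is a minimal, hypothetical
illustration: only the "openai_compat" key is visible in the diff above, and the
"api" key, provider names, and the is_hybrid helper are assumptions, not
repository code.

# Hypothetical sketch: flag a configuration as hybrid when the chat and
# embedding models do not come from the same provider family.
def is_hybrid(chat_model_cfg, embed_model_cfg):
    """Return True when chat and embedding models use different providers."""
    if chat_model_cfg is None or embed_model_cfg is None:
        # A model missing from the config cannot be exported; treat as hybrid.
        return True
    # "api" is an assumed per-model key naming the provider (e.g. "OpenAI",
    # "Ollama"); the patch itself only shows the "openai_compat" flag.
    return chat_model_cfg.get("api") != embed_model_cfg.get("api")

# Example: an OpenAI chat model paired with an Ollama embedder is hybrid,
# so a SpringAI export would be rejected under this sketch.
print(is_hybrid({"api": "OpenAI"}, {"api": "Ollama"}))  # True
print(is_hybrid({"api": "OpenAI"}, {"api": "OpenAI"}))  # False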