Skip to content

Commit

Permalink
updates model catalog functions
Browse files Browse the repository at this point in the history
  • Loading branch information
DARREN OBERST authored and DARREN OBERST committed Dec 19, 2024
1 parent 488c9ba commit 87c6316
Showing 1 changed file with 14 additions and 11 deletions.
25 changes: 14 additions & 11 deletions llmware/models.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -312,6 +312,10 @@ def add_model_catalog_vars(cls, new_attr):
cls.model_catalog_state_attributes.append(new_attr)
return True

@classmethod
def reset_to_default_catalog(cls):

    """ Resets the registry's model list back to the default catalog shipped in model_configs.

    Uses a shallow copy of the default list so that subsequent in-place
    registry changes (add/remove model) cannot mutate the module-level
    default, which would otherwise make a later reset ineffective.
    """

    cls.registered_models = global_model_repo_catalog_list.copy()


def pull_model_from_hf(model_card, local_model_repo_path, api_key=None, **kwargs):

Expand Down Expand Up @@ -503,6 +507,12 @@ def pull_latest_manifest(self):
# will add to check manifest in global repo and make available for pull down
return 0

def reset_to_default_catalog(self):

    """ Restores the model catalog to the default list defined in model_configs. """

    # revert the registry's class-level model list to the shipped defaults
    _ModelRegistry().reset_to_default_catalog()

    # refresh this catalog's snapshot so it reflects the freshly reset registry
    self.global_model_list = _ModelRegistry().get_model_list()

def save_model_registry(self, fp=None, fn="llmware_model_catalog.json"):

""" Utility method to export global model list to json file """
Expand Down Expand Up @@ -3933,14 +3943,6 @@ def __init__(self, model=None, tokenizer=None, model_name=None, api_key=None, mo

self.get_token_counts = OVConfig().get_config("get_token_counts")

# check for llmware path & create if not already set up
if not os.path.exists(LLMWareConfig.get_llmware_path()):
# if not explicitly set up by user, then create folder directory structure
LLMWareConfig.setup_llmware_workspace()

if not os.path.exists(LLMWareConfig.get_model_repo_path()):
os.mkdir(LLMWareConfig.get_model_repo_path())

# please note that the external tokenizer is used solely for producing
# input and output token counts - and can be switched off in OVConfig
if self.get_token_counts:
Expand Down Expand Up @@ -10072,7 +10074,6 @@ def __init__(self, model_name=None, model_card=None, use_gpu_if_available=True,

# set verbose level in environ level - will be picked up by callback in whisper_cpp
os.environ["whisper_cpp_verbose"] = GGUFConfigs().get_config("whisper_cpp_verbose")

self.WHISPER_SR = GGUFConfigs().get_config("whisper_sr")
self.strategy = GGUFConfigs().get_config("whisper_strategy")
self.n_threads = GGUFConfigs().get_config("whisper_threads")
Expand Down Expand Up @@ -10154,7 +10155,6 @@ def load_model_for_inference(self, model_repo_path, model_card = None, **kwargs)

# set to True by default - will display in 'real-time' the transcription
self.params.print_realtime = GGUFConfigs().get_config("whisper_cpp_realtime_display")

self.params.print_timestamps = True
self.params.tdrz_enable = self.tiny_diarize
self.params.progress_callback = whisper_progress_callback(self.callback)
Expand Down Expand Up @@ -10316,7 +10316,8 @@ def inference(self, prompt, inference_dict=None):
self.remove_segment_markers = inference_dict["remove_segment_markers"]

# preview before starting inference
self.preview()

# self.preview()

# note: updated dependencies for improved efficiency
# previously, used librosa library
Expand Down Expand Up @@ -10438,6 +10439,8 @@ def _generate(self, data):

""" Executes lib_whisper generation on data from audio file. """

print("self.context len - ",self.context)

w = self._lib.whisper_full(ctypes.c_void_p(self.context),
self.params,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
Expand Down

0 comments on commit 87c6316

Please sign in to comment.