You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Error transcribing recording_1.wav: ffmpeg was not found but is required to load audio files from filename
Error transcribing recording_2.wav: ffmpeg was not found but is required to load audio files from filename
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 122, in spawn_main
exitcode = _main(fd, parent_sentinel)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 131, in _main
prepare(preparation_data)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 246, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 297, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen runpy>", line 291, in run_path
File "<frozen runpy>", line 98, in _run_module_code
File "<frozen runpy>", line 88, in _run_code
File "/Users/masterh/Desktop/WORK /Coding/viet-ttsts/text-to-speech-local-fiel.py", line 5, in <module>
transcriber = pipeline("automatic-speech-recognition", model="vinai/PhoWhisper-small")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/__init__.py", line 940, in pipeline
framework, model = infer_framework_load_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 302, in infer_framework_load_model
raise ValueError(
ValueError: Could not load model vinai/PhoWhisper-small with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCTC'>, <class 'transformers.models.auto.modeling_auto.AutoModelForSpeechSeq2Seq'>, <class 'transformers.models.whisper.modeling_whisper.WhisperForConditionalGeneration'>). See the original errors:
while loading with AutoModelForCTC, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 567, in from_pretrained
raise ValueError(
ValueError: Unrecognized configuration class <class 'transformers.models.whisper.configuration_whisper.WhisperConfig'> for this kind of AutoModel: AutoModelForCTC.
Model type should be one of Data2VecAudioConfig, HubertConfig, MCTCTConfig, SEWConfig, SEWDConfig, UniSpeechConfig, UniSpeechSatConfig, Wav2Vec2Config, Wav2Vec2BertConfig, Wav2Vec2ConformerConfig, WavLMConfig.
while loading with AutoModelForSpeechSeq2Seq, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3897, in from_pretrained
).start()
^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 288, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 42, in _launch
prep_data = spawn.get_preparation_data(process_obj._name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 164, in get_preparation_data
_check_not_importing_main()
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 140, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
To fix this issue, refer to the "Safe importing of main module"
section in https://docs.python.org/3/library/multiprocessing.html
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
return model_class.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3941, in from_pretrained
raise EnvironmentError(
OSError: Can't load the model for 'vinai/PhoWhisper-small'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'vinai/PhoWhisper-small' is the correct path to a directory containing a file named pytorch_model.bin, tf_model.h5, model.ckpt or flax_model.msgpack.
while loading with WhisperForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3897, in from_pretrained
).start()
^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 288, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 42, in _launch
prep_data = spawn.get_preparation_data(process_obj._name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 164, in get_preparation_data
_check_not_importing_main()
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 140, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
To fix this issue, refer to the "Safe importing of main module"
section in https://docs.python.org/3/library/multiprocessing.html
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3941, in from_pretrained
raise EnvironmentError(
OSError: Can't load the model for 'vinai/PhoWhisper-small'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'vinai/PhoWhisper-small' is the correct path to a directory containing a file named pytorch_model.bin, tf_model.h5, model.ckpt or flax_model.msgpack.
The text was updated successfully, but these errors were encountered:
Device set to use mps:0
Error transcribing recording_1.wav: ffmpeg was not found but is required to load audio files from filename
Error transcribing recording_2.wav: ffmpeg was not found but is required to load audio files from filename
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 122, in spawn_main
exitcode = _main(fd, parent_sentinel)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 131, in _main
prepare(preparation_data)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 246, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 297, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen runpy>", line 291, in run_path
File "<frozen runpy>", line 98, in _run_module_code
File "<frozen runpy>", line 88, in _run_code
File "/Users/masterh/Desktop/WORK /Coding/viet-ttsts/text-to-speech-local-fiel.py", line 5, in <module>
transcriber = pipeline("automatic-speech-recognition", model="vinai/PhoWhisper-small")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/__init__.py", line 940, in pipeline
framework, model = infer_framework_load_model(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 302, in infer_framework_load_model
raise ValueError(
ValueError: Could not load model vinai/PhoWhisper-small with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCTC'>, <class 'transformers.models.auto.modeling_auto.AutoModelForSpeechSeq2Seq'>, <class 'transformers.models.whisper.modeling_whisper.WhisperForConditionalGeneration'>). See the original errors:
while loading with AutoModelForCTC, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 567, in from_pretrained
raise ValueError(
ValueError: Unrecognized configuration class <class 'transformers.models.whisper.configuration_whisper.WhisperConfig'> for this kind of AutoModel: AutoModelForCTC.
Model type should be one of Data2VecAudioConfig, HubertConfig, MCTCTConfig, SEWConfig, SEWDConfig, UniSpeechConfig, UniSpeechSatConfig, Wav2Vec2Config, Wav2Vec2BertConfig, Wav2Vec2ConformerConfig, WavLMConfig.
while loading with AutoModelForSpeechSeq2Seq, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3897, in from_pretrained
).start()
^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 288, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 42, in _launch
prep_data = spawn.get_preparation_data(process_obj._name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 164, in get_preparation_data
_check_not_importing_main()
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 140, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
return model_class.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3941, in from_pretrained
raise EnvironmentError(
OSError: Can't load the model for 'vinai/PhoWhisper-small'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'vinai/PhoWhisper-small' is the correct path to a directory containing a file named pytorch_model.bin, tf_model.h5, model.ckpt or flax_model.msgpack.
while loading with WhisperForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3897, in from_pretrained
).start()
^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/context.py", line 288, in _Popen
return Popen(process_obj)
^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 42, in _launch
prep_data = spawn.get_preparation_data(process_obj._name)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 164, in get_preparation_data
_check_not_importing_main()
File "/Users/masterh/.pyenv/versions/3.11.11/lib/python3.11/multiprocessing/spawn.py", line 140, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/pipelines/base.py", line 289, in infer_framework_load_model
model = model_class.from_pretrained(model, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/masterh/.pyenv/versions/venv/lib/python3.11/site-packages/transformers/modeling_utils.py", line 3941, in from_pretrained
raise EnvironmentError(
OSError: Can't load the model for 'vinai/PhoWhisper-small'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'vinai/PhoWhisper-small' is the correct path to a directory containing a file named pytorch_model.bin, tf_model.h5, model.ckpt or flax_model.msgpack.
The text was updated successfully, but these errors were encountered: