@@ -186,6 +186,14 @@ class TranscriptionConfig:
    # use_cer: bool = False
    debug_mode: bool = False  # Whether to print more detail in the output.

    # Language-ID prompt for prompt-conditioned models (e.g. EncDecRNNTBPEModelWithPrompt).
    # Set to a language key from the model's prompt_dictionary (e.g. "en-US", "auto").
    # Ignored for models without prompt support.
    target_lang: Optional[str] = None
    # Whether to strip the language tags from the transcriptions.
    # Ignored for models without prompt support.
    strip_lang_tags: bool = False


def extract_transcriptions(hyps):
"""
@@ -363,6 +371,12 @@ def main(cfg: TranscriptionConfig):
    else:
        asr_model.change_decoding_strategy(cfg.ctc_decoding)

    # Set the language-ID prompt for prompt-conditioned models.
    if hasattr(asr_model, 'set_inference_prompt'):
        lang = cfg.target_lang if cfg.target_lang is not None else "auto"
        asr_model.set_inference_prompt(lang)
        asr_model.decoding.set_strip_lang_tags(cfg.strip_lang_tags)

    asr_model = asr_model.to(device=device, dtype=compute_dtype)
    asr_model.eval()

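For context, here is a minimal sketch of how these two options are consumed at inference time. It assumes a prompt-conditioned checkpoint; the restore path and audio path are placeholders, `restore_from`/`transcribe` are the standard NeMo model APIs, and `set_inference_prompt`/`set_strip_lang_tags` are the hooks exercised by this diff:

```python
from nemo.collections.asr.models import EncDecRNNTBPEModelWithPrompt

# Restore a prompt-conditioned checkpoint (the path is a placeholder).
asr_model = EncDecRNNTBPEModelWithPrompt.restore_from("prompt_rnnt_bpe.nemo")

# Condition decoding on a language key from the model's prompt_dictionary;
# "auto" asks the model to pick the language itself.
asr_model.set_inference_prompt("en-US")

# Optionally drop the emitted language tags from the final transcripts.
asr_model.decoding.set_strip_lang_tags(True)

asr_model.eval()
predictions = asr_model.transcribe(["/data/audio.wav"])
```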
95 changes: 95 additions & 0 deletions examples/asr/asr_transducer/speech_to_text_rnnt_bpe_prompt.py
@@ -0,0 +1,95 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ to prepare the tokenizer.

# Manifest file example (note the per-utterance "target_lang" field):
{"audio_filepath": "/data/audio.wav", "duration": 12.12, "text": "The transcript.", "target_lang": "en-US"}

```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
--manifest=<path to train manifest files, separated by commas>
OR
--data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```

# Training the model
```sh
python speech_to_text_rnnt_bpe_prompt.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
trainer.devices=-1 \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```

# Fine-tune a model

For documentation on fine-tuning this model, please visit:
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations

"""

import lightning.pytorch as pl
from omegaconf import OmegaConf

from nemo.collections.asr.models import EncDecRNNTBPEModelWithPrompt
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
from nemo.utils.trainer_utils import resolve_trainer_cfg


@hydra_runner(
    config_path="../conf/fastconformer/cache_aware_streaming/",
    config_name="fastconformer_transducer_bpe_streaming_prompt.yaml",
)
def main(cfg):
    logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')

    trainer = pl.Trainer(**resolve_trainer_cfg(cfg.trainer))
    exp_manager(trainer, cfg.get("exp_manager", None))
    asr_model = EncDecRNNTBPEModelWithPrompt(cfg=cfg.model, trainer=trainer)

    # Initialize the weights of the model from another model, if provided via config.
    asr_model.maybe_init_from_pretrained_checkpoint(cfg)

    trainer.fit(asr_model)

    if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
        if asr_model.prepare_test(trainer):
            trainer.test(asr_model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
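As a usage note on the manifest format shown in the docstring above, each line of a NeMo manifest is a standalone JSON object, so entries carrying the per-utterance `target_lang` field can be written with the standard library alone; the values below simply mirror the docstring example:

```python
import json

# One JSON object per line (NeMo's JSON-lines manifest convention).
# Filepath, duration, text, and language key are placeholder values.
entry = {
    "audio_filepath": "/data/audio.wav",
    "duration": 12.12,
    "text": "The transcript.",
    "target_lang": "en-US",
}
with open("train_manifest.json", "w", encoding="utf-8") as f:
    f.write(json.dumps(entry) + "\n")
```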