Merge pull request #44 from idiap/phoneme-cleaners
Add multilingual phoneme cleaner
eginhard authored Jun 17, 2024
2 parents 3a20f47 + 9cfcc0a commit bd9b21d
Showing 7 changed files with 45 additions and 24 deletions.
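
For orientation, here is a minimal sketch of how the new cleaner differs from the existing English-only phoneme_cleaners. The import path and the expected outputs are taken from the tests added at the bottom of this diff; the snippet itself is illustrative and not part of the commit.

from TTS.tts.utils.text.cleaners import multilingual_phoneme_cleaners, phoneme_cleaners

# phoneme_cleaners expands numbers and abbreviations into English words,
# so it is only suitable for English input.
print(phoneme_cleaners("-1"))  # "minus one"

# multilingual_phoneme_cleaners only normalizes symbols and whitespace and
# leaves number expansion to the phonemizer, so it works across languages.
print(multilingual_phoneme_cleaners("(Hello)"))  # "Hello"
print(multilingual_phoneme_cleaners("1:"))       # "1,"
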
52 changes: 34 additions & 18 deletions TTS/tts/utils/text/cleaners.py
@@ -3,6 +3,7 @@
# TODO: pick the cleaner for languages dynamically

import re
from typing import Optional

from anyascii import anyascii

@@ -17,35 +18,38 @@
_whitespace_re = re.compile(r"\s+")


def expand_abbreviations(text, lang="en"):
def expand_abbreviations(text: str, lang: str = "en") -> str:
    if lang == "en":
        _abbreviations = abbreviations_en
    elif lang == "fr":
        _abbreviations = abbreviations_fr
    else:
        msg = f"Language {lang} not supported in expand_abbreviations"
        raise ValueError(msg)
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def lowercase(text):
def lowercase(text: str) -> str:
    return text.lower()


def collapse_whitespace(text):
def collapse_whitespace(text: str) -> str:
    return re.sub(_whitespace_re, " ", text).strip()


def convert_to_ascii(text):
def convert_to_ascii(text: str) -> str:
    return anyascii(text)


def remove_aux_symbols(text):
def remove_aux_symbols(text: str) -> str:
    text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text)
    return text


def replace_symbols(text, lang="en"):
    """Replace symbols based on the lenguage tag.
def replace_symbols(text: str, lang: Optional[str] = "en") -> str:
    """Replace symbols based on the language tag.

    Args:
        text:
@@ -77,38 +81,38 @@ def replace_symbols(text, lang="en"):
    return text


def basic_cleaners(text):
def basic_cleaners(text: str) -> str:
    """Basic pipeline that lowercases and collapses whitespace without transliteration."""
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
def transliteration_cleaners(text: str) -> str:
    """Pipeline for non-English text that transliterates to ASCII."""
    # text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def basic_german_cleaners(text):
def basic_german_cleaners(text: str) -> str:
    """Pipeline for German text"""
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


# TODO: elaborate it
def basic_turkish_cleaners(text):
def basic_turkish_cleaners(text: str) -> str:
    """Pipeline for Turkish text"""
    text = text.replace("I", "ı")
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def english_cleaners(text):
def english_cleaners(text: str) -> str:
    """Pipeline for English text, including number and abbreviation expansion."""
    # text = convert_to_ascii(text)
    text = lowercase(text)
@@ -121,8 +125,12 @@ def english_cleaners(text):
    return text


def phoneme_cleaners(text):
    """Pipeline for phonemes mode, including number and abbreviation expansion."""
def phoneme_cleaners(text: str) -> str:
    """Pipeline for phonemes mode, including number and abbreviation expansion.

    NB: This cleaner converts numbers into English words, for other languages
    use multilingual_phoneme_cleaners().
    """
    text = en_normalize_numbers(text)
    text = expand_abbreviations(text)
    text = replace_symbols(text)
@@ -131,7 +139,15 @@ def phoneme_cleaners(text):
    return text


def french_cleaners(text):
def multilingual_phoneme_cleaners(text: str) -> str:
    """Pipeline for phonemes mode, including number and abbreviation expansion."""
    text = replace_symbols(text, lang=None)
    text = remove_aux_symbols(text)
    text = collapse_whitespace(text)
    return text


def french_cleaners(text: str) -> str:
    """Pipeline for French text. There is no need to expand numbers, phonemizer already does that"""
    text = expand_abbreviations(text, lang="fr")
    text = lowercase(text)
@@ -141,7 +157,7 @@ def french_cleaners(text):
    return text


def portuguese_cleaners(text):
def portuguese_cleaners(text: str) -> str:
    """Basic pipeline for Portuguese text. There is no need to expand abbreviation and
    numbers, phonemizer already does that"""
    text = lowercase(text)
@@ -157,7 +173,7 @@ def chinese_mandarin_cleaners(text: str) -> str:
    return text


def multilingual_cleaners(text):
def multilingual_cleaners(text: str) -> str:
    """Pipeline for multilingual text"""
    text = lowercase(text)
    text = replace_symbols(text, lang=None)
@@ -166,7 +182,7 @@ def multilingual_cleaners(text):
    return text


def no_cleaners(text):
def no_cleaners(text: str) -> str:
    # remove newline characters
    text = text.replace("\n", "")
    return text
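
The recipe changes below switch the thorsten_DE training configs from the English-specific phoneme_cleaners to the new multilingual_phoneme_cleaners. Because the cleaner is selected by name in the config, the rename is the whole change; as a rough sketch of the relevant fields (assuming the GlowTTSConfig class used by the glow_tts recipe and a placeholder output_path, neither of which is shown in this diff):

import os

from TTS.tts.configs.glow_tts_config import GlowTTSConfig

# Placeholder output directory, as in the recipes.
output_path = os.path.dirname(os.path.abspath(__file__))

config = GlowTTSConfig(
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="multilingual_phoneme_cleaners",  # previously "phoneme_cleaners"
    use_phonemes=True,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
)
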
2 changes: 1 addition & 1 deletion recipes/thorsten_DE/align_tts/train_aligntts.py
@@ -30,7 +30,7 @@
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="phoneme_cleaners",
    text_cleaner="multilingual_phoneme_cleaners",
    use_phonemes=False,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
2 changes: 1 addition & 1 deletion recipes/thorsten_DE/glow_tts/train_glowtts.py
@@ -40,7 +40,7 @@
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="phoneme_cleaners",
    text_cleaner="multilingual_phoneme_cleaners",
    use_phonemes=True,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
2 changes: 1 addition & 1 deletion recipes/thorsten_DE/speedy_speech/train_speedy_speech.py
@@ -45,7 +45,7 @@
    test_delay_epochs=-1,
    epochs=1000,
    min_audio_len=11050,  # need to up min_audio_len to avoid speedy speech error
    text_cleaner="phoneme_cleaners",
    text_cleaner="multilingual_phoneme_cleaners",
    use_phonemes=True,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
2 changes: 1 addition & 1 deletion recipes/thorsten_DE/tacotron2-DDC/train_tacotron_ddc.py
@@ -49,7 +49,7 @@
    gradual_training=[[0, 6, 64], [10000, 4, 32], [50000, 3, 32], [100000, 2, 32]],
    double_decoder_consistency=True,
    epochs=1000,
    text_cleaner="phoneme_cleaners",
    text_cleaner="multilingual_phoneme_cleaners",
    use_phonemes=True,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
2 changes: 1 addition & 1 deletion recipes/thorsten_DE/vits_tts/train_vits.py
@@ -40,7 +40,7 @@
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1000,
    text_cleaner="phoneme_cleaners",
    text_cleaner="multilingual_phoneme_cleaners",
    use_phonemes=True,
    phoneme_language="de",
    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
7 changes: 6 additions & 1 deletion tests/text_tests/test_text_cleaners.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3

from TTS.tts.utils.text.cleaners import english_cleaners, phoneme_cleaners
from TTS.tts.utils.text.cleaners import english_cleaners, multilingual_phoneme_cleaners, phoneme_cleaners


def test_time() -> None:
@@ -19,3 +19,8 @@ def test_currency() -> None:
def test_expand_numbers() -> None:
    assert phoneme_cleaners("-1") == "minus one"
    assert phoneme_cleaners("1") == "one"


def test_multilingual_phoneme_cleaners() -> None:
    assert multilingual_phoneme_cleaners("(Hello)") == "Hello"
    assert multilingual_phoneme_cleaners("1:") == "1,"
