diff --git a/nemo_text_processing/text_normalization/normalize.py b/nemo_text_processing/text_normalization/normalize.py index 5e2f9ebb5..3dbe5b138 100644 --- a/nemo_text_processing/text_normalization/normalize.py +++ b/nemo_text_processing/text_normalization/normalize.py @@ -185,6 +185,9 @@ def __init__( if post_process: self.post_processor = PostProcessingFst(cache_dir=cache_dir, overwrite_cache=overwrite_cache) + elif lang == 'pt': + from nemo_text_processing.text_normalization.pt.taggers.tokenize_and_classify import ClassifyFst + from nemo_text_processing.text_normalization.pt.verbalizers.verbalize_final import VerbalizeFinalFst elif lang == 'ko': from nemo_text_processing.text_normalization.ko.taggers.tokenize_and_classify import ClassifyFst from nemo_text_processing.text_normalization.ko.verbalizers.verbalize_final import VerbalizeFinalFst diff --git a/nemo_text_processing/text_normalization/pt/__init__.py b/nemo_text_processing/text_normalization/pt/__init__.py new file mode 100644 index 000000000..ffd13e2d6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/__init__.py b/nemo_text_processing/text_normalization/pt/data/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv new file mode 100644 index 000000000..22c9ed16c --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/ordinal_exceptions.tsv @@ -0,0 +1,2 @@ +segundo meio +terceiro terço diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv new file mode 100644 index 000000000..b19c44364 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/powers_of_ten.tsv @@ -0,0 +1 @@ +mil milésimo diff --git a/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv b/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv new file mode 100644 index 000000000..c140ca4ba --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/fractions/specials.tsv @@ -0,0 +1,4 @@ +connector e +minus menos +plural_suffix s +avos_suffix avos diff 
--git a/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py b/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv new file mode 100644 index 000000000..04ea91ee4 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/cardinal_specials.tsv @@ -0,0 +1,4 @@ +connector e +thousand mil +hundred_100 cem +hundred_1 cento diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv new file mode 100644 index 000000000..c84a95f53 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_fractional_specials.tsv @@ -0,0 +1,3 @@ +001 mil e um +010 mil e dez +100 mil e cem diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv new file mode 100644 index 000000000..f6257d9d1 --- /dev/null +++ 
b/nemo_text_processing/text_normalization/pt/data/numbers/decimal_specials.tsv @@ -0,0 +1,2 @@ +separator vírgula +minus menos diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv new file mode 100644 index 000000000..1859416c8 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/digit.tsv @@ -0,0 +1,9 @@ +1 um +2 dois +3 três +4 quatro +5 cinco +6 seis +7 sete +8 oito +9 nove diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv new file mode 100644 index 000000000..620f512b3 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/hundreds.tsv @@ -0,0 +1,8 @@ +2 duzentos +3 trezentos +4 quatrocentos +5 quinhentos +6 seiscentos +7 setecentos +8 oitocentos +9 novecentos diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv new file mode 100644 index 000000000..a94cbd553 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/quantity_words.tsv @@ -0,0 +1,9 @@ +mil +milhão +milhões +bilhão +bilhões +trilhão +trilhões +quatrilhão +quatrilhões diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv new file mode 100644 index 000000000..a3dffe4e0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/scales.tsv @@ -0,0 +1,4 @@ +one_label plural_suffix magnitude_zeros +um milhão milhões 6 +um bilhão bilhões 9 +um trilhão trilhões 12 diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv new file mode 100644 index 000000000..299c3dbf2 --- /dev/null +++ 
b/nemo_text_processing/text_normalization/pt/data/numbers/teens.tsv @@ -0,0 +1,10 @@ +10 dez +11 onze +12 doze +13 treze +14 quatorze +15 quinze +16 dezesseis +17 dezessete +18 dezoito +19 dezenove diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv new file mode 100644 index 000000000..43c4a8bc6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/tens.tsv @@ -0,0 +1,8 @@ +2 vinte +3 trinta +4 quarenta +5 cinquenta +6 sessenta +7 setenta +8 oitenta +9 noventa diff --git a/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv b/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv new file mode 100644 index 000000000..29be0f38b --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/numbers/zero.tsv @@ -0,0 +1 @@ +0 zero diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py b/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv new file mode 100644 index 000000000..5fefbc3b8 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/digit.tsv @@ -0,0 +1,10 @@ +primeiro um +segundo dois +terceiro três +quarto quatro +quinto cinco +sexto seis +sétimo sete +oitavo oito +nono nove +décimo dez diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv new file mode 100644 index 000000000..c75ae5ed0 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/feminine.tsv @@ -0,0 +1,11 @@ +primeiro primeira +segundo segunda +terceiro terceira +quarto quarta +quinto quinta +sexto sexta +sétimo sétima +oitavo oitava +nono nona +décimo décima +ésimo ésima diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv new file mode 100644 index 000000000..6d919a86c --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/hundreds.tsv @@ -0,0 +1,10 @@ +centésimo cem +centésimo cento +ducentésimo duzentos +trecentésimo trezentos +quadringentésimo quatrocentos +quincentésimo quinhentos +sexcentésimo seiscentos +septingentésimo setecentos +octingentésimo oitocentos +noningentésimo novecentos diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv new file mode 100644 index 000000000..bb6933fe6 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/specials.tsv @@ -0,0 +1,2 @@ +connector_in e +connector_out diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv new file mode 100644 index 000000000..1b1e191c9 
--- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/teen.tsv @@ -0,0 +1,9 @@ +décimo primeiro onze +décimo segundo doze +décimo terceiro treze +décimo quarto quatorze +décimo quinto quinze +décimo sexto dezesseis +décimo sétimo dezessete +décimo oitavo dezoito +décimo nono dezenove diff --git a/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv b/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv new file mode 100644 index 000000000..f40700034 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/data/ordinals/ties.tsv @@ -0,0 +1,8 @@ +vigésimo vinte +trigésimo trinta +quadragésimo quarenta +quinquagésimo cinquenta +sexagésimo sessenta +septuagésimo setenta +octogésimo oitenta +nonagésimo noventa diff --git a/nemo_text_processing/text_normalization/pt/graph_utils.py b/nemo_text_processing/text_normalization/pt/graph_utils.py new file mode 100644 index 000000000..d2e6c9ec7 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/graph_utils.py @@ -0,0 +1,171 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Portuguese (PT) text normalization graph utilities. + +Self-contained module with no dependency on en.graph_utils. 
Provides character/digit +symbols (NEMO_*), space helpers (delete_space, insert_space, delete_extra_space), +GraphFst base class, generator_main for FAR export, and PT-specific helpers +(filter_cardinal_punctuation, shift_cardinal_gender_pt). +""" + +import os +import string +from pathlib import Path +from typing import Dict + +import pynini +from pynini import Far +from pynini.export import export +from pynini.lib import byte, pynutil, utf8 + +from nemo_text_processing.utils.logging import logger + +# ---- Character/digit symbols (same semantics as EN) ---- +NEMO_CHAR = utf8.VALID_UTF8_CHAR +NEMO_DIGIT = byte.DIGIT +NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize() +NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize() +NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize() +NEMO_SPACE = " " +NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", "\u00a0").optimize() +NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, pynini.accep('"')).optimize() +NEMO_SIGMA = pynini.closure(NEMO_CHAR) + +delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE)) +insert_space = pynutil.insert(" ") +delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ").optimize() + + +def generator_main(file_name: str, graphs: Dict[str, "pynini.FstLike"]) -> None: + """ + Export one or more graphs to an OpenFst Finite State Archive (FAR) file. + + Args: + file_name: path to the output .far file. + graphs: mapping of rule names to FST graphs to export. + """ + exporter = export.Exporter(file_name) + for rule, graph in graphs.items(): + exporter[rule] = graph.optimize() + exporter.close() + logger.info(f"Created {file_name}") + + +class GraphFst: + """ + Base class for all Portuguese text normalization grammar FSTs. + + Args: + name: name of the grammar (e.g. "cardinal", "decimal"). + kind: either "classify" or "verbalize". 
+ deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization). + """ + + def __init__(self, name: str, kind: str, deterministic: bool = True): + self.name = name + self.kind = kind + self._fst = None + self.deterministic = deterministic + + self.far_path = Path(os.path.dirname(os.path.abspath(__file__)) + "/grammars/" + kind + "/" + name + ".far") + if self.far_exist(): + self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst() + + def far_exist(self) -> bool: + return self.far_path.exists() + + @property + def fst(self) -> "pynini.FstLike": + return self._fst + + @fst.setter + def fst(self, fst): + self._fst = fst + + def add_tokens(self, fst) -> "pynini.FstLike": + return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }") + + def delete_tokens(self, fst) -> "pynini.FstLike": + res = ( + pynutil.delete(f"{self.name}") + + delete_space + + pynutil.delete("{") + + delete_space + + fst + + delete_space + + pynutil.delete("}") + ) + return res @ pynini.cdrewrite(pynini.cross("\u00a0", " "), "", "", NEMO_SIGMA) + + +# ---- PT-specific (Brazilian: 1.000.000 or 1 000 000) ---- +cardinal_separator = pynini.string_map([".", " "]) + + +def filter_cardinal_punctuation(fst: "pynini.FstLike") -> "pynini.FstLike": + """ + Parse digit groups separated by cardinal_separator (e.g. 1.000.000) then apply fst. + + Args: + fst: FST that maps digit string to verbalized cardinal. + + Returns: + Composed FST that accepts digit strings with optional thousand separators. 
+ """ + exactly_three = NEMO_DIGIT**3 + up_to_three = pynini.closure(NEMO_DIGIT, 1, 3) + cardinal_string = pynini.closure(NEMO_DIGIT, 1) + cardinal_string |= ( + up_to_three + + pynutil.delete(cardinal_separator) + + pynini.closure(exactly_three + pynutil.delete(cardinal_separator)) + + exactly_three + ) + return cardinal_string @ fst + + +def shift_cardinal_gender_pt(fst: "pynini.FstLike") -> "pynini.FstLike": + """ + Apply Portuguese masculine-to-feminine conversion for cardinal strings, e.g. + "um" -> "uma", "dois" -> "duas", "duzentos" -> "duzentas". + + Args: + fst: FST producing masculine cardinal verbalization. + + Returns: + FST that produces feminine form when composed with the same input. + """ + fem_ones = pynini.cdrewrite( + pynini.cross("um", "uma"), + "", + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + fem_twos = pynini.cdrewrite( + pynini.cross("dois", "duas"), + "", + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + fem_hundreds = pynini.cdrewrite( + pynini.cross("entos", "entas"), + pynini.union("duz", "trez", "quatroc", "quinh", "seisc", "setec", "oitoc", "novec"), + pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep('"')), + NEMO_SIGMA, + ) + return fst @ fem_ones @ fem_twos @ fem_hundreds diff --git a/nemo_text_processing/text_normalization/pt/taggers/__init__.py b/nemo_text_processing/text_normalization/pt/taggers/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/taggers/cardinal.py b/nemo_text_processing/text_normalization/pt/taggers/cardinal.py new file mode 100644 index 000000000..a56057852 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/taggers/cardinal.py @@ -0,0 +1,297 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from functools import reduce + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.pt.graph_utils import ( + NEMO_ALPHA, + NEMO_DIGIT, + NEMO_SIGMA, + NEMO_SPACE, + NEMO_WHITE_SPACE, + GraphFst, + delete_space, + filter_cardinal_punctuation, + insert_space, +) +from nemo_text_processing.text_normalization.pt.utils import get_abs_path, load_labels + + +class CardinalFst(GraphFst): + """ + Finite state transducer for classifying Portuguese cardinals, e.g. 
+ "1000" -> cardinal { integer: "mil" } + "2.000.000" -> cardinal { integer: "dois milhões" } + "-5" -> cardinal { negative: "true" integer: "cinco" } + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + """ + + def __init__(self, deterministic: bool = True): + super().__init__(name="cardinal", kind="classify", deterministic=deterministic) + + specials = { + row[0]: row[1] for row in load_labels(get_abs_path("data/numbers/cardinal_specials.tsv")) if len(row) >= 2 + } + connector_e = insert_space + pynutil.insert(specials["connector"]) + insert_space + thousand = specials["thousand"] + hundred_100 = specials["hundred_100"] + hundred_1 = specials["hundred_1"] + + scale_rows = load_labels(get_abs_path("data/numbers/scales.tsv")) + scales = [(row[0], row[1], int(row[2])) for row in scale_rows if len(row) >= 3 and row[2].strip().isdigit()] + + _num = lambda p: pynini.string_file(get_abs_path(f"data/numbers/{p}")) + zero, digit, teens, tens, hundreds = ( + _num("zero.tsv"), + _num("digit.tsv"), + _num("teens.tsv"), + _num("tens.tsv"), + _num("hundreds.tsv"), + ) + digits_no_one = (NEMO_DIGIT - "1") @ digit + + graph_tens = teens | (tens + (pynutil.delete("0") | (connector_e + digit))) + self.tens = graph_tens.optimize() + self.two_digit_non_zero = pynini.union(digit, graph_tens, (pynini.cross("0", NEMO_SPACE) + digit)).optimize() + + graph_hundreds = hundreds + pynini.union( + pynutil.delete("00"), + (connector_e + graph_tens), + (connector_e + digit), + ) + graph_hundreds |= pynini.cross("100", hundred_100) + graph_hundreds |= pynini.cross("1", hundred_1) + pynini.union( + pynutil.delete("00"), + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + ) + self.hundreds = graph_hundreds.optimize() + + h_comp_base = pynini.union(graph_hundreds, pynutil.delete("0") + graph_tens) + h_comp = h_comp_base | (pynutil.delete("00") + digit) + h_comp_no_one = 
h_comp_base | (pynutil.delete("00") + digits_no_one) + + pure_tens_input = pynini.union(*[pynini.accep(str(d * 10)) for d in range(1, 10)]) + graph_pure_tens_only = pure_tens_input @ graph_tens + graph_compound_tens = (pynini.closure(NEMO_DIGIT, 2, 2) - pure_tens_input) @ graph_tens + + graph_pure_components = pynini.union( + pynutil.delete("0") + graph_pure_tens_only, + pynutil.delete("00") + digit, + hundreds + pynutil.delete("00"), + pynini.cross("100", hundred_100), + ) + graph_compound_hundreds = pynini.union( + pynini.cross("1", hundred_1) + + pynini.union( + (connector_e + graph_tens), + (connector_e + pynutil.delete("0") + digit), + ), + hundreds + + pynini.union( + (connector_e + graph_tens), + (connector_e + digit), + ), + ) + + suffix_after_mil = pynini.union( + pynutil.delete("000"), + (connector_e + graph_pure_components), + (insert_space + graph_compound_hundreds), + (insert_space + pynutil.delete("0") + graph_compound_tens), + ) + + t_comp = pynini.union( + pynutil.delete("000") + h_comp, + h_comp_no_one + insert_space + pynutil.insert(thousand) + suffix_after_mil, + pynini.cross("001", thousand) + suffix_after_mil, + ) + t_comp_no_one = pynini.union( + pynutil.delete("000") + h_comp_no_one, + h_comp_no_one + + insert_space + + pynutil.insert(thousand) + + ((insert_space + h_comp) | pynutil.delete("000")), + pynini.cross("001", thousand) + ((insert_space + h_comp) | pynutil.delete("000")), + ) + + graph_large_scales = pynini.accep("") + for one_label, plural_suffix, _ in reversed(scales): + g = pynutil.add_weight(pynini.cross("000001", one_label), -0.001) + g |= t_comp_no_one + pynutil.insert(plural_suffix) + g |= pynutil.delete("000000") + g += insert_space + graph_large_scales += g + + # 9/12-digit: scale block + trailing (million+thousands, billion+9digits) + scale_3_mil = self._scale_block_3(scales[0][0], scales[0][1], h_comp_no_one) + scale_3_bi = self._scale_block_3(scales[1][0], scales[1][1], h_comp_no_one) + graph_9 = 
self._build_scale_trailing_graph(scale_3_mil, t_comp, 6, 9) + graph_12 = self._build_scale_trailing_graph(scale_3_bi, graph_9, 9, 12) + pure_9, pure_12 = self._pure_inputs(9), self._pure_inputs(12) + trail_9 = (pure_9 @ graph_9, (NEMO_DIGIT**9 - pure_9) @ graph_9) + trail_12 = (pure_12 @ graph_12, (NEMO_DIGIT**12 - pure_12) @ graph_12) + + # Units 6 (u6): pure get "e" after scale; compound no "e" + u6_one = pynini.cross("000001", "1") @ digit + u6_pure = pynini.union( + u6_one, + pynini.cross("001000", thousand), + pynini.cross("000010", "10") @ graph_tens, + pynini.cross("000100", hundred_100), + (pynini.cross("010000", "10") @ graph_tens) + insert_space + pynutil.insert(thousand), + pynini.cross("100000", hundred_100) + insert_space + pynutil.insert(thousand), + ) + u6_compound = (NEMO_DIGIT**6 - self._pure_inputs(6)) @ t_comp + u6 = u6_pure | u6_compound + z18 = pynini.accep("0" * 18) # 18 zeros: branch no "e" + smaller_e = (connector_e + u6_pure) | u6_compound | pynutil.delete("0" * 6) + smaller = u6 | pynutil.delete("0" * 6) + graph_24 = (((NEMO_DIGIT**18 - z18) + NEMO_DIGIT**6) @ (graph_large_scales + smaller_e)) | ( + (z18 + NEMO_DIGIT**6) @ (pynutil.delete(z18) + smaller) + ) + + trail_by_z = {9: trail_9, 12: trail_12} + magnitude_patterns = [ + self._build_magnitude_pattern( + one_label, + plural_suffix, + magnitude_zeros, + trail_by_z.get(magnitude_zeros), + connector_e, + insert_space, + digit, + graph_tens, + graph_hundreds, + ) + for one_label, plural_suffix, magnitude_zeros in scales + if magnitude_zeros > 0 + ] + + pad = (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0) + pad = pad @ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", NEMO_SIGMA) @ NEMO_DIGIT**24 + norm = pynini.cdrewrite(delete_space, "[BOS]", "", NEMO_SIGMA) @ pynini.cdrewrite( + delete_space, "", "[EOS]", NEMO_SIGMA + ) + norm = norm @ pynini.cdrewrite( + pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 2), NEMO_SPACE), NEMO_ALPHA, NEMO_ALPHA, NEMO_SIGMA + ) + 
self.graph = reduce(lambda a, b: a | b, magnitude_patterns, pad @ graph_24 @ norm) | zero + self.graph = filter_cardinal_punctuation(self.graph).optimize() + + optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1) + final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"") + final_graph = self.add_tokens(final_graph) + self.fst = final_graph.optimize() + + def _scale_block_3(self, one_label, plural_suffix, component_no_one): + """001->one_label, 000->'', else component+plural.""" + return pynini.union( + pynini.cross("001", one_label), + pynini.cross("000", ""), + (NEMO_DIGIT**3 - pynini.accep("001") - pynini.accep("000")) + @ (component_no_one + insert_space + pynutil.insert(plural_suffix)), + ) + + def _build_scale_trailing_graph(self, scale_3, sub_graph, trailing_len, total_len): + """total_len digits = scale_3 + trailing; no trailing space when trailing all zeros.""" + zt, ztotal = "0" * trailing_len, "0" * total_len + scale_nonzero = NEMO_DIGIT**3 - pynini.accep("000") + branches = [ + (pynini.accep("000") + NEMO_DIGIT**trailing_len) @ (pynutil.delete("000") + sub_graph), + (scale_nonzero + (NEMO_DIGIT**trailing_len - pynini.accep(zt))) @ (scale_3 + insert_space + sub_graph), + (scale_nonzero + pynini.accep(zt)) @ (scale_3 + pynutil.delete(zt)), + (pynini.accep("000") + pynini.accep(zt)) @ pynutil.delete(ztotal), + ] + return pynini.union(*branches) + + @staticmethod + def _pure_inputs(num_digits): + """Inputs 1, 10, 100, ... 
as num_digits-digit strings.""" + return pynini.union(*[pynini.accep(str(10**k).zfill(num_digits)) for k in range(0, num_digits)]) + + def _magnitude_graph( + self, + one_word, + plural_suffix, + zero_count, + graph_digit, + graph_tens, + graph_hundreds, + connector_e, + insert_space, + trailing_pair=None, + ): + """Round (1–3 digit + scale + zeros); optional trailing (e + pure | space + compound).""" + zeros = "0" * zero_count + round_pats = [] + trail_pats = [] if trailing_pair else None + for L in (1, 2, 3): + total = zero_count + L + if L == 1: + lead = pynini.cross("1", one_word) | ((NEMO_DIGIT - "1") @ graph_digit + pynutil.insert(plural_suffix)) + else: + lead = pynini.closure(NEMO_DIGIT, L, L) @ (graph_tens if L == 2 else graph_hundreds) + pynutil.insert( + plural_suffix + ) + lead_fst = NEMO_DIGIT**L @ lead + round_pats.append(pynini.closure(NEMO_DIGIT, total, total) @ (lead_fst + pynutil.delete(zeros))) + if trailing_pair: + pure, compound = trailing_pair + trail_part = NEMO_DIGIT**zero_count @ (connector_e + pure) | NEMO_DIGIT**zero_count @ ( + insert_space + compound + ) + trail_pats.append(pynini.closure(NEMO_DIGIT, total, total) @ (lead_fst + trail_part)) + graph_round = pynini.union(*round_pats) + graph_trail = pynini.union(*trail_pats) if trail_pats else None + return graph_round, graph_trail + + def _build_magnitude_pattern( + self, + one_label, + plural_suffix, + magnitude_zeros, + trailing_pair, + connector_e, + insert_space, + graph_digit, + graph_tens, + graph_hundreds, + ): + """Restrict length; round + optional non-zero trailing.""" + restrict = (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, magnitude_zeros, magnitude_zeros + 2) + graph_round, graph_trail = self._magnitude_graph( + one_label, + plural_suffix, + magnitude_zeros, + graph_digit, + graph_tens, + graph_hundreds, + connector_e, + insert_space, + trailing_pair, + ) + if graph_trail is None: + return pynutil.add_weight(restrict @ graph_round, -1.0) + non_zero_trail = pynini.union( + 
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying Portuguese decimal numbers, e.g.
    "1,26" -> decimal { integer_part: "um" fractional_part: "vinte e seis" }
    "0,01" -> decimal { integer_part: "zero" fractional_part: "um" } (leading zeros stripped)
    "1,001" -> decimal { integer_part: "um" fractional_part: "mil e um" } (data: decimal_fractional_specials)
    "-1,26" -> decimal { negative: "true" ... }
    "1,33 milhões" / "1 milhão" -> decimal { ... quantity: "milhões" / "milhão" }

    Args:
        cardinal: CardinalFst instance for integer verbalization in tags.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph
        # Small helper: load a numbers/*.tsv mapping as an optimized FST.
        _num = lambda name: pynini.string_file(get_abs_path(f"data/numbers/{name}")).optimize()

        comma = pynutil.delete(",")
        quantity_words = _num("quantity_words.tsv")
        digit = _num("digit.tsv")
        zero = _num("zero.tsv")
        fractional_specials = _num("decimal_fractional_specials.tsv")
        graph_digit_or_zero = pynini.union(digit, zero)
        # Digit-by-digit reading for very long fractional parts (10+ digits).
        digit_by_digit = (graph_digit_or_zero + pynini.closure(insert_space + graph_digit_or_zero)).optimize()

        # Fractional: strip leading zeros → rest @ cardinal; all zeros → "zero"
        delete_leading_zero = pynini.cross("0", "")
        rest = pynini.difference(NEMO_DIGIT, pynini.accep("0")) + pynini.closure(NEMO_DIGIT, 0)
        with_rest = (pynini.closure(delete_leading_zero, 0) + rest) @ (pynini.closure(NEMO_DIGIT, 1) @ cardinal_graph)
        # FIX: minimum repetition must be 0, not 1. With closure(..., 1) a
        # fractional part of exactly "0" (e.g. "1,0") needed at least two
        # zeros to match and therefore had no decimal parse at all. With 0,
        # "0" -> "zero" and "00…0" deletes the leading zeros then maps the
        # final one.
        only_zeros = pynini.closure(delete_leading_zero, 0) + pynini.cross("0", "zero")
        fractional_strip = pynini.union(with_rest, only_zeros).optimize()
        # Prefer specials (001→mil e um, 010→mil e dez, 100→mil e cem) over strip when both match
        fractional_with_specials = pynini.union(
            pynutil.add_weight(fractional_specials, -0.01),
            fractional_strip,
        ).optimize()

        fractional_short = pynini.closure(NEMO_DIGIT, 1, 9)
        fractional_long = pynini.closure(NEMO_DIGIT, 10, 15)
        non_zero_lead = pynini.difference(NEMO_DIGIT, pynini.accep("0"))

        # Integer "0" → fractional strip only (no specials)
        graph_integer_zero = (
            pynutil.insert('integer_part: "') + pynini.cross("0", "zero") + pynutil.insert('"') + insert_space
        )
        graph_fractional_zero = (
            pynutil.insert('fractional_part: "')
            + pynini.union(
                fractional_short @ fractional_strip,
                fractional_long @ digit_by_digit,
            )
            + pynutil.insert('"')
        )
        decimal_when_zero = graph_integer_zero + comma + insert_space + graph_fractional_zero

        # Integer non-zero → fractional: specials | strip + cardinal | digit-by-digit
        graph_integer_pos = (
            pynutil.insert('integer_part: "')
            + (non_zero_lead + pynini.closure(NEMO_DIGIT, 0, 11)) @ cardinal_graph
            + pynutil.insert('"')
            + insert_space
        )
        graph_fractional_pos = (
            pynutil.insert('fractional_part: "')
            + pynini.union(
                fractional_short @ fractional_with_specials,
                fractional_long @ digit_by_digit,
            )
            + pynutil.insert('"')
        )
        decimal_when_pos = graph_integer_pos + comma + insert_space + graph_fractional_pos

        decimal_core = pynini.union(decimal_when_zero, decimal_when_pos)
        # Bare integer followed by a quantity word, e.g. "1 milhão".
        integer_quantity = (
            pynutil.insert('integer_part: "')
            + (pynini.closure(NEMO_DIGIT, 1, 12) @ cardinal_graph)
            + pynutil.insert('"')
            + insert_space
            + delete_space
            + pynutil.insert('quantity: "')
            + quantity_words
            + pynutil.insert('"')
        )
        # FIX: insert_space was missing before the quantity field, unlike the
        # parallel integer_quantity branch above, so "1,33 milhões" serialized
        # as …fractional_part: "…"quantity: "milhões" with the two fields fused.
        decimal_quantity = (
            decimal_core
            + delete_space
            + insert_space
            + pynutil.insert('quantity: "')
            + quantity_words
            + pynutil.insert('"')
        )
        final_graph_wo_sign = pynini.union(decimal_core, integer_quantity, decimal_quantity)
        optional_minus = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)
        final_graph = optional_minus + final_graph_wo_sign

        self.fst = self.add_tokens(final_graph).optimize()
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying Portuguese fraction numbers, e.g.
    "1/2" -> fraction { numerator: "um" denominator: "meio" morphosyntactic_features: "ordinal" }
    "2 3/4" -> fraction { integer_part: "dois" numerator: "três" denominator: "quarto" ... }
    "2/11" -> fraction { numerator: "dois" denominator: "onze" morphosyntactic_features: "avos" }

    Denominators 2–10, 100 and 1000 are verbalized as ordinals ("quarto",
    "centésimo", "milésimo"); all other denominators use the "avos" reading
    ("onze avos"). The choice is encoded in morphosyntactic_features for the
    verbalizer.

    Args:
        cardinal: CardinalFst instance for number parts.
        ordinal: OrdinalFst instance for denominator 2-10 and exceptions.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        # NOTE(review): the `ordinal` parameter is accepted for interface
        # symmetry but never referenced in this body — the ordinal data files
        # are loaded directly below. Confirm whether it should be used.
        cardinal_graph = cardinal.graph

        # Denominators 2–10 use ordinal form (no data file: fixed set)
        two_to_ten = pynini.union(*[pynini.accep(str(d)) for d in range(2, 11)]).optimize()

        # Data files store "ordinal<TAB>cardinal"; invert each row to get a
        # cardinal-word → ordinal-word FST.
        ord_digit_rows = load_labels(get_abs_path("data/ordinals/digit.tsv"))
        ordinal_digit = pynini.string_map([(r[1], r[0]) for r in ord_digit_rows if len(r) >= 2]).optimize()

        # fractions/ordinal_exceptions.tsv is already "ordinal<TAB>replacement"
        # (e.g. segundo→meio, terceiro→terço), so no inversion here.
        ord_exc_rows = load_labels(get_abs_path("data/fractions/ordinal_exceptions.tsv"))
        ordinal_exceptions = pynini.string_map([(r[0], r[1]) for r in ord_exc_rows if len(r) >= 2]).optimize()

        ord_hundreds_rows = load_labels(get_abs_path("data/ordinals/hundreds.tsv"))
        ordinal_hundreds = pynini.string_map([(r[1], r[0]) for r in ord_hundreds_rows if len(r) >= 2]).optimize()

        # fractions/powers_of_ten.tsv: "mil<TAB>milésimo" (cardinal → ordinal).
        powers_rows = load_labels(get_abs_path("data/fractions/powers_of_ten.tsv"))
        powers_of_ten = pynini.string_map([(r[0], r[1]) for r in powers_rows if len(r) >= 2]).optimize()

        # digits 2–10 → cardinal word → ordinal word, then fraction-specific
        # rewrites (segundo→meio etc.) applied anywhere in the string.
        denom_ordinal_form = two_to_ten @ cardinal_graph @ ordinal_digit
        denom_ordinal_form = denom_ordinal_form @ pynini.cdrewrite(ordinal_exceptions, "", "", NEMO_SIGMA)
        denom_ordinal = (
            pynutil.insert('denominator: "')
            + denom_ordinal_form
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )

        denom_100 = (
            pynutil.insert('denominator: "')
            + (pynini.accep("100") @ cardinal_graph @ ordinal_hundreds)
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )
        denom_1000 = (
            pynutil.insert('denominator: "')
            + (pynini.accep("1000") @ cardinal_graph @ powers_of_ten)
            + pynutil.insert('" morphosyntactic_features: "ordinal"')
        )

        denom_ordinal_2_10_100_1000 = pynini.union(denom_ordinal, denom_100, denom_1000)
        # Everything that is NOT 2–10/100/1000 takes the cardinal "avos" form.
        digit_plus = pynini.closure(NEMO_DIGIT, 1)
        denom_avos_input = pynini.difference(
            digit_plus,
            pynini.union(
                two_to_ten,
                pynini.accep("100"),
                pynini.accep("1000"),
            ),
        )
        denom_avos = (
            pynutil.insert('denominator: "')
            + (denom_avos_input @ cardinal_graph)
            + pynutil.insert('" morphosyntactic_features: "avos"')
        )

        denominator = pynini.union(denom_ordinal_2_10_100_1000, denom_avos)

        # Slash variants: ASCII /, Unicode ⁄ (U+2044), ∕ (U+2215); with or without spaces
        slash_or_space_slash = pynini.union(
            pynini.cross("/", '" '),
            pynini.cross(" / ", '" '),
            pynini.cross("\u2044", '" '),  # fraction slash ⁄
            pynini.cross(" \u2044 ", '" '),
            pynini.cross("\u2215", '" '),  # division slash ∕
            pynini.cross(" \u2215 ", '" '),
        )
        # The slash itself closes the numerator field (cross to '" ').
        numerator = pynutil.insert('numerator: "') + cardinal_graph + slash_or_space_slash
        fraction_core = numerator + denominator

        integer_part = pynutil.insert('integer_part: "') + cardinal_graph + pynutil.insert('"') + insert_space

        optional_minus = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)

        # Mixed number "2 3/4": the input whitespace is accepted (kept) here;
        # the verbalizer's delete_space normalizes it downstream.
        mixed = integer_part + pynini.closure(NEMO_WHITE_SPACE, 1) + fraction_core
        graph = optional_minus + pynini.union(mixed, fraction_core)

        self.fst = self.add_tokens(graph).optimize()
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying Portuguese ordinals, e.g.
    "1º" / "1ª" -> ordinal { integer: "primeiro" / "primeira" morphosyntactic_features: "gender_masc" / "gender_fem" }
    "21º" -> ordinal { integer: "vigésimo primeiro" morphosyntactic_features: "gender_masc" }

    The ordinal suffix (º/°/ª, optionally preceded by a period as in "1.º")
    selects the gender tag; the feminine surface form itself is produced by
    the verbalizer's rewrite.

    Args:
        cardinal: CardinalFst instance for composing compound ordinals.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph

        # Connector between ordinal components, configurable via specials.tsv
        # (input form, e.g. " e ", rewritten to the output form, e.g. " ").
        spec_rows = load_labels(get_abs_path("data/ordinals/specials.tsv"))
        spec = {r[0]: r[1] for r in spec_rows if len(r) >= 2}
        conn_in = spec.get("connector_in", " e ")
        conn_out = spec.get("connector_out", " ")
        conn = pynini.cross(conn_in, conn_out)

        # Data: ordinal \t cardinal → FST cardinal→ordinal via load_labels
        digit_rows = load_labels(get_abs_path("data/ordinals/digit.tsv"))
        graph_digit = pynini.string_map([(r[1], r[0]) for r in digit_rows if len(r) >= 2]).optimize()
        teen_rows = load_labels(get_abs_path("data/ordinals/teen.tsv"))
        graph_teens = pynini.string_map([(r[1], r[0]) for r in teen_rows if len(r) >= 2]).optimize()
        ties_rows = load_labels(get_abs_path("data/ordinals/ties.tsv"))
        graph_ties = pynini.string_map([(r[1], r[0]) for r in ties_rows if len(r) >= 2]).optimize()
        hundreds_rows = load_labels(get_abs_path("data/ordinals/hundreds.tsv"))
        graph_hundreds = pynini.string_map([(r[1], r[0]) for r in hundreds_rows if len(r) >= 2]).optimize()

        # Compose word-level components: tens = teens | ties [+ conn + digit];
        # hundreds may be followed by tens or a digit.
        graph_tens = pynini.union(
            graph_teens,
            graph_ties + pynini.closure(conn + graph_digit, 0, 1),
        )
        graph_hundred_component = pynini.union(
            graph_hundreds + pynini.closure(conn + pynini.union(graph_tens, graph_digit), 0, 1),
            graph_tens,
            graph_digit,
        )
        ordinal_rewrite = graph_hundred_component.optimize()
        # Digits → cardinal words → ordinal words.
        ordinal_inner = cardinal_graph @ ordinal_rewrite

        # Suffix handling: optional "." then the gender marker; "°" (degree
        # sign) is accepted as a common typo for the masculine "º".
        opt_dot = pynini.closure(pynutil.delete("."), 0, 1)
        suffix_masc = opt_dot + pynutil.delete(pynini.union("º", "°"))
        suffix_fem = opt_dot + pynutil.delete("ª")
        # 1–3 digits: ordinals up to the hundreds are covered by the data.
        digit_block = pynini.closure(NEMO_DIGIT, 1, 3)

        to_ordinal_masc = (digit_block + suffix_masc) @ ordinal_inner
        to_ordinal_fem = (digit_block + suffix_fem) @ ordinal_inner

        graph_masc = (
            pynutil.insert('integer: "')
            + to_ordinal_masc
            + pynutil.insert('" morphosyntactic_features: "gender_masc"')
        )
        graph_fem = (
            pynutil.insert('integer: "') + to_ordinal_fem + pynutil.insert('" morphosyntactic_features: "gender_fem"')
        )
        self.fst = self.add_tokens(pynini.union(graph_masc, graph_fem)).optimize()
class ClassifyFst(GraphFst):
    """
    Final class that composes all Portuguese classification grammars. This class can process an entire sentence (lower cased).
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files.
        whitelist: path to a file with whitelist replacements.
    """

    def __init__(
        self,
        input_case: str,
        deterministic: bool = False,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
        # Build the FAR cache path; the whitelist file name participates in
        # the cache key so different whitelists don't collide.
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            whitelist_file = os.path.basename(whitelist) if whitelist else ""
            far_file = os.path.join(
                cache_dir,
                f"_{input_case}_pt_tn_{deterministic}_deterministic{whitelist_file}.far",
            )
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logger.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logger.info(f"Creating ClassifyFst grammars. This might take some time...")

            # Initialize Portuguese taggers
            cardinal = CardinalFst(deterministic=deterministic)
            ordinal = OrdinalFst(cardinal, deterministic=deterministic)
            fraction = FractionFst(cardinal, ordinal, deterministic=deterministic)
            decimal = DecimalFst(cardinal, deterministic=deterministic)

            # English punctuation/word/whitelist taggers are reused for PT.
            punctuation = PunctuationFst(deterministic=deterministic)
            word_graph = WordFst(punctuation=punctuation, deterministic=deterministic).fst
            # NOTE: this rebinds the `whitelist` path parameter to the FST
            # object; harmless here because the string was consumed above.
            whitelist = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)

            # Lower weight = preferred; word_graph (weight 100) is the
            # catch-all fallback for anything no grammar claims.
            classify = (
                pynutil.add_weight(whitelist.fst, 1.01)
                | pynutil.add_weight(fraction.fst, 1.1)
                | pynutil.add_weight(decimal.fst, 1.1)
                | pynutil.add_weight(ordinal.fst, 1.1)
                | pynutil.add_weight(cardinal.fst, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )

            # Wrap tokens properly
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            punct_graph = (
                pynutil.insert("tokens { ") + pynutil.add_weight(punctuation.fst, weight=2.1) + pynutil.insert(" }")
            )

            # Simple graph structure: a token, then any number of
            # (whitespace-normalized separator + token) repetitions.
            graph = token + pynini.closure(
                pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space) + token
            )

            # Allow punctuation-only input as an alternative parse.
            graph |= punct_graph

            # Strip leading/trailing whitespace around the whole sentence.
            self.fst = delete_space + graph + delete_space

            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logger.info(f"ClassifyFst grammars are saved to {far_file}.")


if __name__ == "__main__":
    # Smoke-compile the grammar when run directly.
    ClassifyFst(input_case="cased", deterministic=False)
+ """ + with open(abs_path, encoding="utf-8") as label_tsv: + labels = list(csv.reader(label_tsv, delimiter="\t")) + return labels diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/__init__.py b/nemo_text_processing/text_normalization/pt/verbalizers/__init__.py new file mode 100644 index 000000000..9e3fb699d --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nemo_text_processing/text_normalization/pt/verbalizers/cardinal.py b/nemo_text_processing/text_normalization/pt/verbalizers/cardinal.py new file mode 100644 index 000000000..f735d95e3 --- /dev/null +++ b/nemo_text_processing/text_normalization/pt/verbalizers/cardinal.py @@ -0,0 +1,63 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese cardinal numbers, e.g.
    cardinal { integer: "dois" } -> dois
    cardinal { integer: "dois" } -> duas (feminine context via shift_cardinal_gender_pt)
    cardinal { negative: "true" integer: "cinco" } -> menos cinco

    Exposes `graph_masc` / `graph_fem` for reuse by higher-level verbalizers;
    the standalone `fst` defaults to the masculine form.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        optional_sign = pynini.closure(pynini.cross('negative: "true" ', "menos "), 0, 1)
        self.optional_sign = optional_sign

        # FIX(idiom): the original rebound the name `integer` twice (raw char
        # closure → tagged field), which obscured which value `self.integer`
        # holds. Use distinct names; `self.integer` keeps its public meaning
        # (quoted value with quotes stripped).
        quoted_value = pynini.closure(NEMO_NOT_QUOTE, 1)
        self.integer = pynutil.delete(' "') + quoted_value + pynutil.delete('"')

        integer_field = pynutil.delete("integer:") + self.integer

        # Generate masculine form (default)
        graph_masc = optional_sign + integer_field

        # Generate feminine form using Portuguese gender conversion
        graph_fem = shift_cardinal_gender_pt(graph_masc)

        self.graph_masc = pynini.optimize(graph_masc)
        self.graph_fem = pynini.optimize(graph_fem)

        # Default to masculine for standalone numbers.
        # Context-aware gender selection will be handled by higher-level components.
        # FIX(idiom): removed a dead `if not deterministic: pass` branch that
        # did nothing (placeholder note preserved here: Portuguese has no
        # Spanish-style apocope; contractions would be added in this spot).
        graph = graph_masc

        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese decimal numbers, e.g.
    decimal { integer_part: "um" fractional_part: "vinte e seis" } -> um vírgula vinte e seis
    decimal { negative: "true" integer_part: "um" ... } -> menos um vírgula ...
    decimal { integer_part: "um" quantity: "milhão" } -> um milhão

    The separator word ("vírgula") and the minus word ("menos") are read from
    data/numbers/decimal_specials.tsv, with hard-coded fallbacks.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
        labels = load_labels(get_abs_path("data/numbers/decimal_specials.tsv"))
        spec = {r[0]: r[1] for r in labels if len(r) >= 2}
        sep = spec.get("separator", "vírgula")
        minus = spec.get("minus", "menos")

        optional_sign = pynini.closure(pynini.cross('negative: "true" ', minus + " "), 0, 1)

        # Field extractors: delete the tag scaffolding, keep the quoted value.
        integer = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        fractional = pynutil.delete('fractional_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        # delete_space + insert_space normalizes whatever whitespace the
        # tagger left between fields to exactly one space.
        quantity = (
            delete_space
            + insert_space
            + pynutil.delete('quantity: "')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )

        # "1 milhão" — integer followed directly by a quantity word.
        integer_quantity = integer + quantity
        # "1,26 [milhões]" — integer, separator word, fractional, optional quantity.
        decimal_part = (
            integer
            + delete_space
            + insert_space
            + pynutil.insert(sep + " ")
            + fractional
            + pynini.closure(quantity, 0, 1)
        )

        graph = optional_sign + pynini.union(integer_quantity, decimal_part)

        self.fst = self.delete_tokens(graph).optimize()
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese fraction numbers, e.g.
    fraction { numerator: "um" denominator: "meio" morphosyntactic_features: "ordinal" } -> um meio
    fraction { integer_part: "dois" numerator: "três" denominator: "quarto" } -> dois e três quartos
    fraction { numerator: "dois" denominator: "onze" morphosyntactic_features: "avos" } -> dois onze avos

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)
        labels = load_labels(get_abs_path("data/fractions/specials.tsv"))
        spec = {r[0]: r[1] for r in labels if len(r) >= 2}
        # FIX: TSV values cannot carry surrounding spaces, and the shipped
        # specials.tsv stores bare words ("e", "menos", "avos"). The original
        # code relied on defaults with embedded spaces (" e ", "menos ",
        # " avos") and inserted the TSV values verbatim, which produced fused
        # output such as "doise três quartos" and "dois onzeavos" whenever the
        # data file was present. Spacing is now added in code, and the values
        # are stripped so padded data files also behave.
        connector_word = spec.get("connector", "e").strip()
        connector = pynutil.insert(" " + connector_word + " ")
        minus = spec.get("minus", "menos").strip()
        plural_suffix = spec.get("plural_suffix", "s")
        avos_suffix = spec.get("avos_suffix", "avos").strip()
        numerator_one_val = spec.get("numerator_one", "um")
        denominator_half_val = spec.get("denominator_half", "meio")

        # insert_space supplies the space after the sign word.
        optional_sign = pynini.closure(pynini.cross('negative: "true" ', minus) + insert_space, 0, 1)

        integer = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete('" ')

        numerator_one = pynutil.delete('numerator: "') + pynini.accep(numerator_one_val) + pynutil.delete('" ')
        numerator_rest = (
            pynutil.delete('numerator: "')
            + pynini.difference(pynini.closure(NEMO_NOT_QUOTE), pynini.accep(numerator_one_val))
            + pynutil.delete('" ')
        )

        denom_ordinal = (
            pynutil.delete('denominator: "')
            + pynini.closure(NEMO_NOT_QUOTE)
            + pynutil.delete('" morphosyntactic_features: "ordinal"')
        )
        denom_meio = (
            pynutil.delete('denominator: "')
            + pynini.accep(denominator_half_val)
            + pynutil.delete('" morphosyntactic_features: "ordinal"')
        )
        denom_avos = (
            pynutil.delete('denominator: "')
            + pynini.closure(NEMO_NOT_QUOTE)
            + pynutil.delete('" morphosyntactic_features: "avos"')
        )

        # "um quarto" vs "três quartos": plural suffix only with numerator != 1.
        fraction_ordinal_singular = numerator_one + insert_space + denom_ordinal
        fraction_ordinal_plural = numerator_rest + insert_space + denom_ordinal + pynutil.insert(plural_suffix)
        fraction_ordinal = pynini.union(fraction_ordinal_singular, fraction_ordinal_plural)

        # "dois onze avos" — avos suffix is a separate word, hence the space.
        fraction_avos = (
            pynini.union(numerator_one, numerator_rest)
            + insert_space
            + denom_avos
            + pynutil.insert(" " + avos_suffix)
        )

        fraction = pynini.union(fraction_ordinal, fraction_avos)
        # FIX: the original deleted 'numerator: "um" " ' (a stray extra `" `),
        # which never matches the tagger's 'numerator: "um" ' output, so this
        # preferred "dois e meio" path was dead code.
        mixed_um_meio = integer + connector + pynutil.delete('numerator: "' + numerator_one_val + '" ') + denom_meio
        # FIX: connector now carries its own surrounding spaces, so the extra
        # insert_space (which double-spaced the output) is dropped.
        optional_integer = pynini.closure(integer + connector, 0, 1)
        graph = optional_sign + pynini.union(
            pynutil.add_weight(mixed_um_meio, -0.01),
            optional_integer + fraction,
        )

        self.fst = self.delete_tokens(graph).optimize()
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing Portuguese ordinals, e.g.
    ordinal { integer: "primeiro" morphosyntactic_features: "gender_masc" } -> primeiro
    ordinal { integer: "primeira" morphosyntactic_features: "gender_fem" } -> primeira (feminine rewrite applied)

    The tagger emits masculine ordinal words for both genders; the feminine
    variant is produced here by a word-final rewrite driven by
    data/ordinals/feminine.tsv.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
        # Extract the quoted integer value, dropping the tag scaffolding.
        integer = pynutil.delete('integer: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')

        # Masculine→feminine ending substitutions, applied only at word
        # boundaries (before a space or end of string) so every word of a
        # compound ordinal ("vigésimo primeiro") is converted.
        fem_rewrite = pynini.string_file(get_abs_path("data/ordinals/feminine.tsv"))
        feminine_rewrite = pynini.cdrewrite(
            fem_rewrite,
            "",
            pynini.union(NEMO_SPACE, pynini.accep("[EOS]")),
            NEMO_SIGMA,
        )

        graph_masc = integer + pynutil.delete(' morphosyntactic_features: "gender_masc"')
        graph_fem = (integer @ feminine_rewrite) + pynutil.delete(' morphosyntactic_features: "gender_fem"')
        self.fst = self.delete_tokens(pynini.union(graph_masc, graph_fem)).optimize()
class VerbalizeFst(GraphFst):
    """
    Composes Portuguese verbalizer grammars (cardinal, ordinal, fraction, decimal).
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)

        # Union the component verbalizers; order matches the taggers' priority
        # (fraction, decimal, ordinal, cardinal).
        component_fsts = [
            FractionFst(deterministic=deterministic).fst,
            DecimalFst(deterministic=deterministic).fst,
            OrdinalFst(deterministic=deterministic).fst,
            CardinalFst(deterministic=deterministic).fst,
        ]
        combined = component_fsts[0]
        for component in component_fsts[1:]:
            combined = combined | component

        self.fst = combined
+ +import os + +import pynini +from pynini.lib import pynutil + +from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst +from nemo_text_processing.text_normalization.pt.graph_utils import ( + GraphFst, + delete_extra_space, + delete_space, + generator_main, +) +from nemo_text_processing.text_normalization.pt.verbalizers.verbalize import VerbalizeFst +from nemo_text_processing.utils.logging import logger + + +class VerbalizeFinalFst(GraphFst): + """ + Finite state transducer that verbalizes an entire Portuguese sentence, e.g. + tokens { cardinal { integer: "dois" } } tokens { name: "e" } tokens { cardinal { integer: "três" } } -> dois e três + + Args: + deterministic: if True will provide a single transduction option, + for False multiple options (used for audio-based normalization) + cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache. + overwrite_cache: set to True to overwrite .far files + """ + + def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False): + super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic) + + far_file = None + if cache_dir is not None and cache_dir != "None": + os.makedirs(cache_dir, exist_ok=True) + far_file = os.path.join(cache_dir, f"pt_tn_{deterministic}_deterministic_verbalizer.far") + if not overwrite_cache and far_file and os.path.exists(far_file): + self.fst = pynini.Far(far_file, mode="r")["verbalize"] + logger.info(f'VerbalizeFinalFst graph was restored from {far_file}.') + else: + + verbalize = VerbalizeFst(deterministic=deterministic).fst + word = WordFst(deterministic=deterministic).fst + types = verbalize | word + graph = ( + pynutil.delete("tokens") + + delete_space + + pynutil.delete("{") + + delete_space + + types + + delete_space + + pynutil.delete("}") + ) + graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space + + self.fst = graph.optimize() + if far_file: + 
generator_main(far_file, {"verbalize": self.fst}) diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt new file mode 100644 index 000000000..d4c0b33fc --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_cardinal.txt @@ -0,0 +1,118 @@ +0~zero +1~um +2~dois +3~três +4~quatro +5~cinco +6~seis +7~sete +8~oito +9~nove +10~dez +11~onze +12~doze +13~treze +14~catorze +15~quinze +16~dezesseis +17~dezessete +18~dezoito +19~dezenove +20~vinte +21~vinte e um +22~vinte e dois +23~vinte e três +24~vinte e quatro +25~vinte e cinco +26~vinte e seis +27~vinte e sete +28~vinte e oito +29~vinte e nove +30~trinta +40~quarenta +50~cinquenta +60~sessenta +70~setenta +80~oitenta +90~noventa +100~cem +101~cento e um +102~cento e dois +110~cento e dez +120~cento e vinte +130~cento e trinta +200~duzentos +300~trezentos +400~quatrocentos +500~quinhentos +600~seiscentos +700~setecentos +800~oitocentos +900~novecentos +1000~mil +1 000~mil +1.000~mil +1010~mil e dez +1020~mil e vinte +1100~mil e cem +1110~mil cento e dez +1111~mil cento e onze +2000~dois mil +2002~dois mil e dois +2010~dois mil e dez +2020~dois mil e vinte +2100~dois mil e cem +2110~dois mil cento e dez +2111~dois mil cento e onze +10000~dez mil +10 000~dez mil +10.000~dez mil +100000~cem mil +100 000~cem mil +100.000~cem mil +1 000 000~um milhão +1.000.000~um milhão +1 034 068~um milhão trinta e quatro mil sessenta e oito +2.000.000~dois milhões +1.000.000.000~um bilhão +1000000000~um bilhão +2.000.000.000~dois bilhões +2000000000~dois bilhões +3 000 000 000 000~três trilhões +3.000.000.000.000~três trilhões +1001~mil e um +1010~mil e dez +1100~mil e cem +1101~mil cento e um +1111~mil cento e onze +1999~mil novecentos e noventa e nove +100000~cem mil +100001~cem mil e um +101000~cento e um mil +101001~cento e um mil e um +110000~cento e dez mil +111000~cento e onze mil 
+111111~cento e onze mil cento e onze +1001000~um milhão e mil +1001001~um milhão mil e um +1010000~um milhão e dez mil +1010101~um milhão dez mil cento e um +1100000~um milhão e cem mil +1110000~um milhão cento e dez mil +1001010101~um bilhão um milhão dez mil cento e um +1010101010~um bilhão dez milhões cento e um mil e dez +1234567890~um bilhão duzentos e trinta e quatro milhões quinhentos e sessenta e sete mil oitocentos e noventa +987654321~novecentos e oitenta e sete milhões seiscentos e cinquenta e quatro mil trezentos e vinte e um +999999999~novecentos e noventa e nove milhões novecentos e noventa e nove mil novecentos e noventa e nove +2000000001~dois bilhões e um +3000001000~três bilhões e mil +4000100000~quatro bilhões e cem mil +5000000100~cinco bilhões e cem +6001000000~seis bilhões e um milhão +1000000000000~um trilhão +1000000000001~um trilhão e um +1230000000000~um trilhão duzentos e trinta bilhões +3004005006007~três trilhões quatro bilhões cinco milhões seis mil e sete +1000001~um milhão e um +1001100~um milhão mil e cem +1001110~um milhão mil cento e dez \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt new file mode 100644 index 000000000..1f9b59c69 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_decimal.txt @@ -0,0 +1,58 @@ +0,1~zero vírgula um +0,2~zero vírgula dois +0,5~zero vírgula cinco +0,9~zero vírgula nove +0,01~zero vírgula um +0,02~zero vírgula dois +0,05~zero vírgula cinco +0,10~zero vírgula dez +0,11~zero vírgula onze +0,15~zero vírgula quinze +0,20~zero vírgula vinte +0,25~zero vírgula vinte e cinco +0,50~zero vírgula cinquenta +0,99~zero vírgula noventa e nove +1,1~um vírgula um +1,2~um vírgula dois +1,5~um vírgula cinco +1,10~um vírgula dez +1,15~um vírgula quinze +1,20~um vírgula vinte +1,26~um vírgula vinte e seis +1,33~um vírgula trinta e três 
+1,50~um vírgula cinquenta +3,141~três vírgula cento e quarenta e um +3,256~três vírgula duzentos e cinquenta e seis +3,999~três vírgula novecentos e noventa e nove +3,1415~três vírgula mil quatrocentos e quinze +3,14159~três vírgula quatorze mil cento e cinquenta e nove +3,1001~três vírgula mil e um +3,2003~três vírgula dois mil e três +3,014~três vírgula quatorze +3,0141~três vírgula cento e quarenta e um +3,1005~três vírgula mil e cinco +3,1050~três vírgula mil e cinquenta +3,1010~três vírgula mil e dez +-1,2~menos um vírgula dois +-1,26~menos um vírgula vinte e seis +-3,5~menos três vírgula cinco +-0,5~menos zero vírgula cinco +1,2 milhões~um vírgula dois milhões +1,5 milhões~um vírgula cinco milhões +1,25 milhões~um vírgula vinte e cinco milhões +2,5 bilhões~dois vírgula cinco bilhões +3,75 bilhões~três vírgula setenta e cinco bilhões +0,001~zero vírgula um +0,0001~zero vírgula um +1,001~um vírgula mil e um +1,010~um vírgula mil e dez +1,100~um vírgula mil e cem +10,01~dez vírgula um +10,001~dez vírgula mil e um +100,5~cem vírgula cinco +100,05~cem vírgula cinco +3,14~três vírgula quatorze +3,141~três vírgula cento e quarenta e um +3,1415~três vírgula mil quatrocentos e quinze +3,14159~três vírgula quatorze mil cento e cinquenta e nove +3,1415926535~três vírgula um quatro um cinco nove dois seis cinco três cinco \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt new file mode 100644 index 000000000..256fecd86 --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_fraction.txt @@ -0,0 +1,21 @@ +1/2~um meio +1/3~um terço +1/4~um quarto +2/3~dois terços +3/4~três quartos +1/5~um quinto +2/5~dois quintos +1/6~um sexto +5/6~cinco sextos +1/8~um oitavo +3/8~três oitavos +7/8~sete oitavos +1/10~um décimo +3/10~três décimos +3/11~três onze avos +5/13~cinco treze avos +1/100~um centésimo 
+1/1000~um milésimo +1 1/2~um e um meio +2 1/4~dois e um quarto +3 2/3~três e dois terços \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt new file mode 100644 index 000000000..f9a58a9ce --- /dev/null +++ b/tests/nemo_text_processing/pt/data_text_normalization/test_cases_ordinal.txt @@ -0,0 +1,39 @@ +1º~primeiro +2º~segundo +3º~terceiro +4º~quarto +5º~quinto +6º~sexto +7º~sétimo +8º~oitavo +9º~nono +10º~décimo +11º~décimo primeiro +12º~décimo segundo +13º~décimo terceiro +20º~vigésimo +21º~vigésimo primeiro +22º~vigésimo segundo +23º~vigésimo terceiro +100º~centésimo +111º~centésimo décimo primeiro +134º~centésimo trigésimo quarto +1ª~primeira +2ª~segunda +3ª~terceira +4ª~quarta +5ª~quinta +6ª~sexta +7ª~sétima +8ª~oitava +9ª~nona +10ª~décima +11ª~décima primeira +12ª~décima segunda +13ª~décima terceira +20ª~vigésima +21ª~vigésima primeira +22ª~vigésima segunda +23ª~vigésima terceira +100ª~centésima +11ª casa~décima primeira casa \ No newline at end of file diff --git a/tests/nemo_text_processing/pt/test_cardinal.py b/tests/nemo_text_processing/pt/test_cardinal.py index dafa3e358..dfadad09f 100644 --- a/tests/nemo_text_processing/pt/test_cardinal.py +++ b/tests/nemo_text_processing/pt/test_cardinal.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +31,12 @@ class TestCardinal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_cardinal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_decimal.py b/tests/nemo_text_processing/pt/test_decimal.py index afbec329b..67376d476 100644 --- a/tests/nemo_text_processing/pt/test_decimal.py +++ b/tests/nemo_text_processing/pt/test_decimal.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -29,3 +30,12 @@ class TestDecimal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_decimal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected diff --git a/tests/nemo_text_processing/pt/test_fraction.py b/tests/nemo_text_processing/pt/test_fraction.py new file mode 100644 index 000000000..16e6c5f30 --- /dev/null +++ b/tests/nemo_text_processing/pt/test_fraction.py @@ -0,0 +1,32 @@ +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from parameterized import parameterized + +from nemo_text_processing.text_normalization.normalize import Normalizer +from ..utils import CACHE_DIR, parse_test_case_file + + +class TestFraction: + normalizer = Normalizer( + lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased', post_process=True + ) + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_fraction.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False, punct_post_process=False) + assert pred == expected, f"input: {test_input}" diff --git a/tests/nemo_text_processing/pt/test_ordinal.py b/tests/nemo_text_processing/pt/test_ordinal.py index a830e2d21..8602e8700 100644 --- a/tests/nemo_text_processing/pt/test_ordinal.py +++ b/tests/nemo_text_processing/pt/test_ordinal.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,7 +17,7 @@ from parameterized import parameterized from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - +from nemo_text_processing.text_normalization.normalize import Normalizer from ..utils import CACHE_DIR, parse_test_case_file @@ -30,3 +30,12 @@ class TestOrdinal: def test_denorm(self, test_input, expected): pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False) assert pred == expected + + normalizer = Normalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased') + + @parameterized.expand(parse_test_case_file('pt/data_text_normalization/test_cases_ordinal.txt')) + @pytest.mark.run_only_on('CPU') + @pytest.mark.unit + def test_norm(self, test_input, expected): + pred = self.normalizer.normalize(test_input, verbose=False) + assert pred == expected