#!/usr/bin/env python3
"""Recipe for training a wav2vec-based CTC ASR system with LibriSpeech.
The system employs wav2vec as its encoder. Decoding is performed with a
CTC beam-search decoder (pyctcdecode) combined with a KenLM n-gram
language model; a greedy decode is also computed during evaluation.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
The neural network is trained with the CTC loss and character units
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
Authors
 * Sung-Lin Yeh 2021
 * Titouan Parcollet 2021
 * Ju-Chieh Chou 2020
 * Mirco Ravanelli 2020
 * Abdel Heba 2020
 * Peter Plantinga 2020
 * Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
import torchaudio.transforms as T
from pyctcdecode import build_ctcdecoder

logger = logging.getLogger(__name__)


# Define training procedure
class ASR(sb.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)

        # Forward pass
        feats = self.modules.wav2vec2(wavs)
        x = self.modules.enc(feats)

        # Compute outputs
        p_tokens = None
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        if stage != sb.Stage.TRAIN:
            p_tokens = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
        return p_ctc, wav_lens, p_tokens
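
    # Shape sketch (an assumption, for orientation only): wavs is [batch, time]
    # at 16 kHz, self.modules.wav2vec2 returns frame-level features
    # [batch, frames, feat_dim], the enc/ctc_lin stack maps them to logits
    # [batch, frames, num_chars], and log_softmax yields the CTC log-posteriors
    # consumed by the greedy decode above and the beam-search decoder used in
    # compute_objectives below.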

    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        p_ctc, wav_lens, predicted_tokens = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
            tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
            tokens_eos_lens = torch.cat(
                [tokens_eos_lens, tokens_eos_lens], dim=0
            )
            tokens = torch.cat([tokens, tokens], dim=0)
            tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)

        loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
        loss = loss_ctc

        if stage != sb.Stage.TRAIN:
            # Decode token terms to words with the pyctcdecode beam-search
            # decoder (built at module level, LM-rescored).
            predicted_words = []
            for logs in p_ctc:
                text = decoder.decode(logs.detach().cpu().numpy())
                predicted_words.append(text.split(" "))
            target_words = [wrd.split(" ") for wrd in batch.wrd]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss

    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        predictions = self.compute_forward(batch, sb.Stage.TRAIN)
        loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
        loss.backward()
        if self.check_gradients(loss):
            self.wav2vec_optimizer.step()
            self.model_optimizer.step()

        self.wav2vec_optimizer.zero_grad()
        self.model_optimizer.zero_grad()

        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()

    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            sb.nnet.schedulers.update_learning_rate(
                self.wav2vec_optimizer, new_lr_wav2vec
            )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)

    def init_optimizers(self):
        """Initializes the wav2vec2 optimizer and model optimizer"""
        self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
            self.modules.wav2vec2.parameters()
        )
        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )

        if self.checkpointer is not None:
            self.checkpointer.add_recoverable(
                "wav2vec_opt", self.wav2vec_optimizer
            )
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)


def dataio_prepare(hparams):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions.
    """
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # We sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(sort_key="duration")
        # When sorting, do not shuffle in the dataloader; otherwise sorting is pointless.
        hparams["train_dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration", reverse=True
        )
        # When sorting, do not shuffle in the dataloader; otherwise sorting is pointless.
        hparams["train_dataloader_opts"]["shuffle"] = False

    elif hparams["sorting"] == "random":
        pass

    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )

    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    # Test is separate
    test_datasets = {}
    for csv_file in hparams["test_csv"]:
        name = Path(csv_file).stem
        test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
            csv_path=csv_file, replacements={"data_root": data_folder}
        )
        test_datasets[name] = test_datasets[name].filtered_sorted(
            sort_key="duration"
        )

    datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]

    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav", "sr")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav, sr):
        sig = sb.dataio.dataio.read_audio(wav)
        sig = resamplers[sr](sig)
        return sig

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)

    label_encoder = sb.dataio.encoder.CTCTextEncoder()

    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "wrd", "char_list", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        yield wrd
        char_list = list(wrd)
        yield char_list
        tokens_list = label_encoder.encode_sequence(char_list)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + tokens_list)
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)

    lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
    special_labels = {
        "bos_label": hparams["bos_index"],
        "eos_label": hparams["eos_index"],
        "blank_label": hparams["blank_index"],
    }
    label_encoder.load_or_create(
        path=lab_enc_file,
        from_didatasets=[train_data],
        output_key="char_list",
        special_labels=special_labels,
        sequence_input=True,
    )

    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets,
        ["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_datasets, label_encoder
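
# Minimal sketch of the manifest layout this script assumes (not part of the
# original recipe): each CSV is expected to expose at least the columns used by
# the pipelines above, e.g.
#   ID, duration, wav, sr, wrd
#   utt_0001, 3.2, $data_root/utt_0001.wav, 44100, some transcription
# where "sr" must match one of the keys of the `resamplers` dict defined below.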


if __name__ == "__main__":
    # CLI:
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])

    # If distributed_launch=True then
    # create ddp_group with the right communication protocol
    sb.utils.distributed.ddp_init_group(run_opts)

    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )

    def read_labels_file(labels_file):
        """Reads the label-encoder file and returns its labels ordered by index."""
        with open(labels_file, "r") as lf:
            lines = lf.read().splitlines()
        division = "==="
        numbers = {}
        for line in lines:
            if division in line:
                break
            string, number = line.split("=>")
            number = int(number)
            string = string[1:-2]
            numbers[number] = string
        return [numbers[x] for x in range(len(numbers))]
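
    # Sketch of the label_encoder.txt layout that read_labels_file expects
    # (inferred from the parsing above, shown for illustration only):
    #   'a' => 1
    #   'b' => 2
    #   ...
    #   ================
    # Metadata after the "===" separator is ignored.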

    labels = read_labels_file(
        os.path.join(hparams["save_folder"], "label_encoder.txt")
    )
    print(labels)
    # The symbol stored at index 0 (the blank in this setup) is replaced by the
    # empty string, which pyctcdecode treats as the CTC blank.
    labels = [""] + labels[1:]
    print(len(labels))

    decoder = build_ctcdecoder(
        labels,
        kenlm_model_path="tunisian.arpa",  # either .arpa or .bin file
        alpha=0.5,  # tuned on a val set
        beta=1.0,  # tuned on a val set
    )
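
    # Illustrative only (not executed): the decoder consumes a (time, vocab)
    # matrix of CTC log-probabilities and returns a plain-text hypothesis,
    # which is how it is used per utterance in compute_objectives, e.g.
    #   import numpy as np
    #   dummy_logp = np.log(np.full((10, len(labels)), 1.0 / len(labels)))
    #   hyp = decoder.decode(dummy_logp)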

    # Audio in the manifests may come at different sample rates; resample
    # everything to the 16 kHz expected by wav2vec 2.0.
    resampler_8000 = T.Resample(8000, 16000, dtype=torch.float)
    resampler_44100 = T.Resample(44100, 16000, dtype=torch.float)
    resampler_48000 = T.Resample(48000, 16000, dtype=torch.float)
    resamplers = {
        "8000": resampler_8000,
        "44100": resampler_44100,
        "48000": resampler_48000,
    }
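
    # Assumption: source audio only ever arrives at 8 kHz, 44.1 kHz, or 48 kHz.
    # If a corpus already ships 16 kHz audio, an identity entry could be added:
    #   resamplers["16000"] = T.Resample(16000, 16000, dtype=torch.float)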

    # Here we create the dataset objects as well as tokenization and encoding.
    train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
        hparams
    )

    # Trainer initialization
    asr_brain = ASR(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # Force CPU execution for this run.
    asr_brain.device = "cpu"
    asr_brain.modules.to("cpu")

    # We dynamically add the tokenizer to our brain class.
    # NB: This tokenizer corresponds to the one used for the LM!!
    asr_brain.tokenizer = label_encoder

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["train_dataloader_opts"],
        valid_loader_kwargs=hparams["valid_dataloader_opts"],
    )

    # Testing
    for k in test_datasets.keys():  # keys are test_clean, test_other, etc.
        asr_brain.hparams.wer_file = os.path.join(
            hparams["output_folder"], "wer_{}.txt".format(k)
        )
        asr_brain.evaluate(
            test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
        )
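
    # After evaluation, one WER report per test CSV (wer_<name>.txt) is written
    # to hparams["output_folder"], and the checkpointer keeps only the best
    # checkpoint according to validation WER (see on_stage_end above).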