#!/usr/bin/env python3
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs a wav2vec2 encoder and a CTC decoder.
Decoding is performed with greedy decoding (will be extended to beam search).

To run this recipe, do the following:
> python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml

With the default hyperparameters, the system employs a pretrained wav2vec2 encoder.
The wav2vec2 model is pretrained following the model given in the hparams file.
It may be dependent on the language.

The neural network is trained with CTC on sub-word units estimated with
Byte-Pair Encoding (BPE).

The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g., characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.

Authors
 * Titouan Parcollet 2021
"""
import sys
import logging

import torch
import torchaudio
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
logger = logging.getLogger(__name__)


# Define training procedure
class ASRCV(sb.core.Brain):
    def compute_forward(self, batch, stage):
        """Forward computations from the waveform batches to the output probabilities."""
        batch = batch.to(self.device)
        wavs, wav_lens = batch.sig
        tokens_bos, _ = batch.tokens_bos
        wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)

        if stage == sb.Stage.TRAIN:
            if hasattr(self.hparams, "augmentation"):
                wavs = self.hparams.augmentation(wavs, wav_lens)

        # Forward pass
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        logits = self.modules.ctc_lin(x)
        p_ctc = self.hparams.log_softmax(logits)

        return p_ctc, wav_lens
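    # Shape sketch (illustrative, assuming a 16 kHz model): wavs is
    # [batch, samples] with wav_lens holding relative lengths in [0, 1];
    # wav2vec2 returns frame-level features [batch, frames, hidden], enc maps
    # them onward, and p_ctc is the log-probability tensor
    # [batch, frames, output_neurons] consumed by the CTC loss.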
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC) given predictions and targets."""
        p_ctc, wav_lens = predictions

        ids = batch.id
        tokens_eos, tokens_eos_lens = batch.tokens_eos
        tokens, tokens_lens = batch.tokens

        loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)

        if stage != sb.Stage.TRAIN:
            # Decode token terms to words
            sequence = sb.decoders.ctc_greedy_decode(
                p_ctc, wav_lens, blank_id=self.hparams.blank_index
            )
            predicted_words = self.tokenizer(sequence, task="decode_from_list")

            # Convert indices to words
            target_words = undo_padding(tokens, tokens_lens)
            target_words = self.tokenizer(target_words, task="decode_from_list")

            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)

        return loss
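    # undo_padding turns a padded tensor plus relative lengths back into
    # ragged lists, e.g. (hypothetical values):
    #   undo_padding(torch.tensor([[5, 9, 2, 0], [7, 3, 0, 0]]),
    #                torch.tensor([0.75, 0.50]))  ->  [[5, 9, 2], [7, 3]]
    # since 0.75 * 4 = 3 and 0.50 * 4 = 2 tokens are kept per utterance.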
    def fit_batch(self, batch):
        """Train the parameters given a single batch in input"""
        should_step = self.step % self.grad_accumulation_factor == 0
        # Managing automatic mixed precision
        # TOFIX: CTC fine-tuning currently is unstable
        # This is certainly due to CTC being done in fp16 instead of fp32
        if self.auto_mix_prec:
            with torch.cuda.amp.autocast():
                with self.no_sync():
                    outputs = self.compute_forward(batch, sb.Stage.TRAIN)
                loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            with self.no_sync(not should_step):
                self.scaler.scale(
                    loss / self.grad_accumulation_factor
                ).backward()
            if should_step:
                if not self.hparams.wav2vec2.freeze:
                    self.scaler.unscale_(self.wav2vec_optimizer)
                self.scaler.unscale_(self.model_optimizer)
                if self.check_gradients(loss):
                    if not self.hparams.wav2vec2.freeze:
                        if self.optimizer_step >= self.hparams.warmup_steps:
                            self.scaler.step(self.wav2vec_optimizer)
                    self.scaler.step(self.model_optimizer)
                    self.scaler.update()
                self.zero_grad()
                self.optimizer_step += 1
        else:
            # This is mandatory because HF models have a weird behavior with DDP
            # on the forward pass
            with self.no_sync():
                outputs = self.compute_forward(batch, sb.Stage.TRAIN)
            loss = self.compute_objectives(outputs, batch, sb.Stage.TRAIN)
            with self.no_sync(not should_step):
                (loss / self.grad_accumulation_factor).backward()
            if should_step:
                if self.check_gradients(loss):
                    if not self.hparams.wav2vec2.freeze:
                        if self.optimizer_step >= self.hparams.warmup_steps:
                            self.wav2vec_optimizer.step()
                    self.model_optimizer.step()
                self.zero_grad()
                self.optimizer_step += 1

        self.on_fit_batch_end(batch, outputs, loss, should_step)
        return loss.detach().cpu()
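    # Gradient accumulation in numbers (illustrative): with a dataloader
    # batch_size of 4 and grad_accumulation_factor of 8, .backward() runs on
    # every batch but the optimizers step once every 8 batches, giving an
    # effective batch size of 32; dividing the loss by the accumulation
    # factor keeps the summed gradient on the scale of a single step.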
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        # No gradients are needed at evaluation time
        with torch.no_grad():
            predictions = self.compute_forward(batch, stage=stage)
            loss = self.compute_objectives(predictions, batch, stage=stage)
        return loss.detach()
    def on_stage_start(self, stage, epoch):
        """Gets called at the beginning of each epoch"""
        if stage != sb.Stage.TRAIN:
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.error_rate_computer()
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of an epoch."""
        # Compute/store important stats
        stage_stats = {"loss": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        else:
            stage_stats["CER"] = self.cer_metric.summarize("error_rate")
            stage_stats["WER"] = self.wer_metric.summarize("error_rate")

        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
                stage_stats["loss"]
            )
            old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
                stage_stats["loss"]
            )
            sb.nnet.schedulers.update_learning_rate(
                self.model_optimizer, new_lr_model
            )
            if not self.hparams.wav2vec2.freeze:
                sb.nnet.schedulers.update_learning_rate(
                    self.wav2vec_optimizer, new_lr_wav2vec
                )
            self.hparams.train_logger.log_stats(
                stats_meta={
                    "epoch": epoch,
                    "lr_model": old_lr_model,
                    "lr_wav2vec": old_lr_wav2vec,
                },
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
            with open(self.hparams.wer_file, "w") as w:
                self.wer_metric.write_stats(w)
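    # save_and_keep_only with min_keys=["WER"] keeps only the checkpoint(s)
    # with the lowest validation WER, which is also what evaluate() reloads
    # later via min_key="WER".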
    def init_optimizers(self):
        "Initializes the wav2vec2 optimizer and model optimizer"
        # If the wav2vec encoder is unfrozen, we create the optimizer
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
                self.modules.wav2vec2.parameters()
            )
            if self.checkpointer is not None:
                self.checkpointer.add_recoverable(
                    "wav2vec_opt", self.wav2vec_optimizer
                )

        self.model_optimizer = self.hparams.model_opt_class(
            self.hparams.model.parameters()
        )

        if self.checkpointer is not None:
            self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
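    # Two optimizers are used so the pretrained wav2vec2 encoder and the
    # randomly initialized layers on top can be tuned with different learning
    # rates and schedules (lr_annealing_wav2vec vs. lr_annealing_model above).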
    def zero_grad(self, set_to_none=False):
        if not self.hparams.wav2vec2.freeze:
            self.wav2vec_optimizer.zero_grad(set_to_none)
        self.model_optimizer.zero_grad(set_to_none)


# Define custom data procedure
def dataio_prepare(hparams, tokenizer):
    """This function prepares the datasets to be used in the brain class.
    It also defines the data processing pipeline through user-defined functions."""

    # 1. Define datasets
    data_folder = hparams["data_folder"]

    train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
    )

    if hparams["sorting"] == "ascending":
        # We sort training data to speed up training and get better results.
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # When sorting, do not shuffle in the dataloader; otherwise sorting is pointless.
        hparams["dataloader_options"]["shuffle"] = False

    elif hparams["sorting"] == "descending":
        train_data = train_data.filtered_sorted(
            sort_key="duration",
            reverse=True,
            key_max_value={"duration": hparams["avoid_if_longer_than"]},
        )
        # When sorting, do not shuffle in the dataloader; otherwise sorting is pointless.
        hparams["dataloader_options"]["shuffle"] = False

    elif hparams["sorting"] == "random":
        pass

    else:
        raise NotImplementedError(
            "sorting must be random, ascending or descending"
        )
    valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the validation data so it is faster to validate
    valid_data = valid_data.filtered_sorted(sort_key="duration")

    test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
        csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
    )
    # We also sort the test data so it is faster to evaluate
    test_data = test_data.filtered_sorted(sort_key="duration")

    datasets = [train_data, valid_data, test_data]
    # 2. Define audio pipeline:
    @sb.utils.data_pipeline.takes("wav")
    @sb.utils.data_pipeline.provides("sig")
    def audio_pipeline(wav):
        info = torchaudio.info(wav)
        sig = sb.dataio.dataio.read_audio(wav)
        resampled = torchaudio.transforms.Resample(
            info.sample_rate, hparams["sample_rate"],
        )(sig)
        return resampled

    sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
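    # Example (illustrative rates): CommonVoice clips are typically 48 kHz
    # mp3 files; read_audio returns a 1-D float tensor that is resampled here
    # to hparams["sample_rate"], usually 16 kHz for wav2vec2 models.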
    # 3. Define text pipeline:
    @sb.utils.data_pipeline.takes("wrd")
    @sb.utils.data_pipeline.provides(
        "tokens_list", "tokens_bos", "tokens_eos", "tokens"
    )
    def text_pipeline(wrd):
        tokens_list = tokenizer.sp.encode_as_ids(wrd)
        yield tokens_list
        tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
        yield tokens_bos
        tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
        yield tokens_eos
        tokens = torch.LongTensor(tokens_list)
        yield tokens

    sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
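    # For wrd = "hello world" this might yield (token ids are hypothetical):
    #   tokens_list = [17, 52]
    #   tokens_bos  = LongTensor([bos_index, 17, 52])
    #   tokens_eos  = LongTensor([17, 52, eos_index])
    #   tokens      = LongTensor([17, 52])
    # Only "tokens" feeds the CTC loss; the bos/eos variants are produced
    # but not used by the CTC objective here.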
    # 4. Set output:
    sb.dataio.dataset.set_output_keys(
        datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
    )
    return train_data, valid_data, test_data
| if __name__ == "__main__": | |
| # Load hyperparameters file with command-line overrides | |
| hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:]) | |
| with open(hparams_file) as fin: | |
| hparams = load_hyperpyyaml(fin, overrides) | |
| # If --distributed_launch then | |
| # create ddp_group with the right communication protocol | |
| sb.utils.distributed.ddp_init_group(run_opts) | |
| # Dataset preparation (parsing CommonVoice) | |
| from common_voice_prepare import prepare_common_voice # noqa | |
| # Create experiment directory | |
| sb.create_experiment_directory( | |
| experiment_directory=hparams["output_folder"], | |
| hyperparams_to_save=hparams_file, | |
| overrides=overrides, | |
| ) | |
| # Due to DDP, we do the preparation ONLY on the main python process | |
| run_on_main( | |
| prepare_common_voice, | |
| kwargs={ | |
| "data_folder": hparams["data_folder"], | |
| "save_folder": hparams["save_folder"], | |
| "train_tsv_file": hparams["train_tsv_file"], | |
| "dev_tsv_file": hparams["dev_tsv_file"], | |
| "test_tsv_file": hparams["test_tsv_file"], | |
| "accented_letters": hparams["accented_letters"], | |
| "language": hparams["language"], | |
| "skip_prep": hparams["skip_prep"], | |
| }, | |
| ) | |
    # Defining tokenizer and loading it
    tokenizer = SentencePiece(
        model_dir=hparams["save_folder"],
        vocab_size=hparams["output_neurons"],
        annotation_train=hparams["train_csv"],
        annotation_read="wrd",
        model_type=hparams["token_type"],
        character_coverage=hparams["character_coverage"],
    )
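    # The SentencePiece wrapper trains (or reloads from model_dir) a tokenizer
    # on the "wrd" column of train_csv; vocab_size must equal output_neurons
    # so ctc_lin has one logit per sub-word unit, including the CTC blank at
    # blank_index.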
    # Create the dataset objects as well as tokenization and encoding :-D
    train_data, valid_data, test_data = dataio_prepare(hparams, tokenizer)

    # Trainer initialization
    asr_brain = ASRCV(
        modules=hparams["modules"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )

    # Adding objects to trainer.
    asr_brain.tokenizer = tokenizer

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["dataloader_options"],
        valid_loader_kwargs=hparams["test_dataloader_options"],
    )

    # Test
    asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
    asr_brain.evaluate(
        test_data,
        min_key="WER",
        test_loader_kwargs=hparams["test_dataloader_options"],
    )