#!/usr/bin/env bash
DOCKER_EXP_DIR="/checkpoints/results"

### Good
# Checkpoint and inference config kept for reference; not used by the training command below.
CKPT="/checkpoints/streaming/magpie/jason/magpieTTS--val_loss=5.1255-epoch=89-last.ckpt"
HPARAM="/checkpoints/streaming/magpie/jason/magpietts_en_jason_inference.yaml"

PROJECT='magpieTTS_en_newcodec_exps'
CODECMODEL_NAME="C21FPS_Causal_8cb"
# CODEC_PATH="/paarth_gitrepos/checkpoints/21fps_causal_codecmodel.nemo"
CODEC_PATH="/nemo_codec_checkpoints/21fps_causal_codecmodel.nemo"
DATASETS="HRLLM"
MODEL_STRUCTURE="MP_CE"
LR=1e-4
PRECISION=bf16
SAMPLE_WEIGHT_LIBRIVOX=0.2
BATCH_SIZE=12
EXP_NAME="${MODEL_STRUCTURE}_${CODECMODEL_NAME}_${DATASETS}_lr${LR}_bs${BATCH_SIZE}_precision${PRECISION}_w${SAMPLE_WEIGHT_LIBRIVOX}_bin_4"

# EXP_DIR="/lustre/fsw/llmservice_nemo_speechlm/users/subhankarg/experiments_slurm_logs/T5TTSMarch2025/magpie2503_causal_codec_causal_enc_attnprior_nolocaltransformer"
# Note: this overrides the DOCKER_EXP_DIR set at the top of the script.
DOCKER_EXP_DIR="/checkpoints/local_training/magpie2503_causal_codec_causal_enc_attnprior_nolocaltransformer"

export CUDA_VISIBLE_DEVICES=0

# Optional Hydra overrides, currently disabled:
# ++model.train_ds.dataset.include_wait_k=True \
# ++model.train_ds.dataset.max_wait_k=2 \
# ++model.validation_ds.dataset.include_wait_k=True \
# ++model.validation_ds.dataset.max_wait_k=2 \
# +model.text_tokenizers.english_chartokenizer._target_=AutoTokenizer \
# +model.text_tokenizers.english_chartokenizer.pretrained_model="google/byt5-small" \

python examples/tts/magpietts.py \
    --config-path=/workspace/NeMo/examples/tts/conf/magpietts \
    --config-name=magpietts_dc_en \
    exp_manager.exp_dir="${DOCKER_EXP_DIR}" \
    ++exp_manager.name="magpieTTS" \
    ++exp_manager.create_tensorboard_logger=true \
    ++exp_manager.create_wandb_logger=false \
    +exp_manager.version=0 \
    weighted_sampling_steps_per_epoch=1000 \
    max_epochs=500 \
    batch_size=${BATCH_SIZE} \
    +train_ds_meta.libri100train.manifest_path="/Data/manifests/libri100__phoneme__nemo_audio_21fps_8codebooks_2kcodes_v2bWithWavLM_simplet5_withContextAudioPaths.json" \
    +train_ds_meta.libri100train.audio_dir="/Data/LibriTTS" \
    +train_ds_meta.libri100train.feature_dir="/Data/LibriTTS" \
    +train_ds_meta.libri100train.sample_weight=1.0 \
    +train_ds_meta.libri100train.tokenizer_names="[english_phoneme]" \
    +val_ds_meta.libridev.manifest_path="/Data/manifests/dev_clean_withContextAudioPaths.json" \
    +val_ds_meta.libridev.audio_dir="/Data/LibriTTS" \
    +val_ds_meta.libridev.feature_dir="/Data/LibriTTS" \
    +val_ds_meta.libridev.tokenizer_names="[english_phoneme]" \
    model.train_ds.dataset.min_duration=0.2 \
    model.validation_ds.dataset.min_duration=0.2 \
    model.context_duration_min=5.0 \
    model.context_duration_max=5.0 \
    model.codecmodel_path=${CODEC_PATH} \
    model.model_type="decoder_context_tts" \
    model.use_text_conditioning_encoder=true \
    model.use_alignment_encoder=true \
    model.local_transformer_type="none" \
    model.train_ds.dataloader_params.num_workers=1 \
    model.validation_ds.dataloader_params.num_workers=1 \
    trainer.devices=-1 \
    ++trainer.num_sanity_val_steps=0 \
    trainer.precision=${PRECISION} \
    model.optim.lr=${LR}
    # Additional overrides, currently disabled:
    # trainer.val_check_interval=200 \
    # model.encoder.is_causal=true \
    # model.encoder.kernel_size=3 \
    # model.context_encoder.is_causal=true \
    # model.decoder.kernel_size=3 \
    # model.decoder.xa_n_heads=1 \
    # +trainer.check_val_every_n_epoch=1 \