├── .gitignore
├── .gitmodules
├── .pylintrc
├── .style.yapf
├── LICENSE
├── README.md
├── audio
├── code
├── analysis-playback-head.py
├── analysis-tape.py
├── critics.py
├── dataset.py
├── datasets_diffusion.py
├── evaluation.py
├── generate-inputs.py
├── generate-targets.py
├── matplotlibrc
├── model.py
├── networks
│ └── unet_1d.py
├── normalize-loudness.py
├── tape.py
├── test-dataset.py
├── test-model.py
├── test_noise_generator.ipynb
├── test_trajectories_generator.ipynb
├── train-adversarial.py
├── train-diffusion.py
├── train.py
└── utilities
│ ├── training_utils.py
│ └── utilities.py
├── configs
├── AdversarialConfig.py
├── conf_noise.yaml
├── conf_toytrajectories.yaml
└── conf_trajectories.yaml
├── environment.yaml
├── matplotlibrc
├── presentation
├── NeuralTape_Presentation_DAFx.pdf
└── NeuralTape_Presentation_DAFx.pptx
├── reel-to-reel.png
├── results
├── DataAnalysis_RealData
│ ├── MAXELL_IPS[7.5]
│ │ ├── delay_trajectories
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png
│ │ │ └── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png
│ │ ├── pulse_recovery
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png
│ │ │ └── SinesFadedShortContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png
│ │ ├── sounds
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_L.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_R.wav
│ │ │ ├── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_L.wav
│ │ │ └── ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_R.wav
│ │ └── sweeps
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_0.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_1.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_2.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_3.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_4.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_5.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_6.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_7.png
│ │ │ ├── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_8.png
│ │ │ └── LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_9.png
│ └── MAXELL_IPS[7.5]_HIRES
│ │ ├── delay_trajectories
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png
│ │ └── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png
│ │ └── pulse_recovery
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png
│ │ ├── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png
│ │ └── ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png
├── EXP1A_ToyData
│ ├── hysteresis
│ │ └── exp1a-hysteresis.pdf
│ └── predictions
│ │ ├── 109_input.wav
│ │ ├── 109_supervised-I.wav
│ │ ├── 109_target.wav
│ │ ├── 21_input.wav
│ │ ├── 21_supervised-I.wav
│ │ ├── 21_target.wav
│ │ ├── 52_input.wav
│ │ ├── 52_supervised-I.wav
│ │ ├── 52_target.wav
│ │ ├── 77_input.wav
│ │ ├── 77_supervised-I.wav
│ │ ├── 77_target.wav
│ │ ├── 78_input.wav
│ │ ├── 78_supervised-I.wav
│ │ └── 78_target.wav
├── EXP1B_ToyData
│ ├── delay_compare
│ │ ├── 109_1_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 109_1_DELAY[Real]_prediction_Supervised_2.wav
│ │ ├── 109_1_DELAY[True]_prediction_Supervised_2.wav
│ │ ├── 109_2_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 109_input.wav
│ │ ├── 109_target.wav
│ │ ├── 16_1_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 16_1_DELAY[Real]_prediction_Supervised_2.wav
│ │ ├── 16_1_DELAY[True]_prediction_Supervised_2.wav
│ │ ├── 16_2_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 16_input.wav
│ │ ├── 16_target.wav
│ │ ├── 40_1_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 40_1_DELAY[Real]_prediction_Supervised_2.wav
│ │ ├── 40_1_DELAY[True]_prediction_Supervised_2.wav
│ │ ├── 40_2_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 40_input.wav
│ │ ├── 40_target.wav
│ │ ├── 70_1_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 70_1_DELAY[Real]_prediction_Supervised_2.wav
│ │ ├── 70_1_DELAY[True]_prediction_Supervised_2.wav
│ │ ├── 70_2_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 70_input.wav
│ │ ├── 70_target.wav
│ │ ├── 77_1_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 77_1_DELAY[Real]_prediction_Supervised_2.wav
│ │ ├── 77_1_DELAY[True]_prediction_Supervised_2.wav
│ │ ├── 77_2_DELAY[Generated]_prediction_Supervised_2.wav
│ │ ├── 77_input.wav
│ │ └── 77_target.wav
│ ├── hysteresis
│ │ └── exp1b-hysteresis_combined.pdf
│ ├── predictions
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 109_input.wav
│ │ ├── 109_target.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 52_input.wav
│ │ ├── 52_target.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 70_input.wav
│ │ ├── 70_target.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 84_input.wav
│ │ ├── 84_target.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 98_input.wav
│ │ └── 98_target.wav
│ └── trajectories
│ │ ├── toyspectra.pdf
│ │ └── toytrajectories.pdf
├── EXP2_RealData
│ └── MAXELL_IPS[7.5]
│ │ ├── noise
│ │ └── noise_spectra.pdf
│ │ ├── noise_compare
│ │ ├── 109_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 109_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 109_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 109_input.wav
│ │ ├── 109_target.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 52_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 52_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 52_input.wav
│ │ ├── 52_target.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 70_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 70_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 70_input.wav
│ │ ├── 70_target.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 84_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 84_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 84_input.wav
│ │ ├── 84_target.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 98_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
│ │ ├── 98_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 98_input.wav
│ │ └── 98_target.wav
│ │ ├── predictions
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 109_input.wav
│ │ ├── 109_target.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 52_input.wav
│ │ ├── 52_target.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 70_input.wav
│ │ ├── 70_target.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 84_input.wav
│ │ ├── 84_target.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
│ │ ├── 98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
│ │ ├── 98_input.wav
│ │ └── 98_target.wav
│ │ ├── sweep
│ │ ├── sweep-maxell-approach_1.pdf
│ │ ├── sweep-maxell-approach_2.pdf
│ │ └── sweep-maxell-approach_3.pdf
│ │ └── trajectories
│ │ ├── spectra.pdf
│ │ └── trajectories.pdf
└── Simulations
│ ├── playback_losses.pdf
│ └── tape_magnetization.pdf
├── scripts
├── README.md
├── analysis-data-all.sh
├── analysis-data-delay_trajectories.sh
├── analysis-data-hysteresis.sh
├── analysis-data-io.sh
├── analysis-data-pulse_recovery.sh
├── analysis-data-sweeps.sh
├── generate-targets.sh
├── generate-trajectories.sh
├── matplotlibrc
├── sbatch-generate.sh
├── sbatch-simulate-tape.sh
├── sbatch-test-dataset.sh
├── sbatch-train-all.sh
├── sbatch-train-exp1a.sh
├── sbatch-train-exp1b.sh
├── sbatch-train-exp2.sh
├── sbatch-train.sh
├── simulate-playback.sh
├── simulate-tape.sh
├── test-model-all.sh
├── test-model-hysteresis.sh
├── test-model-loss.sh
├── test-model-noise.sh
├── test-model-prediction.sh
├── test-model-sweep.sh
├── train-adversarial.sh
└── train.sh
└── weights
├── 83
└── noise-73000.pt
├── 90
└── toytrajectories-36000.pt
├── 91
└── trajectories-41000.pt
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
├── best.pth
└── running.pth
├── DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST
├── DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
└── best.pth
├── DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST
├── DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
└── best.pth
├── DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_BEST
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
├── best.pth
└── running.pth
├── GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_BEST
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2
├── best.pth
└── running.pth
├── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
├── best.pth
└── running.pth
└── GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST
/.gitignore:
--------------------------------------------------------------------------------
1 | # python
2 | __pycache__/
3 |
4 | # spyder
5 | .spyproject
6 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "code/AnalogTapeModel"]
2 | path = code/AnalogTapeModel
3 | url = https://github.com/jatinchowdhury18/AnalogTapeModel.git
4 | [submodule "code/micro_tcn"]
5 | path = code/micro_tcn
6 | url = https://github.com/csteinmetz1/micro-tcn
7 | [submodule "code/Automated_GuitarAmpModelling"]
8 | path = code/Automated_GuitarAmpModelling
9 | url = https://github.com/Alec-Wright/Automated-GuitarAmpModelling
10 | [submodule "code/GreyBoxDRC"]
11 | path = code/GreyBoxDRC
12 | url = git@github.com:01tot10/GreyBoxDRC.git
13 | [submodule "code/auraloss"]
14 | path = code/auraloss
15 | url = git@github.com:csteinmetz1/auraloss.git
16 | [submodule "code/edm"]
17 | path = code/edm
18 | url = git@github.com:01tot10/edm.git
19 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [TYPECHECK]
2 | generated-members=torch.*
3 |
4 | [MASTER]
5 | disable=consider-using-f-string
6 |
7 | [BASIC]
8 | good-names=x, y, z, fs, fs_OS, t, i, GRU
9 |
10 | [MESSAGES CONTROL]
11 | disable=W0622, R1725, C0103
12 |
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = google
3 | spaces_before_comment = 2,4
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 ✝o✝
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Neural Modeling of Magnetic Tape Recorders
4 |
5 | This repository contains the code for the research article:
6 |
7 | ```
8 | O. Mikkonen, A. Wright, E. Moliner and V. Välimäki, “Neural Modeling Of Magnetic Tape Recorders,”
9 | in Proceedings of the International Conference on Digital Audio Effects (DAFx),
10 | Copenhagen, Denmark, 4-7 September 2023.
11 | ```
12 |
13 | The pre-print of the article can be found [here](https://arxiv.org/abs/2305.16862).
14 | The accompanying web page can be found [here](http://research.spa.aalto.fi/publications/papers/dafx23-neural-tape/).
15 | The datasets can be found [here](https://zenodo.org/record/8026272).
16 |
17 | 
18 |
19 |
20 |
21 | - [NEURAL MODELING OF MAGNETIC TAPE RECORDERS](#neural-modeling-of-magnetic-tape-recorders)
22 | - [SETUP](#setup)
23 | - [USAGE](#usage)
24 | - [CITATION](#citation)
25 | - [ACKNOWLEDGMENTS](#acknowledgments)
26 |
27 | ## SETUP
28 |
29 | Clone the repository and submodules
30 | ```
31 | git clone git@github.com:01tot10/neural-tape-modeling.git
32 | cd neural-tape-modeling
33 | git submodule init && git submodule update
34 | ```
35 |
36 | Create the Python virtual environment with [mamba](https://mamba.readthedocs.io/en/latest/) (preferred) / [conda](https://docs.conda.io/en/latest/)
37 | ```
38 | mamba/conda env create --file environment.yaml
39 | ```
40 |
41 | Download data to symlinked location `audio/ -> ../neural-tape-audio/`
42 | ```
43 | # create a directory for contents
44 | mkdir ../neural-tape-audio
45 | # download and extract toy data
46 | wget -P ../neural-tape-audio 'https://zenodo.org/record/8026272/files/neural-tape-audio_CHOWTAPE.tar'
47 | tar -xzvf ../neural-tape-audio/neural-tape-audio_CHOWTAPE.tar -C ../neural-tape-audio/
48 | # download and extract real data
49 | wget -P ../neural-tape-audio 'https://zenodo.org/record/8026272/files/neural-tape-audio_AKAI.tar'
50 | tar -xzvf ../neural-tape-audio/neural-tape-audio_AKAI.tar -C ../neural-tape-audio/
51 | ```
52 |
53 | Optional: To generate target audio with [CHOWTape](https://github.com/jatinchowdhury18/AnalogTapeModel), a VST instance of the plugin should be compiled. Check instructions in the corresponding repository.
54 |
55 | ## USAGE
56 |
57 | The folder `scripts/` contains the various processing pipelines for interacting with the system, as well as a separate `README.md` with instructions.
58 |
59 | ## CITATION
60 |
61 | Cite the work as follows
62 | ```
63 | @conference{mikkonen_neural_2023,
64 | title = {Neural Modeling of Magnetic Tape Recorders},
65 | booktitle = {Proceedings of the {{International Conference}} on {{Digital Audio Effects}} ({{DAFx}})},
66 | author = {Mikkonen, Otto and Wright, Alec and Moliner, Eloi and V{\"a}lim{\"a}ki, Vesa},
67 | year = {2023},
68 | month = sep,
69 | address = {{Copenhagen, Denmark}}
70 | }
71 | ```
72 |
73 | ## ACKNOWLEDGMENTS
74 |
75 | :black_heart::black_heart::black_heart:
76 | - VST instance of a reel-to-reel tape machine: [CHOWTape](https://github.com/jatinchowdhury18/AnalogTapeModel)
77 | - Python VST wrapper: [pedalboard](https://github.com/spotify/pedalboard)
78 | - Dataloader extended and customized from [microtcn](https://github.com/csteinmetz1/micro-tcn)
79 | - Error-to-signal ratio (ESR) loss from [Automated-GuitarAmpModelling](https://github.com/Alec-Wright/Automated-GuitarAmpModelling)
80 | - ESR loss with DC blocker from [GreyBoxDRC](https://github.com/Alec-Wright/GreyBoxDRC)
81 | - Multi-resolution short-time Fourier transform (STFT) loss from [auraloss](https://github.com/csteinmetz1/auraloss)-library
82 | - Codebase kept clean with [yapf](https://github.com/google/yapf), [isort](https://github.com/pycqa/isort/), [pylint](https://github.com/pylint-dev/pylint) and [beautysh](https://github.com/lovesegfault/beautysh)
83 |
84 | :black_heart::black_heart::black_heart:
85 |
--------------------------------------------------------------------------------
/audio:
--------------------------------------------------------------------------------
1 | ../neural-tape-audio
--------------------------------------------------------------------------------
/code/analysis-playback-head.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Simulate a reel-to-reel tape recorder playback head and plot its loss effects.
5 | Created on Thu Jan 12 14:44:53 2023
6 | @author: 01tot10
7 | """
8 |
9 | #%% Imports
10 |
11 | import argparse
12 | import os
13 |
14 | import matplotlib as mpl
15 | import matplotlib.pyplot as plt
16 | import numpy as np
17 | import scipy
18 | import torch
19 | from matplotlib import ticker
20 |
21 | from tape import Tape
22 | from utilities.utilities import Prettyfier
23 |
24 | #%% Argument parser
25 |
26 | # Initialize tape to get default values
27 | tape = Tape()
28 |
29 | # Add argument parser
30 | parser = argparse.ArgumentParser(
31 |     description='Simulate a reel-to-reel tape recorder playback head.')
32 |
33 | parser.add_argument('--TAPE_V', type=float, default=tape.TAPE_V)
34 | parser.add_argument('--TAPE_DELTA', type=float, default=tape.TAPE_DELTA)
35 | parser.add_argument('--PLAY_D', type=float, default=tape.PLAY_D)
36 | parser.add_argument('--PLAY_G', type=float, default=tape.PLAY_G)
37 | parser.add_argument('--SAVE_FIGS', action='store_true', default=False)
38 | parser.add_argument('--ENABLE_PLAYBACK_LOSS',  # NOTE(review): action is store_false, so passing this flag DISABLES playback loss despite the name
39 |                     action='store_false',
40 |                     default=True)
41 |
42 | args = parser.parse_args()
43 | print(args)
44 |
45 | del tape
46 |
47 | #%% Config
48 |
49 | # global
50 | fs = int(48e3)
51 | OVERSAMPLING = 16  # NOTE(review): unused anywhere in this script
52 | SAVE_PATH = '../results/'
53 | SAVE_FIGS = args.SAVE_FIGS
54 |
55 | # tape
56 | TAPE_V = args.TAPE_V  # tape speed
57 | TAPE_DELTA = args.TAPE_DELTA  # tape thickness
58 |
59 | # head
60 | PLAY_D = args.PLAY_D  # playback head spacing
61 | PLAY_G = args.PLAY_G  # playback head gap width
62 |
63 | # filter
64 | ENABLE_PLAYBACK_LOSS = args.ENABLE_PLAYBACK_LOSS
65 | FIR_order = 2**12  # approximation filter order
66 |
67 | # input
68 | F_IN = 100
69 | T_MAX = 10 * (1 / F_IN)
70 | A = 1.0
71 | ALLOW_SETTLE = True  # Add warmup/settling time before exciting circuits
72 |
73 | #%% Playback Loss
74 |
75 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
76 | torch.inference_mode(True)  # NOTE(review): no-op as a bare statement — this returns a context manager that is never entered; wrap processing in `with torch.inference_mode():` to take effect
77 |
78 | tape = Tape(FIR_order=FIR_order, playback_loss_enable=ENABLE_PLAYBACK_LOSS)
79 |
80 | # Modify internal parameters
81 | tape.TAPE_V = TAPE_V
82 | tape.TAPE_DELTA = TAPE_DELTA
83 | tape.PLAY_D = PLAY_D
84 | tape.PLAY_G = PLAY_G
85 |
86 | # Get playback loss effects
87 | f, k, spacing_loss, thickness_loss, gap_loss, loss, H, h = tape.return_filter()
88 |
89 | # Gain term
90 | gain = tape.PLAY_N * tape.PLAY_W * tape.PLAY_E * tape.TAPE_V * tape.PLAY_MU0 * tape.PLAY_G
91 |
92 | #%% Test Audio
93 |
94 | # time
95 | Ts = 1 / fs
96 | t = np.arange(0, T_MAX, Ts)
97 | if ALLOW_SETTLE is True:
98 |     t = np.hstack((-t[::-1] - Ts, t))  # prepend a mirrored negative-time axis for the settling period
99 |
100 | # Input
101 | x = np.sin(2 * np.pi * F_IN * t)
102 | amp = A * (0.5 +
103 |            0.5 * scipy.signal.sawtooth(2 * np.pi * 1 / T_MAX * t, width=0.5))  # triangular amplitude envelope spanning T_MAX
104 | x = x * amp
105 | if ALLOW_SETTLE is True:
106 |     x[:len(x) // 2] = np.zeros(x[:len(x) // 2].shape)  # silence the settling half of the signal
107 | x = torch.tensor(x, requires_grad=False, device=device,
108 |                  dtype=tape.dtype).expand(1, -1)
109 |
110 | # Process
111 | y = tape.H_play(x)
112 | y /= gain  # normalize by the playback-head gain term computed above
113 |
114 | x, y = x.squeeze().numpy(), y.squeeze().numpy()
115 |
116 | #%% Analyze
117 |
118 | # Settings
119 | FIG_MULTIPLIER = 1.0
120 | COLS = 2
121 | ROWS = 2
122 | RATIO = (1.5, 1)
123 |
124 | # Setup
125 | prettyfier = Prettyfier(COLS, ROWS, ratio=RATIO, size_multiplier=FIG_MULTIPLIER)
126 | prettyfier.font_size *= 1.1
127 |
128 | mpl.rcParams['font.family'] = prettyfier.font_type
129 | mpl.rcParams['font.size'] = prettyfier.font_size
130 | mpl.rcParams['axes.labelsize'] = prettyfier.font_size * 6.0 / 6.0
131 | mpl.rcParams['xtick.labelsize'] = prettyfier.font_size * 5 / 6.0
132 | mpl.rcParams['ytick.labelsize'] = prettyfier.font_size * 5 / 6.0
133 | mpl.rcParams['legend.fontsize'] = prettyfier.font_size * FIG_MULTIPLIER
134 | mpl.rcParams['lines.linewidth'] = prettyfier.line_width
135 |
136 | # Collect signals
137 | titles = ['Spacing Loss', 'Thickness Loss', 'Gap Loss', 'Total Loss']
138 | signals = [spacing_loss, thickness_loss, gap_loss, loss]
139 |
140 | # Gridspec
141 | PLOT_WIDTH = 0.2
142 | width_ratios = [PLOT_WIDTH] * (len(signals) - 1)
143 | gs = plt.GridSpec(2, len(signals) - 1, width_ratios=width_ratios)
144 |
145 | # Colors
146 | prop_cycle = plt.rcParams['axes.prop_cycle']
147 | colors = prop_cycle.by_key()['color']
148 |
149 | # Axis limits
150 | X_LIMS = [20, fs / 2]
151 | Y_LIMS = [-48, 6]
152 |
153 | # Textbox and figure name
154 | params = [
155 |     f"tape_v = {'{:.3f}'.format(TAPE_V / 2.54e-2)}",  # /2.54e-2 converts to inches per second (see 'ips' unit below)
156 |     f"tape_delta = {'{:.2f}'.format(TAPE_DELTA / 1e-6)}",
157 |     f"play_d = {'{:.2f}'.format(PLAY_D / 1e-6)}",
158 |     f"play_g = {'{:.2f}'.format(PLAY_G / 1e-6)}"
159 | ]
160 | units = [
161 |     'ips',  # TAPE_V
162 |     'um',  # TAPE_D
163 |     'um',  # PLAY_V
164 |     'um',  # PLAY_G
165 | ]
166 | textbox = [f"{a} {b}" for a, b in zip(params, units)]
167 | textbox = "\n".join(textbox)
168 | box = {
169 |     "boxstyle": 'round',
170 |     "edgecolor": 'black',
171 |     "facecolor": 'None',
172 |     "alpha": 0.25
173 | }
174 |
175 | #% PLOTTING
176 |
177 | # Loss Effects
178 | fig = plt.figure(0, prettyfier.fig_size)
179 | fig.clf()
180 |
181 | for idx, signal in enumerate(signals):
182 |
183 |     if idx <= 2:  # individual losses
184 |         ax = fig.add_subplot(gs[0, idx])
185 |     else:  # combined loss
186 |         ax = fig.add_subplot(gs[1, :-1])
187 |
188 |     ax.semilogx(f[1:int(FIR_order / 2)],
189 |                 20 * np.log10(signal),
190 |                 color=colors[idx])
191 |
192 |     ax.grid()
193 |     ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
194 |     ax.set_xlabel('Frequency [Hz]')
195 |     if idx in [0, 3]:
196 |         ax.set_ylabel('Magnitude [dB]')
197 |     ax.set_title(titles[idx])
198 |     ax.set_xlim(X_LIMS)
199 |     ax.set_ylim(Y_LIMS)
200 |
201 | fig.text(.7, 0.2, textbox, bbox=box, horizontalalignment='left')
202 | # fig.suptitle('Playback Losses',
203 | #              fontsize=prettyfier.font_size,
204 | #              fontweight='bold')
205 | fig.tight_layout()
206 |
207 | # Time Domain
208 | fig2 = plt.figure(1, prettyfier.fig_size)
209 | fig2.clf()
210 |
211 | ax = fig2.add_subplot(1, 1, 1)
212 | ax.plot(t, x, label='input')
213 | ax.plot(t, y, label='output')
214 |
215 | ax.grid()
216 | ax.set_xlabel('Time [s]')
217 | ax.set_ylabel('Amplitude [1]')
218 | ax.set_ylim([-1.25, 1.25])
219 | ax.legend()
220 |
221 | fig2.suptitle('Time Domain')
222 |
223 | if SAVE_FIGS:
224 |
225 |     fig_name = "fig_playback_losses"
226 |     fig_name = f"{fig_name}_[{'_'.join(params).replace(' ', '').upper()}]"
227 |     full_name = os.path.join(SAVE_PATH, f"{fig_name}.pdf")
228 |
229 |     print(f'Saving {full_name} ...')
230 |     fig.savefig(full_name, format='pdf')
231 |
232 |     fig_name = "fig_playback_losses_timedomain"
233 |     fig_name = f"{fig_name}_[{'_'.join(params).replace(' ', '').upper()}]"
234 |     full_name = os.path.join(SAVE_PATH, f"{fig_name}.pdf")
235 |
236 |     print(f'Saving {full_name} ...')
237 |     fig2.savefig(full_name, format='pdf')
238 |
239 |     plt.close('all')
--------------------------------------------------------------------------------
/code/datasets_diffusion.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | @author: eloimoliner, 01tot10
5 | """
6 |
7 | #%% Imports
8 |
9 | import glob
10 | import os
11 | import random
12 |
13 | import numpy as np
14 | import soundfile as sf
15 | import torch
16 |
17 | #%% Classes
18 |
19 |
20 | class TapeHissdset(torch.utils.data.IterableDataset):
21 |
22 |     def __init__(self, dset_args, overfit=False, seed=42):
23 |         """
24 |         Infinite IterableDataset yielding random mono float32 segments drawn from "target*.wav" files under dset_args.path.
25 |         """
26 |         super().__init__()
27 |
28 |         # Attributes
29 |         self.seg_len = int(dset_args.seg_len)  # segment length in samples
30 |         self.fs = dset_args.fs  # expected sample rate of the source files
31 |         self.overfit = overfit  # NOTE(review): stored but never read in this class — verify against callers
32 |
33 |         # Seed the random
34 |         random.seed(seed)
35 |         np.random.seed(seed)
36 |
37 |         # Read files
38 |         path = dset_args.path
39 |         orig_p = os.getcwd()
40 |         os.chdir(path)  # chdir so glob matches bare filenames; cwd restored below
41 |         filelist = glob.glob("target*.wav")
42 |         filelist = [os.path.join(path, f) for f in filelist]
43 |         os.chdir(orig_p)
44 |         assert len(
45 |             filelist) > 0, "error in dataloading: empty or nonexistent folder"
46 |         self.train_samples = filelist
47 |
48 |     def __iter__(self):
49 |         while True:  # infinite stream; the consumer decides when to stop drawing
50 |             num = random.randint(0, len(self.train_samples) - 1)
51 |             file = self.train_samples[num]
52 |             data, samplerate = sf.read(file)
53 |
54 |             assert samplerate == self.fs, "wrong sampling rate"
55 |             data_clean = data
56 |
57 |             # stereo to mono
58 |             if len(data.shape) > 1:
59 |                 data_clean = np.mean(data_clean, axis=1)
60 |
61 |             ## Normalization
62 |             # no normalization!!
63 |             # data_clean=data_clean/np.max(np.abs(data_clean))
64 |             # normalize mean
65 |             # data_clean-=np.mean(data_clean, axis=-1)
66 |
67 |             # draw one random seg_len-sample window from the file
68 |             idx = np.random.randint(0, len(data_clean) - self.seg_len)
69 |             segment = data_clean[idx:idx + self.seg_len]
70 |             segment = segment.astype('float32')
71 |             segment -= np.mean(segment, axis=-1)  # remove the segment's DC offset
72 |
73 |             yield segment
74 |
75 |
class TapeHissTest(torch.utils.data.Dataset):
    """Map-style test set: `num_samples` random crops of a fixed input file."""

    def __init__(self,
                 dset_args,
                 fs=44100,
                 seg_len=131072,
                 num_samples=4,
                 seed=42):
        """
        torch.utils.data.Dataset subclass

        Parameters
        ----------
        dset_args : object
            Config with attribute `test.path` pointing to the test folder.
        fs : int, optional
            Expected sampling rate (stored; not enforced against files).
        seg_len : int, optional
            Crop length in samples.
        num_samples : int, optional
            Number of crops drawn from the test file.
        seed : int, optional
            Seed for both `random` and `numpy.random`.
        """
        super().__init__()

        # Attributes
        self.seg_len = int(seg_len)
        self.fs = fs

        # Seed the random
        random.seed(seed)
        np.random.seed(seed)

        # Read files
        path = dset_args.test.path
        print(path)
        test_file = os.path.join(path, "input_3_.wav")
        self.test_samples = []
        self.filenames = []
        self._fs = []
        for _ in range(num_samples):
            file = test_file
            self.filenames.append(os.path.basename(file))
            data, samplerate = sf.read(file)
            data = data.T    # (channels, samples) after transpose
            self._fs.append(samplerate)
            if data.shape[-1] > self.seg_len:
                # Random crop start within the file.
                idx = np.random.randint(0, data.shape[-1] - self.seg_len)
            else:
                # Tile short files up to seg_len. This branch also covers
                # the exact-length case, where the original
                # np.random.randint(0, 0) raised ValueError.
                if data.shape[-1] < self.seg_len:
                    data = np.tile(data, self.seg_len // data.shape[-1] + 1)
                idx = 0
            data = data[..., idx:idx + self.seg_len]

            self.test_samples.append(data)  # use only `seg_len`

    def __getitem__(self, idx):
        """Return (audio, sampling_rate, filename) for crop `idx`."""
        return self.test_samples[idx], self._fs[idx], self.filenames[idx]

    def __len__(self):
        """Number of stored crops."""
        return len(self.test_samples)
127 |
128 |
class ToyTrajectories(torch.utils.data.IterableDataset):
    """Iterable dataset yielding random mono segments from toy-trajectory wavs."""

    def __init__(self, dset_args, overfit=False, seed=42):
        """
        torch.utils.data.IterableDataset subclass

        Parameters
        ----------
        dset_args : object
            Config with attributes `seg_len`, `fs` and `path`.
        overfit : bool, optional
            Stored for callers; not used inside this class.
        seed : int, optional
            Seed for both `random` and `numpy.random`.
        """
        super().__init__()

        # Attributes
        self.overfit = overfit
        self.seg_len = int(dset_args.seg_len)
        self.fs = dset_args.fs

        # Seed the random
        random.seed(seed)
        np.random.seed(seed)

        # Collect wav files without touching the process-wide CWD
        # (the original os.chdir dance is global state and not
        # exception-safe). sorted() makes the file order deterministic.
        path = dset_args.path
        filelist = sorted(glob.glob(os.path.join(path, "*.wav")))
        assert len(
            filelist) > 0, "error in dataloading: empty or nonexistent folder"
        self.train_samples = filelist

    def __iter__(self):
        """Yield an endless stream of zero-mean float32 segments."""
        while True:
            num = random.randint(0, len(self.train_samples) - 1)
            file = self.train_samples[num]
            data, samplerate = sf.read(file)
            assert samplerate == self.fs, "wrong sampling rate"
            data_clean = data

            # stereo to mono
            if len(data.shape) > 1:
                data_clean = np.mean(data_clean, axis=1)

            # Normalization intentionally disabled (kept from original).

            for _ in range(8):
                # get 8 random segments per file read to be a bit faster.
                # max(1, ...) avoids randint(0, 0) when the file length
                # equals seg_len exactly.
                idx = np.random.randint(
                    0, max(1, len(data_clean) - self.seg_len))
                segment = data_clean[idx:idx + self.seg_len]
                segment = segment.astype('float32')
                segment -= np.mean(segment, axis=-1)

                yield segment
184 |
class TestTrajectories(torch.utils.data.Dataset):
    """Map-style test set: `num_samples` random crops of the first wav file."""

    def __init__(self,
                 dset_args,
                 fs=44100,
                 seg_len=131072,
                 num_samples=4,
                 seed=42):
        """
        torch.utils.data.Dataset subclass

        Parameters
        ----------
        dset_args : object
            Config with attribute `test.path` pointing to the test folder.
        fs : int, optional
            Expected sampling rate (stored; not enforced against files).
        seg_len : int, optional
            Crop length in samples.
        num_samples : int, optional
            Number of crops drawn from the test file.
        seed : int, optional
            Seed for both `random` and `numpy.random`.
        """
        super().__init__()

        # Attributes
        self.fs = fs
        self.seg_len = int(seg_len)

        # Seed the random
        random.seed(seed)
        np.random.seed(seed)

        # Read files. sorted() makes the chosen test file deterministic
        # (bare glob order is filesystem-dependent), the assert turns a
        # cryptic IndexError on an empty folder into a clear message, and
        # joining the path directly avoids the original os.chdir dance.
        path = dset_args.test.path
        print(path)
        filelist = sorted(glob.glob(os.path.join(path, "*.wav")))
        assert len(
            filelist) > 0, "error in dataloading: empty or nonexistent folder"
        test_file = filelist[0]

        self.test_samples = []
        self.filenames = []
        self._fs = []
        for _ in range(num_samples):
            file = test_file
            self.filenames.append(os.path.basename(file))
            data, samplerate = sf.read(file)
            data = data.T    # (channels, samples) after transpose
            self._fs.append(samplerate)
            if data.shape[-1] > self.seg_len:
                # Random crop start within the file.
                idx = np.random.randint(0, data.shape[-1] - self.seg_len)
            else:
                # Tile short files up to seg_len; also covers the
                # exact-length case where the original randint(0, 0)
                # raised ValueError.
                if data.shape[-1] < self.seg_len:
                    data = np.tile(data, self.seg_len // data.shape[-1] + 1)
                idx = 0
            data = data[..., idx:idx + self.seg_len]

            self.test_samples.append(data)  # use only `seg_len`

    def __getitem__(self, idx):
        """Return (audio, sampling_rate, filename) for crop `idx`."""
        return self.test_samples[idx], self._fs[idx], self.filenames[idx]

    def __len__(self):
        """Number of stored crops."""
        return len(self.test_samples)
242 |
--------------------------------------------------------------------------------
/code/evaluation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | @author: Alec-Wright
5 | """
6 |
7 | #%% Imports
8 |
9 | import torch
10 | import torch.nn.functional as F
11 | from GreyBoxDRC import loss_funcs
12 | from torch import nn
13 | from utilities.utilities import TimeFreqConverter
14 |
15 | #%% Classes
16 |
17 |
class val_loss_supervised(nn.Module):
    """ Supervised validation loss.

    Aggregates several distances between model output and target audio:
    multi-scale STFT magnitude / log-magnitude L1 losses, mel and
    log-mel spectrogram L1 losses, ESR (with and without DC-removing
    pre-filter) and MSE. Running sums over a validation round are kept
    in `self.losses`; the best per-metric averages seen so far (and the
    step at which they occurred) live in `self.bst_losses`.
    """

    # Single source of truth for the metric names (the original repeated
    # this set as a dict literal three times).
    LOSS_KEYS = ('ms_spec_loss', 'ms_log_spec_loss', 'mel_spec_loss',
                 'log_mel_spec_loss', 'ESR', 'MSE', 'ESRDCPre')

    def __init__(self,
                 device='cpu',
                 spec_scales=(2048, 1024, 512, 256, 128, 64)):
        """
        Parameters
        ----------
        device : str or torch.device, optional
            Device to place the spectrogram converters on.
        spec_scales : sequence of int, optional
            FFT sizes for the multi-scale STFT losses. The default is a
            tuple to avoid the shared-mutable-default pitfall of the
            original list argument.
        """
        super().__init__()
        self.spec_scales = spec_scales
        self.specs = [
            TimeFreqConverter(n_fft=scale,
                              hop_length=scale // 4,
                              win_length=scale,
                              sampling_rate=44100,
                              n_mel_channels=160).to(device)
            for scale in self.spec_scales
        ]
        self.mel_spec = TimeFreqConverter(n_fft=2048,
                                          hop_length=2048 // 4,
                                          win_length=2048,
                                          sampling_rate=44100,
                                          n_mel_channels=160).to(device)

        self.ESR = loss_funcs.ESRLoss(dc_pre=False)
        self.ESRDCPre = loss_funcs.ESRLoss(dc_pre=True)
        self.MSE = nn.MSELoss()

        # Floor for log-magnitudes; avoids log10(0).
        self.log_eps = 1e-5
        self.losses = dict.fromkeys(self.LOSS_KEYS, 0)
        # Per metric: [step of best average, best average] (1e9 sentinel).
        self.bst_losses = {key: [0, 1e9] for key in self.LOSS_KEYS}
        self.iter_count = 0

    def forward(self, output, target):
        """ Compute all metrics for one (output, target) batch.

        Returns a dict keyed by `LOSS_KEYS`.
        """

        losses = dict.fromkeys(self.LOSS_KEYS, 0)

        # Multi-scale STFT magnitude and log-magnitude losses.
        for spec in self.specs:
            magx = spec(output, mel=False)
            magy = spec(target, mel=False)
            losses['ms_spec_loss'] += F.l1_loss(magx, magy)

            logx = torch.log10(torch.clamp(magx, self.log_eps))
            logy = torch.log10(torch.clamp(magy, self.log_eps))
            losses['ms_log_spec_loss'] += F.l1_loss(logx, logy)

        # Mel and log-mel losses (mel=True returns a (mag, mel) pair).
        _, melx = self.mel_spec(output, mel=True)
        _, mely = self.mel_spec(target, mel=True)
        losses['mel_spec_loss'] = F.l1_loss(melx, mely)

        logmelx = torch.log10(torch.clamp(melx, min=self.log_eps))
        logmely = torch.log10(torch.clamp(mely, min=self.log_eps))
        losses['log_mel_spec_loss'] = F.l1_loss(logmelx, logmely)

        # Time-domain losses expect a channel dimension.
        output, target = output.unsqueeze(1), target.unsqueeze(1)
        losses['ESR'] = self.ESR(output, target)
        losses['ESRDCPre'] = self.ESRDCPre(output, target)
        losses['MSE'] = self.MSE(output, target)

        return losses

    def add_loss(self, output, target):
        """ Accumulate one batch worth of metrics into `self.losses`. """
        if self.iter_count == 0:
            # First batch of a validation round: reset the accumulators.
            self.losses = dict.fromkeys(self.LOSS_KEYS, 0)

        losses = self(output, target)
        for key in losses:
            self.losses[key] += losses[key]
        self.iter_count += 1

    def end_val(self, cur_step):
        """ Close a validation round: update best averages, reset counter. """
        if self.iter_count == 0:
            return  # nothing accumulated; avoid division by zero
        for key in self.losses:
            loss = self.losses[key] / self.iter_count
            if loss < self.bst_losses[key][1]:
                self.bst_losses[key] = [cur_step, loss]
        self.iter_count = 0
127 |
128 |
129 | #%% Methods
130 |
131 |
def multi_mag_loss(output,
                   target,
                   fft_sizes=(2048, 1024, 512, 256, 128, 64),
                   log=False):
    """ Multi-resolution magnitude response loss.

    Evaluates `mag_spec_loss` at each FFT size (hop = fft // 4) and
    returns a dict keyed by FFT size, plus a 'total' entry with the sum.
    """
    losses = {}
    for n_fft in fft_sizes:
        losses[n_fft] = mag_spec_loss(output, target, n_fft, n_fft // 4,
                                      log).item()
    losses['total'] = sum(losses.values())
    return losses
145 |
146 |
def mag_spec_loss(output, target, fft_size=512, hop_size=128, log=False):
    """ Magnitude response loss.

    L1 distance between the STFT magnitudes of `output` and `target`.

    Parameters
    ----------
    output, target : torch.Tensor
        Audio tensors accepted by `torch.stft` (1D or batched 2D).
    fft_size : int, optional
        FFT size.
    hop_size : int, optional
        Hop length in samples.
    log : bool, optional
        When True, compare clamped log10-magnitudes instead. Added as a
        backward-compatible fix: `multi_mag_loss` forwards a `log`
        argument, which previously raised TypeError here.

    Returns
    -------
    torch.Tensor
        Scalar L1 loss.
    """

    magx = torch.abs(
        torch.stft(output,
                   n_fft=fft_size,
                   hop_length=hop_size,
                   return_complex=True))
    magy = torch.abs(
        torch.stft(target,
                   n_fft=fft_size,
                   hop_length=hop_size,
                   return_complex=True))

    if log:
        # Clamp before log10 to avoid log(0); the 1e-5 floor matches
        # val_loss_supervised.log_eps.
        magx = torch.log10(torch.clamp(magx, min=1e-5))
        magy = torch.log10(torch.clamp(magy, min=1e-5))

    return F.l1_loss(magx, magy)
162 |
--------------------------------------------------------------------------------
/code/generate-inputs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 26 11:20:41 2023

@author: 01tot10

Generate test-signal wav files (sines, pulse trains or log sweeps) into
../results/Signals[_<DESCRIPTIVE_NAME>]/Train.
"""

#%% Imports

import argparse
import os
import sys

import numpy as np
# Import the scipy submodules explicitly; a bare `import scipy` is not
# guaranteed to expose scipy.signal / scipy.stats.
import scipy.signal
import scipy.stats
import soundfile as sf

#%% Argument parser

# Add argument parser
parser = argparse.ArgumentParser(description='Generate input files.')

# GLOBAL
parser.add_argument('--DRY_RUN', action='store_true', default=False)
parser.add_argument('--DESCRIPTIVE_NAME', type=str, default=None)

# SIGNALS
parser.add_argument('--FRAME_RATE', type=int, default=int(44100))
parser.add_argument('--N_SIGNALS', type=int, default=int(100))
parser.add_argument('--SEGMENT_LENGTH', type=float, default=float(1 * 5))
parser.add_argument('--TYPE', type=str, default="sine")
parser.add_argument('--FREQUENCY', type=float, default=None)
parser.add_argument('--AMPLITUDE', type=float, default=None)
parser.add_argument('--ENABLE_ENVELOPE', action='store_true', default=False)

args = parser.parse_args()

print("\nArguments:")
print(args)

assert args.TYPE.lower() in ["sine", "pulse", "sweep"
                            ], "Choose either sine, pulse or sweep as TYPE"

#%% Config

# Global
RESULTS_PATH = '../results/'
DRY_RUN = args.DRY_RUN

# Dataset
fs = args.FRAME_RATE
N_SIGNALS = args.N_SIGNALS
SEGMENT_LENGTH = args.SEGMENT_LENGTH
DATASET_NAME = 'Signals'  # results subdirectory
DESCRIPTIVE_NAME = args.DESCRIPTIVE_NAME

# SIGNALS
TYPE = args.TYPE
AMPLITUDE = args.AMPLITUDE
FREQUENCY = args.FREQUENCY
F_MIN = 1
F_MAX = 20000
ENABLE_ENVELOPE = args.ENABLE_ENVELOPE

#%% Setup

rng = np.random.default_rng()

#%% Process

# Time axis for one segment.
t = np.arange(0, int(SEGMENT_LENGTH * fs)) / fs

# Triangle amplitude envelope spanning the segment (0..1..0).
amplitude_env = (
    0.5 +
    0.5 * scipy.signal.sawtooth(2 * np.pi * 1 / SEGMENT_LENGTH * t, width=0.5))
# NOTE(review): AMPLITUDE is parsed with type=float, so this string
# branch (stepped dB sweep across signals) is only reachable when
# AMPLITUDE is set from code rather than the CLI -- confirm intent.
if isinstance(AMPLITUDE, str):
    START_AMPLITUDE_DB = -54
    END_AMPLITUDE_DB = 0
    amplitude_increment_dB = (END_AMPLITUDE_DB -
                              START_AMPLITUDE_DB) / (N_SIGNALS - 1)

# Results Path
if DESCRIPTIVE_NAME:
    DATASET_NAME = f"{DATASET_NAME}_{DESCRIPTIVE_NAME}"
results_path = os.path.join(RESULTS_PATH, DATASET_NAME, "Train")
if os.path.exists(results_path):
    print(f"Path {results_path} exists ...")
else:
    os.makedirs(results_path)

for signal_idx in range(N_SIGNALS):
    sys.stdout.write(f"Generating {signal_idx+1}/{N_SIGNALS}...")
    sys.stdout.flush()

    # Get signal properties: fixed amplitude, random, or stepped dB sweep.
    if isinstance(AMPLITUDE, float):
        amplitude = AMPLITUDE
    elif AMPLITUDE is None:
        amplitude = rng.random()
    else:
        amplitude_dB = START_AMPLITUDE_DB + signal_idx * amplitude_increment_dB
        amplitude = 10**(amplitude_dB / 20)
        print(
            f"signal_idx: {signal_idx}, amplitude(lin): {amplitude}, amplitude(dB): {20*np.log10(amplitude)}"
        )

    # Fixed frequency, or log-uniform random draw mapped to [F_MIN, F_MAX].
    if FREQUENCY:
        frequency = FREQUENCY
    else:
        frequency = F_MIN + (scipy.stats.loguniform.rvs(
            1e-3, 1.0, size=1)[0]) * (F_MAX - F_MIN)

    # Generate signal
    if TYPE.lower() == "sine":
        signal = amplitude * np.sin(2 * np.pi * frequency * t)
    elif TYPE.lower() == "pulse":
        # Unit impulses at every full period; sample 0 excluded, matching
        # the original loop's `n > 0` condition (vectorized here).
        period_n = int((1.0 / frequency) * fs)
        signal = np.zeros_like(t)
        signal[period_n::period_n] = 1.0
    else:
        signal = amplitude * scipy.signal.chirp(t,
                                                f0=F_MIN,
                                                f1=F_MAX,
                                                t1=SEGMENT_LENGTH,
                                                method='logarithmic',
                                                phi=-90)

    if ENABLE_ENVELOPE:
        signal *= amplitude_env
        # Normalize by the peak *magnitude* so the requested amplitude is
        # retained for asymmetric signals too (the original divided by the
        # signed maximum).
        signal = signal / np.max(np.abs(signal)) * amplitude

    # Save
    if not DRY_RUN:
        sys.stdout.write(" Saving...")

        # Build a descriptive filename piece by piece.
        filename = f"input_{signal_idx}"
        filename = f"{filename}_{TYPE.lower()}"
        filename = f"{filename}_F[{'{:d}'.format(int(frequency))}]"
        # filename = f"{filename}_A[{('{:.3f}'.format(amplitude)).replace('.','_')}]"  # linear
        filename = f"{filename}_A[{int(20*np.log10(amplitude))}dB]"  # log

        filepath = os.path.join(results_path, f"{filename}.wav")

        sf.write(filepath, signal, fs, subtype='FLOAT')

    sys.stdout.write(" Done! \n")
    sys.stdout.flush()
152 |
--------------------------------------------------------------------------------
/code/generate-targets.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Wed Jan 18 14:31:58 2023
5 |
6 | @author: 01tot10
7 | """
8 |
9 | #%% Imports
10 |
11 | import argparse
12 | import os
13 | import sys
14 | import time
15 |
16 | import numpy as np
17 | import soundfile as sf
18 | import torch
19 | from torch.utils.data import DataLoader
20 |
21 | from dataset import VADataset
22 | from tape import Tape, VSTWrapper
23 |
24 | print("=" * 10, " SCRIPT START ", "=" * 10)
25 |
26 | #%% Argument parser
27 |
28 |
def none_or_int(value):
    """ Parse NoneType or int input arguments from CLI """
    return None if value == 'None' else int(value)
34 |
35 |
# Add argument parser
parser = argparse.ArgumentParser(
    description='Generate target files using VA reel-to-reel tape recorder.')

# GLOBAL
parser.add_argument('--DRY_RUN', action='store_true', default=False)

# DATASET
parser.add_argument('--DATASET', type=str, default="ReelToReel_Dataset_Mini")
parser.add_argument('--SUBSET', type=str, default="Train")
parser.add_argument('--DESCRIPTIVE_NAME', type=str, default=None)
parser.add_argument('--FRACTION', type=float, default=1.0)
parser.add_argument('--SEGMENT_LENGTH', type=none_or_int,
                    default=None)  # in samples
# NOTE(review): store_true with default=True means this flag is always
# True and cannot be disabled from the CLI -- confirm intent.
parser.add_argument('--PRELOAD', action='store_true', default=True)
parser.add_argument('--NO_SHUFFLE', action='store_true', default=False)

## TAPE
parser.add_argument('--BACKEND', type=str, default="VST")

# VST PARAMS
parser.add_argument('--TAPE_DISABLE', action='store_true', default=False)
parser.add_argument('--TAPE_SATURATION', type=float, default=0.5)
parser.add_argument('--TAPE_DRIVE', type=float, default=0.5)
parser.add_argument('--TAPE_BIAS', type=float, default=0.5)

# DSP PARAMS
# NOTE(review): same always-True store_true/default=True pattern as
# --PRELOAD above.
parser.add_argument('--BIAS_ENABLE', action='store_true', default=True)
parser.add_argument('--DELAY_ENABLE', action='store_true', default=False)
parser.add_argument('--WOW_FLUTTER_ENABLE', action='store_true', default=False)
parser.add_argument('--PLAYBACK_LOSS_ENABLE',
                    action='store_true',
                    default=False)
parser.add_argument('--STARTUP_ENABLE', action='store_true', default=False)
parser.add_argument('--SIGNAL_AMPLITUDE', type=float, default=10e-3)
parser.add_argument('--BIAS_AMPLITUDE', type=float, default=50e-3)

args = parser.parse_args()

print("\nArguments:")
print(args)

assert args.BACKEND in ["VST",
                        "Python"], "Choose either VST or Python for Backend"

#%% Config

script_path = os.path.dirname(__file__)

# global
fs = int(44.1e3)  # sampling rate [Hz]
BATCH_SIZE = 1
DRY_RUN = args.DRY_RUN

# results
RESULTS_PATH = '../results/'
DESCRIPTIVE_NAME = args.DESCRIPTIVE_NAME

## tape
BACKEND = args.BACKEND
OVERSAMPLING = 16
DELAY_ENABLE = args.DELAY_ENABLE

# VST: path to the CHOWTapeModel plugin build used by the VST backend
VST_PATH = os.path.join(
    script_path,
    'AnalogTapeModel/Plugin/build/CHOWTapeModel_artefacts/Release/VST3/CHOWTapeModel.vst3'
)

TAPE_DISABLE = args.TAPE_DISABLE
TAPE_SATURATION = args.TAPE_SATURATION
TAPE_BIAS = args.TAPE_BIAS
TAPE_DRIVE = args.TAPE_DRIVE

WOW_FLUTTER_ENABLE = args.WOW_FLUTTER_ENABLE

# DSP (Python backend only)
BIAS_ENABLE = args.BIAS_ENABLE

PLAYBACK_LOSS_ENABLE = args.PLAYBACK_LOSS_ENABLE
STARTUP_ENABLE = args.STARTUP_ENABLE
SIGNAL_AMPLITUDE = args.SIGNAL_AMPLITUDE
BIAS_AMPLITUDE = args.BIAS_AMPLITUDE

# dataset
AUDIO_PATH = "../audio/"
PRELOAD = args.PRELOAD
SHUFFLE = not args.NO_SHUFFLE
DATASET = args.DATASET
SUBSET = args.SUBSET
INPUT_ONLY = True
FRACTION = args.FRACTION
SEGMENT_LENGTH = args.SEGMENT_LENGTH

#%% Setup

dataset_path = os.path.join(AUDIO_PATH, DATASET)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print(f"\nUsing device: {device}")
print(f"Batch size: {BATCH_SIZE}")

# Instantiate the tape model: VST wrapper around CHOWTapeModel, or the
# pure-Python Tape implementation.
print(f"\ninitializing DSP... Using backend {BACKEND}")
if BACKEND == "VST":
    tape = VSTWrapper(VST_PATH,
                      fs,
                      oversampling=OVERSAMPLING,
                      wow_flutter_enable=WOW_FLUTTER_ENABLE,
                      tape_enable=not TAPE_DISABLE,
                      tape_drive=TAPE_DRIVE,
                      tape_saturation=TAPE_SATURATION,
                      tape_bias=TAPE_BIAS)
else:
    tape = Tape(batch_size=BATCH_SIZE,
                fs=fs,
                oversampling=OVERSAMPLING,
                signal_amplitude=SIGNAL_AMPLITUDE,
                bias_amplitude=BIAS_AMPLITUDE,
                bias_enable=BIAS_ENABLE,
                delay_enable=DELAY_ENABLE,
                playback_loss_enable=PLAYBACK_LOSS_ENABLE,
                startup_enable=STARTUP_ENABLE)

# Data
print("initializing dataset...\n")
dataset = VADataset(dataset_path,
                    subset=SUBSET,
                    double=True,
                    length=SEGMENT_LENGTH,
                    fraction=FRACTION,
                    input_only=INPUT_ONLY,
                    shuffle=SHUFFLE,
                    preload=PRELOAD)
assert dataset.fs == tape.fs, "Model and dataset sampling rates don't match!"

dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False)
172 |
#%% Process

# Results path: <RESULTS_PATH>/<dataset name>[_<DESCRIPTIVE_NAME>]/<Subset>
dataset_name = os.path.basename(dataset.data_dir)
if DESCRIPTIVE_NAME:
    dataset_name = f"{dataset_name}_{DESCRIPTIVE_NAME}"
results_path = os.path.join(RESULTS_PATH, dataset_name,
                            dataset.subset.capitalize())
if os.path.exists(results_path):
    print(f"Path {results_path} exists ...")
else:
    os.makedirs(results_path)

# Initializations
target_size = len(dataset) * dataset.length / fs    # total audio to make [s]
generated_size = 0.0                                # audio generated so far [s]
time_per_batch = 0.0
start = time.time()

print("\nProcessing start!")
for batch_idx, batch in enumerate(dataloader):
    sys.stdout.write(f"Processing batch {batch_idx+1} / {len(dataloader)}... ")

    # Retrieve batch (renamed from `input` to avoid shadowing the builtin)
    input_batch, meta = batch
    input_batch = input_batch.to(device)
    filename_batch = meta['input_name']

    # Process through the tape model
    target_batch = tape(input_batch)

    end = time.time()
    sys.stdout.write("Done!")

    # Stats
    generated_size += input_batch.shape[0] * input_batch.shape[-1] / fs
    time_per_batch = (end - start) / (batch_idx + 1)
    real_time_factor = (np.prod(input_batch.shape) / fs) / time_per_batch
    batches_left = len(dataloader) - (batch_idx + 1)
    time_left = time_per_batch * batches_left

    sys.stdout.write(
        f" Average time/batch {'{:.3f}'.format(time_per_batch)} s, ")
    sys.stdout.write(
        f"Generated {'{:.3f}'.format(generated_size / 60)} min of {'{:.3f}'.format(target_size / 60)} min, "
    )
    sys.stdout.write(
        f"Time left ~ {'{:.3f}'.format(time_left / 60)} min (RT factor: {'{:.3f}'.format(real_time_factor)})"
    )
    sys.stdout.flush()

    if not DRY_RUN:
        # Save
        sys.stdout.write(" Saving results...")

        # Move tensors to CPU before converting: .numpy() on a CUDA
        # tensor raises TypeError (bug in the original when device=cuda).
        inputs = input_batch.detach().cpu().numpy()
        targets = target_batch.detach().cpu().numpy()
        for input_np, fname, target_np in zip(inputs, filename_batch, targets):
            # Re-use the numbered suffix of the source file for the outputs.
            inputname = f"input_{fname.split('_', 1)[1]}"
            filepath = os.path.join(results_path, inputname)
            sf.write(filepath, input_np.T, fs, subtype='FLOAT')

            targetname = f"target_{fname.split('_', 1)[1]}"
            filepath = os.path.join(results_path, targetname)
            sf.write(filepath, target_np.T, fs, subtype='FLOAT')
        sys.stdout.write(" Done!\n")
    else:
        sys.stdout.write("\n")
    sys.stdout.flush()

print("Processing finished!")

print("=" * 10, " SCRIPT END ", "=" * 10)
246 |
--------------------------------------------------------------------------------
/code/matplotlibrc:
--------------------------------------------------------------------------------
1 | #### MATPLOTLIBRC FORMAT
2 |
3 | axes.prop_cycle: cycler('color', ['darkgrey', 'red', 'green', 'orange', 'grey',])
4 |
--------------------------------------------------------------------------------
/code/normalize-loudness.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Tue Jan 18 14:42:23 2022
5 |
6 | @author: 01tot10
7 | """
8 |
9 | #%% IMPORTS
10 |
11 | import glob
12 | import os
13 | import re
14 |
15 | import numpy as np
16 | import pyloudnorm as pyln
17 | import soundfile as sf
18 | from natsort import index_natsorted, order_by_index
19 |
#%% CONFIG

# global
VERBOSE = False  # print per-file loudness details
SAVE_MODE = False  # write normalized audio back to disk
RETAIN_BALANCE = False  # normalize loudness relative to output
INCLUDE_INPUT = True  # include input audio in normalization

# audio
AUDIO_PATH = "../results/"
AUDIO = "EXP1B_ToyData/predictions"

# analysis
TARGET_LUFS = -23  # target LUFS

#%% SETUP

input_path = os.path.join(AUDIO_PATH, AUDIO)

# get all filenames. Raw strings fix the original '([\[\]])' pattern,
# whose invalid escape sequences raise SyntaxWarning on Python >= 3.12.
search_dir = re.sub(r'([\[\]])', r'[\1]', input_path)  # escape [ and ]
filenames = glob.glob(os.path.join(search_dir, "*.wav"))
42 |
43 | #%% PROCESS
44 |
45 |
def get_number(string):
    """
    Get a number from `string` representing the groups of inputs/outputs/predictions.

    Parameters
    ----------
    string : string
        Filename.

    Returns
    -------
    match : int
        Extracted number.

    """
    base = os.path.basename(string)
    return int(base[:base.index('_')])
64 |
65 |
def get_target(string):
    """
    Get the portion from `string` defining the target

    Parameters
    ----------
    string : str
        Model full string.

    Returns
    -------
    trainset : str
        Portion of string defining the target.

    Raises
    ------
    AttributeError
        If `string` contains no 'T[...]_D' section (re.search returns None).
    """
    # Raw string fixes the original pattern's invalid escape sequences
    # ('\[' etc.), which raise SyntaxWarning on Python >= 3.12.
    match = re.search(r'T\[(.*)\]_D', string).group(1)
    return match
83 |
84 |
# sorting: natural (human) order so file 2 precedes file 10
natindex = index_natsorted(filenames)
filenames = order_by_index(filenames, natindex)

# take outputs
output_names = [item for item in filenames if 'target' in item]

# create groups: all files sharing the target file's numeric prefix
groups = []
for output_name in output_names:

    # define current group
    result_number = get_number(output_name)  # by file number

    # get current group. Bug fix: the original compared with `is`, which
    # is identity, not equality -- it only "works" for small interned
    # ints (typically -5..256) and silently breaks for larger numbers.
    group = [item for item in filenames if result_number == get_number(item)]

    # exclude input
    if not INCLUDE_INPUT:
        group = [item for item in group if 'input' not in item]

    # appends to groups
    groups.append(group)
108 |
#%% LOUDNESS

## Normalize loudness
groups_audio = []

# iterate over output_names: one loudness reference per group
print("Processing ...")
for idx, output_name in enumerate(output_names):
    print()
    # NOTE(review): stripping a leading '/' turns an absolute path into
    # a relative one -- presumably matches how these names were globbed;
    # confirm against the glob pattern above.
    filename = output_name[1:] if output_name[
        0] == '/' else output_name  # fix filename
    print(filename)

    # read and analyze the group's target file
    data, rate = sf.read(filename) # load audio (with shape (samples, channels))
    meter = pyln.Meter(rate) # create BS.1770 meter
    loudness_orig = meter.integrated_loudness(data) # measure loudness

    # compute difference in loudness and linear correction gain
    loudness_difference_db = TARGET_LUFS - loudness_orig
    gain = 10**(loudness_difference_db / 20)

    if VERBOSE:
        print(f"Loudness (original): {np.round(loudness_orig, 3)}")
        print(
            f"Loudness difference (dB): {np.round(loudness_difference_db, 3)}")
        print(f"Gain: {np.round(gain, 3)}")

    # normalize audio beloging to same group
    group_audio = []

    for group_item in groups[idx]:

        filename = group_item[1:] if group_item[
            0] == '/' else group_item  # fix filename
        if VERBOSE:
            print(filename)

        # read and normalize
        data, rate = sf.read(
            filename)  # load audio (with shape (samples, channels))

        # normalize
        if RETAIN_BALANCE:  # relate to output
            # apply the gain derived from this group's target file
            data_processed = gain * data
        else:  # individually
            # normalize each file to TARGET_LUFS on its own
            loudness = meter.integrated_loudness(data)
            data_processed = pyln.normalize.loudness(data, loudness,
                                                     TARGET_LUFS)

        # measure loudness (for reporting only)
        loudness_processed = meter.integrated_loudness(data_processed)

        if VERBOSE:
            print(f"Loudness (processed): {np.round(loudness_processed, 3)}")

        # add to group
        group_audio.append(data_processed)

    groups_audio.append(group_audio)

if SAVE_MODE:
    print("Saving results...")
    assert len(groups) == len(groups_audio)

    # NOTE(review): AUDIO[:-1] drops the last character of AUDIO before
    # appending '-normalized' -- looks like it assumes a trailing '/';
    # confirm, since the current AUDIO constant has none.
    results_path = os.path.join(AUDIO_PATH, f"{AUDIO[:-1]}-normalized")
    if not os.path.exists(results_path):
        os.makedirs(results_path)

    for idx, group in enumerate(groups):
        current_names = group
        group_audio = groups_audio[idx]
        assert len(current_names) == len(group_audio)

        for idxx, item in enumerate(current_names):
            item = item[1:] if item[0] == '/' else item
            data = group_audio[idxx]

            filename = os.path.basename(item)
            full_name = os.path.join(results_path, filename)

            print(full_name)
            # `rate` holds the sampling rate of the last file read above
            sf.write(full_name, data, rate)
print()
193 |
--------------------------------------------------------------------------------
/code/train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Wed Jan 25 11:40:30 2023
5 |
6 | @author: 01tot10
7 | """
8 |
9 | #%% Imports
10 |
11 | import argparse
12 | import os
13 | import sys
14 | import time
15 |
16 | import numpy as np
17 | import torch
18 | from torch.utils.data import DataLoader
19 |
20 | import wandb
21 | from Automated_GuitarAmpModelling.CoreAudioML.training import ESRLoss
22 | from dataset import VADataset
23 | from GreyBoxDRC.loss_funcs import ESRLoss as DCPreESR
24 | from model import RNN, DiffDelRNN
25 |
26 | print("=" * 10, " SCRIPT START ", "=" * 10)
27 |
#%% Argument parser

# Add argument parser
parser = argparse.ArgumentParser()

# GLOBAL
parser.add_argument('--DRY_RUN', action='store_true', default=False)
parser.add_argument('--DESCRIPTIVE_NAME', type=str, default=None)

# DATASET
parser.add_argument(
    '--DATASET',
    type=str,
    default="ReelToReel_Dataset_Mini__F[0.1]_SL[10]_TD[0.75]_TS[0.75]_TB[0.0]")
parser.add_argument('--DATASET_VAL', type=str, default=None)
parser.add_argument('--FRACTION', type=float, default=1.0)
parser.add_argument('--FRACTION_VAL', type=float, default=1.0)
parser.add_argument('--SEGMENT_LENGTH', type=int, default=None)
parser.add_argument('--PRELOAD', action='store_true', default=False)

# MODEL
parser.add_argument('--MODEL', type=str, default="GRU")
parser.add_argument('--HIDDEN_SIZE', type=int, default=16)

# TRAINING
parser.add_argument('--N_EPOCHS', type=int, default=1000)
parser.add_argument('--LOSS', type=str, default="DCPreESR")
parser.add_argument('--DEMODULATE', action='store_true', default=False)
# default=None (not False) is a deliberate tri-state: when the flag is
# absent, validation inherits the training DEMODULATE setting.
parser.add_argument('--DEMODULATE_VAL', action='store_true', default=None)

args = parser.parse_args()

print("\nArguments:")
print(args)

assert args.LOSS in ["DCPreESR", "ESR"], "Chosen loss not supported!"
# Bug fix: this message previously said "loss" (copy-paste error).
assert args.MODEL in ["GRU", "DiffDelGRU"], "Chosen model not supported!"
65 |
#%% Config

# Global
DRY_RUN = args.DRY_RUN
DESCRIPTIVE_NAME = args.DESCRIPTIVE_NAME

# Data
AUDIO_PATH = "../audio/"
DATASET = args.DATASET
DATASET_VAL = args.DATASET_VAL
DEMODULATE = args.DEMODULATE
# Validation demodulation follows the training flag unless overridden.
DEMODULATE_VAL = DEMODULATE if args.DEMODULATE_VAL is None else args.DEMODULATE_VAL
FRACTION = args.FRACTION
FRACTION_VAL = args.FRACTION_VAL
SEGMENT_LENGTH = args.SEGMENT_LENGTH
PRELOAD = args.PRELOAD

# Model
MODEL_PATH = "../weights/"
MODEL = args.MODEL
INPUT_SIZE = 1
OUTPUT_SIZE = 1
HIDDEN_SIZE = args.HIDDEN_SIZE
SKIP = False

# Training
LOSS = args.LOSS
N_EPOCHS = args.N_EPOCHS
BATCH_SIZE = 32
LEARNING_RATE = 1e-3

#%% Setup

# Names and paths: models are identified by architecture, hidden size,
# loss and dataset; checkpoints go under ../weights/<model_name>/.
dataset_path = os.path.join(AUDIO_PATH, DATASET)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model_name = f"{MODEL}-HS[{HIDDEN_SIZE}]-L[{LOSS}]-DS[{DATASET}]"
if DESCRIPTIVE_NAME:
    model_name = f"{model_name}_{DESCRIPTIVE_NAME}"
model_path = os.path.join(MODEL_PATH, model_name)
model_running_path = os.path.join(model_path, "running.pth")
model_best_path = os.path.join(model_path, "best.pth")

# Logger
run_config = {
    "loss": LOSS,
    "model": MODEL,
    "model_name": model_name,
    "dataset": DATASET,
    "learning_rate": LEARNING_RATE,
    "epochs": N_EPOCHS,
    "batch_size": BATCH_SIZE,
    "hidden_size": HIDDEN_SIZE,
    "segment_length[n]": SEGMENT_LENGTH
}
# W&B logging is disabled entirely on dry runs.
wandb.init(project="neural-tape",
           name=DESCRIPTIVE_NAME if DESCRIPTIVE_NAME else None,
           mode="online" if not DRY_RUN else "disabled",
           group=DESCRIPTIVE_NAME,
           config=run_config)
127 |
# Data
print("\nDataset for training:")
dataset_train = VADataset(dataset_path,
                          fraction=FRACTION,
                          length=SEGMENT_LENGTH,
                          demodulate=DEMODULATE,
                          preload=PRELOAD)
dataloader_train = DataLoader(dataset_train, batch_size=BATCH_SIZE)
# The dataset may adjust the requested segment length; use the effective
# value from here on. -- presumably clamped/rounded inside VADataset, confirm
SEGMENT_LENGTH = dataset_train.length

print("\nDataset for validation:")
# Validate against a separate dataset when one was given, otherwise use the
# "val" subset of the training dataset.
dataset_path = dataset_path if DATASET_VAL is None else os.path.join(
    AUDIO_PATH, DATASET_VAL)
dataset_val = VADataset(dataset_path,
                        subset="val",
                        length=SEGMENT_LENGTH,
                        preload=PRELOAD,
                        fraction=FRACTION_VAL,
                        demodulate=DEMODULATE_VAL,
                        shuffle=False)
# Fixed order and fixed batch of 6 — presumably so the logged audio examples
# stay comparable across epochs; verify against model.validate().
dataloader_val = DataLoader(dataset_val, batch_size=6, shuffle=False)

# Both datasets must agree on the sampling rate used for audio logging.
assert dataset_train.fs == dataset_val.fs, "Sampling rates don't match"
fs = dataset_train.fs
152 |
# Model
if MODEL == "GRU":
    model = RNN(input_size=INPUT_SIZE,
                hidden_size=HIDDEN_SIZE,
                output_size=OUTPUT_SIZE,
                skip=SKIP).to(device)
elif MODEL == "DiffDelGRU":
    # Delay-line capacity in samples, with a 25% safety margin over the
    # maximum delay reported by the dataset's delay analyzer.
    max_delay_n = int(1.25 * dataset_train.delay_analyzer.max_delay * fs)
    model = DiffDelRNN(input_size=INPUT_SIZE,
                       hidden_size=HIDDEN_SIZE,
                       output_size=OUTPUT_SIZE,
                       skip=SKIP,
                       max_delay=max_delay_n).to(device)
else:
    # Unreachable: args.MODEL is validated against the supported list earlier.
    sys.exit()

print("\nModel:")
print(model)

# Loss
if LOSS == "DCPreESR":
    loss_fcn = DCPreESR(dc_pre=True)
elif LOSS == "ESR":
    loss_fcn = ESRLoss()
else:
    # Unreachable: args.LOSS is validated earlier.
    sys.exit()

# Optimizer & Scheduler
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
# Shrink the learning rate by 25% when the validation loss plateaus.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       verbose=True,
                                                       factor=0.75)
185 |
#%% Process

# Compute epoch stats
SEQUENCE_LENGTH, TBPTT_INIT, TBPTT_LEN = dataset_train.length, 2**10, 2**10
# Truncated-BPTT steps per batch: warm-up of TBPTT_INIT samples, then one
# optimizer step per TBPTT_LEN-sample chunk.
n_steps_batch = np.ceil(
    (SEQUENCE_LENGTH - TBPTT_INIT) / (TBPTT_LEN)).astype(int)
# NOTE(review): this scales the dataset size by segment duration in seconds
# (SEQUENCE_LENGTH / fs) — confirm this time-based epoch size is intended.
n_batches = np.ceil(len(dataset_train) * (SEQUENCE_LENGTH / fs) /
                    BATCH_SIZE).astype(int)
optimizer_steps = n_batches * n_steps_batch

# Compute total stats
n_batches_total = n_batches * N_EPOCHS
optimizer_steps_total = optimizer_steps * N_EPOCHS

print("\nTraining start!")
print(f"Using device: {device}")
print(
    f"Epoch statistics: batch size: {BATCH_SIZE}, #batches: {n_batches}, #optimizer_steps: {optimizer_steps}"
)
print(
    f"Total statistics: #epochs: {N_EPOCHS}, #batches: {n_batches_total}, #optimizer_steps: {optimizer_steps_total}"
)

if not DRY_RUN:
    # Checkpoint
    if os.path.exists(model_running_path):
        print(
            "\nExisting model found! Continuing training from previous checkpoint"
        )

        # Load checkpoint. map_location lets a checkpoint saved on GPU be
        # resumed on a CPU-only machine (and vice versa).
        checkpoint_dict = torch.load(model_running_path, map_location=device)

        model.load_state_dict(checkpoint_dict['model_state_dict'])
        optimizer.load_state_dict(checkpoint_dict['optimizer_state_dict'])
    else:
        print("\nNo existing model found! Starting training from scratch")

        # Create the checkpoint directory. exist_ok guards against a leftover
        # (empty) directory from a run that crashed before saving running.pth.
        os.makedirs(model_path, exist_ok=True)
        checkpoint_dict = {
            'epoch': 0,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'val_history': np.array([])
        }
else:
    # Dry run: keep only in-memory bookkeeping, write nothing to disk.
    checkpoint_dict = {'epoch': 0, 'val_history': np.array([])}
234 |
print("=" * 20)
start = time.time()
n_processed = 1  # epochs completed in this session (drives timing stats)
for epoch in range(checkpoint_dict['epoch'], N_EPOCHS):
    sys.stdout.write(f"Epoch {epoch+1}/{N_EPOCHS}...")

    # One pass over the training data, then evaluate on the validation set.
    train_loss = model.train_epoch(dataloader_train, loss_fcn, optimizer)
    val_loss, examples = model.validate(dataloader_val, loss_fcn)

    # Reduce the learning rate when the validation loss plateaus.
    scheduler.step(val_loss)

    # Timing statistics, based on epochs processed in this session only.
    # epoch is 0-based, so after this iteration epoch+1 epochs are done.
    avg_time_epoch = (time.time() - start) / (n_processed)
    time_left = avg_time_epoch * (N_EPOCHS - (epoch + 1))

    sys.stdout.write(
        f" Done! Train loss {'{:.3f}'.format(train_loss)}, Validation loss {'{:.3f}'.format(val_loss)}"
    )
    sys.stdout.write(
        f" Average time/epoch {'{:.3f}'.format(avg_time_epoch/60)} min")
    sys.stdout.write(f" (~ {'{:.3f}'.format(time_left / 60)} min left)")

    # Collect results
    checkpoint_dict['val_history'] = np.append(checkpoint_dict['val_history'],
                                               val_loss)
    checkpoint_dict['epoch'] = epoch + 1
    n_processed += 1

    if not DRY_RUN:
        # Save the running checkpoint every epoch; keep a separate copy of
        # the best-so-far weights. initial=1e3 makes the min well-defined on
        # the first epoch, when val_history[:-1] is empty.
        torch.save(checkpoint_dict, model_running_path)
        if val_loss < np.min(checkpoint_dict['val_history'][:-1], initial=1e3):
            sys.stdout.write(" Validation loss decreased! Updating best model.")
            torch.save(model.state_dict(), model_best_path)

        # Logging
        wandb.log({"loss (train)": train_loss}, step=epoch + 1)
        wandb.log({"loss (val)": val_loss}, step=epoch + 1)
        for example_idx, example in enumerate(examples):
            for key in example:
                # Input and target audio are constant across epochs, so log
                # them only on the very first pass; log everything else on
                # every pass.
                if key in ['input', 'target'] and epoch + 1 != 1:
                    continue
                waveform = example[key].cpu().squeeze().numpy()
                wandb.log(
                    {
                        f"{key}_{example_idx}":
                            wandb.Audio(
                                waveform, caption=f"{key}", sample_rate=fs)
                    },
                    step=epoch + 1)

    sys.stdout.write("\n")
    sys.stdout.flush()

print("Training Done!\n")

print("=" * 10, " SCRIPT END ", "=" * 10)
306 |
--------------------------------------------------------------------------------
/configs/AdversarialConfig.py:
--------------------------------------------------------------------------------
def main(config=0):
    """Build the configuration dictionary for adversarial training.

    Args:
        config: Integer preset selecting the (dataset, critic) combination.
            0-3: CHOWTAPE toy dataset with critics 0-3.
            4-7: real AKAI/MAXELL dataset with critics 0-3.
            10: CHOWTAPE toy dataset with critic 5.

    Returns:
        dict holding training hyperparameters plus the dataset path and
        critic settings ('crit', 'crit_pars', 'crit_lr').

    Raises:
        ValueError: if `config` is not one of the supported presets.
    """
    d = {
        'segment_length': 44100 * 2,
        'tbptt_length': 8192 * 2,
        'val_segment_length': 44100 * 10,
        'gen_lr': 0.0001,
        'val_freq': 1,
        'hid_size': 64,
        'batch_size': 16,
        'model_path': "../models/",
    }

    toy_dataset = 'ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER'
    real_dataset = 'ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL'

    # configs, critic: 0==MelGan, 1==SpectCrit, 2==MelSpectCrit, 3==DilatedConvCrit
    presets = {
        0: (toy_dataset, 0),
        1: (toy_dataset, 1),
        2: (toy_dataset, 2),
        3: (toy_dataset, 3),
        4: (real_dataset, 0),
        5: (real_dataset, 1),
        6: (real_dataset, 2),
        7: (real_dataset, 3),
        10: (toy_dataset, 5),
    }
    if config not in presets:
        # Previously an unknown config crashed with a NameError on `critic`;
        # fail with an explicit error instead.
        raise ValueError(f"Unsupported config: {config}")
    d['dataset_path'], critic = presets[config]

    # Shared parameters for the MultiSpecCrit variants (critics 1 and 2,
    # which differ only in the time-frequency representation used).
    multispec_pars = {
        'scales': [128, 256, 512, 1024],
        'kernel_sizes': [21, 21, 21, 17],
        'hop_sizes': [32, 64, 128, 128],
        'layers': 4,
        'chan_in': 16,
        'chan_fac': 4,
        'stride': 1,
        'g_fac': 16,
        'log': True
    }

    if critic == 0:
        crit = {
            'crit': 'MelGanCrit',
            'crit_pars': {
                'num_D': 3,
                'ndf': 16,
                'n_layers': 4,
                'downsampling_factor': 4
            },
            'crit_lr': 0
        }
    elif critic in (1, 2):
        crit = {
            'crit': 'MultiSpecCrit',
            'crit_pars': dict(multispec_pars,
                              tf_rep='spec' if critic == 1 else 'mel'),
            'crit_lr': 0
        }
    elif critic == 3:
        crit = {'crit': 'DilatedConvDisc', 'crit_pars': {}, 'crit_lr': 0}
    elif critic == 5:
        crit = {
            'crit': 'MultiSpecCrit',
            'crit_pars': {
                'scales': [512, 1024, 2048],
                'kernel_sizes': [21, 17, 7],
                'hop_sizes': [64, 64, 64],
                'layers': 4,
                'chan_in': 16,
                'chan_fac': 4,
                'stride': 1,
                'g_fac': 16,
                'tf_rep': 'spec',
                'log': True
            },
            'crit_lr': 0
        }

    # Merge the critic settings into the main config (in place).
    d.update(crit)

    return d
110 |
111 |
def Merge(dict1, dict2):
    """Copy every key/value pair of dict2 into dict1 and return dict1.

    Note: dict1 is modified in place; on key clashes, dict2's value wins.
    """
    dict1.update(dict2)
    return dict1
116 |
--------------------------------------------------------------------------------
/configs/conf_noise.yaml:
--------------------------------------------------------------------------------
1 | train:
2 | optimizer:
3 | type: "adam"
4 | beta1: 0.9
5 | beta2: 0.999
6 | eps: 1e-8
7 | lr: 2e-4
8 | lr_rampup_it: 1000
9 | scheduler_step_size: 10000
10 | batch: 4
11 | scheduler_gamma: 0.8
12 | save_model: True
13 | save_interval: 10000
14 | resume: False
15 | use_grad_clip: True
16 | max_grad_norm: 1.0
17 | ema_rampup: 1000
18 | ema_rate: 0.999
19 |
20 | dset:
21 | callable: "datasets.tapehiss.TapeHissdset"
22 | path: "../audio/Silence_AKAI_IPS[7.5]_MAXELL_SPLIT/Train"
23 | fs: 44100
24 | seg_len: 65536
25 | num_workers: 2
26 |
27 | model_dir: "../weights/83"
28 |
29 | network:
30 | checkpoint: "../weights/83/noise-73000.pt"
31 | name: "unet_1d"
32 | callable: "networks.unet_1d.UNet1D"
33 | Nin: 1 # two would mean stereo
34 | depth: 8
35 | emb_dim: 32
36 | Ns: [8,16,16,16,16,16,16,16,16]
37 | Ss: [4,4,4,4,4,4,4,4,4]
38 | use_norm: False
39 |
40 | exp:
41 | exp_name: "noise_diffusion"
42 | sample_rate: 44100 # sample rate where the generator works
43 | out_sample_rate: 44100 # sample rate at which we work
44 | seg_len: 65536
45 |
46 | diff_params:
47 | T: 16 # number of time steps
48 | sigma_data: 8e-4
49 | sigma_min: 5e-5
50 | sigma_max: 0.1
51 | ro: 10
52 | Schurn: 0.25
53 | Snoise: 1.0000
54 |
55 | outpainting:
56 | overlap: 0.2 # in seconds
57 |
58 | hydra:
59 | job:
60 | config:
61 | # configuration for the ${hydra.job.override_dirname} runtime variable
62 | override_dirname:
63 | kv_sep: '='
64 | item_sep: ','
65 | # Remove all paths, as the / in them would mess up things
66 | exclude_keys: ['path_experiment',
67 | 'hydra.job_logging.handles.file.filename']
68 |
--------------------------------------------------------------------------------
/configs/conf_toytrajectories.yaml:
--------------------------------------------------------------------------------
1 | train:
2 | optimizer:
3 | type: "adam"
4 | beta1: 0.9
5 | beta2: 0.999
6 | eps: 1e-8
7 | lr: 2e-4
8 | lr_rampup_it: 1000
9 | scheduler_step_size: 10000
10 | batch: 4
11 | scheduler_gamma: 0.8
12 | save_model: True
13 | save_interval: 10000
14 | resume: False
15 | use_grad_clip: True
16 | max_grad_norm: 1.0
17 | ema_rampup: 1000
18 | ema_rate: 0.999
19 |
20 | dset:
21 | callable: "datasets.tapehiss.TapeHissdset"
22 | path: "../audio/ReelToReel_Dataset_MiniPulse100_CHOWTAPE_F[0.6]_SL[60]_TRAJECTORIES/Train"
23 | fs: 44100
24 | seg_len: 225792
25 | num_workers: 2
26 |
27 | model_dir: "../weights/90"
28 |
29 | network:
30 | checkpoint: "../weights/90/toytrajectories-36000.pt"
31 | name: "unet_1d"
32 | callable: "networks.unet_1d.UNet1D"
33 | Nin: 1 # two would mean stereo
34 | depth: 4
35 | Ns: [8, 16,16,16,16, 16]
36 | Ss: [2,2,4,4,4]
37 | emb_dim: 16
38 | num_bottleneck_layers: 1
39 | use_norm: False
40 |
41 | exp:
42 | exp_name: "toytrajectories"
43 | sample_rate: 100 # sample rate where the generator works
44 | out_sample_rate: 44100 # sample rate at which we work
45 | seg_len: 512
46 |
47 | diff_params:
48 | T: 4 # number of time steps
49 | sigma_data: 0.0068 # default for maestro
50 | sigma_min: 1e-5
51 | sigma_max: 0.5
52 | Schurn: 0
53 | Snoise: 1
54 | ro: 7
55 |
56 | outpainting:
57 | overlap: 1 # in seconds
58 |
59 | hydra:
60 | job:
61 | config:
62 | # configuration for the ${hydra.job.override_dirname} runtime variable
63 | override_dirname:
64 | kv_sep: '='
65 | item_sep: ','
66 | # Remove all paths, as the / in them would mess up things
67 | exclude_keys: ['path_experiment',
68 | 'hydra.job_logging.handles.file.filename']
69 |
--------------------------------------------------------------------------------
/configs/conf_trajectories.yaml:
--------------------------------------------------------------------------------
1 | train:
2 | optimizer:
3 | type: "adam"
4 | beta1: 0.9
5 | beta2: 0.999
6 | eps: 1e-8
7 | lr: 2e-4
8 | lr_rampup_it: 1000
9 | scheduler_step_size: 10000
10 | batch: 4
11 | scheduler_gamma: 0.8
12 | save_model: True
13 | save_interval: 10000
14 | resume: False
15 | use_grad_clip: True
16 | max_grad_norm: 1.0
17 | ema_rampup: 1000
18 | ema_rate: 0.999
19 |
20 | dset:
21 | callable: "datasets.tapehiss.TapeHissdset"
22 | path: "../audio/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_TRAJECTORIES_SPLIT/Train"
23 | fs: 192000
24 | seg_len: 983040
25 | num_workers: 2
26 |
27 | model_dir: "../weights/91"
28 |
29 | network:
30 | checkpoint: "../weights/91/trajectories-41000.pt"
31 | name: "unet_1d"
32 | callable: "networks.unet_1d.UNet1D"
33 | Nin: 1 # two would mean stereo
34 | depth: 4
35 | Ns: [8, 16,16,16,16, 16]
36 | Ss: [2,2,4,4,4]
37 | emb_dim: 16
38 | num_bottleneck_layers: 1
39 | use_norm: False
40 |
41 | exp:
42 | exp_name: "trajectories"
43 | sample_rate: 100 # sample rate where the generator works
44 | out_sample_rate: 44100 # sample rate at which we work
45 | seg_len: 512
46 |
47 | diff_params:
48 | T: 8 # number of time steps
49 | sigma_data: 1e-4
50 | sigma_min: 1e-5
51 | sigma_max: 1e-2
52 | ro: 7
53 | Schurn: 0
54 | Snoise: 1
55 |
56 | outpainting:
57 | overlap: 1 # in seconds
58 |
59 | hydra:
60 | job:
61 | config:
62 | # configuration for the ${hydra.job.override_dirname} runtime variable
63 | override_dirname:
64 | kv_sep: '='
65 | item_sep: ','
66 | # Remove all paths, as the / in them would mess up things
67 | exclude_keys: ['path_experiment',
68 | 'hydra.job_logging.handles.file.filename']
69 |
--------------------------------------------------------------------------------
/environment.yaml:
--------------------------------------------------------------------------------
1 | name: neural-tape
2 | channels:
3 | - nvidia
4 | - pytorch
5 | - conda-forge
6 | dependencies:
7 | - pytorch
8 | - pytorch-cuda=11.7
9 | - torchvision
10 | - torchaudio
11 | - spyder-kernels
12 | - ca-certificates
13 | - certifi
14 | - openssl
15 | - matplotlib
16 | - numpy
17 | - scipy
18 | - pysoundfile
19 | - wandb
20 | - yapf
21 | - isort
22 | - pylint
23 | - ffmpeg
24 | - plotly
25 | - natsort
26 | - omegaconf
27 | - beautysh
28 | - librosa
29 | - hydra-core
30 |
--------------------------------------------------------------------------------
/matplotlibrc:
--------------------------------------------------------------------------------
1 | code/matplotlibrc
--------------------------------------------------------------------------------
/presentation/NeuralTape_Presentation_DAFx.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/presentation/NeuralTape_Presentation_DAFx.pdf
--------------------------------------------------------------------------------
/presentation/NeuralTape_Presentation_DAFx.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/presentation/NeuralTape_Presentation_DAFx.pptx
--------------------------------------------------------------------------------
/reel-to-reel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/reel-to-reel.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/delay_trajectories/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/SinesFadedShortContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/pulse_recovery/SinesFadedShortContinuousPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_input_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_1_target_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_input_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_2_target_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_input_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_3_target_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_input_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_4_target_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_input_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_L.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_L.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_R.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sounds/ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL_IO_5_target_R.wav
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_0.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_1.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_2.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_3.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_4.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_5.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_6.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_7.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_8.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]/sweeps/LogSweepsContinuousPulse100_AKAI_IPS[7.5]_MAXELL_SWEEP_9.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_1.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_2.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_3.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_4.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/delay_trajectories/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_DELAY_TRAJECTORIES_5.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Test.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Train.png
--------------------------------------------------------------------------------
/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/DataAnalysis_RealData/MAXELL_IPS[7.5]_HIRES/pulse_recovery/ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[7.5]_MAXELL_PULSEREC_Val.png
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/hysteresis/exp1a-hysteresis.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/hysteresis/exp1a-hysteresis.pdf
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/109_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/109_input.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/109_supervised-I.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/109_supervised-I.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/109_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/109_target.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/21_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/21_input.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/21_supervised-I.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/21_supervised-I.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/21_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/21_target.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/52_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/52_input.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/52_supervised-I.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/52_supervised-I.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/52_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/52_target.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/77_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/77_input.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/77_supervised-I.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/77_supervised-I.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/77_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/77_target.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/78_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/78_input.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/78_supervised-I.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/78_supervised-I.wav
--------------------------------------------------------------------------------
/results/EXP1A_ToyData/predictions/78_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1A_ToyData/predictions/78_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_1_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_1_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_1_DELAY[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_1_DELAY[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_1_DELAY[True]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_1_DELAY[True]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_2_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_2_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/109_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/109_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_1_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_1_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_1_DELAY[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_1_DELAY[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_1_DELAY[True]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_1_DELAY[True]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_2_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_2_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/16_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/16_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_1_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_1_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_1_DELAY[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_1_DELAY[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_1_DELAY[True]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_1_DELAY[True]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_2_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_2_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/40_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/40_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_1_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_1_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_1_DELAY[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_1_DELAY[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_1_DELAY[True]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_1_DELAY[True]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_2_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_2_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/70_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/70_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_1_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_1_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_1_DELAY[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_1_DELAY[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_1_DELAY[True]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_1_DELAY[True]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_2_DELAY[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_2_DELAY[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/delay_compare/77_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/delay_compare/77_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/hysteresis/exp1b-hysteresis_combined.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/hysteresis/exp1b-hysteresis_combined.pdf
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/109_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/109_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/109_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/109_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/52_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/52_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/52_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/52_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/70_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/70_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/70_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/70_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/84_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/84_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/84_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/84_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/98_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/98_input.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/predictions/98_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/predictions/98_target.wav
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/trajectories/toyspectra.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/trajectories/toyspectra.pdf
--------------------------------------------------------------------------------
/results/EXP1B_ToyData/trajectories/toytrajectories.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP1B_ToyData/trajectories/toytrajectories.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise/noise_spectra.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise/noise_spectra.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/109_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/52_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/70_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/84_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_1_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_2_DELAY[True]_NOISE[Generated]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_2_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/noise_compare/98_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/109_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/52_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/70_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/84_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Adversarial.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_1.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_1_DELAY[True]_NOISE[Real]_prediction_Supervised_2.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_input.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_input.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_target.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/predictions/98_target.wav
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_1.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_2.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_3.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/sweep/sweep-maxell-approach_3.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/trajectories/spectra.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/trajectories/spectra.pdf
--------------------------------------------------------------------------------
/results/EXP2_RealData/MAXELL_IPS[7.5]/trajectories/trajectories.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/EXP2_RealData/MAXELL_IPS[7.5]/trajectories/trajectories.pdf
--------------------------------------------------------------------------------
/results/Simulations/playback_losses.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/Simulations/playback_losses.pdf
--------------------------------------------------------------------------------
/results/Simulations/tape_magnetization.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/results/Simulations/tape_magnetization.pdf
--------------------------------------------------------------------------------
/scripts/README.md:
--------------------------------------------------------------------------------
1 | # Scripts
2 |
3 | This folder contains scripts for running various processing pipelines.
4 | For some of the scripts, an option exists for running them in a computing cluster with [slurm](https://slurm.schedmd.com/documentation.html).
5 |
6 | - [SCRIPTS](#scripts)
7 | - [TAPE SIMULATIONS](#tape-simulations)
8 | - [DATA GENERATION](#data-generation)
9 | - [DATA VISUALIZATION / ANALYSIS](#data-visualization--analysis)
10 | - [MODEL TRAINING](#model-training)
11 | - [MODEL EVALUATION](#model-evaluation)
12 |
13 | ## TAPE SIMULATIONS
14 |
15 | Simulate tape:
16 | ```
17 | > simulate-tape.sh # local
18 | > sbatch sbatch-simulate-tape.sh # slurm
19 | ```
20 |
21 | Simulate tape playback:
22 | ```
23 | > simulate-playback.sh
24 | ```
25 |
26 | ## DATA GENERATION
27 |
28 | Generate target outputs using the VST effect processor:
29 | ```
30 | > ./generate-targets.sh [+ OPTIONS] # local
31 | > sbatch sbatch-generate.sh # slurm
32 | ```
33 |
34 | Pre-compute delay trajectories:
35 | ```
36 | > sbatch sbatch-test-dataset.sh
37 | ```
38 |
39 | Save delay trajectories as `.wav`-files for training the delay generator:
40 | ```
41 | > ./generate-trajectories.sh
42 | ```
43 |
44 | ## DATA VISUALIZATION / ANALYSIS
45 |
46 | Generate visualizations and auralizations of data in dataset.
47 |
48 | All:
49 | ```
50 | > ./analysis-data-all.sh [TAPE] [IPS]
51 | ```
52 |
53 | Pulse recovery:
54 | ```
55 | > ./analysis-data-pulse_recovery.sh [TAPE] [IPS]
56 | ```
57 |
58 | Delay trajectories:
59 | ```
60 | > ./analysis-data-delay_trajectories.sh [TAPE] [IPS]
61 | ```
62 |
63 | Input / Output examples:
64 | ```
65 | > ./analysis-data-io.sh [TAPE] [IPS]
66 | ```
67 |
68 | Magnitude response analysis:
69 | ```
70 | > ./analysis-data-sweeps.sh [TAPE] [IPS]
71 | ```
72 |
73 | Hysteresis:
74 | ```
75 | > ./analysis-data-hysteresis.sh [TAPE] [IPS]
76 | ```
77 |
78 | ## MODEL TRAINING
79 |
80 | Train the nonlinear model for different experiments.
81 |
82 | Generic:
83 | ```
84 | > sbatch sbatch-train.sh # slurm
85 | > ./train.sh # local
86 | ```
87 |
88 | All:
89 | ```
90 | > sbatch sbatch-train-all.sh
91 | ```
92 |
93 | Experiment 1a:
94 | ```
95 | > sbatch sbatch-train-exp1a.sh
96 | ```
97 |
98 | Experiment 1b:
99 | ```
100 | > sbatch sbatch-train-exp1b.sh
101 | ```
102 |
103 | Experiment 2:
104 | ```
105 | > sbatch sbatch-train-exp2.sh
106 | ```
107 |
108 | ## MODEL EVALUATION
109 |
110 | Test model performance under different test conditions.
111 |
112 | All:
113 | ```
114 | > ./test-model-all.sh
115 | ```
116 |
117 | Hysteresis:
118 | ```
119 | > ./test-model-hysteresis.sh [+ OPTIONS]
120 | ```
121 |
122 | Magnitude response:
123 | ```
124 | > ./test-model-sweep.sh [+ OPTIONS]
125 | ```
126 |
127 | Predictions:
128 | ```
129 | > ./test-model-predictions.sh [+ OPTIONS]
130 | ```
131 |
132 | Losses:
133 | ```
134 | > ./test-model-loss.sh [+ OPTIONS]
135 | ```
136 |
137 | Noise:
138 | ```
139 | > ./test-model-noise.sh [+ OPTIONS]
140 | ```
141 |
--------------------------------------------------------------------------------
/scripts/analysis-data-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Runs the full data-analysis pipeline for one tape/speed pair. Calls:
4 | - analysis-data-pulse_recovery.sh
5 | - analysis-data-delay_trajectories.sh
6 | - analysis-data-io.sh
7 | - analysis-data-sweeps.sh
8 | - analysis-data-hysteresis.sh
9 | '
10 | # SETTINGS
11 | TAPE=$1
12 | IPS=$2
13 |
14 | # Pulse recovery
15 | # NOTE: sub-script expects EXPERIMENT as $1; "REAL" selects its real-data branch.
16 | ./analysis-data-pulse_recovery.sh REAL $TAPE $IPS
17 |
18 | # Delay trajectories
19 | # NOTE: sub-script expects EXPERIMENT as $1; "REAL" selects its real-data branch.
20 | ./analysis-data-delay_trajectories.sh REAL $TAPE $IPS
21 |
22 | # Input / Output examples
23 | ./analysis-data-io.sh $TAPE $IPS
24 |
25 | # Sweeps
26 | ./analysis-data-sweeps.sh $TAPE $IPS
27 |
28 | # Hysteresis
29 | ./analysis-data-hysteresis.sh $TAPE $IPS
30 |
--------------------------------------------------------------------------------
/scripts/analysis-data-delay_trajectories.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 | # ARGUMENTS
6 | EXPERIMENT=$1 # "TOY" for ChowTape data, anything else for real data
7 | TAPE=$2
8 | IPS=$3
9 |
10 | # SETTINGS
11 | # SEGMENT_LENGTH=$((44100/2)) # SHORT
12 | SEGMENT_LENGTH=$((44100*10)) # LONG
13 | # SEGMENT_LENGTH=$((192000*10)) # LONG, HIRES
14 | N_PARALLEL=1
15 | N_RUNS=5
16 |
17 | # ANALYZE
18 | DESCRIPTIVE_NAME="DELAY_TRAJECTORIES"
19 |
20 | # Quoted so an empty/missing EXPERIMENT does not break the test expression.
21 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
22 | # DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE" # EXP1a
23 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER" # EXP1b
24 | else # REAL
25 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # NORMAL
26 | # DATASET_NAME="ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[$IPS]_$TAPE" # HIRES
27 | fi
28 |
29 | # Launch N_RUNS plotting jobs, waiting whenever N_PARALLEL jobs are in flight.
30 | counter=0
31 | for idx in $(seq $N_RUNS); do
32 | python -u ../code/test-dataset.py\
33 | --DATASET $DATASET_NAME --PRELOAD\
34 | --SEGMENT_LENGTH $SEGMENT_LENGTH\
35 | --PLOT_DELAY\
36 | --SAVE_FIG --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_""$idx" &
37 | # --NO_SHUFFLE --IDX 0\ # SHORT
38 |
39 | ((counter+=1))
40 | if [ $counter -eq $(($N_PARALLEL)) ]; then
41 | echo "Waitin'.."
42 | wait
43 | ((counter=0))
44 | fi
45 | done
46 | wait
--------------------------------------------------------------------------------
/scripts/analysis-data-hysteresis.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 | # ARGUMENTS
6 | TAPE=$1
7 | IPS=$2
8 |
9 | # SETTINGS
10 | DESCRIPTIVE_NAME="TRANSFER"
11 | declare -a ARR_INDICES=(13 63 98) # cherry-picked indices
12 | DATASET_NAME="SinesFadedShortContinuousPulse100_AKAI_IPS[$IPS]_$TAPE"
13 | N_PARALLEL=2
14 |
15 | ## ANALYZE
16 | # One background job per index; pause whenever N_PARALLEL jobs are in flight.
17 | jobs_in_flight=0
18 | for index in "${ARR_INDICES[@]}"; do
19 | python -u ../code/test-dataset.py \
20 | --DATASET $DATASET_NAME --PRELOAD \
21 | --SYNC 1.0 --SEGMENT_LENGTH 48510 --ZOOM 0.1 --NO_SHUFFLE --IDX $index \
22 | --DEMODULATE --PLOT_TRANSFER \
23 | --SAVE_FIG --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_""$index" &
24 |
25 | jobs_in_flight=$((jobs_in_flight + 1))
26 | if [ $jobs_in_flight -eq $N_PARALLEL ]; then
27 | echo "Waitin'.."
28 | wait
29 | jobs_in_flight=0
30 | fi
31 | done
32 | wait
--------------------------------------------------------------------------------
/scripts/analysis-data-io.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 | # ARGUMENTS
6 | TAPE=$1
7 | IPS=$2
8 |
9 | # SETTINGS
10 | SEGMENT_LENGTH=441000 # [n]
11 | ZOOM=5.0 # [s]
12 | N_PARALLEL=2
13 |
14 | # Analyze
15 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE"
16 | DESCRIPTIVE_NAME="IO"
17 |
18 | # Five runs in total, at most N_PARALLEL of them in flight at once.
19 | active=0
20 | for run in $(seq 5); do
21 | python -u ../code/test-dataset.py \
22 | --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH \
23 | --PLOT_DELAY --ZOOM $ZOOM --SAVE_AUDIO \
24 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_""$run" &
25 |
26 | active=$((active + 1))
27 | if [ $active -eq $N_PARALLEL ]; then
28 | echo "Waitin'.."
29 | wait
30 | active=0
31 | fi
32 | done
33 | wait
--------------------------------------------------------------------------------
/scripts/analysis-data-pulse_recovery.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 | # ARGUMENTS
6 | EXPERIMENT=$1 # "TOY" for ChowTape data, anything else for real data
7 | TAPE=$2
8 | IPS=$3
9 |
10 | # SETTINGS
11 | SEGMENT_LENGTH=$((44100 / 2)) # SHORT
12 | # SEGMENT_LENGTH=$((192000 / 2)) # SHORT, HIRES
13 | # SEGMENT_LENGTH=441000 # LONG
14 | N_PARALLEL=1
15 |
16 | # ANALYZE
17 | DESCRIPTIVE_NAME="PULSEREC"
18 | # Quoted so an empty/missing EXPERIMENT does not break the test expression.
19 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
20 | # DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE" # EXP1a
21 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER" # EXP1b
22 | else # REAL
23 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # NORMAL
24 | # DATASET_NAME="ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[$IPS]_$TAPE" # HIRES
25 | fi
26 | declare -a ARR_SUBSET=("Train")
27 |
28 | counter=0
29 | for subset in "${ARR_SUBSET[@]}"; do
30 | python -u ../code/test-dataset.py\
31 | --DATASET $DATASET_NAME --SUBSET $subset --PRELOAD\
32 | --SEGMENT_LENGTH $SEGMENT_LENGTH --NO_SHUFFLE --IDX 0 --ZOOM 0.5\
33 | --PLOT_DELAY\
34 | --SAVE_FIG --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_"$subset
35 |
36 | ((counter+=1))
37 | if [ $counter -eq $(($N_PARALLEL)) ]; then
38 | echo "Waitin'.."
39 | wait
40 | ((counter=0))
41 | fi
42 | done
43 |
44 | # # LogSweepsContinuousPulse100
45 | # DATASET_NAME="LogSweepsContinuousPulse100_AKAI_IPS[$IPS]_$TAPE"
46 | # python -u ../code/test-dataset.py --DATASET $DATASET_NAME --SYNC 1.0 --SEGMENT_LENGTH 264600 --PLOT_DELAY --NO_SHUFFLE --IDX 0 --ZOOM 0.5 --SAVE_FIG --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME &
47 | # wait
48 | #
49 | # # SinesFadedShortContinuousPulse100
50 | # DATASET_NAME="SinesFadedShortContinuousPulse100_AKAI_IPS[$IPS]_$TAPE"
51 | # python -u ../code/test-dataset.py --DATASET $DATASET_NAME --SYNC 1.0 --SEGMENT_LENGTH 48510 --PLOT_DELAY --NO_SHUFFLE --IDX 0 --ZOOM 0.5 --SAVE_FIG --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
52 |
--------------------------------------------------------------------------------
/scripts/analysis-data-sweeps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 | # ARGUMENTS
6 | TAPE=$1
7 | IPS=$2
8 |
9 | # SETTINGS
10 | DESCRIPTIVE_NAME="SWEEP"
11 | N_PARALLEL=4
12 | SEGMENT_LENGTH=1367100
13 | ZOOM=30.0
14 | START_IDX=0
15 | END_IDX=9
16 |
17 | DATASET_NAME="LogSweepsContinuousPulse100_AKAI_IPS[$IPS]_$TAPE" # REAL
18 | SYNC=1.0
19 |
20 | # DATASET_NAME="LogSweepsContinuousPulse100_CHOWTAPE" # CHOWTAPE
21 | # SYNC=0.0
22 |
23 | ## ANALYZE
24 | # Sweep indices START_IDX..END_IDX, at most N_PARALLEL jobs in flight.
25 | active=0
26 | for sweep_idx in $(seq $START_IDX $END_IDX); do
27 | python -u ../code/test-dataset.py \
28 | --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH --PRELOAD \
29 | --SYNC $SYNC --ZOOM $ZOOM --NO_SHUFFLE --IDX $sweep_idx \
30 | --DEMODULATE \
31 | --PLOT_SWEEP --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_""$((sweep_idx-START_IDX))" --SAVE_FIG &
32 |
33 | active=$((active + 1))
34 | if [ $active -eq $N_PARALLEL ]; then
35 | echo "Waitin'.."
36 | wait
37 | active=0
38 | fi
39 | done
40 | wait
41 |
--------------------------------------------------------------------------------
/scripts/generate-targets.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls generate-targets.py
4 | '
5 |
6 | # SETTINGS
7 | DATASET=$1
8 | SUBSET=$2
9 | FRACTION=$3
10 | SEGMENT_LENGTH=$4
11 | TAPE_DRIVE=$5
12 | TAPE_SATURATION=$6
13 | TAPE_BIAS=$7
14 | WOWFLUTTER=True
15 |
16 | # Construct descriptive name
17 | # DESCRIPTIVE_NAME="_F[$FRACTION]_SL[$((SEGMENT_LENGTH/44100))]"
18 | DESCRIPTIVE_NAME="_F[$FRACTION]"
19 | DESCRIPTIVE_NAME=$DESCRIPTIVE_NAME"_TD[$TAPE_DRIVE]_TS[$TAPE_SATURATION]_TB[$TAPE_BIAS]"
20 | if [ $WOWFLUTTER == True ]; then
21 | DESCRIPTIVE_NAME=$DESCRIPTIVE_NAME"_WOWFLUTTER"
22 | fi
23 |
24 | # Run
25 | # NOTE: --NO_SHUFFLE used to be passed twice per branch; it is passed once here.
26 | if [ $WOWFLUTTER == True ]; then
27 | python -u ../code/generate-targets.py \
28 | --DATASET $DATASET --SUBSET $SUBSET --FRACTION $FRACTION --SEGMENT_LENGTH $SEGMENT_LENGTH --PRELOAD --NO_SHUFFLE \
29 | --TAPE_DRIVE $TAPE_DRIVE --TAPE_SATURATION $TAPE_SATURATION --TAPE_BIAS $TAPE_BIAS \
30 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME \
31 | --WOW_FLUTTER_ENABLE
32 | # --TAPE_DISABLE # append to the call above to disable the tape model
33 | else
34 | python -u ../code/generate-targets.py \
35 | --DATASET $DATASET --SUBSET $SUBSET --FRACTION $FRACTION --SEGMENT_LENGTH $SEGMENT_LENGTH --PRELOAD --NO_SHUFFLE \
36 | --TAPE_DRIVE $TAPE_DRIVE --TAPE_SATURATION $TAPE_SATURATION --TAPE_BIAS $TAPE_BIAS \
37 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
38 | fi
39 |
40 |
--------------------------------------------------------------------------------
/scripts/generate-trajectories.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-dataset.py
4 | '
5 |
6 | # ARGUMENTS
7 | TAPE=$1
8 | IPS=$2
9 |
10 | # SETTINGS
11 | DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # NORMAL
12 | # DATASET_NAME="ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[$IPS]_$TAPE" # HIRES
13 | N_FILES=2
14 |
15 | # ANALYZE
16 | declare -a ARR_SUBSET=("Train" "Val" "Test")
17 | counter=0
18 | N_PARALLEL=2
19 |
20 | # Save the trajectory of file $2 from subset $1, then do batching bookkeeping.
21 | save_trajectory() {
22 | local subset=$1
23 | local idx=$2
24 | python -u ../code/test-dataset.py --DATASET $DATASET_NAME --SUBSET $subset --PRELOAD\
25 | --NO_SHUFFLE --IDX $idx\
26 | --SAVE_TRAJECTORY --DESCRIPTIVE_NAME "$subset""_""$idx"
27 |
28 | ((counter+=1))
29 | if [ $counter -eq $(($N_PARALLEL)) ]; then
30 | echo "Waitin'.."
31 | wait
32 | ((counter=0))
33 | fi
34 | }
35 |
36 | # Train uses N_FILES files; Val and Test use only the first file.
37 | for subset in "${ARR_SUBSET[@]}"; do
38 | if [ $subset == "Train" ]; then
39 | for idx in $(seq 0 $(($N_FILES - 1))); do
40 | save_trajectory $subset $idx
41 | done
42 | else
43 | save_trajectory $subset 0
44 | fi
45 | done
46 | wait
--------------------------------------------------------------------------------
/scripts/matplotlibrc:
--------------------------------------------------------------------------------
1 | ../matplotlibrc
--------------------------------------------------------------------------------
/scripts/sbatch-generate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:0
3 | #SBATCH --time=2:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-generate_%j.out
7 |
8 | ## Generate dataset of input-output pairs using VA reel-to-reel tape.
9 | ## Calls generate.sh -- NOTE(review): no generate.sh appears in scripts/; possibly renamed to generate-targets.sh -- confirm before running.
10 |
11 | module load miniconda
12 | source activate neural-tape
13 |
14 | # SETTINGS
15 | DATASET="SinesFaded"
16 | FRACTION=0.3
17 | SIGNAL_AMPLITUDE=1e-3
18 | declare -i SEGMENT_LENGTH=$((1*5*44100)) # 5 s of samples at 44.1 kHz
19 |
20 | # PROCESS
21 | signal_amplitude_converted=$(printf '%.6f' $SIGNAL_AMPLITUDE) # scientific -> fixed-point notation, as required by bc below
22 | printf "\nSignal amplitude: $SIGNAL_AMPLITUDE = $signal_amplitude_converted\n"
23 |
24 | BIAS_AMPLITUDE=$(echo 5*$signal_amplitude_converted|bc) # bias amplitude fixed at 5x the signal amplitude
25 | printf "Bias amplitude: $BIAS_AMPLITUDE\n"
26 |
27 | ./generate.sh $DATASET $FRACTION $SEGMENT_LENGTH $SIGNAL_AMPLITUDE $BIAS_AMPLITUDE
28 |
--------------------------------------------------------------------------------
/scripts/sbatch-simulate-tape.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:0
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=2G
6 | #SBATCH --output=sbatch-analysis-tape_%j.out
7 |
8 | ## Simulate VA reel-to-reel tape as a slurm job.
9 | ## Calls simulate-tape.sh (the previously referenced analysis-tape.sh does not exist in scripts/).
10 |
11 | module load miniconda
12 | source activate neural-tape
13 |
14 | # Process
15 | ./simulate-tape.sh
--------------------------------------------------------------------------------
/scripts/sbatch-test-dataset.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:0
3 | #SBATCH --time=8:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-test-dataset_%j.out
7 |
8 | module load miniconda
9 | source activate neural-tape
10 |
11 | # SETTINGS
12 | DATASET="ReelToReel_Dataset_Mini192kHzPulse100_AKAI_IPS[3.75]_SCOTCH"
13 | declare -a ARR_SUBSET=("Train" "Val" "Test")
14 |
15 | # PROCESS
16 | # Pre-compute delay trajectories for each subset in turn.
17 | # $DATASET is quoted: the name contains "[3.75]", which is otherwise subject to pathname expansion.
18 | for SUBSET in "${ARR_SUBSET[@]}"; do
19 | python -u ../code/test-dataset.py --DATASET "$DATASET" --SUBSET "$SUBSET" --PRELOAD
20 | done
--------------------------------------------------------------------------------
/scripts/sbatch-train-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SETTINGS
3 |
4 | # EXP 1a - CHOWTAPE, No Delay
5 | sbatch sbatch-train-exp1a.sh
6 |
7 | # EXP 1b - CHOWTAPE, Delay
8 | sbatch sbatch-train-exp1b.sh
9 |
10 | # EXP 2 - REAL DATA
11 | sbatch sbatch-train-exp2.sh
12 |
--------------------------------------------------------------------------------
/scripts/sbatch-train-exp1a.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=4:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_%A_%a.out
7 | #SBATCH --array=0-2
8 |
9 | ## Train models in Triton.
10 |
11 | case $SLURM_ARRAY_TASK_ID in # map array task id 0..2 to model number 1..3
12 | 0) N_MODEL=1 ;;
13 | 1) N_MODEL=2 ;;
14 | 2) N_MODEL=3 ;;
15 | esac
16 |
17 | module load miniconda
18 | source activate neural-tape
19 |
20 | # SETTINGS
21 | DATASET="ReelToReel_Dataset_MiniPulse100_CHOWTAPE" # EXP 1a data (no WOWFLUTTER suffix, unlike exp1b)
22 | LOSS="ESR"
23 | MODEL="DiffDelGRU"
24 | HIDDEN_SIZE=64
25 | N_EPOCHS=200
26 | DESCRIPTIVE_NAME="EXP2""_""$MODEL""_""$LOSS""_""$N_MODEL" # NOTE(review): the "EXP2" tag in the exp1a script looks inconsistent -- confirm intended run naming
27 |
28 | python -u ../code/train.py\
29 | --MODEL $MODEL --HIDDEN_SIZE $HIDDEN_SIZE\
30 | --LOSS $LOSS --N_EPOCHS $N_EPOCHS\
31 | --DATASET $DATASET --SEGMENT_LENGTH 441000 --PRELOAD\
32 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
33 |
--------------------------------------------------------------------------------
/scripts/sbatch-train-exp1b.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=4:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_%A_%a.out
7 | #SBATCH --array=0-2
8 |
9 | ## Train models in Triton.
10 |
11 | case $SLURM_ARRAY_TASK_ID in
12 | 0) N_MODEL=1 ;;
13 | 1) N_MODEL=2 ;;
14 | 2) N_MODEL=3 ;;
15 | esac
16 |
17 | module load miniconda
18 | source activate neural-tape
19 |
20 | # SETTINGS
21 | DATASET="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER"
22 | LOSS="ESR"
23 | MODEL="DiffDelGRU"
24 | HIDDEN_SIZE=64
25 | N_EPOCHS=200
26 | DESCRIPTIVE_NAME="EXP2""_""$MODEL""_""$LOSS""_""$N_MODEL"
27 |
28 | python -u ../code/train.py\
29 | --MODEL $MODEL --HIDDEN_SIZE $HIDDEN_SIZE\
30 | --LOSS $LOSS --N_EPOCHS $N_EPOCHS\
31 | --DATASET $DATASET --SEGMENT_LENGTH 441000 --PRELOAD\
32 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
33 |
--------------------------------------------------------------------------------
/scripts/sbatch-train-exp2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=4:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_%A_%a.out
7 | #SBATCH --array=0-2
8 |
9 | ## Train models in Triton.
10 |
11 | # Model number is the array task id (0..2) shifted to 1..3.
12 | N_MODEL=$((SLURM_ARRAY_TASK_ID + 1))
13 |
14 | module load miniconda
15 | source activate neural-tape
16 |
17 | # SETTINGS
18 | MODEL="DiffDelGRU"
19 | HIDDEN_SIZE=64
20 | LOSS="DCPreESR"
21 | N_EPOCHS=200
22 | FRACTION_VAL=0.25
23 | DATASET="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[3.75]_SCOTCH"
24 | DESCRIPTIVE_NAME="EXP3""_""$MODEL""_""$LOSS""_""$N_MODEL"
25 |
26 | python -u ../code/train.py\
27 | --MODEL $MODEL --HIDDEN_SIZE $HIDDEN_SIZE\
28 | --LOSS $LOSS --N_EPOCHS $N_EPOCHS\
29 | --DATASET $DATASET --SEGMENT_LENGTH 441000 --PRELOAD\
30 | --FRACTION_VAL $FRACTION_VAL\
31 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
--------------------------------------------------------------------------------
/scripts/sbatch-train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_%j.out
7 |
8 | ## Train models in Triton.
9 | ## Calls train.sh
10 |
11 | module load miniconda
12 | source activate neural-tape
13 |
14 | # SETTINGS
15 | DESCRIPTIVE_NAME="EXP5"
16 | N_EPOCHS=200
17 | HIDDEN_SIZE=32
18 | DATASET="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER"
19 | DATASET_VAL="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER"
20 |
21 | # Positional order expected by train.sh: HIDDEN_SIZE DATASET DATASET_VAL N_EPOCHS DESCRIPTIVE_NAME
22 | ./train.sh $HIDDEN_SIZE $DATASET $DATASET_VAL $N_EPOCHS $DESCRIPTIVE_NAME
23 |
--------------------------------------------------------------------------------
/scripts/simulate-playback.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls analysis-playback-head.py
4 | '
5 |
6 | # SETTINGS
7 | declare -a arr_tape_v=(1.875 3.75 7.5 15 30)
8 | declare -a arr_tape_delta=(8.75 17.5 35 70 140)
9 | declare -a arr_play_g=(5 10 20 40 80)
10 | declare -a arr_play_d=(1.5 3 6 12 24)
11 |
12 | # Choose array to iterate over
13 | chosen=("${arr_tape_v[@]}")
14 |
15 | for item in "${chosen[@]}"; do
16 | processed=$(echo $item*0.0254|bc) # 0.0254 m per inch; presumably IPS -> m/s -- TODO confirm units
17 | # processed=$(echo $item*$(printf '%.16f' 1e-6)|bc)
18 | echo $processed
19 | python -u ../code/analysis-playback-head.py --TAPE_V $processed --SAVE_FIGS &
20 | done
21 |
22 | # Wait for all backgrounded simulations to finish before the script exits,
23 | # so they are not orphaned (consistent with the other analysis scripts).
24 | wait
25 |
--------------------------------------------------------------------------------
/scripts/simulate-tape.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls analysis-tape.py
4 | '
5 |
6 | # SETTINGS
7 | declare -a arr_signal_amplitudes=(1e-4 1e-3 1e-2)
8 | T_MAX=0.1 #s
9 |
10 | bias_amplitude=0
11 | for signal_amplitude in "${arr_signal_amplitudes[@]}"; do
12 | signal_amplitude_converted=$(printf '%.6f' $signal_amplitude) # scientific -> fixed-point notation for bc
13 | printf "\nSignal amplitude: $signal_amplitude = $signal_amplitude_converted\n"
14 | bias_amplitude=$(echo 5*$signal_amplitude_converted|bc) # bias amplitude fixed at 5x the signal amplitude
15 | printf "Bias amplitude: $bias_amplitude\n"
16 |
17 | python -u ../code/analysis-tape.py --T_MAX $T_MAX --SIGNAL_AMPLITUDE $signal_amplitude --BIAS_AMPLITUDE $bias_amplitude --BIAS_ENABLE --RETURN_INTERNAL --PLOT_FFT --SAVE_FIGS --SAVE_AUDIO &
18 | done
19 |
20 | # Wait for all backgrounded simulations to finish before the script exits,
21 | # so they are not orphaned (consistent with the other analysis scripts).
22 | wait
23 |
--------------------------------------------------------------------------------
/scripts/test-model-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-loss_%j.out
7 |
8 | # LOAD ENVIRONMENT IF IN TRITON
9 | if command -v module &> /dev/null; then
10 | module load miniconda
11 | source activate neural-tape
12 | fi
13 |
14 | # SETTINGS
15 | declare -a ARR_MODELS=(
16 | # "GRU"
17 | "DiffDelGRU"
18 | )
19 | declare -a ARR_LOSSES=(
20 | # "ESR"
21 | # "DCPreESR"
22 | "LogSpec"
23 | )
24 |
25 | for CONFIG_ID in $(seq 1 1); do # only config 1 (REAL, MAXELL @ 7.5 IPS) is active; widen the seq range to run more configs
26 | case $CONFIG_ID in
27 | 0) EXPERIMENT="TOY"
28 | TAPE="EMPTY"
29 | IPS="EMPTY"
30 | ;;
31 | 1) EXPERIMENT="REAL"
32 | TAPE="MAXELL"
33 | IPS="7.5"
34 | ;;
35 | 2) EXPERIMENT="REAL"
36 | TAPE="SCOTCH"
37 | IPS="3.75"
38 | ;;
39 | esac
40 |
41 | for MODEL in "${ARR_MODELS[@]}"; do
42 | for LOSS in "${ARR_LOSSES[@]}"; do
43 |
44 | echo
45 | echo "========================"
46 | echo "== SETTINGS =="
47 | echo "EXPERIMENT: $EXPERIMENT"
48 | echo "TAPE: $TAPE"
49 | echo "IPS: $IPS"
50 | echo "MODEL: $MODEL"
51 | echo "LOSS: $LOSS"
52 | echo "========================"
53 |
54 | # Loss (only test currently enabled; others below are opt-in)
55 | ./test-model-loss.sh $EXPERIMENT $TAPE $IPS $MODEL $LOSS
56 |
57 | # Predictions
58 | # ./test-model-prediction.sh $EXPERIMENT $TAPE $IPS $MODEL $LOSS
59 |
60 | # Noise
61 | # ./test-model-noise.sh $EXPERIMENT $TAPE $IPS $MODEL $LOSS
62 |
63 | # Hysteresis
64 | # ./test-model-hysteresis.sh $EXPERIMENT $TAPE $IPS $MODEL $LOSS
65 |
66 | # Sweep
67 | # ./test-model-sweep.sh $EXPERIMENT $TAPE $IPS $MODEL $LOSS
68 | done
69 | done
70 | done
--------------------------------------------------------------------------------
/scripts/test-model-hysteresis.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-model.py (NOTE: header previously said test-dataset.py; the script invokes ../code/test-model.py)
4 | '
5 | # ARGUMENTS
6 | EXPERIMENT=$1
7 | TAPE=$2
8 | IPS=$3
9 | MODEL=$4
10 | LOSS=$5
11 |
12 | # SETTINGS
13 | DESCRIPTIVE_NAME="HYSTERESIS"
14 | # declare -a ARR_INDICES=(13 63 98) # cherry-picked indices OLD
15 | # SEGMENT_LENGTH=48510 # OLD
16 | declare -a ARR_INDICES=(5) # cherry-picked indices
17 | SEGMENT_LENGTH=44100
18 | COMBINED=True # True: pass all weights to one test-model.py call; False: one call per weight
19 |
20 | if [ $EXPERIMENT == "TOY" ]; then # CHOWTAPE -- NOTE(review): unquoted $EXPERIMENT errors out if the argument is omitted
21 | # OLD
22 | # DATASET_NAME="SinesFadedShortContinuousPulse100_CHOWTAPE" # EXP-1
23 | # DATASET_NAME="SinesFadedShortContinuousPulse100_CHOWTAPE_WOWFLUTTER" # EXP-2
24 |
25 | if [ $TAPE == "EXP1" ]; then # EXP-1; in TOY mode, TAPE doubles as the experiment selector
26 | DATASET_NAME="SinesFadedShortContinuousPulse100CherryPicked_CHOWTAPE" # EXP-1
27 | declare -a ARR_WEIGHTS=(
28 | # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_BEST"
29 | "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1"
30 | "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2"
31 | "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3"
32 | )
33 | else
34 | DATASET_NAME="SinesFadedShortContinuousPulse100CherryPicked_CHOWTAPE_WOWFLUTTER" # EXP-2
35 | declare -a ARR_WEIGHTS=(
36 | "GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
37 | "DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
38 | "DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
39 | # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
40 | # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1"
41 | # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2"
42 | # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3"
43 | )
44 | fi
45 | SYNC=0.0 # CHOWTAPE
46 | else # REAL
47 | DATASET_NAME="SinesFadedShortContinuousPulse100_AKAI_IPS[$IPS]_$TAPE"
48 | declare -a ARR_WEIGHTS=(
49 | "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]"
50 | )
51 | SYNC=1.0 # REAL
52 | fi
53 |
54 | # ANALYZE
55 | if [ $COMBINED == True ]; then
56 | for idx in "${ARR_INDICES[@]}"; do
57 |
58 | if [ $TAPE == "EXP1" ]; then # EXP1 branch omits --DEMODULATE
59 | python -u ../code/test-model.py \
60 | --MODEL $MODEL --WEIGHTS $(echo "${ARR_WEIGHTS[*]}") \
61 | --DATASET $DATASET_NAME \
62 | --SYNC $SYNC --SEGMENT_LENGTH $SEGMENT_LENGTH --ZOOM 0.1 --NO_SHUFFLE --IDX $idx \
63 | --PLOT_TRANSFER --SAVE_FIG \
64 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx"
65 | else
66 | python -u ../code/test-model.py \
67 | --MODEL $MODEL --WEIGHTS $(echo "${ARR_WEIGHTS[*]}") \
68 | --DATASET $DATASET_NAME \
69 | --SYNC $SYNC --SEGMENT_LENGTH $SEGMENT_LENGTH --ZOOM 0.1 --NO_SHUFFLE --IDX $idx \
70 | --DEMODULATE \
71 | --PLOT_TRANSFER --SAVE_FIG \
72 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx"
73 | fi
74 | done
75 | else
76 | for weight in "${ARR_WEIGHTS[@]}"; do
77 | for idx in "${ARR_INDICES[@]}"; do
78 | if [ $TAPE == "EXP1" ]; then # EXP1 branch omits --DEMODULATE
79 | python -u ../code/test-model.py \
80 | --MODEL $MODEL --WEIGHTS $weight \
81 | --DATASET $DATASET_NAME \
82 | --SYNC $SYNC --SEGMENT_LENGTH $SEGMENT_LENGTH --ZOOM 0.1 --NO_SHUFFLE --IDX $idx \
83 | --PLOT_TRANSFER --SAVE_FIG \
84 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx"
85 | else
86 | python -u ../code/test-model.py \
87 | --MODEL $MODEL --WEIGHTS $weight \
88 | --DATASET $DATASET_NAME \
89 | --SYNC $SYNC --SEGMENT_LENGTH $SEGMENT_LENGTH --ZOOM 0.1 --NO_SHUFFLE --IDX $idx \
90 | --DEMODULATE \
91 | --PLOT_TRANSFER --SAVE_FIG \
92 | --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx"
93 | fi
94 | done
95 | done
96 | fi
97 |
--------------------------------------------------------------------------------
/scripts/test-model-loss.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-loss_%j.out
7 |
8 | # Compute losses on held-out subsets for trained tape models by calling
9 | # code/test-model.py.
10 | # Usage: test-model-loss.sh EXPERIMENT TAPE IPS MODEL LOSS
11 | #   EXPERIMENT "TOY" selects the synthetic CHOWTAPE data; anything else
12 | #   selects the real AKAI dataset for the given TAPE stock and IPS speed.
13 |
14 | # SETTINGS
15 | EXPERIMENT=$1
16 | TAPE=$2
17 | IPS=$3
18 | MODEL=$4
19 | LOSS=$5
20 |
21 | # LOAD ENVIRONMENT IF IN TRITON
22 | # if command -v module &> /dev/null; then
23 | #     module load miniconda
24 | #     source activate neural-tape
25 | # fi
26 |
27 | DESCRIPTIVE_NAME="LOSS"
28 | SEGMENT_LENGTH=$((44100*10)) # 10 s at 44.1 kHz, in samples
29 | declare -a ARR_SUBSETS=(
30 |     # "Val"
31 |     "Test"
32 | )
33 |
34 | # Quoted expansions keep the [ ... ] tests valid when arguments are omitted.
35 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
36 |     # declare -a ARR_WEIGHTS=(
37 |     #     "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1"
38 |     #     "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2"
39 |     #     "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3"
40 |     # ) # EXP1
41 |     declare -a ARR_WEIGHTS=(
42 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
43 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2"
44 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3"
45 |     ) # EXP2
46 | else # REAL
47 |     DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # REAL
48 |     declare -a ARR_WEIGHTS=(
49 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_BEST"
50 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_1"
51 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_2"
52 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_3"
53 |     )
54 |     DATASET_NOISE="Silence_AKAI_IPS[$IPS]_$TAPE"
55 | fi
56 |
57 | # ANALYZE
58 | for weight in "${ARR_WEIGHTS[@]}"; do
59 |     for subset in "${ARR_SUBSETS[@]}"; do
60 |
61 |         if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
62 |             DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER" # EXP 2
63 |             # Delayed
64 |             python -u ../code/test-model.py \
65 |                 --MODEL $MODEL --WEIGHTS $weight \
66 |                 --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
67 |                 --ADD_DELAY \
68 |                 --COMPUTE_LOSS \
69 |                 --SAVE_AUDIO \
70 |                 --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
71 |
72 |             if [ "$MODEL" == "GRU" ]; then
73 |                 # Demodulated
74 |                 python -u ../code/test-model.py \
75 |                     --MODEL $MODEL --WEIGHTS $weight \
76 |                     --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
77 |                     --DEMODULATE \
78 |                     --COMPUTE_LOSS \
79 |                     --SAVE_AUDIO \
80 |                     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
81 |
82 |                 # DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE" # EXP 1
83 |                 # python -u ../code/test-model.py \
84 |                 #     --MODEL $MODEL --WEIGHTS $weight \
85 |                 #     --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
86 |                 #     --COMPUTE_LOSS \
87 |                 #     --SAVE_AUDIO \
88 |                 #     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
89 |             fi
90 |
91 |         else # REAL
92 |
93 |             # Delayed + No Noise
94 |             python -u ../code/test-model.py \
95 |                 --MODEL $MODEL --WEIGHTS $weight \
96 |                 --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
97 |                 --ADD_DELAY \
98 |                 --COMPUTE_LOSS \
99 |                 --SAVE_AUDIO \
100 |                 --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
101 |
102 |             # Delayed + Noised
103 |             # python -u ../code/test-model.py \
104 |             #     --MODEL $MODEL --WEIGHTS $weight \
105 |             #     --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
106 |             #     --DATASET_NOISE $DATASET_NOISE \
107 |             #     --ADD_DELAY --ADD_NOISE \
108 |             #     --COMPUTE_LOSS \
109 |             #     --SAVE_AUDIO \
110 |             #     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
111 |
112 |             if [ "$MODEL" == "GRU" ]; then
113 |                 # Demodulated + No Noise
114 |                 python -u ../code/test-model.py \
115 |                     --MODEL $MODEL --WEIGHTS $weight \
116 |                     --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
117 |                     --DEMODULATE \
118 |                     --COMPUTE_LOSS \
119 |                     --SAVE_AUDIO \
120 |                     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
121 |
122 |                 # Demodulated + Noised
123 |                 # python -u ../code/test-model.py \
124 |                 #     --MODEL $MODEL --WEIGHTS $weight \
125 |                 #     --DATASET $DATASET_NAME --SUBSET $subset --NO_SHUFFLE --SEGMENT_LENGTH $SEGMENT_LENGTH \
126 |                 #     --DATASET_NOISE $DATASET_NOISE \
127 |                 #     --DEMODULATE --ADD_NOISE \
128 |                 #     --COMPUTE_LOSS \
129 |                 #     --SAVE_AUDIO \
130 |                 #     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME
131 |             fi
132 |         fi
133 |     done
134 | done
--------------------------------------------------------------------------------
/scripts/test-model-noise.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-model.py
4 | '
5 | # ARGUMENTS
6 | EXPERIMENT=$1
7 | TAPE=$2
8 | IPS=$3
9 | MODEL=$4
10 | LOSS=$5
11 |
12 | # SETTINGS
13 | DESCRIPTIVE_NAME="PREDICTION"
14 | SEGMENT_LENGTH=$((5*44100)) # in [n]
15 | NUM_EXAMPLES=5
16 | declare -a ARR_INDICES=(19 98 118) # cherry-picked
17 |
18 | # Quoted expansion keeps the test valid when EXPERIMENT is omitted.
19 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
20 |     echo "Toy data doesn't contain noise component!"
21 |     exit 0
22 | else # REAL
23 |     DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # REAL
24 |     declare -a ARR_WEIGHTS=(
25 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_BEST"
26 |     )
27 |     DATASET_NOISE="Silence_AKAI_IPS[$IPS]_$TAPE"
28 |     SYNC=0.0
29 | fi
30 |
31 | # ANALYZE: for each weight / segment index, render NUM_EXAMPLES
32 | # realizations with both real (measured) and generated noise.
33 | for weight in "${ARR_WEIGHTS[@]}"; do
34 |     for idx in "${ARR_INDICES[@]}"; do
35 |         for repeat in $(seq $NUM_EXAMPLES); do
36 |             # Real noise
37 |             NOISE_TYPE="Real"
38 |             python -u ../code/test-model.py \
39 |                 --MODEL $MODEL --WEIGHTS $weight \
40 |                 --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH \
41 |                 --SYNC $SYNC --NO_SHUFFLE --IDX $idx \
42 |                 --DATASET_NOISE $DATASET_NOISE \
43 |                 --ADD_DELAY --ADD_NOISE --NOISE_TYPE $NOISE_TYPE \
44 |                 --SAVE_AUDIO \
45 |                 --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx""_$repeat""_REAL"
46 |
47 |             # Generated noise
48 |             NOISE_TYPE="Generated"
49 |             python -u ../code/test-model.py \
50 |                 --MODEL $MODEL --WEIGHTS $weight \
51 |                 --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH \
52 |                 --SYNC $SYNC --NO_SHUFFLE --IDX $idx \
53 |                 --DATASET_NOISE $DATASET_NOISE \
54 |                 --ADD_DELAY --ADD_NOISE --NOISE_TYPE $NOISE_TYPE \
55 |                 --SAVE_AUDIO \
56 |                 --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx""_$repeat""_GENERATED"
57 |         done
58 |     done
59 | done
60 |
--------------------------------------------------------------------------------
/scripts/test-model-prediction.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | : '
3 | Calls test-model.py
4 | '
5 | # ARGUMENTS
6 | EXPERIMENT=$1
7 | TAPE=$2
8 | IPS=$3
9 | MODEL=$4
10 | LOSS=$5
11 |
12 | # SETTINGS
13 | DESCRIPTIVE_NAME="PREDICTION"
14 | SEGMENT_LENGTH=$((5*44100)) # in [n]
15 | NOISE_TYPE="Generated" # in ["Real","Generated"]
16 | DELAY_TYPE="True" # in ["True", "Real", "Generated"]
17 | NUM_EXAMPLES=2
18 |
19 | # Quoted expansions keep the tests valid when arguments are omitted.
20 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
21 |     # DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE" # EXP-1
22 |     # declare -a ARR_WEIGHTS=(
23 |     #     "GRU-HS[64]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]"
24 |     # )
25 |     declare -a ARR_INDICES=(16 40 70 77 109) # cherry-picked indices
26 |     DATASET_NAME="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER" # EXP-2
27 |     declare -a ARR_WEIGHTS=(
28 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST"
29 |     )
30 |     SYNC=0.0
31 | else # REAL
32 |     declare -a ARR_INDICES=(52 70 84 98 109) # cherry-picked indices
33 |     DATASET_NAME="ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE" # REAL
34 |     declare -a ARR_WEIGHTS=(
35 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_BEST"
36 |     )
37 |     DATASET_NOISE="Silence_AKAI_IPS[$IPS]_$TAPE"
38 |     SYNC=0.0
39 | fi
40 |
41 | # ANALYZE
42 | for weight in "${ARR_WEIGHTS[@]}"; do
43 |     for idx in "${ARR_INDICES[@]}"; do
44 |         for repeat in $(seq $NUM_EXAMPLES); do
45 |             if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
46 |                 python -u ../code/test-model.py \
47 |                     --MODEL $MODEL --WEIGHTS $weight \
48 |                     --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH \
49 |                     --SYNC $SYNC --NO_SHUFFLE --IDX $idx \
50 |                     --ADD_DELAY --DELAY_TYPE $DELAY_TYPE \
51 |                     --SAVE_AUDIO \
52 |                     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx""_$repeat""_DELAY[$DELAY_TYPE]"
53 |             else
54 |                 python -u ../code/test-model.py \
55 |                     --MODEL $MODEL --WEIGHTS $weight \
56 |                     --DATASET $DATASET_NAME --SEGMENT_LENGTH $SEGMENT_LENGTH \
57 |                     --SYNC $SYNC --NO_SHUFFLE --IDX $idx \
58 |                     --DATASET_NOISE $DATASET_NOISE \
59 |                     --ADD_DELAY --DELAY_TYPE $DELAY_TYPE \
60 |                     --ADD_NOISE --NOISE_TYPE $NOISE_TYPE \
61 |                     --SAVE_AUDIO \
62 |                     --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx""_$repeat""_DELAY[$DELAY_TYPE]""_NOISE[$NOISE_TYPE]"
63 |             fi
64 |         done
65 |     done
66 | done
--------------------------------------------------------------------------------
/scripts/test-model-sweep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-sweep_%j.out
7 |
8 | # Plot sweep responses with code/test-model.py.
9 | # Usage: test-model-sweep.sh EXPERIMENT TAPE IPS MODEL LOSS
10 |
11 | # ARGUMENTS
12 | EXPERIMENT=$1
13 | TAPE=$2
14 | IPS=$3
15 | MODEL=$4
16 | LOSS=$5
17 |
18 | # LOAD ENVIRONMENT IF IN TRITON
19 | if command -v module &> /dev/null; then
20 |     module load miniconda
21 |     source activate neural-tape
22 | fi
23 |
24 | # SETTINGS
25 | DESCRIPTIVE_NAME="SWEEP"
26 | N_PARALLEL=1 # number of analyses launched concurrently
27 |
28 | # Quoted expansion keeps the test valid when EXPERIMENT is omitted.
29 | if [ "$EXPERIMENT" == "TOY" ]; then # CHOWTAPE
30 |     SYNC=0.0
31 |     # DATASET_NAME="LogSweepsContinuousPulse100_CHOWTAPE" # EXP-1
32 |     DATASET_NAME="LogSweepsContinuousPulse100_CHOWTAPE_WOWFLUTTER" # EXP-2
33 |     declare -a ARR_WEIGHTS=(
34 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]"
35 |     )
36 | else # REAL
37 |     SYNC=1.0
38 |     DATASET_NAME="LogSweepsContinuousPulse100_AKAI_IPS[$IPS]_$TAPE" # REAL
39 |     declare -a ARR_WEIGHTS=(
40 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]"
41 |         "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_BEST"
42 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_1"
43 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_2"
44 |         # "$MODEL-HS[64]-L[$LOSS]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[$IPS]_$TAPE]_3"
45 |     )
46 | fi
47 |
48 | SEGMENT_LENGTH=1367100 # NEW
49 | ZOOM=30.0
50 | START_IDX=8
51 | END_IDX=8
52 | # SEGMENT_LENGTH=264600 # OLD
53 | # ZOOM=5.0
54 | # START_IDX=5
55 | # END_IDX=14
56 |
57 | # ANALYZE: jobs are launched in the background; after every N_PARALLEL
58 | # launches the script waits for the batch to finish to bound concurrency.
59 | counter=0
60 | for weight in "${ARR_WEIGHTS[@]}"; do
61 |     for idx in $(seq $START_IDX $END_IDX); do
62 |         python -u ../code/test-model.py \
63 |             --MODEL $MODEL --WEIGHTS $weight \
64 |             --DATASET $DATASET_NAME \
65 |             --SYNC $SYNC --SEGMENT_LENGTH $SEGMENT_LENGTH --ZOOM $ZOOM --NO_SHUFFLE --IDX $idx \
66 |             --DEMODULATE \
67 |             --PLOT_SWEEP --SAVE_FIG \
68 |             --DESCRIPTIVE_NAME $DESCRIPTIVE_NAME"_$idx" &
69 |
70 |         ((counter+=1))
71 |         if [ "$counter" -eq "$N_PARALLEL" ]; then
72 |             echo "Waitin'.."
73 |             wait
74 |             ((counter=0))
75 |         fi
76 |     done
77 | done
78 |
79 | # Wait for any remaining background jobs; previously a partial batch
80 | # could still be running when the script (and a SLURM allocation) exited.
81 | wait
82 |
--------------------------------------------------------------------------------
/scripts/train-adversarial.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=2:00:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_adversarial_%j.out
7 |
8 | # Launch adversarial training (code/train-adversarial.py).
9 | # On the Triton cluster the `module` command exists, so the conda
10 | # environment is activated first; on local machines the check is a no-op.
11 | if command -v module &> /dev/null; then
12 |     module load miniconda
13 |     source activate neural-tape
14 | fi
15 |
16 | # All hyperparameters are defined inside the Python script itself;
17 | # nothing is configured here.
18 | python -u ../code/train-adversarial.py
--------------------------------------------------------------------------------
/scripts/train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --gres=gpu:1
3 | #SBATCH --time=0:30:00
4 | #SBATCH --cpus-per-task=4
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH --output=sbatch-train_%j.out
7 |
8 | # Launch standard training (code/train.py).
9 | # On the Triton cluster the `module` command exists, so the conda
10 | # environment is activated first; on local machines the check is a no-op.
11 | if command -v module &> /dev/null; then
12 |     module load miniconda
13 |     source activate neural-tape
14 | fi
15 |
16 | # Training configuration
17 | MODEL="DiffDelGRU"
18 | HIDDEN_SIZE=64
19 | LOSS="ESR"
20 | N_EPOCHS=200
21 | DATASET="ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER"
22 | DESCRIPTIVE_NAME="TEMP"
23 |
24 | # Run training; --DRY_RUN is kept enabled as in the original
25 | # configuration (see train.py for its exact semantics).
26 | python -u ../code/train.py \
27 |     --MODEL "$MODEL" --HIDDEN_SIZE "$HIDDEN_SIZE" \
28 |     --LOSS "$LOSS" --N_EPOCHS "$N_EPOCHS" \
29 |     --DATASET "$DATASET" --SEGMENT_LENGTH 441000 --PRELOAD \
30 |     --DESCRIPTIVE_NAME "$DESCRIPTIVE_NAME" \
31 |     --DRY_RUN
--------------------------------------------------------------------------------
/weights/83/noise-73000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/83/noise-73000.pt
--------------------------------------------------------------------------------
/weights/90/toytrajectories-36000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/90/toytrajectories-36000.pt
--------------------------------------------------------------------------------
/weights/91/trajectories-41000.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/91/trajectories-41000.pt
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth
--------------------------------------------------------------------------------
/weights/DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST:
--------------------------------------------------------------------------------
1 | DiffDelGRU-HS[64]-L[LogSpec]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[DCPreESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_AKAI_IPS[7.5]_MAXELL]_2
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE]_3
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_1/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_2/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/best.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/01tot10/neural-tape-modeling/228b6e366d0d895e7562a3035fdbd5ff816c442f/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3/running.pth
--------------------------------------------------------------------------------
/weights/GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_BEST:
--------------------------------------------------------------------------------
1 | GRU-HS[64]-L[ESR]-DS[ReelToReel_Dataset_MiniPulse100_CHOWTAPE_WOWFLUTTER]_3
--------------------------------------------------------------------------------