├── .gitignore ├── CNN_CIFAR ├── README.md ├── data │ └── cifar.py ├── environment.yml ├── main.py ├── main_all_dense_cifar10.sh ├── main_all_res_cifar10.sh ├── main_all_vgg_cifar10.sh ├── model │ ├── densenet.py │ ├── resnet.py │ ├── smooth_cross_entropy.py │ ├── vgg.py │ └── wide_res_net.py ├── optimizers │ ├── AdaBelief.py │ ├── AdaM3.py │ ├── AdamW.py │ ├── RAdam.py │ ├── Types.py │ ├── Yogi.py │ └── __init__.py ├── plot_curve.py ├── print_value.py └── utility │ ├── cutout.py │ ├── initialize.py │ ├── loading_bar.py │ ├── log.py │ └── step_lr.py ├── CNN_ImageNet ├── AdaM3.py ├── README.md ├── SGD_GC.py ├── check_cv.py ├── dataloader.py ├── datasets.py ├── environment.yml ├── main.py ├── utils.py └── xception.py ├── DCGAN_BigGAN ├── README.md ├── environment.yml └── src │ ├── AdaM3.py │ ├── RAdam.py │ ├── Types.py │ ├── Yogi.py │ ├── config.py │ ├── configs │ └── CIFAR10 │ │ ├── BigGAN-Mod-CR_adabelief.yaml │ │ ├── BigGAN-Mod-CR_adabound.yaml │ │ ├── BigGAN-Mod-CR_adam.yaml │ │ ├── BigGAN-Mod-CR_adam3.yaml │ │ ├── BigGAN-Mod-CR_radam.yaml │ │ ├── BigGAN-Mod-CR_sgd.yaml │ │ ├── BigGAN-Mod-CR_yogi.yaml │ │ ├── DCGAN_adabelief.yaml │ │ ├── DCGAN_adabound.yaml │ │ ├── DCGAN_adam.yaml │ │ ├── DCGAN_adam3.yaml │ │ ├── DCGAN_radam.yaml │ │ ├── DCGAN_sgd.yaml │ │ └── DCGAN_yogi.yaml │ ├── data_util.py │ ├── loader.py │ ├── main.py │ ├── metrics │ ├── fid.py │ ├── inception_net.py │ ├── ins.py │ ├── ins_tf13.py │ ├── prdc_trained.py │ ├── preparation.py │ └── resnet.py │ ├── models │ ├── big_resnet.py │ ├── deep_big_resnet.py │ ├── deep_conv.py │ ├── model.py │ ├── resnet.py │ └── stylegan2.py │ ├── sync_batchnorm │ ├── batchnorm.py │ ├── batchnorm_reimpl.py │ ├── comm.py │ ├── replicate.py │ └── unittest.py │ ├── utils │ ├── ImageNet_label.txt │ ├── ada_aug.py │ ├── ckpt.py │ ├── cr.py │ ├── custom_ops.py │ ├── diffaug.py │ ├── ema.py │ ├── hdf5.py │ ├── log.py │ ├── losses.py │ ├── misc.py │ ├── ops.py │ ├── sample.py │ ├── sefa.py │ ├── simclr_aug.py │ ├── style_misc.py │ └── style_ops │ │ ├── __init__.py │ │ ├── bias_act.cpp │ │ ├── bias_act.cu │ │ ├── bias_act.h │ │ ├── bias_act.py │ │ ├── conv2d_gradfix.py │ │ ├── conv2d_resample.py │ │ ├── dnnlib │ │ ├── __init__.py │ │ └── util.py │ │ ├── fma.py │ │ ├── grid_sample_gradfix.py │ │ ├── upfirdn2d.cpp │ │ ├── upfirdn2d.cu │ │ ├── upfirdn2d.h │ │ └── upfirdn2d.py │ └── worker.py ├── ICDM23_AdaM3_supp.pdf ├── LSTM_Penn_Treebank ├── AdaBelief.py ├── AdaM3.py ├── AdamW.py ├── RAdam.py ├── README.md ├── Types.py ├── curve │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ 
├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── 
PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── 
PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── 
PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── 
PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] │ ├── PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] │ └── PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] ├── data.py ├── embed_regularize.py ├── environment.yml ├── finetune.py ├── generate.py ├── getdata.sh ├── locked_dropout.py ├── main.py ├── main_all_1layer.sh ├── main_all_2layer.sh ├── main_all_3layer.sh ├── model.py ├── plot_curve.py ├── pointer.py ├── print_value.py ├── splitcross.py ├── utils.py ├── weight_drop.py └── yogi.py ├── README.md ├── SNGAN ├── AdaM3.py ├── README.md ├── configs │ └── CIFAR10 │ │ ├── SNGAN-adam.json │ │ └── SNGAN-adam3.json ├── data_utils │ └── load_dataset.py ├── environment.yml ├── inception_tf13.py ├── load_framework.py ├── main.py ├── metrics │ ├── Accuracy.py │ ├── FID.py │ ├── F_beta.py │ ├── IS.py │ ├── inception_network.py │ └── prepare_inception_moments.py ├── models │ ├── big_resnet.py │ ├── big_resnet_deep.py │ ├── dcgan.py │ └── resnet.py ├── sync_batchnorm │ ├── batchnorm.py │ ├── batchnorm_reimpl.py │ ├── comm.py │ ├── replicate.py │ └── unittest.py ├── train_eval.py └── utils │ ├── ada.py │ ├── ada_op │ ├── __init__.py │ ├── fused_act.py │ ├── fused_bias_act.cpp │ ├── fused_bias_act_kernel.cu │ ├── upfirdn2d.cpp │ ├── upfirdn2d.py │ └── upfirdn2d_kernel.cu │ ├── biggan_utils.py │ ├── cr_diff_aug.py │ ├── diff_aug.py │ ├── load_checkpoint.py │ ├── log.py │ ├── losses.py │ ├── misc.py │ ├── model_ops.py │ └── sample.py ├── Transformer_NMT ├── ! ├── README.md ├── config │ ├── adabelief.sh │ ├── adam.sh │ ├── adam3.sh │ ├── radam.sh │ └── sgd.sh ├── environment.yml ├── eval_lm.py ├── export ├── fairseq │ ├── ! 
│ ├── __init__.py │ ├── binarizer.py │ ├── bleu.py │ ├── checkpoint_utils.py │ ├── clib │ │ ├── libbleu │ │ │ ├── libbleu.cpp │ │ │ └── module.cpp │ │ └── libnat │ │ │ └── edit_dist.cpp │ ├── criterions │ │ ├── __init__.py │ │ ├── adaptive_loss.py │ │ ├── binary_cross_entropy.py │ │ ├── composite_loss.py │ │ ├── cross_entropy.py │ │ ├── fairseq_criterion.py │ │ ├── label_smoothed_cross_entropy.py │ │ ├── label_smoothed_cross_entropy_with_alignment.py │ │ ├── label_smoothed_cross_entropy_with_reg.py │ │ ├── legacy_masked_lm.py │ │ ├── masked_lm.py │ │ ├── nat_loss.py │ │ ├── sentence_prediction.py │ │ └── sentence_ranking.py │ ├── data │ │ ├── __init__.py │ │ ├── audio │ │ │ ├── __init__.py │ │ │ └── raw_audio_dataset.py │ │ ├── backtranslation_dataset.py │ │ ├── base_wrapper_dataset.py │ │ ├── colorize_dataset.py │ │ ├── concat_dataset.py │ │ ├── concat_sentences_dataset.py │ │ ├── data_utils.py │ │ ├── data_utils_fast.cpp │ │ ├── data_utils_fast.pyx │ │ ├── dictionary.py │ │ ├── encoders │ │ │ ├── __init__.py │ │ │ ├── fastbpe.py │ │ │ ├── gpt2_bpe.py │ │ │ ├── gpt2_bpe_utils.py │ │ │ ├── hf_bert_bpe.py │ │ │ ├── moses_tokenizer.py │ │ │ ├── nltk_tokenizer.py │ │ │ ├── sentencepiece_bpe.py │ │ │ ├── space_tokenizer.py │ │ │ └── subword_nmt_bpe.py │ │ ├── fairseq_dataset.py │ │ ├── id_dataset.py │ │ ├── indexed_dataset.py │ │ ├── iterators.py │ │ ├── language_pair_dataset.py │ │ ├── legacy │ │ │ ├── __init__.py │ │ │ ├── block_pair_dataset.py │ │ │ ├── masked_lm_dataset.py │ │ │ └── masked_lm_dictionary.py │ │ ├── list_dataset.py │ │ ├── lm_context_window_dataset.py │ │ ├── lru_cache_dataset.py │ │ ├── mask_tokens_dataset.py │ │ ├── monolingual_dataset.py │ │ ├── multi_corpus_sampled_dataset.py │ │ ├── nested_dictionary_dataset.py │ │ ├── noising.py │ │ ├── num_samples_dataset.py │ │ ├── numel_dataset.py │ │ ├── offset_tokens_dataset.py │ │ ├── pad_dataset.py │ │ ├── plasma_utils.py │ │ ├── prepend_dataset.py │ │ ├── prepend_token_dataset.py │ │ ├── raw_label_dataset.py │ │ ├── replace_dataset.py │ │ ├── resampling_dataset.py │ │ ├── round_robin_zip_datasets.py │ │ ├── sharded_dataset.py │ │ ├── sort_dataset.py │ │ ├── strip_token_dataset.py │ │ ├── subsample_dataset.py │ │ ├── token_block_dataset.py │ │ ├── token_block_utils_fast.cpp │ │ ├── token_block_utils_fast.pyx │ │ ├── transform_eos_dataset.py │ │ ├── transform_eos_lang_pair_dataset.py │ │ └── truncate_dataset.py │ ├── distributed_utils.py │ ├── export │ ├── file_utils.py │ ├── hub_utils.py │ ├── iterative_refinement_generator.py │ ├── legacy_distributed_data_parallel.py │ ├── meters.py │ ├── models │ │ ├── __init__.py │ │ ├── cmlm_transformer.py │ │ ├── composite_encoder.py │ │ ├── distributed_fairseq_model.py │ │ ├── fairseq_decoder.py │ │ ├── fairseq_encoder.py │ │ ├── fairseq_incremental_decoder.py │ │ ├── fairseq_model.py │ │ ├── fconv.py │ │ ├── fconv_lm.py │ │ ├── fconv_self_att.py │ │ ├── insertion_transformer.py │ │ ├── iterative_nonautoregressive_transformer.py │ │ ├── levenshtein_transformer.py │ │ ├── lightconv.py │ │ ├── lightconv_lm.py │ │ ├── lstm.py │ │ ├── model_utils.py │ │ ├── multilingual_transformer.py │ │ ├── nonautoregressive_ensembles.py │ │ ├── nonautoregressive_transformer.py │ │ ├── roberta │ │ │ ├── __init__.py │ │ │ ├── alignment_utils.py │ │ │ ├── hub_interface.py │ │ │ └── model.py │ │ ├── tracing_compliant_transformer.py │ │ ├── transformer.py │ │ ├── transformer_from_pretrained_xlm.py │ │ ├── transformer_lm.py │ │ └── wav2vec.py │ ├── modules │ │ ├── __init__.py │ │ ├── adaptive_input.py │ │ ├── 
adaptive_softmax.py │ │ ├── beamable_mm.py │ │ ├── character_token_embedder.py │ │ ├── conv_tbc.py │ │ ├── cuda_utils.cu │ │ ├── downsampled_multihead_attention.py │ │ ├── dropout_select.py │ │ ├── dynamic_convolution.py │ │ ├── dynamicconv_layer │ │ │ ├── __init__.py │ │ │ ├── cuda_function_gen.py │ │ │ ├── dynamicconv_cuda.cpp │ │ │ ├── dynamicconv_cuda.cuh │ │ │ ├── dynamicconv_cuda_kernel.cu │ │ │ ├── dynamicconv_layer.py │ │ │ ├── dynamiconv_cpu.cpp │ │ │ └── setup.py │ │ ├── gelu.py │ │ ├── grad_multiply.py │ │ ├── highway.py │ │ ├── layer_norm.py │ │ ├── learned_positional_embedding.py │ │ ├── lightconv_layer │ │ │ ├── __init__.py │ │ │ ├── cuda_function_gen.py │ │ │ ├── lightconv_cuda.cpp │ │ │ ├── lightconv_cuda.cuh │ │ │ ├── lightconv_cuda_kernel.cu │ │ │ ├── lightconv_layer.py │ │ │ └── setup.py │ │ ├── lightweight_convolution.py │ │ ├── linearized_convolution.py │ │ ├── logsumexp_moe.py │ │ ├── mean_pool_gating_network.py │ │ ├── multihead_attention.py │ │ ├── norm_select.py │ │ ├── norms │ │ │ └── mask_layernorm.py │ │ ├── positional_embedding.py │ │ ├── scalar_bias.py │ │ ├── sinusoidal_positional_embedding.py │ │ ├── sparse_multihead_attention.py │ │ ├── sparse_transformer_sentence_encoder.py │ │ ├── sparse_transformer_sentence_encoder_layer.py │ │ ├── transformer_layer.py │ │ ├── transformer_sentence_encoder.py │ │ ├── transformer_sentence_encoder_layer.py │ │ ├── unfold.py │ │ └── vggblock.py │ ├── optim │ │ ├── __init__.py │ │ ├── adabelief.py │ │ ├── adabeliefv2.py │ │ ├── adadelta.py │ │ ├── adafactor.py │ │ ├── adagrad.py │ │ ├── adahessian.py │ │ ├── adam.py │ │ ├── adam3.py │ │ ├── adamax.py │ │ ├── adamw.py │ │ ├── back.py │ │ ├── bmuf.py │ │ ├── fairseq_optimizer.py │ │ ├── fp16_optimizer.py │ │ ├── lr_scheduler │ │ │ ├── __init__.py │ │ │ ├── cosine_lr_scheduler.py │ │ │ ├── fairseq_lr_scheduler.py │ │ │ ├── fixed_schedule.py │ │ │ ├── inverse_square_root_schedule.py │ │ │ ├── polynomial_decay_schedule.py │ │ │ ├── reduce_lr_on_plateau.py │ │ │ ├── tri_stage_lr_scheduler.py │ │ │ └── triangular_lr_scheduler.py │ │ ├── nag.py │ │ ├── radam.py │ │ └── sgd.py │ ├── options.py │ ├── pdb.py │ ├── progress_bar.py │ ├── registry.py │ ├── search.py │ ├── sequence_generator.py │ ├── sequence_scorer.py │ ├── tasks │ │ ├── __init__.py │ │ ├── audio_pretraining.py │ │ ├── cross_lingual_lm.py │ │ ├── fairseq_task.py │ │ ├── language_modeling.py │ │ ├── legacy_masked_lm.py │ │ ├── masked_lm.py │ │ ├── multilingual_masked_lm.py │ │ ├── multilingual_translation.py │ │ ├── semisupervised_translation.py │ │ ├── sentence_prediction.py │ │ ├── sentence_ranking.py │ │ ├── translation.py │ │ ├── translation_from_pretrained_xlm.py │ │ ├── translation_lev.py │ │ └── translation_moe.py │ ├── tokenizer.py │ ├── trainer.py │ └── utils.py ├── fairseq_cli │ ├── __init__.py │ ├── eval_lm.py │ ├── generate.py │ ├── interactive.py │ ├── preprocess.py │ ├── score.py │ ├── setup.py │ └── train.py ├── generate.py ├── hubconf.py ├── interactive.py ├── main.py ├── prepare-iwslt14.sh ├── prepare-wmt14en2de.sh ├── preprocess.py ├── probe_train.py ├── score.py ├── scripts │ ├── __init__.py │ ├── average_checkpoints.py │ ├── build_sym_alignment.py │ ├── compare_namespaces.py │ ├── compound_split_bleu.sh │ ├── convert_dictionary.lua │ ├── convert_model.lua │ ├── count_docs.py │ ├── read_binarized.py │ ├── rm_pt.py │ ├── sacrebleu_pregen.sh │ ├── shard_docs.py │ ├── split_train_valid_docs.py │ ├── spm_decode.py │ ├── spm_encode.py │ ├── spm_train.py │ ├── wav2vec_featurize.py │ └── wav2vec_manifest.py ├── 
setup.py ├── sh_files │ ├── adabelief.sh │ ├── adam.sh │ ├── adam3.sh │ ├── adamw.sh │ └── sgdm.sh └── validate.py └── images ├── AdaM3.png ├── ETH.png ├── UCD.png ├── framework.png ├── neu.png └── smile.png /CNN_CIFAR/README.md: -------------------------------------------------------------------------------- 1 | # CNN for Image Classification 2 | 3 | This folder contains the implementation of training CNNs (ResNet, VGGNet and DenseNet) on 4 | CIFAR-10. Each experiment is run 5 times independently, using random seeds 0, 1, 2, 3, 4. 5 | 6 | ## Environment setup 7 | 8 | ``` 9 | conda env create -f environment.yml 10 | ``` 11 | 12 | ## Reproducing the results 13 | 14 | ### VGGNet-16 on CIFAR-10 15 | ``` 16 | sh main_all_vgg_cifar10.sh 17 | ``` 18 | 19 | ### ResNet-34 on CIFAR-10 20 | ``` 21 | sh main_all_res_cifar10.sh 22 | ``` 23 | 24 | ### DenseNet-121 on CIFAR-10 25 | ``` 26 | sh main_all_dense_cifar10.sh 27 | ``` 28 | 29 | The logged results, saved as Python dictionaries, are in the folder `./curve_cifar10`. 30 | 31 | ### To plot the curves 32 | ``` 33 | python plot_curve.py 34 | ``` 35 | 36 | ### To print the numerical results 37 | ``` 38 | python print_value.py 39 | ``` 40 | 41 | 42 | -------------------------------------------------------------------------------- /CNN_CIFAR/model/smooth_cross_entropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | def smooth_crossentropy(pred, gold, smoothing=0.1): 7 | n_class = pred.size(1) 8 | 9 | one_hot = torch.full_like(pred, fill_value=smoothing / (n_class - 1)) 10 | one_hot.scatter_(dim=1, index=gold.unsqueeze(1), value=1.0 - smoothing) 11 | log_prob = F.log_softmax(pred, dim=1) 12 | 13 | return F.kl_div(input=log_prob, target=one_hot, reduction='none').sum(-1) 14 | -------------------------------------------------------------------------------- /CNN_CIFAR/optimizers/Types.py: -------------------------------------------------------------------------------- 1 | from typing import Iterable, Union, Callable, Dict, Optional, Tuple, Any 2 | from torch import Tensor 3 | 4 | Params = Union[Iterable[Tensor], Iterable[dict]] 5 | 6 | LossClosure = Callable[[], float] 7 | OptLossClosure = Optional[LossClosure] 8 | Betas2 = Tuple[float, float] 9 | State = Dict[str, Any] 10 | OptFloat = Optional[float] 11 | -------------------------------------------------------------------------------- /CNN_CIFAR/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .AdaBelief import AdaBelief 2 | from .Yogi import Yogi 3 | from .RAdam import RAdam 4 | from .AdamW import AdamW 5 | from .AdaM3 import AdaM3 6 | -------------------------------------------------------------------------------- /CNN_CIFAR/utility/cutout.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class Cutout: 5 | def __init__(self, size=16, p=0.5): 6 | self.size = size 7 | self.half_size = size // 2 8 | self.p = p 9 | 10 | def __call__(self, image): 11 | if torch.rand([1]).item() > self.p: 12 | return image 13 | 14 | left = torch.randint(-self.half_size, image.size(1) - self.half_size, [1]).item() 15 | top = torch.randint(-self.half_size, image.size(2) - self.half_size, [1]).item() 16 | right = min(image.size(1), left + self.size) 17 | bottom = min(image.size(2), top + self.size) 18 | 19 | image[:, max(0, left): right, max(0, top): bottom] = 0 20 | return image 21 | 
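A minimal usage sketch for the `Cutout` transform above. The surrounding transforms are an assumption for illustration (check `data/cifar.py` for the pipeline actually used in the experiments); note that `Cutout` indexes CHW tensors, so it must come after `ToTensor()`:

```
# Hypothetical composition, not copied from data/cifar.py.
import torchvision.transforms as transforms
from utility.cutout import Cutout

train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),       # -> CHW float tensor in [0, 1]
    Cutout(size=16, p=0.5),      # zero out one random 16x16 patch half the time
])
```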
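Likewise, a quick sanity check for `smooth_crossentropy` from `model/smooth_cross_entropy.py` above: with `smoothing=0.1` and 10 classes, the smoothed target puts 0.9 on the gold class and 0.1/9 on each of the other classes, and the function returns one loss value per sample, so reduce with `.mean()` before calling `backward()`:

```
import torch
from model.smooth_cross_entropy import smooth_crossentropy

pred = torch.randn(4, 10)           # logits: batch of 4 samples, 10 classes
gold = torch.tensor([0, 3, 7, 9])   # gold class indices
loss = smooth_crossentropy(pred, gold, smoothing=0.1)
print(loss.shape)                   # torch.Size([4]): per-sample losses
```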
-------------------------------------------------------------------------------- /CNN_CIFAR/utility/initialize.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | import os 4 | 5 | 6 | def initialize(seed: int): 7 | random.seed(seed) 8 | torch.manual_seed(seed) 9 | torch.cuda.manual_seed(seed) 10 | torch.cuda.manual_seed_all(seed) 11 | torch.set_printoptions(10) 12 | 13 | torch.backends.cudnn.enabled = True 14 | torch.backends.cudnn.benchmark = False 15 | torch.backends.cudnn.deterministic = True 16 | 17 | os.environ['PYTHONHASHSEED'] = str(seed) 18 | -------------------------------------------------------------------------------- /CNN_CIFAR/utility/loading_bar.py: -------------------------------------------------------------------------------- 1 | class LoadingBar: 2 | def __init__(self, length: int = 40): 3 | self.length = length 4 | self.symbols = ['┈', '░', '▒', '▓'] 5 | 6 | def __call__(self, progress: float) -> str: 7 | p = int(progress * self.length*4 + 0.5) 8 | d, r = p // 4, p % 4 9 | return '┠┈' + d * '█' + ((self.symbols[r]) + max(0, self.length-1-d) * '┈' if p < self.length*4 else '') + "┈┨" 10 | -------------------------------------------------------------------------------- /CNN_CIFAR/utility/step_lr.py: -------------------------------------------------------------------------------- 1 | class StepLR: 2 | def __init__(self, optimizer, learning_rate: float, total_epochs: int): 3 | self.optimizer = optimizer 4 | self.total_epochs = total_epochs 5 | self.base = learning_rate 6 | 7 | def __call__(self, epoch): 8 | if epoch < self.total_epochs * 3/10: 9 | lr = self.base 10 | elif epoch < self.total_epochs * 6/10: 11 | lr = self.base * 0.2 12 | elif epoch < self.total_epochs * 8/10: 13 | lr = self.base * 0.2 ** 2 14 | else: 15 | lr = self.base * 0.2 ** 3 16 | 17 | for param_group in self.optimizer.param_groups: 18 | param_group["lr"] = lr 19 | 20 | def lr(self) -> float: 21 | return self.optimizer.param_groups[0]["lr"] 22 | -------------------------------------------------------------------------------- /CNN_ImageNet/README.md: -------------------------------------------------------------------------------- 1 | # CNN for ImageNet 2 | 3 | This folder contains the implementation of training ResNet-18 on ImageNet using different optimizers. Download the ImageNet data into the folder `./ImageNet`. 
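Note: the layout expected under `./ImageNet` is an assumption based on the usual ImageNet convention, not taken from this repo; verify against `dataloader.py` and `datasets.py` in this folder. Loaders of this kind typically expect class-labeled subfolders under `train/` and `val/`:

```
ImageNet/
├── train/
│   ├── n01440764/
│   │   └── *.JPEG
│   └── ...
└── val/
    ├── n01440764/
    │   └── *.JPEG
    └── ...
```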
4 | 5 | ## Environment setup 6 | 7 | ``` 8 | conda env create -f environment.yml 9 | ``` 10 | 11 | ## Reproducing the results 12 | 13 | 14 | 15 | ### Run SGDM optimizer 16 | ``` 17 | python main.py --optimizer sgd --lr 0.1 --wd 1e-4 --beta1 0.9 --beta2 0.999 --eps 1e-8 --batch-size 256 --arch resnet18 --epochs 100 --lr_decay cosine --last_lr 1e-4 18 | ``` 19 | ### Run Adam optimizer 20 | ``` 21 | python main.py --optimizer adam --lr 1e-3 --wd 1e-4 --beta1 0.9 --beta2 0.999 --eps 1e-8 --batch-size 256 --arch resnet18 --epochs 100 --lr_decay cosine --last_lr 5e-6 22 | ``` 23 | ### Run AdaM3 optimizer 24 | ``` 25 | python main.py --optimizer adam3 --lr 1e-3 --wd 5e-2 --beta1 0.9 --beta2 0.999 --eps 1e-16 --batch-size 256 --arch resnet18 --epochs 100 --lr_decay cosine --last_lr 5e-6 26 | ``` -------------------------------------------------------------------------------- /CNN_ImageNet/check_cv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: benchmark-opencv-resize.py 4 | 5 | 6 | import cv2 7 | import time 8 | import numpy as np 9 | 10 | """ 11 | Some prebuilt opencv builds are much slower than others. 12 | You should check with this script and make sure it prints < 1s. 13 | On E5-2680v3, archlinux, this script prints: 14 | 0.61s for system opencv 3.4.0-2. 15 | >5 s for anaconda opencv 3.3.1 py36h6cbbc71_1. 16 | On E5-2650v4, this script prints: 17 | 0.6s for opencv built locally with -DWITH_OPENMP=OFF 18 | 0.6s for opencv from `pip install opencv-python`. 19 | 1.3s for opencv built locally with -DWITH_OPENMP=ON 20 | 2s for opencv from `conda install`. 21 | """ 22 | 23 | 24 | img = (np.random.rand(256, 256, 3) * 255).astype('uint8') 25 | 26 | start = time.time() 27 | for k in range(1000): 28 | out = cv2.resize(img, (384, 384)) 29 | print(time.time() - start) 30 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/README.md: -------------------------------------------------------------------------------- 1 | # Generative Adversarial Networks 2 | 3 | This folder contains the implementation of training DCGAN and BigGAN on CIFAR-10. Each experiment is run 5 times independently. 
4 | 5 | ## Environment setup 6 | 7 | ``` 8 | conda env create -f environment.yml 9 | ``` 10 | 11 | ## Reproducing the results of DCGAN 12 | ``` 13 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/DCGAN_adam3.yaml -save checkpoints/ -data data/ --seed 0 14 | 15 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/DCGAN_adam3.yaml -save checkpoints/ -data data/ --seed 1 16 | 17 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/DCGAN_adam3.yaml -save checkpoints/ -data data/ --seed 2 18 | 19 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/DCGAN_adam3.yaml -save checkpoints/ -data data/ --seed 3 20 | 21 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/DCGAN_adam3.yaml -save checkpoints/ -data data/ --seed 4 22 | ``` 23 | 24 | ## Reproducing the results of BigGAN 25 | ``` 26 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml -save checkpoints/ -data data/ --seed 0 27 | 28 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml -save checkpoints/ -data data/ --seed 1 29 | 30 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml -save checkpoints/ -data data/ --seed 2 31 | 32 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml -save checkpoints/ -data data/ --seed 3 33 | 34 | python src/main.py -t -hdf5 -l -batch_stat -metrics fid -ref "test" -cfg src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml -save checkpoints/ -data data/ --seed 4 35 | ``` 36 | 37 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/Types.py: -------------------------------------------------------------------------------- 1 | from typing import Iterable, Union, Callable, Dict, Optional, Tuple, Any 2 | from torch import Tensor 3 | 4 | Params = Union[Iterable[Tensor], Iterable[dict]] 5 | 6 | LossClosure = Callable[[], float] 7 | OptLossClosure = Optional[LossClosure] 8 | Betas2 = Tuple[float, float] 9 | State = Dict[str, Any] 10 | OptFloat = Optional[float] 11 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_adabelief.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | type_: "AdaBelief" 29 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_adabound.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 
| apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | type_: "AdaBound" 29 | final_lr: 0.02 30 | gamma: 0.001 31 | eps: 0.00000001 32 | 33 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_adam.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_adam3.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | type_: "AdaM3" 29 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_radam.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | type_: "RAdam" 29 | eps: 0.00000001 30 | 31 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_sgd.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | 
type_: "SGD" 29 | momentum: 0.9 30 | 31 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/BigGAN-Mod-CR_yogi.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "big_resnet" 7 | g_cond_mtd: "cBN" 8 | d_cond_mtd: "PD" 9 | apply_g_sn: True 10 | apply_d_sn: True 11 | apply_attn: True 12 | attn_g_loc: [2] 13 | attn_d_loc: [1] 14 | z_dim: 80 15 | g_shared_dim: 128 16 | g_conv_dim: 96 17 | d_conv_dim: 96 18 | apply_g_ema: True 19 | g_ema_decay: 0.9999 20 | g_ema_start: 1000 21 | LOSS: 22 | adv_loss: "hinge" 23 | apply_cr: True 24 | cr_lambda: 10.0 25 | AUG: 26 | cr_aug_type: "cr" 27 | OPTIMIZATION: 28 | type_: "Yogi" 29 | eps: 0.00000001 30 | 31 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_adabelief.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "AdaBelief" 13 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_adabound.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "AdaBound" 13 | final_lr: 0.02 14 | gamma: 0.001 15 | eps: 0.00000001 16 | 17 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_adam.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_adam3.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "AdaM3" 13 | eps: 0.00000001 14 | rectify: False 15 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_radam.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "RAdam" 13 | eps: 0.00000001 14 | 15 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_sgd.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | 
backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "SGD" 13 | momentum: 0.9 14 | 15 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/configs/CIFAR10/DCGAN_yogi.yaml: -------------------------------------------------------------------------------- 1 | DATA: 2 | name: "CIFAR10" 3 | img_size: 32 4 | num_classes: 10 5 | MODEL: 6 | backbone: "deep_conv" 7 | g_conv_dim: "N/A" 8 | d_conv_dim: "N/A" 9 | OPTIMIZATION: 10 | d_updates_per_step: 2 11 | total_steps: 200000 12 | type_: "Yogi" 13 | eps: 0.00000001 14 | 15 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | """ 2 | -*- coding: utf-8 -*- 3 | File : unittest.py 4 | Author : Jiayuan Mao 5 | Email : maojiayuan@gmail.com 6 | Date : 27/01/2018 7 | 8 | This file is part of Synchronized-BatchNorm-PyTorch. 9 | https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 10 | Distributed under MIT License. 11 | 12 | MIT License 13 | 14 | Copyright (c) 2018 Jiayuan MAO 15 | 16 | Permission is hereby granted, free of charge, to any person obtaining a copy 17 | of this software and associated documentation files (the "Software"), to deal 18 | in the Software without restriction, including without limitation the rights 19 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 20 | copies of the Software, and to permit persons to whom the Software is 21 | furnished to do so, subject to the following conditions: 22 | 23 | The above copyright notice and this permission notice shall be included in all 24 | copies or substantial portions of the Software. 25 | 26 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 27 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 28 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 29 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 30 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 31 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 | SOFTWARE. 
33 | """ 34 | 35 | import unittest 36 | import torch 37 | 38 | 39 | class TorchTestCase(unittest.TestCase): 40 | def assertTensorClose(self, x, y): 41 | adiff = float((x - y).abs().max()) 42 | if (y == 0).all(): 43 | rdiff = 'NaN' 44 | else: 45 | rdiff = float((adiff / y).abs().max()) 46 | 47 | message = ('Tensor close check failed\n' 'adiff={}\n' 'rdiff={}\n').format(adiff, rdiff) 48 | self.assertTrue(torch.allclose(x, y), message) 49 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/cr.py: -------------------------------------------------------------------------------- 1 | # PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN 2 | # The MIT License (MIT) 3 | # See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details 4 | 5 | # src/utils/cr.py 6 | 7 | import random 8 | 9 | import torch 10 | import torch.nn.functional as F 11 | 12 | 13 | def apply_cr_aug(x, flip=True, translation=True): 14 | if flip: 15 | x = random_flip(x, 0.5) 16 | if translation: 17 | x = random_translation(x, 1 / 8) 18 | if flip or translation: 19 | x = x.contiguous() 20 | return x 21 | 22 | 23 | def random_flip(x, p): 24 | x_out = x.clone() 25 | n, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] 26 | flip_prob = torch.FloatTensor(n, 1).uniform_(0.0, 1.0).to(x.device) 27 | flip_mask = flip_prob < p 28 | flip_mask = flip_mask.type(torch.bool).view(n, 1, 1, 1).repeat(1, c, h, w) 29 | x_out[flip_mask] = torch.flip(x[flip_mask].view(-1, c, h, w), [3]).view(-1) 30 | return x_out 31 | 32 | 33 | def random_translation(x, ratio): 34 | max_t_x, max_t_y = int(x.shape[2] * ratio), int(x.shape[3] * ratio) 35 | t_x = torch.randint(-max_t_x, max_t_x + 1, size=[x.shape[0], 1, 1], device=x.device) 36 | t_y = torch.randint(-max_t_y, max_t_y + 1, size=[x.shape[0], 1, 1], device=x.device) 37 | 38 | grid_batch, grid_x, grid_y = torch.meshgrid( 39 | torch.arange(x.shape[0], dtype=torch.long, device=x.device), 40 | torch.arange(x.shape[2], dtype=torch.long, device=x.device), 41 | torch.arange(x.shape[3], dtype=torch.long, device=x.device), 42 | ) 43 | 44 | grid_x = (grid_x + t_x) + max_t_x 45 | grid_y = (grid_y + t_y) + max_t_y 46 | x_pad = F.pad(input=x, pad=[max_t_y, max_t_y, max_t_x, max_t_x], mode='reflect') 47 | x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2) 48 | return x 49 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/log.py: -------------------------------------------------------------------------------- 1 | # PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN 2 | # The MIT License (MIT) 3 | # See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details 4 | 5 | # src/utils/log.py 6 | 7 | from os.path import dirname, exists, join 8 | from datetime import datetime 9 | import json 10 | import os 11 | import logging 12 | 13 | 14 | def make_run_name(format, data_name, framework, phase): 15 | return format.format(data_name=data_name, 16 | framework=framework, 17 | phase=phase, 18 | timestamp=datetime.now().strftime("%Y_%m_%d_%H_%M_%S")) 19 | 20 | 21 | def make_logger(save_dir, run_name, log_output): 22 | if log_output is not None: 23 | run_name = log_output.split('/')[-1].split('.')[0] 24 | logger = logging.getLogger(run_name) 25 | logger.propagate = False 26 | log_filepath = log_output if log_output is not None else join(save_dir, "logs", run_name + ".log") 27 | 28 | log_dir = 
dirname(log_filepath) 29 | if not exists(log_dir): 30 | os.makedirs(log_dir) 31 | 32 | if not logger.handlers: # execute only if logger doesn't already exist 33 | file_handler = logging.FileHandler(log_filepath, 'a', 'utf-8') 34 | stream_handler = logging.StreamHandler(os.sys.stdout) 35 | 36 | formatter = logging.Formatter('[%(levelname)s] %(asctime)s > %(message)s', datefmt="%Y-%m-%d %H:%M:%S") 37 | 38 | file_handler.setFormatter(formatter) 39 | stream_handler.setFormatter(formatter) 40 | 41 | logger.addHandler(file_handler) 42 | logger.addHandler(stream_handler) 43 | logger.setLevel(logging.INFO) 44 | return logger 45 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/sefa.py: -------------------------------------------------------------------------------- 1 | # PyTorch StudioGAN: https://github.com/POSTECH-CVLab/PyTorch-StudioGAN 2 | # The MIT License (MIT) 3 | # See license file or visit https://github.com/POSTECH-CVLab/PyTorch-StudioGAN for details 4 | 5 | # src/utils/sefa.py 6 | 7 | import torch 8 | 9 | import utils.misc as misc 10 | 11 | 12 | def apply_sefa(generator, backbone, z, fake_label, num_semantic_axis, maximum_variations, num_cols): 13 | generator = misc.peel_model(generator) 14 | w = generator.linear0.weight 15 | if backbone == "big_resnet": 16 | zs = z 17 | z = torch.split(zs, generator.chunk_size, 0)[0] 18 | eigen_vectors = torch.svd(w).V.to(z.device)[:, :num_semantic_axis] 19 | 20 | z_dim = len(z) 21 | zs_start = z.repeat(num_semantic_axis).view(-1, 1, z_dim) 22 | zs_end = (z.unsqueeze(1) + maximum_variations * eigen_vectors).T.view(-1, 1, z_dim) 23 | if backbone == "big_resnet": 24 | zs_shard = zs[z_dim:].expand([1, 1, -1]).repeat(num_semantic_axis, 1, 1) 25 | zs_start = torch.cat([zs_start, zs_shard], axis=2) 26 | zs_end = torch.cat([zs_end, zs_shard], axis=2) 27 | zs_canvas = misc.interpolate(x0=zs_start, x1=zs_end, num_midpoints=num_cols - 2).view(-1, zs_start.shape[-1]) 28 | images_canvas = generator(zs_canvas, fake_label.repeat(len(zs_canvas)), eval=True) 29 | return images_canvas 30 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/style_ops/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | # empty 10 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/style_ops/bias_act.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 
8 | 9 | //------------------------------------------------------------------------ 10 | // CUDA kernel parameters. 11 | 12 | struct bias_act_kernel_params 13 | { 14 | const void* x; // [sizeX] 15 | const void* b; // [sizeB] or NULL 16 | const void* xref; // [sizeX] or NULL 17 | const void* yref; // [sizeX] or NULL 18 | const void* dy; // [sizeX] or NULL 19 | void* y; // [sizeX] 20 | 21 | int grad; 22 | int act; 23 | float alpha; 24 | float gain; 25 | float clamp; 26 | 27 | int sizeX; 28 | int sizeB; 29 | int stepB; 30 | int loopX; 31 | }; 32 | 33 | //------------------------------------------------------------------------ 34 | // CUDA kernel selection. 35 | 36 | template <class T> void* choose_bias_act_kernel(const bias_act_kernel_params& p); 37 | 38 | //------------------------------------------------------------------------ 39 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/style_ops/dnnlib/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 | # 3 | # NVIDIA CORPORATION and its licensors retain all intellectual property 4 | # and proprietary rights in and to this software, related documentation 5 | # and any modifications thereto. Any use, reproduction, disclosure or 6 | # distribution of this software and related documentation without an express 7 | # license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | from .util import EasyDict, make_cache_dir_path 10 | -------------------------------------------------------------------------------- /DCGAN_BigGAN/src/utils/style_ops/upfirdn2d.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 | // 3 | // NVIDIA CORPORATION and its licensors retain all intellectual property 4 | // and proprietary rights in and to this software, related documentation 5 | // and any modifications thereto. Any use, reproduction, disclosure or 6 | // distribution of this software and related documentation without an express 7 | // license agreement from NVIDIA CORPORATION is strictly prohibited. 8 | 9 | #include <cuda_runtime.h> 10 | 11 | //------------------------------------------------------------------------ 12 | // CUDA kernel parameters. 13 | 14 | struct upfirdn2d_kernel_params 15 | { 16 | const void* x; 17 | const float* f; 18 | void* y; 19 | 20 | int2 up; 21 | int2 down; 22 | int2 pad0; 23 | int flip; 24 | float gain; 25 | 26 | int4 inSize; // [width, height, channel, batch] 27 | int4 inStride; 28 | int2 filterSize; // [width, height] 29 | int2 filterStride; 30 | int4 outSize; // [width, height, channel, batch] 31 | int4 outStride; 32 | int sizeMinor; 33 | int sizeMajor; 34 | 35 | int loopMinor; 36 | int loopMajor; 37 | int loopX; 38 | int launchMinor; 39 | int launchMajor; 40 | }; 41 | 42 | //------------------------------------------------------------------------ 43 | // CUDA kernel specialization. 44 | 45 | struct upfirdn2d_kernel_spec 46 | { 47 | void* kernel; 48 | int tileOutW; 49 | int tileOutH; 50 | int loopMinor; 51 | int loopX; 52 | }; 53 | 54 | //------------------------------------------------------------------------ 55 | // CUDA kernel selection. 
56 | 
57 | template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
58 | 
59 | //------------------------------------------------------------------------
60 | 
--------------------------------------------------------------------------------
/ICDM23_AdaM3_supp.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/ICDM23_AdaM3_supp.pdf
--------------------------------------------------------------------------------
/LSTM_Penn_Treebank/README.md:
--------------------------------------------------------------------------------
1 | # LSTM for Language Modeling
2 | 
3 | This folder contains the implementation for training LSTMs on the Penn Treebank dataset. Each experiment is run 5 times independently, using random seeds 0, 1, 2, 3, and 4.
4 | 
5 | ## Environment setup
6 | 
7 | ```
8 | conda env create -f environment.yml
9 | ```
10 | 
11 | ## Prepare data
12 | ```
13 | sh getdata.sh
14 | ```
15 | 
16 | ## Reproducing the results
17 | 
18 | ### 1-layer LSTM
19 | ```
20 | sh main_all_1layer.sh
21 | ```
22 | 
23 | ### 2-layer LSTM
24 | ```
25 | sh main_all_2layer.sh
26 | ```
27 | 
28 | ### 3-layer LSTM
29 | ```
30 | sh main_all_3layer.sh
31 | ```
32 | 
33 | ### To plot the curves
34 | ```
35 | python plot_curve.py
36 | ```
37 | 
38 | ### To print the numerical results
39 | ```
40 | python print_value.py
41 | ```
42 | 
43 | 
--------------------------------------------------------------------------------
/LSTM_Penn_Treebank/Types.py:
--------------------------------------------------------------------------------
1 | from typing import Iterable, Union, Callable, Dict, Optional, Tuple, Any
2 | from torch import Tensor
3 | 
4 | Params = Union[Iterable[Tensor], Iterable[dict]]
5 | 
6 | LossClosure = Callable[[], float]
7 | OptLossClosure = Optional[LossClosure]
8 | Betas2 = Tuple[float, float]
9 | State = Dict[str, Any]
10 | OptFloat = Optional[float]
11 | 
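Note on `Types.py`: these aliases are shared by the optimizer implementations in this folder (e.g. `AdaM3.py`, `RAdam.py`). As a minimal sketch of how they are typically consumed (the `SketchOptimizer` below is hypothetical, not a file from this repo): an optimizer receives `Params` and a `Betas2` pair at construction, and its `step` method accepts an `OptLossClosure` and returns an `OptFloat`.

```
# Hypothetical sketch, not a file from this repo: shows how the Types.py
# aliases are typically used in a torch.optim.Optimizer subclass.
from torch.optim.optimizer import Optimizer

from Types import Betas2, OptFloat, OptLossClosure, Params


class SketchOptimizer(Optimizer):
    def __init__(self, params: Params, lr: float = 1e-3,
                 betas: Betas2 = (0.9, 0.999), eps: float = 1e-8) -> None:
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super().__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        loss = None
        if closure is not None:
            # A LossClosure re-evaluates the model and returns the loss.
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                # Plain gradient descent as a stand-in for the real update rule.
                p.data.add_(p.grad, alpha=-group["lr"])
        return loss
```

Any real update rule (for instance the moment estimates in `AdaM3.py`) would replace the plain gradient step in the inner loop; the surrounding signatures stay the same.

--------------------------------------------------------------------------------
/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]
--------------------------------------------------------------------------------
/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]
--------------------------------------------------------------------------------
/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]:
--------------------------------------------------------------------------------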
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers2-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabelief-nlayers3-lr0.01-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers1-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adabound-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers2-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam-nlayers3-lr0.01-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers1-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers2-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adam3-nlayers3-lr0.001-clip-0.25-eps1e-16-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-adamw-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers1-lr0.001-clip-0.25-eps1e-12-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers2-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] 
-------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-radam-nlayers3-lr0.001-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers1-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers2-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- 
/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-sgd-nlayers3-lr30.0-clip-0.25-eps1e-08-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers1-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- 
/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers2-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run0-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run1-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run2-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run3-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145]: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/LSTM_Penn_Treebank/curve/PTB.pt-niter-200-optimizer-yogi-nlayers3-lr0.01-clip-0.25-eps0.001-betas-0.9-0.999-run4-wdecay1.2e-06-when-[100, 145] -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | 4 | from collections import Counter 5 | 6 | 7 | class Dictionary(object): 8 | def __init__(self): 9 | self.word2idx = {} 10 | self.idx2word = [] 11 | self.counter = Counter() 
12 | self.total = 0 13 | 14 | def add_word(self, word): 15 | if word not in self.word2idx: 16 | self.idx2word.append(word) 17 | self.word2idx[word] = len(self.idx2word) - 1 18 | token_id = self.word2idx[word] 19 | self.counter[token_id] += 1 20 | self.total += 1 21 | return self.word2idx[word] 22 | 23 | def __len__(self): 24 | return len(self.idx2word) 25 | 26 | 27 | class Corpus(object): 28 | def __init__(self, path): 29 | self.dictionary = Dictionary() 30 | self.train = self.tokenize(os.path.join(path, 'train.txt')) 31 | self.valid = self.tokenize(os.path.join(path, 'valid.txt')) 32 | self.test = self.tokenize(os.path.join(path, 'test.txt')) 33 | 34 | def tokenize(self, path): 35 | """Tokenizes a text file.""" 36 | assert os.path.exists(path) 37 | # Add words to the dictionary 38 | with open(path, 'r') as f: 39 | tokens = 0 40 | for line in f: 41 | words = line.split() + ['<eos>'] 42 | tokens += len(words) 43 | for word in words: 44 | self.dictionary.add_word(word) 45 | 46 | # Tokenize file content 47 | with open(path, 'r') as f: 48 | ids = torch.LongTensor(tokens) 49 | token = 0 50 | for line in f: 51 | words = line.split() + ['<eos>'] 52 | for word in words: 53 | ids[token] = self.dictionary.word2idx[word] 54 | token += 1 55 | 56 | return ids 57 | -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/embed_regularize.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | 5 | def embedded_dropout(embed, words, dropout=0.1, scale=None): 6 | if dropout: 7 | mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout) 8 | masked_embed_weight = mask * embed.weight 9 | else: 10 | masked_embed_weight = embed.weight 11 | if scale: 12 | masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight 13 | 14 | padding_idx = embed.padding_idx 15 | if padding_idx is None: 16 | padding_idx = -1 17 | 18 | X = torch.nn.functional.embedding(words, masked_embed_weight, 19 | padding_idx, embed.max_norm, embed.norm_type, 20 | embed.scale_grad_by_freq, embed.sparse 21 | ) 22 | return X 23 | 24 | if __name__ == '__main__': 25 | V = 50 26 | h = 4 27 | bptt = 10 28 | batch_size = 2 29 | 30 | embed = torch.nn.Embedding(V, h) 31 | 32 | words = np.random.randint(low=0, high=V, size=(batch_size, bptt))  # randint's high is exclusive; replaces np.random.random_integers, removed in modern NumPy 33 | words = torch.LongTensor(words) 34 | 35 | origX = embed(words) 36 | X = embedded_dropout(embed, words) 37 | 38 | print(origX) 39 | print(X) 40 | -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/getdata.sh: -------------------------------------------------------------------------------- 1 | echo "=== Acquiring datasets ===" 2 | echo "---" 3 | mkdir -p save 4 | 5 | mkdir -p data 6 | cd data 7 | 8 | echo "- Downloading WikiText-2 (WT2)" 9 | wget --quiet --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip 10 | unzip -q wikitext-2-v1.zip 11 | cd wikitext-2 12 | mv wiki.train.tokens train.txt 13 | mv wiki.valid.tokens valid.txt 14 | mv wiki.test.tokens test.txt 15 | cd .. 16 | 17 | echo "- Downloading WikiText-103 (WT103)" 18 | wget --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip 19 | unzip -q wikitext-103-v1.zip 20 | cd wikitext-103 21 | mv wiki.train.tokens train.txt 22 | mv wiki.valid.tokens valid.txt 23 | mv wiki.test.tokens test.txt 24 | cd ..
25 | 26 | echo "- Downloading enwik8 (Character)" 27 | mkdir -p enwik8 28 | cd enwik8 29 | wget --continue http://mattmahoney.net/dc/enwik8.zip 30 | python prep_enwik8.py 31 | cd .. 32 | 33 | echo "- Downloading Penn Treebank (PTB)" 34 | wget --quiet --continue http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz 35 | tar -xzf simple-examples.tgz 36 | 37 | mkdir -p penn 38 | cd penn 39 | mv ../simple-examples/data/ptb.train.txt train.txt 40 | mv ../simple-examples/data/ptb.test.txt test.txt 41 | mv ../simple-examples/data/ptb.valid.txt valid.txt 42 | cd .. 43 | 44 | echo "- Downloading Penn Treebank (Character)" 45 | mkdir -p pennchar 46 | cd pennchar 47 | mv ../simple-examples/data/ptb.char.train.txt train.txt 48 | mv ../simple-examples/data/ptb.char.test.txt test.txt 49 | mv ../simple-examples/data/ptb.char.valid.txt valid.txt 50 | cd .. 51 | 52 | rm -rf simple-examples/ 53 | 54 | echo "---" 55 | echo "Happy language modeling :)" 56 | -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/locked_dropout.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | 5 | class LockedDropout(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | 9 | def forward(self, x, dropout=0.5): 10 | if not self.training or not dropout: 11 | return x 12 | m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout) 13 | mask = Variable(m, requires_grad=False) / (1 - dropout) 14 | mask = mask.expand_as(x) 15 | return mask * x 16 | -------------------------------------------------------------------------------- /LSTM_Penn_Treebank/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def repackage_hidden(h): 5 | """Wraps hidden states in new Tensors, 6 | to detach them from their history.""" 7 | if isinstance(h, torch.Tensor): 8 | return h.detach() 9 | else: 10 | return tuple(repackage_hidden(v) for v in h) 11 | 12 | 13 | def batchify(data, bsz, args): 14 | # Work out how cleanly we can divide the dataset into bsz parts. 15 | nbatch = data.size(0) // bsz 16 | # Trim off any extra elements that wouldn't cleanly fit (remainders). 17 | data = data.narrow(0, 0, nbatch * bsz) 18 | # Evenly divide the data across the bsz batches. 19 | data = data.view(bsz, -1).t().contiguous() 20 | if args.cuda: 21 | data = data.cuda() 22 | return data 23 | 24 | 25 | def get_batch(source, i, args, seq_len=None, evaluation=False): 26 | seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i) 27 | data = source[i:i+seq_len] 28 | target = source[i+1:i+1+seq_len].view(-1) 29 | return data, target 30 | -------------------------------------------------------------------------------- /SNGAN/README.md: -------------------------------------------------------------------------------- 1 | # Generative Adversarial Network 2 | 3 | This folder contains the implementation for training SNGAN on CIFAR-10. Each experiment is run $5$ times independently.
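Results from the $5$ runs are typically summarized as a mean with spread across runs. A minimal sketch of such aggregation (illustrative only, not a script in this repository; the FID values below are placeholders):

```python
# Hypothetical aggregation snippet, not part of this repository: summarize a
# per-run metric such as FID as mean +/- standard deviation over five runs.
from statistics import mean, stdev

fid_per_run = [13.1, 12.8, 13.4, 12.9, 13.2]  # placeholder values, one per run
print(f"FID: {mean(fid_per_run):.2f} +/- {stdev(fid_per_run):.2f}")
```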
4 | 5 | ## Environment setup 6 | 7 | ``` 8 | conda env create -f environment.yml 9 | ``` 10 | 11 | 12 | ## Reproducing the results 13 | ``` 14 | python main.py -t -e -c configs/CIFAR10/SNGAN-adam3.json 15 | ``` 16 | 17 | -------------------------------------------------------------------------------- /SNGAN/sync_batchnorm/unittest.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import unittest 4 | import torch 5 | 6 | 7 | class TorchTestCase(unittest.TestCase): 8 | def assertTensorClose(self, x, y): 9 | adiff = float((x - y).abs().max()) 10 | if (y == 0).all(): 11 | rdiff = 'NaN' 12 | else: 13 | rdiff = float((adiff / y).abs().max()) 14 | 15 | message = ( 16 | 'Tensor close check failed\n' 17 | 'adiff={}\n' 18 | 'rdiff={}\n' 19 | ).format(adiff, rdiff) 20 | self.assertTrue(torch.allclose(x, y), message) 21 | 22 | -------------------------------------------------------------------------------- /SNGAN/utils/ada_op/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_act import FusedLeakyReLU, fused_leaky_relu 2 | from .upfirdn2d import upfirdn2d 3 | -------------------------------------------------------------------------------- /SNGAN/utils/ada_op/fused_bias_act.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | #include <torch/extension.h> 5 | 6 | 7 | torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 8 | int act, int grad, float alpha, float scale); 9 | 10 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 13 | 14 | torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, 15 | int act, int grad, float alpha, float scale) { 16 | CHECK_CUDA(input); 17 | CHECK_CUDA(bias); 18 | 19 | return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); 20 | } 21 | 22 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 23 | m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); 24 | } 25 | -------------------------------------------------------------------------------- /SNGAN/utils/ada_op/upfirdn2d.cpp: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | #include <torch/extension.h> 5 | 6 | 7 | torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, 8 | int up_x, int up_y, int down_x, int down_y, 9 | int pad_x0, int pad_x1, int pad_y0, int pad_y1); 10 | 11 | #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") 12 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 13 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 14 | 15 | torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, 16 | int up_x, int up_y, int down_x, int down_y, 17 | int pad_x0, int pad_x1, int pad_y0, int pad_y1) { 18 | CHECK_CUDA(input); 19 | CHECK_CUDA(kernel); 20 | 21 | return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); 22 | } 23 | 24 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 25 | m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); 26 | } 27 | -------------------------------------------------------------------------------- /SNGAN/utils/cr_diff_aug.py:
-------------------------------------------------------------------------------- 1 | 2 | import random 3 | 4 | import torch 5 | import torch.nn.functional as F 6 | 7 | 8 | 9 | def CR_DiffAug(x, flip=True, translation=True): 10 | if flip: 11 | x = random_flip(x, 0.5) 12 | if translation: 13 | x = random_translation(x, 1/8) 14 | if flip or translation: 15 | x = x.contiguous() 16 | return x 17 | 18 | 19 | def random_flip(x, p): 20 | x_out = x.clone() 21 | n, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3] 22 | flip_prob = torch.FloatTensor(n, 1).uniform_(0.0, 1.0) 23 | flip_mask = flip_prob < p 24 | flip_mask = flip_mask.type(torch.bool).view(n, 1, 1, 1).repeat(1, c, h, w).to(x.device) 25 | x_out[flip_mask] = torch.flip(x[flip_mask].view(-1, c, h, w), [3]).view(-1) 26 | return x_out 27 | 28 | 29 | def random_translation(x, ratio): 30 | max_t_x, max_t_y = int(x.shape[2]*ratio), int(x.shape[3]*ratio) 31 | t_x = torch.randint(-max_t_x, max_t_x + 1, size = [x.shape[0], 1, 1], device=x.device) 32 | t_y = torch.randint(-max_t_y, max_t_y + 1, size = [x.shape[0], 1, 1], device=x.device) 33 | 34 | grid_batch, grid_x, grid_y = torch.meshgrid( 35 | torch.arange(x.shape[0], dtype=torch.long, device=x.device), 36 | torch.arange(x.shape[2], dtype=torch.long, device=x.device), 37 | torch.arange(x.shape[3], dtype=torch.long, device=x.device), 38 | ) 39 | 40 | grid_x = (grid_x + t_x) + max_t_x 41 | grid_y = (grid_y + t_y) + max_t_y 42 | x_pad = F.pad(input=x, pad=[max_t_x, max_t_x, max_t_y, max_t_y], mode='reflect') 43 | x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2) 44 | return x 45 | -------------------------------------------------------------------------------- /SNGAN/utils/load_checkpoint.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import os 4 | 5 | import torch 6 | 7 | 8 | 9 | def load_checkpoint(model, optimizer, filename, metric=False, ema=False): 10 | # Note: Input model & optimizer should be pre-defined. This routine only updates their states. 
11 | start_step = 0 12 | if ema: 13 | checkpoint = torch.load(filename) 14 | model.load_state_dict(checkpoint['state_dict']) 15 | return model 16 | else: 17 | checkpoint = torch.load(filename) 18 | seed = checkpoint['seed'] 19 | run_name = checkpoint['run_name'] 20 | start_step = checkpoint['step'] 21 | model.load_state_dict(checkpoint['state_dict']) 22 | optimizer.load_state_dict(checkpoint['optimizer']) 23 | ada_p = checkpoint['ada_p'] 24 | for state in optimizer.state.values(): 25 | for k, v in state.items(): 26 | if isinstance(v, torch.Tensor): 27 | state[k] = v.cuda() 28 | 29 | if metric: 30 | best_step = checkpoint['best_step'] 31 | best_fid = checkpoint['best_fid'] 32 | best_fid_checkpoint_path = checkpoint['best_fid_checkpoint_path'] 33 | return model, optimizer, seed, run_name, start_step, ada_p, best_step, best_fid, best_fid_checkpoint_path 34 | return model, optimizer, seed, run_name, start_step, ada_p 35 | -------------------------------------------------------------------------------- /SNGAN/utils/log.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import json 5 | import os 6 | import logging 7 | from os.path import dirname, abspath, exists, join 8 | from datetime import datetime 9 | 10 | 11 | 12 | def make_run_name(format, framework, phase): 13 | return format.format( 14 | framework=framework, 15 | phase=phase, 16 | timestamp=datetime.now().strftime("%Y_%m_%d_%H_%M_%S") 17 | ) 18 | 19 | 20 | def make_logger(run_name, log_output): 21 | if log_output is not None: 22 | run_name = log_output.split('/')[-1].split('.')[0] 23 | logger = logging.getLogger(run_name) 24 | logger.propagate = False 25 | log_filepath = log_output if log_output is not None else join('logs', f'{run_name}.log') 26 | 27 | log_dir = dirname(abspath(log_filepath)) 28 | if not exists(log_dir): 29 | os.makedirs(log_dir) 30 | 31 | if not logger.handlers: # execute only if logger doesn't already exist 32 | file_handler = logging.FileHandler(log_filepath, 'a', 'utf-8') 33 | stream_handler = logging.StreamHandler(os.sys.stdout) 34 | 35 | formatter = logging.Formatter('[%(levelname)s] %(asctime)s > %(message)s', datefmt='%Y-%m-%d %H:%M:%S') 36 | 37 | file_handler.setFormatter(formatter) 38 | stream_handler.setFormatter(formatter) 39 | 40 | logger.addHandler(file_handler) 41 | logger.addHandler(stream_handler) 42 | logger.setLevel(logging.INFO) 43 | return logger 44 | 45 | 46 | def make_checkpoint_dir(checkpoint_dir, run_name): 47 | checkpoint_dir = checkpoint_dir if checkpoint_dir is not None else join('checkpoints', run_name) 48 | if not exists(abspath(checkpoint_dir)): 49 | os.makedirs(checkpoint_dir) 50 | return checkpoint_dir 51 | -------------------------------------------------------------------------------- /Transformer_NMT/!: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/! -------------------------------------------------------------------------------- /Transformer_NMT/README.md: -------------------------------------------------------------------------------- 1 | # Transformer for Neural Machine Translation 2 | 3 | This folder contains the implementation for training transformers on the IWSLT'14 DE-EN dataset. Each experiment is run $5$ times independently, using random seeds 0, 1, 2, 3, and 4. The code is partially adapted from https://github.com/pytorch/fairseq.git.
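All of the training scripts under `config/` below pass `--lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 --warmup-updates 8000` to fairseq. As a reading aid, here is a small illustrative re-implementation of that schedule (a sketch following fairseq's documented `inverse_sqrt` behavior, not a file in this repository):

```python
# Illustrative sketch of fairseq's inverse_sqrt LR schedule (not part of this
# repo): linear warmup from warmup_init_lr to lr over warmup_updates steps,
# then decay proportional to the inverse square root of the update number.
def inverse_sqrt_lr(num_updates, lr=5e-4, warmup_init_lr=1e-7, warmup_updates=8000):
    if num_updates < warmup_updates:
        return warmup_init_lr + num_updates * (lr - warmup_init_lr) / warmup_updates
    return lr * (warmup_updates ** 0.5) * (num_updates ** -0.5)

# The peak LR is reached exactly at the end of warmup:
assert abs(inverse_sqrt_lr(8000) - 5e-4) < 1e-12
```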
4 | 5 | ## Environment setup 6 | ### create conda environment 7 | ``` 8 | conda env create -f environment.yml 9 | ``` 10 | ### set up fairseq 11 | ``` 12 | pip install --editable . 13 | ``` 14 | 15 | ## Prepare data 16 | ``` 17 | sh prepare-iwslt14.sh 18 | ``` 19 | 20 | ## Reproducing the results 21 | 22 | ### SGDM 23 | 24 | ``` 25 | sh sh_files/sgdm.sh 26 | ``` 27 | ### Adam 28 | 29 | ``` 30 | sh sh_files/adam.sh 31 | ``` 32 | ### AdamW 33 | 34 | ``` 35 | sh sh_files/adamw.sh 36 | ``` 37 | ### AdaBelief 38 | 39 | ``` 40 | sh sh_files/adabelief.sh 41 | ``` 42 | 43 | ### AdaM3 44 | 45 | ``` 46 | sh sh_files/adam3.sh 47 | ``` 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /Transformer_NMT/config/adabelief.sh: -------------------------------------------------------------------------------- 1 | DATA_PATH=./data-bin/iwslt14.tokenized.de-en.joined 2 | model=transformer 3 | PROBLEM=iwslt14_de_en 4 | ARCH=transformer_iwslt_de_en_v2 5 | OUTPUT_PATH=log/adabelief 6 | NUM=5 7 | mkdir -p $OUTPUT_PATH 8 | 9 | export CUDA_VISIBLE_DEVICES=0; python main.py ${DATA_PATH} \ 10 | --seed 1 \ 11 | --adam-eps 1e-16 \ 12 | --rectify True --weight-decouple True \ 13 | --arch ${ARCH} --share-all-embeddings \ 14 | --optimizer adabeliefv2 --adam-betas '(0.9, 0.999)' --clip-norm 0.0 \ 15 | --dropout 0.3 --attention-dropout 0.1 --relu-dropout 0.1 \ 16 | --criterion label_smoothed_cross_entropy \ 17 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 --warmup-updates 8000 \ 18 | --lr 0.0015 --min-lr 1e-9 \ 19 | --label-smoothing 0.1 --weight-decay 0.0001 \ 20 | --max-tokens 4096 --save-dir ${OUTPUT_PATH} \ 21 | --update-freq 1 --no-progress-bar --log-interval 50 \ 22 | --ddp-backend no_c10d \ 23 | --keep-last-epochs ${NUM} --max-epoch 55 \ 24 | --restore-file ${OUTPUT_PATH}/checkpoint_best.pt \ 25 | | tee -a ${OUTPUT_PATH}/train_log.txt 26 | 27 | # --early-stop ${NUM} \ 28 | 29 | python scripts/average_checkpoints.py --inputs ${OUTPUT_PATH} --num-epoch-checkpoints ${NUM} --output ${OUTPUT_PATH}/averaged_model.pt 30 | 31 | BEAM_SIZE=5 32 | LPEN=1.0 33 | TRANS_PATH=${OUTPUT_PATH}/trans 34 | RESULT_PATH=${TRANS_PATH}/ 35 | 36 | mkdir -p $RESULT_PATH 37 | CKPT=averaged_model.pt 38 | 39 | export CUDA_VISIBLE_DEVICES=0; python generate.py \ 40 | ${DATA_PATH} \ 41 | --path ${OUTPUT_PATH}/${CKPT} \ 42 | --batch-size 128 \ 43 | --beam ${BEAM_SIZE} \ 44 | --lenpen ${LPEN} \ 45 | --remove-bpe \ 46 | --log-format simple \ 47 | --source-lang de \ 48 | --target-lang en \ 49 | > ${RESULT_PATH}/res.txt 50 | 51 | -------------------------------------------------------------------------------- /Transformer_NMT/config/adam.sh: -------------------------------------------------------------------------------- 1 | DATA_PATH=./data-bin/iwslt14.tokenized.de-en.joined 2 | model=transformer 3 | PROBLEM=iwslt14_de_en 4 | ARCH=transformer_iwslt_de_en_v2 5 | OUTPUT_PATH=log/adam 6 | NUM=5 7 | mkdir -p $OUTPUT_PATH 8 | 9 | export CUDA_VISIBLE_DEVICES=0; python main.py ${DATA_PATH} \ 10 | --seed 1 \ 11 | --adam-eps 1e-08 \ 12 | --arch ${ARCH} --share-all-embeddings \ 13 | --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ 14 | --dropout 0.3 --attention-dropout 0.1 --relu-dropout 0.1 \ 15 | --criterion label_smoothed_cross_entropy \ 16 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 --warmup-updates 8000 \ 17 | --lr 0.0015 --min-lr 1e-9 \ 18 | --label-smoothing 0.1 --weight-decay 0.0001 \ 19 | --max-tokens 4096 --save-dir ${OUTPUT_PATH} \ 20 | --update-freq 1 --no-progress-bar 
--log-interval 50 \ 21 | --ddp-backend no_c10d \ 22 | --keep-last-epochs ${NUM} --max-epoch 55 \ 23 | --restore-file ${OUTPUT_PATH}/checkpoint_best.pt \ 24 | | tee -a ${OUTPUT_PATH}/train_log.txt 25 | 26 | # --early-stop ${NUM} \ 27 | 28 | python scripts/average_checkpoints.py --inputs ${OUTPUT_PATH} --num-epoch-checkpoints ${NUM} --output ${OUTPUT_PATH}/averaged_model.pt 29 | 30 | BEAM_SIZE=5 31 | LPEN=1.0 32 | TRANS_PATH=${OUTPUT_PATH}/trans 33 | RESULT_PATH=${TRANS_PATH}/ 34 | 35 | mkdir -p $RESULT_PATH 36 | CKPT=averaged_model.pt 37 | 38 | export CUDA_VISIBLE_DEVICES=0; python generate.py \ 39 | ${DATA_PATH} \ 40 | --path ${OUTPUT_PATH}/${CKPT} \ 41 | --batch-size 128 \ 42 | --beam ${BEAM_SIZE} \ 43 | --lenpen ${LPEN} \ 44 | --remove-bpe \ 45 | --log-format simple \ 46 | --source-lang de \ 47 | --target-lang en \ 48 | > ${RESULT_PATH}/res.txt 49 | -------------------------------------------------------------------------------- /Transformer_NMT/config/adam3.sh: -------------------------------------------------------------------------------- 1 | DATA_PATH=./data-bin/iwslt14.tokenized.de-en.joined 2 | model=transformer 3 | PROBLEM=iwslt14_de_en 4 | ARCH=transformer_iwslt_de_en_v2 5 | OUTPUT_PATH=log/adam3 6 | NUM=5 7 | mkdir -p $OUTPUT_PATH 8 | 9 | export CUDA_VISIBLE_DEVICES=0; python main.py ${DATA_PATH} \ 10 | --seed 1 \ 11 | --adam-eps 1e-16 \ 12 | --rectify True --weight-decouple True \ 13 | --arch ${ARCH} --share-all-embeddings \ 14 | --optimizer adam3 --adam-betas '(0.9, 0.999)' --clip-norm 0.0 \ 15 | --dropout 0.3 --attention-dropout 0.1 --relu-dropout 0.1 \ 16 | --criterion label_smoothed_cross_entropy \ 17 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 --warmup-updates 8000 \ 18 | --lr 5e-4 --min-lr 1e-9 \ 19 | --label-smoothing 0.1 --weight-decay 0.0001 \ 20 | --max-tokens 4096 --save-dir ${OUTPUT_PATH} \ 21 | --update-freq 1 --no-progress-bar --log-interval 50 \ 22 | --ddp-backend no_c10d \ 23 | --keep-last-epochs ${NUM} --max-epoch 55 \ 24 | --restore-file ${OUTPUT_PATH}/checkpoint_best.pt \ 25 | | tee -a ${OUTPUT_PATH}/train_log.txt 26 | 27 | # --early-stop ${NUM} \ 28 | 29 | python scripts/average_checkpoints.py --inputs ${OUTPUT_PATH} --num-epoch-checkpoints ${NUM} --output ${OUTPUT_PATH}/averaged_model.pt 30 | 31 | BEAM_SIZE=5 32 | LPEN=1.0 33 | TRANS_PATH=${OUTPUT_PATH}/trans 34 | RESULT_PATH=${TRANS_PATH}/ 35 | 36 | mkdir -p $RESULT_PATH 37 | CKPT=averaged_model.pt 38 | 39 | export CUDA_VISIBLE_DEVICES=0; python generate.py \ 40 | ${DATA_PATH} \ 41 | --path ${OUTPUT_PATH}/${CKPT} \ 42 | --batch-size 128 \ 43 | --beam ${BEAM_SIZE} \ 44 | --lenpen ${LPEN} \ 45 | --remove-bpe \ 46 | --log-format simple \ 47 | --source-lang de \ 48 | --target-lang en \ 49 | > ${RESULT_PATH}/res.txt 50 | 51 | -------------------------------------------------------------------------------- /Transformer_NMT/config/radam.sh: -------------------------------------------------------------------------------- 1 | DATA_PATH=./data-bin/iwslt14.tokenized.de-en.joined 2 | model=transformer 3 | PROBLEM=iwslt14_de_en 4 | ARCH=transformer_iwslt_de_en_v2 5 | OUTPUT_PATH=log/radam 6 | NUM=5 7 | mkdir -p $OUTPUT_PATH 8 | 9 | export CUDA_VISIBLE_DEVICES=0; python main.py ${DATA_PATH} \ 10 | --seed 1 \ 11 | --adam-eps 1e-08 \ 12 | --arch ${ARCH} --share-all-embeddings \ 13 | --optimizer radam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ 14 | --dropout 0.3 --attention-dropout 0.1 --relu-dropout 0.1 \ 15 | --criterion label_smoothed_cross_entropy \ 16 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 
--warmup-updates 8000 \ 17 | --lr 0.0015 --min-lr 1e-9 \ 18 | --label-smoothing 0.1 --weight-decay 0.0001 \ 19 | --max-tokens 4096 --save-dir ${OUTPUT_PATH} \ 20 | --update-freq 1 --no-progress-bar --log-interval 50 \ 21 | --ddp-backend no_c10d \ 22 | --keep-last-epochs ${NUM} --max-epoch 55 \ 23 | --restore-file ${OUTPUT_PATH}/checkpoint_best.pt \ 24 | | tee -a ${OUTPUT_PATH}/train_log.txt 25 | 26 | # --early-stop ${NUM} \ 27 | 28 | python scripts/average_checkpoints.py --inputs ${OUTPUT_PATH} --num-epoch-checkpoints ${NUM} --output ${OUTPUT_PATH}/averaged_model.pt 29 | 30 | BEAM_SIZE=5 31 | LPEN=1.0 32 | TRANS_PATH=${OUTPUT_PATH}/trans 33 | RESULT_PATH=${TRANS_PATH}/ 34 | 35 | mkdir -p $RESULT_PATH 36 | CKPT=averaged_model.pt 37 | 38 | export CUDA_VISIBLE_DEVICES=0; python generate.py \ 39 | ${DATA_PATH} \ 40 | --path ${OUTPUT_PATH}/${CKPT} \ 41 | --batch-size 128 \ 42 | --beam ${BEAM_SIZE} \ 43 | --lenpen ${LPEN} \ 44 | --remove-bpe \ 45 | --log-format simple \ 46 | --source-lang de \ 47 | --target-lang en \ 48 | > ${RESULT_PATH}/res.txt 49 | 50 | -------------------------------------------------------------------------------- /Transformer_NMT/config/sgd.sh: -------------------------------------------------------------------------------- 1 | DATA_PATH=./data-bin/iwslt14.tokenized.de-en.joined 2 | model=transformer 3 | PROBLEM=iwslt14_de_en 4 | ARCH=transformer_iwslt_de_en_v2 5 | OUTPUT_PATH=log/sgd 6 | NUM=5 7 | mkdir -p $OUTPUT_PATH 8 | 9 | export CUDA_VISIBLE_DEVICES=0; python main.py ${DATA_PATH} \ 10 | --seed 1 \ 11 | --momentum 0.9 \ 12 | --arch ${ARCH} --share-all-embeddings \ 13 | --optimizer sgd --clip-norm 0.0 \ 14 | --dropout 0.3 --attention-dropout 0.1 --relu-dropout 0.1 \ 15 | --criterion label_smoothed_cross_entropy \ 16 | --lr-scheduler inverse_sqrt --warmup-init-lr 1e-7 --warmup-updates 8000 \ 17 | --lr 0.1 --min-lr 1e-9 \ 18 | --label-smoothing 0.1 --weight-decay 0.0001 \ 19 | --max-tokens 4096 --save-dir ${OUTPUT_PATH} \ 20 | --update-freq 1 --no-progress-bar --log-interval 50 \ 21 | --ddp-backend no_c10d \ 22 | --keep-last-epochs ${NUM} --max-epoch 55 \ 23 | --restore-file ${OUTPUT_PATH}/checkpoint_best.pt \ 24 | | tee -a ${OUTPUT_PATH}/train_log.txt 25 | 26 | # --early-stop ${NUM} \ 27 | 28 | python scripts/average_checkpoints.py --inputs ${OUTPUT_PATH} --num-epoch-checkpoints ${NUM} --output ${OUTPUT_PATH}/averaged_model.pt 29 | 30 | BEAM_SIZE=5 31 | LPEN=1.0 32 | TRANS_PATH=${OUTPUT_PATH}/trans 33 | RESULT_PATH=${TRANS_PATH}/ 34 | 35 | mkdir -p $RESULT_PATH 36 | CKPT=averaged_model.pt 37 | 38 | export CUDA_VISIBLE_DEVICES=0; python generate.py \ 39 | ${DATA_PATH} \ 40 | --path ${OUTPUT_PATH}/${CKPT} \ 41 | --batch-size 128 \ 42 | --beam ${BEAM_SIZE} \ 43 | --lenpen ${LPEN} \ 44 | --remove-bpe \ 45 | --log-format simple \ 46 | --source-lang de \ 47 | --target-lang en \ 48 | > ${RESULT_PATH}/res.txt 49 | 50 | -------------------------------------------------------------------------------- /Transformer_NMT/export: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/export -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/!: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/fairseq/! 
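The `config/*.sh` scripts above select optimizers through fairseq's `--optimizer` registry (e.g., `--optimizer adam3`). Outside fairseq, the standalone copies of the optimizer (e.g., `LSTM_Penn_Treebank/AdaM3.py`) follow the standard PyTorch optimizer interface; the snippet below is a minimal usage sketch under that assumption, reusing the hyperparameter values from `config/adam3.sh` (the exact constructor signature may differ):

```python
# Minimal sketch, not a file in this repository: drop-in training loop with
# AdaM3, assuming it implements torch.optim.Optimizer with Adam-style
# arguments (lr, betas, eps), as the adam3.sh flags suggest.
import torch
from AdaM3 import AdaM3  # e.g., the copy shipped in LSTM_Penn_Treebank/

model = torch.nn.Linear(10, 1)
optimizer = AdaM3(model.parameters(), lr=5e-4, betas=(0.9, 0.999), eps=1e-16)

for _ in range(100):
    x = torch.randn(32, 10)
    loss = model(x).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```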
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | __all__ = ['pdb'] 7 | __version__ = '0.8.0' 8 | 9 | import fairseq.criterions # noqa 10 | import fairseq.models # noqa 11 | import fairseq.modules # noqa 12 | import fairseq.optim # noqa 13 | import fairseq.optim.lr_scheduler # noqa 14 | import fairseq.pdb # noqa 15 | import fairseq.tasks # noqa 16 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/clib/libbleu/module.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2017-present, Facebook, Inc. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #include <Python.h> 10 | 11 | 12 | static PyMethodDef method_def[] = { 13 | {NULL, NULL, 0, NULL} 14 | }; 15 | 16 | static struct PyModuleDef module_def = { 17 | PyModuleDef_HEAD_INIT, 18 | "libbleu", /* name of module */ 19 | NULL, /* module documentation, may be NULL */ 20 | -1, /* size of per-interpreter state of the module, 21 | or -1 if the module keeps state in global variables. */ 22 | method_def 23 | }; 24 | 25 | 26 | #if PY_MAJOR_VERSION == 2 27 | PyMODINIT_FUNC init_libbleu() 28 | #else 29 | PyMODINIT_FUNC PyInit_libbleu() 30 | #endif 31 | { 32 | PyObject *m = PyModule_Create(&module_def); 33 | if (!m) { 34 | return NULL; 35 | } 36 | return m; 37 | } 38 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/criterions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.criterions.fairseq_criterion import FairseqCriterion 11 | 12 | 13 | build_criterion, register_criterion, CRITERION_REGISTRY = registry.setup_registry( 14 | '--criterion', 15 | base_class=FairseqCriterion, 16 | default='cross_entropy', 17 | ) 18 | 19 | 20 | # automatically import any Python files in the criterions/ directory 21 | for file in os.listdir(os.path.dirname(__file__)): 22 | if file.endswith('.py') and not file.startswith('_'): 23 | module = file[:file.find('.py')] 24 | importlib.import_module('fairseq.criterions.' + module) 25 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/criterions/fairseq_criterion.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from torch.nn.modules.loss import _Loss 7 | 8 | 9 | class FairseqCriterion(_Loss): 10 | 11 | def __init__(self, args, task): 12 | super().__init__() 13 | self.args = args 14 | self.task = task 15 | self.padding_idx = task.target_dictionary.pad() if task.target_dictionary is not None else -100 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add criterion-specific arguments to the parser.""" 20 | pass 21 | 22 | @classmethod 23 | def build_criterion(cls, args, task): 24 | return cls(args, task) 25 | 26 | def forward(self, model, sample, reduce=True): 27 | """Compute the loss for the given sample. 28 | 29 | Returns a tuple with three elements: 30 | 1) the loss 31 | 2) the sample size, which is used as the denominator for the gradient 32 | 3) logging outputs to display while training 33 | """ 34 | raise NotImplementedError 35 | 36 | @staticmethod 37 | def aggregate_logging_outputs(logging_outputs): 38 | """Aggregate logging outputs from data parallel training.""" 39 | raise NotImplementedError 40 | 41 | @staticmethod 42 | def grad_denom(sample_sizes): 43 | """Compute the gradient denominator for a set of sample sizes.""" 44 | return sum(sample_sizes) 45 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/audio/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/fairseq/data/audio/__init__.py -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/base_wrapper_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from torch.utils.data.dataloader import default_collate 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class BaseWrapperDataset(FairseqDataset): 12 | 13 | def __init__(self, dataset): 14 | super().__init__() 15 | self.dataset = dataset 16 | 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | def __len__(self): 21 | return len(self.dataset) 22 | 23 | def collater(self, samples): 24 | if hasattr(self.dataset, 'collater'): 25 | return self.dataset.collater(samples) 26 | else: 27 | return default_collate(samples) 28 | 29 | @property 30 | def sizes(self): 31 | return self.dataset.sizes 32 | 33 | def num_tokens(self, index): 34 | return self.dataset.num_tokens(index) 35 | 36 | def size(self, index): 37 | return self.dataset.size(index) 38 | 39 | def ordered_indices(self): 40 | return self.dataset.ordered_indices() 41 | 42 | @property 43 | def supports_prefetch(self): 44 | return getattr(self.dataset, 'supports_prefetch', False) 45 | 46 | def prefetch(self, indices): 47 | self.dataset.prefetch(indices) 48 | 49 | def set_epoch(self, epoch): 50 | super().set_epoch(epoch) 51 | if hasattr(self.dataset, 'set_epoch'): 52 | self.dataset.set_epoch(epoch) 53 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/colorize_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | import torch 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class ColorizeDataset(BaseWrapperDataset): 12 | """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ 13 | def __init__(self, dataset, color_getter): 14 | super().__init__(dataset) 15 | self.color_getter = color_getter 16 | 17 | def collater(self, samples): 18 | base_collate = super().collater(samples) 19 | if len(base_collate) > 0: 20 | base_collate["net_input"]["colors"] = torch.tensor( 21 | list(self.color_getter(self.dataset, s["id"]) for s in samples), 22 | dtype=torch.long, 23 | ) 24 | return base_collate 25 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/concat_sentences_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class ConcatSentencesDataset(FairseqDataset): 12 | 13 | def __init__(self, *datasets): 14 | super().__init__() 15 | self.datasets = datasets 16 | assert all(len(ds) == len(datasets[0]) for ds in datasets), \ 17 | 'datasets must have the same length' 18 | 19 | def __getitem__(self, index): 20 | return torch.cat([ds[index] for ds in self.datasets]) 21 | 22 | def __len__(self): 23 | return len(self.datasets[0]) 24 | 25 | def collater(self, samples): 26 | return self.datasets[0].collater(samples) 27 | 28 | @property 29 | def sizes(self): 30 | return sum(ds.sizes for ds in self.datasets) 31 | 32 | def num_tokens(self, index): 33 | return sum(ds.num_tokens(index) for ds in self.datasets) 34 | 35 | def size(self, index): 36 | return sum(ds.size(index) for ds in self.datasets) 37 | 38 | def ordered_indices(self): 39 | return self.datasets[0].ordered_indices() 40 | 41 | @property 42 | def supports_prefetch(self): 43 | return any( 44 | getattr(ds, 'supports_prefetch', False) for ds in self.datasets 45 | ) 46 | 47 | def prefetch(self, indices): 48 | for ds in self.datasets: 49 | if getattr(ds, 'supports_prefetch', False): 50 | ds.prefetch(indices) 51 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | import importlib 8 | import os 9 | 10 | from fairseq import registry 11 | 12 | 13 | build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = registry.setup_registry( 14 | '--tokenizer', 15 | default=None, 16 | ) 17 | 18 | 19 | build_bpe, register_bpe, BPE_REGISTRY = registry.setup_registry( 20 | '--bpe', 21 | default=None, 22 | ) 23 | 24 | 25 | # automatically import any Python files in the encoders/ directory 26 | for file in os.listdir(os.path.dirname(__file__)): 27 | if file.endswith('.py') and not file.startswith('_'): 28 | module = file[:file.find('.py')] 29 | importlib.import_module('fairseq.data.encoders.' 
+ module) 30 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/fastbpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('fastbpe') 11 | class fastBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--bpe-codes', type=str, 17 | help='path to fastBPE BPE') 18 | # fmt: on 19 | 20 | def __init__(self, args): 21 | if args.bpe_codes is None: 22 | raise ValueError('--bpe-codes is required for --bpe=fastbpe') 23 | codes = file_utils.cached_path(args.bpe_codes) 24 | try: 25 | import fastBPE 26 | self.bpe = fastBPE.fastBPE(codes) 27 | self.bpe_symbol = "@@ " 28 | except ImportError: 29 | raise ImportError('Please install fastBPE with: pip install fastBPE') 30 | 31 | def encode(self, x: str) -> str: 32 | return self.bpe.apply([x])[0] 33 | 34 | def decode(self, x: str) -> str: 35 | return (x + ' ').replace(self.bpe_symbol, '').rstrip() 36 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/gpt2_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | from .gpt2_bpe_utils import get_encoder 10 | 11 | 12 | DEFAULT_ENCODER_JSON = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' 13 | DEFAULT_VOCAB_BPE = 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' 14 | 15 | 16 | @register_bpe('gpt2') 17 | class GPT2BPE(object): 18 | 19 | @staticmethod 20 | def add_args(parser): 21 | # fmt: off 22 | parser.add_argument('--gpt2-encoder-json', type=str, 23 | default=DEFAULT_ENCODER_JSON, 24 | help='path to encoder.json') 25 | parser.add_argument('--gpt2-vocab-bpe', type=str, 26 | default=DEFAULT_VOCAB_BPE, 27 | help='path to vocab.bpe') 28 | # fmt: on 29 | 30 | def __init__(self, args): 31 | encoder_json = file_utils.cached_path( 32 | getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON) 33 | ) 34 | vocab_bpe = file_utils.cached_path( 35 | getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE) 36 | ) 37 | self.bpe = get_encoder(encoder_json, vocab_bpe) 38 | 39 | def encode(self, x: str) -> str: 40 | return ' '.join(map(str, self.bpe.encode(x))) 41 | 42 | def decode(self, x: str) -> str: 43 | return self.bpe.decode(map(int, x.split())) 44 | 45 | def is_beginning_of_word(self, x: str) -> bool: 46 | return self.decode(x).startswith(' ') 47 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/hf_bert_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from fairseq.data.encoders import register_bpe 7 | 8 | 9 | @register_bpe('bert') 10 | class BertBPE(object): 11 | 12 | @staticmethod 13 | def add_args(parser): 14 | # fmt: off 15 | parser.add_argument('--bpe-cased', action='store_true', 16 | help='set for cased BPE', 17 | default=False) 18 | parser.add_argument('--bpe-vocab-file', type=str, 19 | help='bpe vocab file.') 20 | # fmt: on 21 | 22 | def __init__(self, args): 23 | try: 24 | from pytorch_transformers import BertTokenizer 25 | from pytorch_transformers.tokenization_utils import clean_up_tokenization 26 | except ImportError: 27 | raise ImportError( 28 | 'Please install 1.0.0 version of pytorch_transformers ' 29 | 'with: pip install pytorch-transformers' 30 | ) 31 | 32 | if 'bpe_vocab_file' in args: 33 | self.bert_tokenizer = BertTokenizer( 34 | args.bpe_vocab_file, 35 | do_lower_case=not args.bpe_cased 36 | ) 37 | else: 38 | vocab_file_name = 'bert-base-cased' if args.bpe_cased else 'bert-base-uncased' 39 | self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) 40 | self.clean_up_tokenization = clean_up_tokenization 41 | 42 | def encode(self, x: str) -> str: 43 | return ' '.join(self.bert_tokenizer.tokenize(x)) 44 | 45 | def decode(self, x: str) -> str: 46 | return self.clean_up_tokenization( 47 | self.bert_tokenizer.convert_tokens_to_string(x.split(' ')) 48 | ) 49 | 50 | def is_beginning_of_word(self, x: str) -> bool: 51 | return not x.startswith('##') 52 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/nltk_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data.encoders import register_tokenizer 7 | 8 | 9 | @register_tokenizer('nltk') 10 | class NLTKTokenizer(object): 11 | 12 | def __init__(self, source_lang=None, target_lang=None): 13 | try: 14 | from nltk.tokenize import word_tokenize 15 | self.word_tokenize = word_tokenize 16 | except ImportError: 17 | raise ImportError('Please install nltk with: pip install nltk') 18 | 19 | def encode(self, x: str) -> str: 20 | return ' '.join(self.word_tokenize(x)) 21 | 22 | def decode(self, x: str) -> str: 23 | return x 24 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/sentencepiece_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree.
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('sentencepiece') 11 | class SentencepieceBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--sentencepiece-vocab', type=str, 17 | help='path to sentencepiece vocab') 18 | # fmt: on 19 | 20 | def __init__(self, args): 21 | vocab = file_utils.cached_path(args.sentencepiece_vocab) 22 | try: 23 | import sentencepiece as spm 24 | self.sp = spm.SentencePieceProcessor() 25 | self.sp.Load(vocab) 26 | except ImportError: 27 | raise ImportError('Please install sentencepiece with: pip install sentencepiece') 28 | 29 | def encode(self, x: str) -> str: 30 | return ' '.join(self.sp.EncodeAsPieces(x)) 31 | 32 | def decode(self, x: str) -> str: 33 | return x.replace(' ', '').replace('\u2581', ' ').strip() 34 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/space_tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | from fairseq.data.encoders import register_tokenizer 9 | 10 | 11 | @register_tokenizer('space') 12 | class SpaceTokenizer(object): 13 | 14 | def __init__(self, source_lang=None, target_lang=None): 15 | self.space_tok = re.compile(r"\s+") 16 | 17 | def encode(self, x: str) -> str: 18 | return self.space_tok.sub(' ', x) 19 | 20 | def decode(self, x: str) -> str: 21 | return x 22 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/encoders/subword_nmt_bpe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
5 | 6 | from fairseq import file_utils 7 | from fairseq.data.encoders import register_bpe 8 | 9 | 10 | @register_bpe('subword_nmt') 11 | class SubwordNMTBPE(object): 12 | 13 | @staticmethod 14 | def add_args(parser): 15 | # fmt: off 16 | parser.add_argument('--bpe-codes', type=str, 17 | help='path to subword NMT BPE') 18 | parser.add_argument('--bpe-separator', default='@@', 19 | help='BPE separator') 20 | # fmt: on 21 | 22 | def __init__(self, args): 23 | if args.bpe_codes is None: 24 | raise ValueError('--bpe-codes is required for --bpe=subword_nmt') 25 | codes = file_utils.cached_path(args.bpe_codes) 26 | try: 27 | from subword_nmt import apply_bpe 28 | bpe_parser = apply_bpe.create_parser() 29 | bpe_args = bpe_parser.parse_args([ 30 | '--codes', codes, 31 | '--separator', args.bpe_separator, 32 | ]) 33 | self.bpe = apply_bpe.BPE( 34 | bpe_args.codes, 35 | bpe_args.merges, 36 | bpe_args.separator, 37 | None, 38 | bpe_args.glossaries, 39 | ) 40 | self.bpe_symbol = bpe_args.separator + ' ' 41 | except ImportError: 42 | raise ImportError('Please install subword_nmt with: pip install subword-nmt') 43 | 44 | def encode(self, x: str) -> str: 45 | return self.bpe.process_line(x) 46 | 47 | def decode(self, x: str) -> str: 48 | return (x + ' ').replace(self.bpe_symbol, '').rstrip() 49 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/fairseq_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch.utils.data 8 | 9 | 10 | class FairseqDataset(torch.utils.data.Dataset): 11 | """A dataset that provides helpers for batching.""" 12 | 13 | def __getitem__(self, index): 14 | raise NotImplementedError 15 | 16 | def __len__(self): 17 | raise NotImplementedError 18 | 19 | def collater(self, samples): 20 | """Merge a list of samples to form a mini-batch. 21 | 22 | Args: 23 | samples (List[dict]): samples to collate 24 | 25 | Returns: 26 | dict: a mini-batch suitable for forwarding with a Model 27 | """ 28 | raise NotImplementedError 29 | 30 | def num_tokens(self, index): 31 | """Return the number of tokens in a sample. This value is used to 32 | enforce ``--max-tokens`` during batching.""" 33 | raise NotImplementedError 34 | 35 | def size(self, index): 36 | """Return an example's size as a float or tuple. This value is used when 37 | filtering a dataset with ``--max-positions``.""" 38 | raise NotImplementedError 39 | 40 | def ordered_indices(self): 41 | """Return an ordered list of indices. Batches will be constructed based 42 | on this order.""" 43 | return np.arange(len(self)) 44 | 45 | @property 46 | def supports_prefetch(self): 47 | """Whether this dataset supports prefetching.""" 48 | return False 49 | 50 | def attr(self, attr: str, index: int): 51 | return getattr(self, attr, None) 52 | 53 | def prefetch(self, indices): 54 | """Prefetch the data required for this epoch.""" 55 | raise NotImplementedError 56 | 57 | def set_epoch(self, epoch): 58 | pass 59 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/id_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
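# A simplified sketch of how FairseqDataset.num_tokens() above feeds --max-tokens
# batching: indices are consumed in ordered_indices() order and a batch is closed
# whenever the next sample would overflow the token budget. Illustrative only;
# the real logic lives in fairseq.data.data_utils.batch_by_size and also honors
# --max-sentences and size filtering.
def _demo_batch_by_tokens():
    sizes = [3, 4, 5, 2]                    # num_tokens(i) for each index i
    max_tokens = 8
    batches, cur, cur_tokens = [], [], 0
    for idx, n in enumerate(sizes):
        if cur and cur_tokens + n > max_tokens:
            batches.append(cur)
            cur, cur_tokens = [], 0
        cur.append(idx)
        cur_tokens += n
    if cur:
        batches.append(cur)
    assert batches == [[0, 1], [2, 3]]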
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class IdDataset(FairseqDataset): 12 | 13 | def __getitem__(self, index): 14 | return index 15 | 16 | def __len__(self): 17 | return 0 18 | 19 | def collater(self, samples): 20 | return torch.tensor(samples) 21 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/legacy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary 7 | from .block_pair_dataset import BlockPairDataset 8 | from .masked_lm_dataset import MaskedLMDataset 9 | 10 | __all__ = [ 11 | 'BertDictionary', 12 | 'BlockPairDataset', 13 | 'MaskedLMDataset', 14 | 'MaskedLMDictionary', 15 | ] 16 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/legacy/masked_lm_dictionary.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import Dictionary 7 | 8 | 9 | class MaskedLMDictionary(Dictionary): 10 | """ 11 | Dictionary for Masked Language Modelling tasks. This extends Dictionary by 12 | adding the mask symbol. 13 | """ 14 | def __init__( 15 | self, 16 | pad='<pad>', 17 | eos='</s>', 18 | unk='<unk>', 19 | mask='<mask>', 20 | ): 21 | super().__init__(pad, eos, unk) 22 | self.mask_word = mask 23 | self.mask_index = self.add_symbol(mask) 24 | self.nspecial = len(self.symbols) 25 | 26 | def mask(self): 27 | """Helper to get index of mask symbol""" 28 | return self.mask_index 29 | 30 | 31 | class BertDictionary(MaskedLMDictionary): 32 | """ 33 | Dictionary for BERT task. This extends MaskedLMDictionary by adding support 34 | for cls and sep symbols. 35 | """ 36 | def __init__( 37 | self, 38 | pad='<pad>', 39 | eos='</s>', 40 | unk='<unk>', 41 | mask='<mask>', 42 | cls='<cls>', 43 | sep='<sep>' 44 | ): 45 | super().__init__(pad, eos, unk, mask) 46 | self.cls_word = cls 47 | self.sep_word = sep 48 | self.cls_index = self.add_symbol(cls) 49 | self.sep_index = self.add_symbol(sep) 50 | self.nspecial = len(self.symbols) 51 | 52 | def cls(self): 53 | """Helper to get index of cls symbol""" 54 | return self.cls_index 55 | 56 | def sep(self): 57 | """Helper to get index of sep symbol""" 58 | return self.sep_index 59 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/list_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class ListDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, sizes=None): 12 | super().__init__(dataset) 13 | self._sizes = sizes 14 | 15 | def collater(self, samples): 16 | return samples 17 | 18 | @property 19 | def sizes(self): 20 | return self._sizes 21 | 22 | def num_tokens(self, index): 23 | return self.sizes[index] 24 | 25 | def size(self, index): 26 | return self.sizes[index] 27 | 28 | def set_epoch(self, epoch): 29 | pass 30 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/lru_cache_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from functools import lru_cache 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class LRUCacheDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, token=None): 14 | super().__init__(dataset) 15 | 16 | @lru_cache(maxsize=8) 17 | def __getitem__(self, index): 18 | return self.dataset[index] 19 | 20 | @lru_cache(maxsize=8) 21 | def collater(self, samples): 22 | return self.dataset.collater(samples) 23 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/num_samples_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import FairseqDataset 7 | 8 | 9 | class NumSamplesDataset(FairseqDataset): 10 | 11 | def __getitem__(self, index): 12 | return 1 13 | 14 | def __len__(self): 15 | return 0 16 | 17 | def collater(self, samples): 18 | return sum(samples) 19 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/numel_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class NumelDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, reduce=False): 15 | super().__init__(dataset) 16 | self.reduce = reduce 17 | 18 | def __getitem__(self, index): 19 | item = self.dataset[index] 20 | if torch.is_tensor(item): 21 | return torch.numel(item) 22 | else: 23 | return np.size(item) 24 | 25 | def __len__(self): 26 | return len(self.dataset) 27 | 28 | def collater(self, samples): 29 | if self.reduce: 30 | return sum(samples) 31 | else: 32 | return torch.tensor(samples) 33 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/offset_tokens_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . 
import BaseWrapperDataset 7 | 8 | 9 | class OffsetTokensDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, offset): 12 | super().__init__(dataset) 13 | self.offset = offset 14 | 15 | def __getitem__(self, idx): 16 | return self.dataset[idx] + self.offset 17 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/pad_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from fairseq.data import data_utils 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class PadDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, pad_idx, left_pad): 14 | super().__init__(dataset) 15 | self.pad_idx = pad_idx 16 | self.left_pad = left_pad 17 | 18 | def collater(self, samples): 19 | return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad) 20 | 21 | 22 | class LeftPadDataset(PadDataset): 23 | 24 | def __init__(self, dataset, pad_idx): 25 | super().__init__(dataset, pad_idx, left_pad=True) 26 | 27 | 28 | class RightPadDataset(PadDataset): 29 | 30 | def __init__(self, dataset, pad_idx): 31 | super().__init__(dataset, pad_idx, left_pad=False) 32 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/prepend_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . import BaseWrapperDataset 10 | 11 | 12 | class PrependDataset(BaseWrapperDataset): 13 | def __init__(self, dataset, prepend_getter, ensure_first_token_is=None): 14 | super().__init__(dataset) 15 | self.prepend_getter = prepend_getter 16 | self.ensure_first_token = ensure_first_token_is 17 | 18 | def __getitem__(self, idx): 19 | item = self.dataset[idx] 20 | is_tuple = isinstance(item, tuple) 21 | src = item[0] if is_tuple else item 22 | 23 | assert self.ensure_first_token is None or src[0] == self.ensure_first_token 24 | prepend_idx = self.prepend_getter(self.dataset, idx) 25 | assert isinstance(prepend_idx, int) 26 | src[0] = prepend_idx 27 | item = tuple((src,) + item[1:]) if is_tuple else src 28 | return item 29 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/prepend_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | import torch 8 | 9 | from . 
import BaseWrapperDataset 10 | 11 | 12 | class PrependTokenDataset(BaseWrapperDataset): 13 | 14 | def __init__(self, dataset, token=None): 15 | super().__init__(dataset) 16 | self.token = token 17 | if token is not None: 18 | self._sizes = np.array(dataset.sizes) + 1 19 | else: 20 | self._sizes = dataset.sizes 21 | 22 | def __getitem__(self, idx): 23 | item = self.dataset[idx] 24 | if self.token is not None: 25 | item = torch.cat([item.new([self.token]), item]) 26 | return item 27 | 28 | @property 29 | def sizes(self): 30 | return self._sizes 31 | 32 | def num_tokens(self, index): 33 | n = self.dataset.num_tokens(index) 34 | if self.token is not None: 35 | n += 1 36 | return n 37 | 38 | def size(self, index): 39 | n = self.dataset.size(index) 40 | if self.token is not None: 41 | n += 1 42 | return n 43 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/raw_label_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from . import FairseqDataset 9 | 10 | 11 | class RawLabelDataset(FairseqDataset): 12 | 13 | def __init__(self, labels): 14 | super().__init__() 15 | self.labels = labels 16 | 17 | def __getitem__(self, index): 18 | return self.labels[index] 19 | 20 | def __len__(self): 21 | return len(self.labels) 22 | 23 | def collater(self, samples): 24 | return torch.tensor(samples) 25 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/replace_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class ReplaceDataset(BaseWrapperDataset): 10 | """Replaces tokens found in the dataset by a specified replacement token 11 | 12 | Args: 13 | dataset (~torch.utils.data.Dataset): dataset to replace tokens in 14 | replace_map(Dictionary[int,int]): map of token to replace -> replacement token 15 | offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be 16 | as many as the number of objects returned by the underlying dataset __getitem__ method. 17 | """ 18 | 19 | def __init__(self, dataset, replace_map, offsets): 20 | super().__init__(dataset) 21 | assert len(replace_map) > 0 22 | self.replace_map = replace_map 23 | self.offsets = offsets 24 | 25 | def __getitem__(self, index): 26 | item = self.dataset[index] 27 | is_tuple = isinstance(item, tuple) 28 | srcs = item if is_tuple else [item] 29 | 30 | for offset, src in zip(self.offsets, srcs): 31 | for k, v in self.replace_map.items(): 32 | src_off = src[offset:] if offset >= 0 else src[:offset] 33 | src_off.masked_fill_(src_off == k, v) 34 | 35 | item = srcs if is_tuple else srcs[0] 36 | return item 37 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/sharded_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
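# A short sketch of the in-place update ReplaceDataset (above) relies on: a
# tensor slice shares storage with its parent, so masked_fill_ on src[offset:]
# rewrites the original sample without a copy. Toy values; assumes torch is
# installed.
def _demo_offset_replace():
    import torch
    src = torch.tensor([0, 7, 7, 3])
    replace_map, offset = {7: 9}, 1         # leave src[:1] untouched
    view = src[offset:]
    for k, v in replace_map.items():
        view.masked_fill_(view == k, v)
    assert src.tolist() == [0, 9, 9, 3]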
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import itertools 7 | import os 8 | import random 9 | 10 | from . import BaseWrapperDataset 11 | from fairseq.data import data_utils 12 | 13 | 14 | class ShardedDataset(BaseWrapperDataset): 15 | """Loads a dataset which has been sharded into multiple files. 16 | 17 | Only the shard selected for the current epoch is loaded. 18 | 19 | """ 20 | 21 | def __init__( 22 | self, 23 | dictionary, 24 | dataset_impl: str, 25 | path: str, 26 | split: str, 27 | epoch: int, 28 | name: str = None, 29 | combine: bool = False, 30 | seed: int = 0, 31 | ): 32 | self._name = name if name is not None else os.path.basename(path) 33 | num_shards = 0 34 | for i in itertools.count(): 35 | if not os.path.exists(os.path.join(path, "shard" + str(i))): 36 | break 37 | num_shards += 1 38 | 39 | if num_shards > 0 and split == "train": 40 | random.seed(seed ^ epoch) 41 | shard = random.randint(0, num_shards - 1) 42 | split_path = os.path.join(path, "shard" + str(shard), split) 43 | else: 44 | split_path = os.path.join(path, split) 45 | if os.path.isdir(split_path): 46 | split_path = os.path.join(split_path, split) 47 | 48 | dataset = data_utils.load_indexed_dataset( 49 | split_path, dictionary, dataset_impl, combine=combine 50 | ) 51 | if dataset is None: 52 | raise FileNotFoundError( 53 | "Dataset not found: {} ({})".format(split, split_path) 54 | ) 55 | 56 | super().__init__(dataset) 57 | 58 | @property 59 | def name(self): 60 | return self._name 61 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/sort_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class SortDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, sort_order): 14 | super().__init__(dataset) 15 | if not isinstance(sort_order, (list, tuple)): 16 | sort_order = [sort_order] 17 | self.sort_order = sort_order 18 | 19 | assert all(len(so) == len(dataset) for so in sort_order) 20 | 21 | def ordered_indices(self): 22 | return np.lexsort(self.sort_order) 23 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/strip_token_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from . import BaseWrapperDataset 7 | 8 | 9 | class StripTokenDataset(BaseWrapperDataset): 10 | 11 | def __init__(self, dataset, id_to_strip): 12 | super().__init__(dataset) 13 | self.id_to_strip = id_to_strip 14 | 15 | def __getitem__(self, index): 16 | item = self.dataset[index] 17 | return item[item.ne(self.id_to_strip)] 18 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/data/truncate_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
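# A minimal sketch of SortDataset's ordering above: np.lexsort sorts by the
# *last* key first, so callers put the primary criterion (typically example
# sizes) at the end of sort_order and tie-breakers before it. Assumes numpy is
# installed.
def _demo_lexsort_order():
    import numpy as np
    sizes = np.array([30, 10, 10])          # primary key (last in the list)
    tie_breaker = np.array([0, 2, 1])
    order = np.lexsort([tie_breaker, sizes])
    assert order.tolist() == [2, 1, 0]      # both size-10 items before size-30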
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import numpy as np 7 | 8 | from . import BaseWrapperDataset 9 | 10 | 11 | class TruncateDataset(BaseWrapperDataset): 12 | 13 | def __init__(self, dataset, truncation_length): 14 | super().__init__(dataset) 15 | assert truncation_length is not None 16 | self.truncation_length = truncation_length 17 | self.dataset = dataset 18 | 19 | def __getitem__(self, index): 20 | item = self.dataset[index] 21 | item_len = item.size(0) 22 | if item_len > self.truncation_length: 23 | item = item[:self.truncation_length] 24 | return item 25 | 26 | @property 27 | def sizes(self): 28 | return np.minimum(self.dataset.sizes, self.truncation_length) 29 | 30 | def __len__(self): 31 | return len(self.dataset) 32 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/export: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/fairseq/export -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/meters.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import time 7 | 8 | 9 | class AverageMeter(object): 10 | """Computes and stores the average and current value""" 11 | def __init__(self): 12 | self.reset() 13 | 14 | def reset(self): 15 | self.val = 0 16 | self.avg = 0 17 | self.sum = 0 18 | self.count = 0 19 | 20 | def update(self, val, n=1): 21 | self.val = val 22 | self.sum += val * n 23 | self.count += n 24 | self.avg = self.sum / self.count 25 | 26 | 27 | class TimeMeter(object): 28 | """Computes the average occurrence of some event per second""" 29 | def __init__(self, init=0): 30 | self.reset(init) 31 | 32 | def reset(self, init=0): 33 | self.init = init 34 | self.start = time.time() 35 | self.n = 0 36 | 37 | def update(self, val=1): 38 | self.n += val 39 | 40 | @property 41 | def avg(self): 42 | return self.n / self.elapsed_time 43 | 44 | @property 45 | def elapsed_time(self): 46 | return self.init + (time.time() - self.start) 47 | 48 | 49 | class StopwatchMeter(object): 50 | """Computes the sum/avg duration of some event in seconds""" 51 | def __init__(self): 52 | self.reset() 53 | 54 | def start(self): 55 | self.start_time = time.time() 56 | 57 | def stop(self, n=1): 58 | if self.start_time is not None: 59 | delta = time.time() - self.start_time 60 | self.sum += delta 61 | self.n += n 62 | self.start_time = None 63 | 64 | def reset(self): 65 | self.sum = 0 66 | self.n = 0 67 | self.start_time = None 68 | 69 | @property 70 | def avg(self): 71 | return self.sum / self.n 72 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/models/composite_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
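# A quick usage sketch for the meters above; assumes this fairseq package is
# importable. AverageMeter keeps a weighted running mean, where n is usually
# the batch size.
def _demo_average_meter():
    from fairseq.meters import AverageMeter
    m = AverageMeter()
    m.update(2.0, n=3)                      # e.g. mean loss 2.0 over 3 samples
    m.update(4.0, n=1)
    assert abs(m.avg - 2.5) < 1e-6          # (2.0 * 3 + 4.0 * 1) / 4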
5 | 6 | from fairseq.models import FairseqEncoder 7 | 8 | 9 | class CompositeEncoder(FairseqEncoder): 10 | """ 11 | A wrapper around a dictionary of :class:`FairseqEncoder` objects. 12 | 13 | We run forward on each encoder and return a dictionary of outputs. The first 14 | encoder's dictionary is used for initialization. 15 | 16 | Args: 17 | encoders (dict): a dictionary of :class:`FairseqEncoder` objects. 18 | """ 19 | 20 | def __init__(self, encoders): 21 | super().__init__(next(iter(encoders.values())).dictionary) 22 | self.encoders = encoders 23 | for key in self.encoders: 24 | self.add_module(key, self.encoders[key]) 25 | 26 | def forward(self, src_tokens, src_lengths): 27 | """ 28 | Args: 29 | src_tokens (LongTensor): tokens in the source language of shape 30 | `(batch, src_len)` 31 | src_lengths (LongTensor): lengths of each source sentence of shape 32 | `(batch)` 33 | 34 | Returns: 35 | dict: 36 | the outputs from each Encoder 37 | """ 38 | encoder_out = {} 39 | for key in self.encoders: 40 | encoder_out[key] = self.encoders[key](src_tokens, src_lengths) 41 | return encoder_out 42 | 43 | def reorder_encoder_out(self, encoder_out, new_order): 44 | """Reorder encoder output according to new_order.""" 45 | for key in self.encoders: 46 | encoder_out[key] = self.encoders[key].reorder_encoder_out(encoder_out[key], new_order) 47 | return encoder_out 48 | 49 | def max_positions(self): 50 | return min([self.encoders[key].max_positions() for key in self.encoders]) 51 | 52 | def upgrade_state_dict(self, state_dict): 53 | for key in self.encoders: 54 | self.encoders[key].upgrade_state_dict(state_dict) 55 | return state_dict 56 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/models/fairseq_encoder.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | 9 | class FairseqEncoder(nn.Module): 10 | """Base class for encoders.""" 11 | 12 | def __init__(self, dictionary): 13 | super().__init__() 14 | self.dictionary = dictionary 15 | 16 | def forward(self, src_tokens, src_lengths=None, **kwargs): 17 | """ 18 | Args: 19 | src_tokens (LongTensor): tokens in the source language of shape 20 | `(batch, src_len)` 21 | src_lengths (LongTensor): lengths of each source sentence of shape 22 | `(batch)` 23 | """ 24 | raise NotImplementedError 25 | 26 | def reorder_encoder_out(self, encoder_out, new_order): 27 | """ 28 | Reorder encoder output according to `new_order`. 29 | 30 | Args: 31 | encoder_out: output from the ``forward()`` method 32 | new_order (LongTensor): desired order 33 | 34 | Returns: 35 | `encoder_out` rearranged according to `new_order` 36 | """ 37 | raise NotImplementedError 38 | 39 | def max_positions(self): 40 | """Maximum input length supported by the encoder.""" 41 | return 1e6 # an arbitrary large number 42 | 43 | def upgrade_state_dict(self, state_dict): 44 | """Upgrade a (possibly old) state dict for new versions of fairseq.""" 45 | return state_dict 46 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/models/roberta/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
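# A compact sketch of the reorder_encoder_out() contract above: beam search
# expands the batch dimension to batch * beam and then reshuffles hypotheses,
# so encoders must be able to reindex cached output along that dimension.
# Toy tensors; assumes torch is installed.
def _demo_reorder_encoder_out():
    import torch
    enc_out = torch.tensor([[1.0], [2.0]])      # (batch=2, hidden=1)
    new_order = torch.tensor([0, 0, 1, 1])      # beam=2 copies per sentence
    reordered = enc_out.index_select(0, new_order)
    assert reordered.tolist() == [[1.0], [1.0], [2.0], [2.0]]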
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .hub_interface import * # noqa 7 | from .model import * # noqa 8 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/beamable_mm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | import torch.nn as nn 8 | 9 | 10 | class BeamableMM(nn.Module): 11 | """This module provides an optimized MM for beam decoding with attention. 12 | 13 | It leverages the fact that the source-side of the input is replicated beam 14 | times and the target-side of the input is of width one. This layer speeds up 15 | inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)} 16 | with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}. 17 | """ 18 | def __init__(self, beam_size=None): 19 | super(BeamableMM, self).__init__() 20 | self.beam_size = beam_size 21 | 22 | def forward(self, input1, input2): 23 | if ( 24 | not self.training and # test mode 25 | self.beam_size is not None and # beam size is set 26 | input1.dim() == 3 and # only support batched input 27 | input1.size(1) == 1 # single time step update 28 | ): 29 | bsz, beam = input1.size(0), self.beam_size 30 | 31 | # bsz x 1 x nhu --> bsz/beam x beam x nhu 32 | input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1) 33 | 34 | # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu 35 | input2 = input2.unfold(0, beam, beam)[:, :, :, 0] 36 | 37 | # use non batched operation if bsz = beam 38 | if input1.size(0) == 1: 39 | output = torch.mm(input1[0, :, :], input2[0, :, :]) 40 | else: 41 | output = input1.bmm(input2) 42 | return output.view(bsz, 1, -1) 43 | else: 44 | return input1.bmm(input2) 45 | 46 | def set_beam_size(self, beam_size): 47 | self.beam_size = beam_size 48 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/conv_tbc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | from torch.nn.modules.utils import _single 8 | 9 | 10 | class ConvTBC(torch.nn.Module): 11 | """1D convolution over an input of shape (time x batch x channel) 12 | 13 | The implementation uses gemm to perform the convolution. This implementation 14 | is faster than cuDNN for small kernel sizes. 
15 | """ 16 | def __init__(self, in_channels, out_channels, kernel_size, padding=0): 17 | super(ConvTBC, self).__init__() 18 | self.in_channels = in_channels 19 | self.out_channels = out_channels 20 | self.kernel_size = _single(kernel_size) 21 | self.padding = _single(padding) 22 | 23 | self.weight = torch.nn.Parameter(torch.Tensor( 24 | self.kernel_size[0], in_channels, out_channels)) 25 | self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) 26 | 27 | def forward(self, input): 28 | return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0]) 29 | 30 | def __repr__(self): 31 | s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}' 32 | ', padding={padding}') 33 | if self.bias is None: 34 | s += ', bias=False' 35 | s += ')' 36 | return s.format(name=self.__class__.__name__, **self.__dict__) 37 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/dynamicconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .dynamicconv_layer import DynamicconvLayer # noqa 7 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | std::vector dynamicconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector dynamicconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector dynamicconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return dynamicconv_cuda_forward(input, filters, 36 | padding_l); 37 | } 38 | 39 | std::vector dynamicconv_backward( 40 | at::Tensor gradOutput, 41 | int padding_l, 42 | at::Tensor input, 43 | at::Tensor filters) { 44 | 45 | CHECK_INPUT(gradOutput); 46 | CHECK_INPUT(input); 47 | CHECK_INPUT(filters); 48 | 49 | return dynamicconv_cuda_backward(gradOutput, padding_l, 50 | input, filters); 51 | } 52 | 53 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 54 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CUDA)"); 55 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CUDA)"); 56 | } 57 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/dynamicconv_layer/dynamicconv_cuda.cuh: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | #include <ATen/ATen.h> 9 | #include <c10/cuda/CUDAStream.h> 10 | 11 | #include <cuda.h> 12 | #include <cuda_runtime.h> 13 | #include <cuda_fp16.h> 14 | 15 | #include <algorithm> 16 | #include <functional> 17 | #include <iostream> 18 | #include <stdexcept> 19 | #include <utility> 20 | #include <vector> 21 | 22 | #include <assert.h> 23 | #include <math.h> 24 | #include <stdlib.h> 25 | 26 | #define SHFL_MASK 0xffffffff 27 | 28 | template <typename scalar_t> 29 | __global__ 30 | void dynamicconv_forward_kernel(const scalar_t* input, 31 | const scalar_t* weight, 32 | int minibatch, 33 | int sequenceLength, 34 | int numFeatures, 35 | int numFiltersInBlock, 36 | int numHeads, 37 | scalar_t* output); 38 | 39 | template <typename scalar_t> 40 | __global__ 41 | void dynamicconv_backward_kernel( 42 | const scalar_t* gradOutput, // B * C * T 43 | const scalar_t* input, // B * C * T 44 | const scalar_t* weight, 45 | int minibatch, 46 | int sequenceLength, 47 | int numFeatures, 48 | int numFiltersInBlock, 49 | int numHeads, 50 | scalar_t* gradWeight, 51 | scalar_t* gradInput); // B * H * k * T 52 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/dynamicconv_layer/dynamiconv_cpu.cpp: -------------------------------------------------------------------------------- 1 | #include <torch/extension.h> 2 | #include <vector> 3 | 4 | std::vector<float*> dynamicconv_cpu_forward( 5 | float* input, 6 | float* filters, 7 | int padding_l); 8 | 9 | std::vector<float*> dynamicconv_cpu_backward( 10 | float* gradOutput, 11 | int padding_l, 12 | float* input, 13 | float* filters); 14 | 15 | std::vector<float*> dynamicconv_forward( 16 | float* input, 17 | float* filters, 18 | int padding_l) { 19 | 20 | return dynamicconv_cpu_forward(input, filters, padding_l); 21 | } 22 | 23 | std::vector<float*> dynamicconv_backward( 24 | float* gradOutput, 25 | int padding_l, 26 | float* input, 27 | float* filters) { 28 | 29 | return dynamicconv_cpu_backward(gradOutput, padding_l, input, filters); 30 | } 31 | 32 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 33 | m.def("forward", &dynamicconv_forward, "dynamicconv forward (CPU)"); 34 | m.def("backward", &dynamicconv_backward, "dynamicconv backward (CPU)"); 35 | } 36 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/dynamicconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='dynamicconv_layer', 12 | ext_modules=[ 13 | CUDAExtension( 14 | name='dynamicconv_cuda', 15 | sources=[ 16 | 'dynamicconv_cuda.cpp', 17 | 'dynamicconv_cuda_kernel.cu', 18 | ], 19 | ), 20 | ], 21 | cmdclass={ 22 | 'build_ext': BuildExtension 23 | }) 24 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/gelu.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
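# A numeric sketch relating the two GELU variants defined below: the tanh-based
# gelu_accurate() tracks the exact erf-based form closely (the gap on [-3, 3]
# is well under 1e-2). Assumes torch is installed.
def _demo_gelu_forms_agree():
    import math
    import torch
    x = torch.linspace(-3, 3, steps=25)
    a = math.sqrt(2 / math.pi)
    tanh_form = 0.5 * x * (1 + torch.tanh(a * (x + 0.044715 * torch.pow(x, 3))))
    erf_form = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    assert torch.max(torch.abs(tanh_form - erf_form)).item() < 1e-2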
5 | """ 6 | See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with 7 | the corresponding GitHub repo: https://github.com/hendrycks/GELUs 8 | """ 9 | 10 | import math 11 | 12 | import torch 13 | 14 | 15 | def gelu_accurate(x): 16 | if not hasattr(gelu_accurate, "_a"): 17 | gelu_accurate._a = math.sqrt(2 / math.pi) 18 | return 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) 19 | 20 | 21 | def gelu(x: torch.Tensor) -> torch.Tensor: 22 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) 23 | if hasattr(torch.nn.functional, 'gelu'): 24 | return torch.nn.functional.gelu(x.float()).type_as(x) 25 | else: 26 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) 27 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/grad_multiply.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class GradMultiply(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, x, scale): 12 | ctx.scale = scale 13 | res = x.new(x) 14 | return res 15 | 16 | @staticmethod 17 | def backward(ctx, grad): 18 | return grad * ctx.scale, None 19 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/highway.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | from torch import nn 9 | 10 | 11 | class Highway(torch.nn.Module): 12 | """ 13 | A `Highway layer `_. 14 | Adopted from the AllenNLP implementation. 15 | """ 16 | 17 | def __init__( 18 | self, 19 | input_dim: int, 20 | num_layers: int = 1 21 | ): 22 | super(Highway, self).__init__() 23 | self.input_dim = input_dim 24 | self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2) 25 | for _ in range(num_layers)]) 26 | self.activation = nn.ReLU() 27 | 28 | self.reset_parameters() 29 | 30 | def reset_parameters(self): 31 | for layer in self.layers: 32 | # As per comment in AllenNLP: 33 | # We should bias the highway layer to just carry its input forward. We do that by 34 | # setting the bias on `B(x)` to be positive, because that means `g` will be biased to 35 | # be high, so we will carry the input forward. The bias on `B(x)` is the second half 36 | # of the bias vector in each Linear layer. 37 | nn.init.constant_(layer.bias[self.input_dim:], 1) 38 | 39 | nn.init.constant_(layer.bias[:self.input_dim], 0) 40 | nn.init.xavier_normal_(layer.weight) 41 | 42 | def forward( 43 | self, 44 | x: torch.Tensor 45 | ): 46 | for layer in self.layers: 47 | projection = layer(x) 48 | proj_x, gate = projection.chunk(2, dim=-1) 49 | proj_x = self.activation(proj_x) 50 | gate = torch.sigmoid(gate) 51 | x = gate * x + (gate.new_tensor([1]) - gate) * proj_x 52 | return x 53 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/layer_norm.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): 10 | if not export and torch.cuda.is_available(): 11 | try: 12 | from apex.normalization import FusedLayerNorm 13 | return FusedLayerNorm(normalized_shape, eps, elementwise_affine) 14 | except ImportError: 15 | pass 16 | return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) 17 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/learned_positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from fairseq import utils 9 | 10 | 11 | class LearnedPositionalEmbedding(nn.Embedding): 12 | """ 13 | This module learns positional embeddings up to a fixed maximum size. 14 | Padding ids are ignored by either offsetting based on padding_idx 15 | or by setting padding_idx to None and ensuring that the appropriate 16 | position ids are passed to the forward function. 17 | """ 18 | 19 | def __init__( 20 | self, 21 | num_embeddings: int, 22 | embedding_dim: int, 23 | padding_idx: int, 24 | ): 25 | super().__init__(num_embeddings, embedding_dim, padding_idx) 26 | self.onnx_trace = False 27 | 28 | def forward(self, input, incremental_state=None, positions=None): 29 | """Input is expected to be of size [bsz x seqlen].""" 30 | assert ( 31 | (positions is None) or (self.padding_idx is None) 32 | ), "If positions is pre-computed then padding_idx should not be set." 33 | 34 | if positions is None: 35 | if incremental_state is not None: 36 | # positions is the same for every token when decoding a single step 37 | # Without the int() cast, it doesn't work in some cases when exporting to ONNX 38 | positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1))) 39 | else: 40 | positions = utils.make_positions( 41 | input, self.padding_idx, onnx_trace=self.onnx_trace, 42 | ) 43 | return super().forward(positions) 44 | 45 | def max_positions(self): 46 | """Maximum number of supported positions.""" 47 | if self.padding_idx is not None: 48 | return self.num_embeddings - self.padding_idx - 1 49 | else: 50 | return self.num_embeddings 51 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/lightconv_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | from .lightconv_layer import LightconvLayer # noqa 7 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/lightconv_layer/lightconv_cuda.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) Facebook, Inc. and its affiliates. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 
6 | */ 7 | 8 | #include <torch/extension.h> 9 | #include <vector> 10 | 11 | std::vector<at::Tensor> lightconv_cuda_forward( 12 | at::Tensor input, 13 | at::Tensor filters, 14 | int padding_l); 15 | 16 | std::vector<at::Tensor> lightconv_cuda_backward( 17 | at::Tensor gradOutput, 18 | int padding_l, 19 | at::Tensor input, 20 | at::Tensor filters); 21 | 22 | 23 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 24 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 25 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 26 | 27 | std::vector<at::Tensor> lightconv_forward( 28 | at::Tensor input, 29 | at::Tensor filters, 30 | int padding_l) { 31 | 32 | CHECK_INPUT(input); 33 | CHECK_INPUT(filters); 34 | 35 | return lightconv_cuda_forward(input, filters, padding_l); 36 | } 37 | 38 | std::vector<at::Tensor> lightconv_backward( 39 | at::Tensor gradOutput, 40 | int padding_l, 41 | at::Tensor input, 42 | at::Tensor filters) { 43 | 44 | CHECK_INPUT(gradOutput); 45 | CHECK_INPUT(input); 46 | CHECK_INPUT(filters); 47 | 48 | return lightconv_cuda_backward(gradOutput, padding_l, input, filters); 49 | } 50 | 51 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 52 | m.def("forward", &lightconv_forward, "lightconv forward (CUDA)"); 53 | m.def("backward", &lightconv_backward, "lightconv backward (CUDA)"); 54 | } 55 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/lightconv_layer/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | from setuptools import setup 8 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 9 | 10 | setup( 11 | name='lightconv_layer', 12 | ext_modules=[ 13 | CUDAExtension('lightconv_cuda', [ 14 | 'lightconv_cuda.cpp', 15 | 'lightconv_cuda_kernel.cu', 16 | ]), 17 | ], 18 | cmdclass={ 19 | 'build_ext': BuildExtension 20 | }) 21 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/logsumexp_moe.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch 7 | 8 | 9 | class LogSumExpMoE(torch.autograd.Function): 10 | """Standard LogSumExp forward pass, but use *posterior* for the backward. 11 | 12 | See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" 13 | (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, logp, posterior, dim=-1): 18 | ctx.save_for_backward(posterior) 19 | ctx.dim = dim 20 | return torch.logsumexp(logp, dim=dim) 21 | 22 | @staticmethod 23 | def backward(ctx, grad_output): 24 | posterior, = ctx.saved_tensors 25 | grad_logp = grad_output.unsqueeze(ctx.dim) * posterior 26 | return grad_logp, None, None 27 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/norm_select.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
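# A short sketch of the LogSumExpMoE trick above: forward is a plain logsumexp,
# but backward routes the incoming gradient through the fixed *posterior*
# instead of softmax(logp). Assumes this fairseq package is importable.
def _demo_logsumexp_moe_grad():
    import torch
    from fairseq.modules.logsumexp_moe import LogSumExpMoE
    logp = torch.tensor([1.0, 2.0], requires_grad=True)
    posterior = torch.tensor([0.0, 1.0])        # hard assignment to expert 1
    out = LogSumExpMoE.apply(logp, posterior, -1)
    out.backward()
    assert logp.grad.tolist() == [0.0, 1.0]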
2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | 7 | from .norms.mask_layernorm import LayerNorm 8 | from torch.nn.utils import spectral_norm 9 | 10 | def NormSelect(norm_type, embed_dim, head_num=None): 11 | if norm_type == "layer": 12 | return LayerNorm(embed_dim) 13 | raise ValueError('unsupported norm type: {}'.format(norm_type)) 14 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/positional_embedding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn as nn 7 | 8 | from .learned_positional_embedding import LearnedPositionalEmbedding 9 | from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding 10 | 11 | 12 | def PositionalEmbedding( 13 | num_embeddings: int, 14 | embedding_dim: int, 15 | padding_idx: int, 16 | learned: bool = False, 17 | ): 18 | if learned: 19 | # if padding_idx is specified then offset the embedding ids by 20 | # this index and adjust num_embeddings appropriately 21 | # TODO: The right place for this offset would be inside 22 | # LearnedPositionalEmbedding. Move this there for a cleaner implementation. 23 | if padding_idx is not None: 24 | num_embeddings = num_embeddings + padding_idx + 1 25 | m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx) 26 | nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) 27 | if padding_idx is not None: 28 | nn.init.constant_(m.weight[padding_idx], 0) 29 | else: 30 | m = SinusoidalPositionalEmbedding( 31 | embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1, 32 | ) 33 | return m 34 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/scalar_bias.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | # 6 | 7 | import torch 8 | 9 | 10 | class ScalarBias(torch.autograd.Function): 11 | """ 12 | Adds a vector of scalars, used in self-attention mechanism to allow 13 | the model to optionally attend to this vector instead of the past 14 | """ 15 | 16 | @staticmethod 17 | def forward(ctx, input, dim, bias_init): 18 | size = list(input.size()) 19 | size[dim] += 1 20 | output = input.new(*size).fill_(bias_init) 21 | output.narrow(dim, 1, size[dim] - 1).copy_(input) 22 | ctx.dim = dim 23 | return output 24 | 25 | @staticmethod 26 | def backward(ctx, grad): 27 | return grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1), None, None 28 | 29 | 30 | def scalar_bias(input, dim, bias_init=0): 31 | return ScalarBias.apply(input, dim, bias_init) 32 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/sparse_transformer_sentence_encoder_layer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 
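# A brief sketch of the padding_idx offset convention used by
# PositionalEmbedding above: ids [0, padding_idx] are reserved, real positions
# start at padding_idx + 1, and padding keeps padding_idx -- which is why
# num_embeddings is grown by padding_idx + 1 for learned embeddings. This
# mirrors fairseq.utils.make_positions; toy values, assumes torch is installed.
def _demo_position_ids():
    import torch
    padding_idx = 1
    tokens = torch.tensor([[5, 6, 1]])          # last token is padding
    mask = tokens.ne(padding_idx).int()
    positions = torch.cumsum(mask, dim=1) * mask + padding_idx
    assert positions.tolist() == [[2, 3, 1]]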
5 | 6 | from fairseq.modules import TransformerSentenceEncoderLayer 7 | from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention 8 | 9 | 10 | class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer): 11 | """ 12 | Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention) 13 | """ 14 | 15 | def __init__( 16 | self, 17 | embedding_dim: float = 768, 18 | ffn_embedding_dim: float = 3072, 19 | num_attention_heads: float = 8, 20 | dropout: float = 0.1, 21 | attention_dropout: float = 0.1, 22 | activation_dropout: float = 0.1, 23 | activation_fn: str = 'relu', 24 | add_bias_kv: bool = False, 25 | add_zero_attn: bool = False, 26 | export: bool = False, 27 | is_bidirectional: bool = True, 28 | stride: int = 32, 29 | expressivity: int = 8, 30 | ) -> None: 31 | 32 | super().__init__( 33 | embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, 34 | attention_dropout, activation_dropout, activation_fn, add_bias_kv, 35 | add_zero_attn, export 36 | ) 37 | 38 | self.self_attn = SparseMultiheadAttention( 39 | self.embedding_dim, 40 | num_attention_heads, 41 | dropout=attention_dropout, 42 | add_bias_kv=add_bias_kv, 43 | add_zero_attn=add_zero_attn, 44 | self_attention=True, 45 | is_bidirectional=is_bidirectional, 46 | stride=stride, 47 | expressivity=expressivity, 48 | ) 49 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/modules/unfold.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.nn.functional as F 7 | 8 | 9 | def unfold1d(x, kernel_size, padding_l, pad_value=0): 10 | '''unfold T x B x C to T x B x C x K''' 11 | if kernel_size > 1: 12 | T, B, C = x.size() 13 | x = F.pad(x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value) 14 | x = x.as_strided((T, B, C, kernel_size), (B*C, C, 1, B*C)) 15 | else: 16 | x = x.unsqueeze(3) 17 | return x 18 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import importlib 7 | import os 8 | 9 | from fairseq import registry 10 | from fairseq.optim.fairseq_optimizer import FairseqOptimizer 11 | from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer 12 | from fairseq.optim.bmuf import FairseqBMUF # noqa 13 | 14 | 15 | __all__ = [ 16 | 'FairseqOptimizer', 17 | 'FP16Optimizer', 18 | 'MemoryEfficientFP16Optimizer', 19 | ] 20 | 21 | 22 | build_optimizer, register_optimizer, OPTIMIZER_REGISTRY = registry.setup_registry( 23 | '--optimizer', 24 | base_class=FairseqOptimizer, 25 | default='nag', 26 | ) 27 | 28 | 29 | # automatically import any Python files in the optim/ directory 30 | for file in os.listdir(os.path.dirname(__file__)): 31 | if file.endswith('.py') and not file.startswith('_'): 32 | module = file[:file.find('.py')] 33 | importlib.import_module('fairseq.optim.' 
+ module) 34 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/adadelta.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adadelta') 12 | class Adadelta(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', 22 | help='coefficient used for computing a running average of squared gradients') 23 | parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', 24 | help='term added to the denominator to improve numerical stability') 25 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 26 | help='weight decay') 27 | parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') 28 | # fmt: on 29 | 30 | @property 31 | def optimizer_config(self): 32 | """ 33 | Return a kwarg dictionary that will be used to override optimizer 34 | args stored in checkpoints. This allows us to load a checkpoint and 35 | resume training using a different set of optimizer args, e.g., with a 36 | different learning rate. 37 | """ 38 | return { 39 | 'lr': self.args.lr[0], 40 | 'rho': self.args.adadelta_rho, 41 | 'eps': self.args.adadelta_eps, 42 | 'weight_decay': self.args.weight_decay, 43 | } 44 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/adagrad.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import torch.optim 7 | 8 | from . import FairseqOptimizer, register_optimizer 9 | 10 | 11 | @register_optimizer('adagrad') 12 | class Adagrad(FairseqOptimizer): 13 | def __init__(self, args, params): 14 | super().__init__(args) 15 | self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) 16 | 17 | @staticmethod 18 | def add_args(parser): 19 | """Add optimizer-specific arguments to the parser.""" 20 | # fmt: off 21 | parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', 22 | help='weight decay') 23 | # fmt: on 24 | 25 | @property 26 | def optimizer_config(self): 27 | """ 28 | Return a kwarg dictionary that will be used to override optimizer 29 | args stored in checkpoints. This allows us to load a checkpoint and 30 | resume training using a different set of optimizer args, e.g., with a 31 | different learning rate. 32 | """ 33 | return { 34 | 'lr': self.args.lr[0], 35 | 'weight_decay': self.args.weight_decay, 36 | } 37 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/lr_scheduler/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
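# A minimal standalone sketch of the registry pattern used throughout this
# package (see registry.setup_registry above): registration is just a decorator
# that records each class under its CLI name, and the os.listdir() import loop
# is what triggers every @register_* decorator at package import time. The toy
# registry below is illustrative; the real one also wires the choice into
# argparse.
def _demo_registry():
    registry = {}

    def register(name):
        def wrap(cls):
            registry[name] = cls
            return cls
        return wrap

    @register('sgd')
    class _SGDStub(object):
        pass

    assert registry['sgd'] is _SGDStub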
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/lr_scheduler/__init__.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import importlib
7 | import os
8 |
9 | from fairseq import registry
10 | from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import FairseqLRScheduler
11 |
12 |
13 | build_lr_scheduler, register_lr_scheduler, LR_SCHEDULER_REGISTRY = registry.setup_registry(
14 |     '--lr-scheduler',
15 |     base_class=FairseqLRScheduler,
16 |     default='fixed',
17 | )
18 |
19 | # automatically import any Python files in the optim/lr_scheduler/ directory
20 | for file in os.listdir(os.path.dirname(__file__)):
21 |     if file.endswith('.py') and not file.startswith('_'):
22 |         module = file[:file.find('.py')]
23 |         importlib.import_module('fairseq.optim.lr_scheduler.' + module)
24 |
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from .. import FairseqOptimizer
7 |
8 |
9 | class FairseqLRScheduler(object):
10 |
11 |     def __init__(self, args, optimizer):
12 |         super().__init__()
13 |         if not isinstance(optimizer, FairseqOptimizer):
14 |             raise ValueError('optimizer must be an instance of FairseqOptimizer')
15 |         self.args = args
16 |         self.optimizer = optimizer
17 |         self.best = None
18 |
19 |     @staticmethod
20 |     def add_args(parser):
21 |         """Add arguments to the parser for this LR scheduler."""
22 |         pass
23 |
24 |     def state_dict(self):
25 |         """Return the LR scheduler state dict."""
26 |         return {'best': self.best}
27 |
28 |     def load_state_dict(self, state_dict):
29 |         """Load an LR scheduler state dict."""
30 |         self.best = state_dict['best']
31 |
32 |     def step(self, epoch, val_loss=None):
33 |         """Update the learning rate at the end of the given epoch."""
34 |         if val_loss is not None:
35 |             if self.best is None:
36 |                 self.best = val_loss
37 |             else:
38 |                 self.best = min(self.best, val_loss)
39 |
40 |     def step_update(self, num_updates):
41 |         """Update the learning rate after each update."""
42 |         return self.optimizer.get_lr()
43 |
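Example (not part of the repository; the name 'constant_demo' is hypothetical): a minimal subclass showing the contract defined by FairseqLRScheduler above. Saved into optim/lr_scheduler/, it would be auto-imported and selectable via '--lr-scheduler constant_demo':

    from . import FairseqLRScheduler, register_lr_scheduler


    @register_lr_scheduler('constant_demo')  # hypothetical name, for illustration
    class ConstantDemoSchedule(FairseqLRScheduler):
        """Keep the learning rate fixed at --lr for the whole run."""

        def __init__(self, args, optimizer):
            super().__init__(args, optimizer)
            self.lr = args.lr[0]
            self.optimizer.set_lr(self.lr)

        def step(self, epoch, val_loss=None):
            super().step(epoch, val_loss)   # keeps tracking the best val_loss
            return self.optimizer.get_lr()

        def step_update(self, num_updates):
            return self.optimizer.get_lr()  # never changes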
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/optim/sgd.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import torch.optim
7 |
8 | from . import FairseqOptimizer, register_optimizer
9 |
10 |
11 | @register_optimizer('sgd')
12 | class SGD(FairseqOptimizer):
13 |     def __init__(self, args, params):
14 |         super().__init__(args)
15 |         self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
16 |
17 |     @staticmethod
18 |     def add_args(parser):
19 |         """Add optimizer-specific arguments to the parser."""
20 |         # fmt: off
21 |         parser.add_argument('--momentum', default=0.0, type=float, metavar='M',
22 |                             help='momentum factor')
23 |         parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
24 |                             help='weight decay')
25 |         # fmt: on
26 |
27 |     @property
28 |     def optimizer_config(self):
29 |         """
30 |         Return a kwarg dictionary that will be used to override optimizer
31 |         args stored in checkpoints. This allows us to load a checkpoint and
32 |         resume training using a different set of optimizer args, e.g., with a
33 |         different learning rate.
34 |         """
35 |         return {
36 |             'lr': self.args.lr[0],
37 |             'momentum': self.args.momentum,
38 |             'weight_decay': self.args.weight_decay,
39 |         }
40 |
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/pdb.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | import multiprocessing
7 | import os
8 | import pdb
9 | import sys
10 |
11 |
12 | __all__ = ['set_trace']
13 |
14 |
15 | _stdin = [None]
16 | _stdin_lock = multiprocessing.Lock()
17 | try:
18 |     _stdin_fd = sys.stdin.fileno()
19 | except Exception:
20 |     _stdin_fd = None
21 |
22 |
23 | class MultiprocessingPdb(pdb.Pdb):
24 |     """A Pdb wrapper that works in a multiprocessing environment.
25 |
26 |     Usage: `from fairseq import pdb; pdb.set_trace()`
27 |     """
28 |
29 |     def __init__(self):
30 |         pdb.Pdb.__init__(self, nosigint=True)
31 |
32 |     def _cmdloop(self):
33 |         stdin_bak = sys.stdin
34 |         with _stdin_lock:
35 |             try:
36 |                 if _stdin_fd is not None:
37 |                     if not _stdin[0]:
38 |                         _stdin[0] = os.fdopen(_stdin_fd)
39 |                     sys.stdin = _stdin[0]
40 |                 self.cmdloop()
41 |             finally:
42 |                 sys.stdin = stdin_bak
43 |
44 |
45 | def set_trace():
46 |     pdb = MultiprocessingPdb()
47 |     pdb.set_trace(sys._getframe().f_back)
48 |
-------------------------------------------------------------------------------- /Transformer_NMT/fairseq/tasks/translation_from_pretrained_xlm.py: --------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | #
3 | # This source code is licensed under the MIT license found in the
4 | # LICENSE file in the root directory of this source tree.
5 |
6 | from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
7 | from fairseq.tasks.translation import TranslationTask
8 |
9 | from . import register_task
10 |
11 |
12 | @register_task("translation_from_pretrained_xlm")
13 | class TranslationFromPretrainedXLMTask(TranslationTask):
14 |     """
15 |     Same as TranslationTask except use the MaskedLMDictionary class so that
16 |     we can load data that was binarized with the MaskedLMDictionary class.
17 |
18 |     This task should be used for the entire training pipeline when we want to
19 |     train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
20 |     training NMT with the pretrained XLM checkpoint, and subsequent evaluation
21 |     of that trained model.
22 |     """
23 |
24 |     @classmethod
25 |     def load_dictionary(cls, filename):
26 |         """Load the masked LM dictionary from the filename
27 |
28 |         Args:
29 |             filename (str): the filename
30 |         """
31 |         return MaskedLMDictionary.load(filename)
32 |
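Example (not part of the repository; the dictionary path is hypothetical): the override above is the whole point of the task. Where TranslationTask would return a plain Dictionary, this task yields a MaskedLMDictionary whose special symbols match the pretrained XLM vocabulary:

    from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
    from fairseq.tasks.translation_from_pretrained_xlm import TranslationFromPretrainedXLMTask

    d = TranslationFromPretrainedXLMTask.load_dictionary('dict.en.txt')  # hypothetical path
    assert isinstance(d, MaskedLMDictionary)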
22 | """ 23 | 24 | @classmethod 25 | def load_dictionary(cls, filename): 26 | """Load the masked LM dictionary from the filename 27 | 28 | Args: 29 | filename (str): the filename 30 | """ 31 | return MaskedLMDictionary.load(filename) 32 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq/tokenizer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import re 7 | 8 | SPACE_NORMALIZER = re.compile(r"\s+") 9 | 10 | 11 | def tokenize_line(line): 12 | line = SPACE_NORMALIZER.sub(" ", line) 13 | line = line.strip() 14 | return line.split() 15 | -------------------------------------------------------------------------------- /Transformer_NMT/fairseq_cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/fairseq_cli/__init__.py -------------------------------------------------------------------------------- /Transformer_NMT/hubconf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 2 | # 3 | # This source code is licensed under the MIT license found in the 4 | # LICENSE file in the root directory of this source tree. 5 | 6 | import functools 7 | 8 | from fairseq.hub_utils import BPEHubInterface as bpe # noqa 9 | from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa 10 | from fairseq.models import MODEL_REGISTRY 11 | 12 | 13 | dependencies = [ 14 | 'numpy', 15 | 'regex', 16 | 'requests', 17 | 'torch', 18 | ] 19 | 20 | 21 | for _model_type, _cls in MODEL_REGISTRY.items(): 22 | for model_name in _cls.hub_models().keys(): 23 | globals()[model_name] = functools.partial( 24 | _cls.from_pretrained, 25 | model_name, 26 | ) 27 | # to simplify the interface we only expose named models 28 | # globals()[_model_type] = _cls.from_pretrained 29 | -------------------------------------------------------------------------------- /Transformer_NMT/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/Transformer_NMT/scripts/__init__.py -------------------------------------------------------------------------------- /Transformer_NMT/scripts/compare_namespaces.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Helper script to compare two argparse.Namespace objects.""" 3 | 4 | from argparse import Namespace # noqa 5 | 6 | 7 | def main(): 8 | 9 | ns1 = eval(input('Namespace 1: ')) 10 | ns2 = eval(input('Namespace 2: ')) 11 | 12 | def keys(ns): 13 | ks = set() 14 | for k in dir(ns): 15 | if not k.startswith('_'): 16 | ks.add(k) 17 | return ks 18 | 19 | k1 = keys(ns1) 20 | k2 = keys(ns2) 21 | 22 | def print_keys(ks, ns1, ns2=None): 23 | for k in ks: 24 | if ns2 is None: 25 | print('{}\t{}'.format(k, getattr(ns1, k, None))) 26 | else: 27 | print('{}\t{}\t{}'.format(k, getattr(ns1, k, None), getattr(ns2, k, None))) 28 | 29 | print('Keys unique to namespace 1:') 30 | print_keys(k1 - k2, ns1) 31 | print() 32 | 33 | print('Keys unique to namespace 2:') 34 | print_keys(k2 - k1, ns2) 35 | 
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/compound_split_bleu.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 1 ]; then
4 |     echo "usage: $0 GENERATE_PY_OUTPUT"
5 |     exit 1
6 | fi
7 |
8 | GEN=$1
9 |
10 | SYS=$GEN.sys
11 | REF=$GEN.ref
12 |
13 | if [ $(tail -n 1 $GEN | grep BLEU | wc -l) -ne 1 ]; then
14 |     echo "not done generating"
15 |     exit
16 | fi
17 |
18 | grep ^H $GEN | cut -f3- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $SYS
19 | grep ^T $GEN | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $REF
20 | fairseq-score --sys $SYS --ref $REF
21 |
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/convert_dictionary.lua: --------------------------------------------------------------------------------
1 | -- Copyright (c) Facebook, Inc. and its affiliates.
2 | --
3 | -- This source code is licensed under the MIT license found in the
4 | -- LICENSE file in the root directory of this source tree.
5 | --
6 | -- Usage: convert_dictionary.lua <dict.th7>
7 | require 'fairseq'
8 | require 'torch'
9 | require 'paths'
10 |
11 | if #arg < 1 then
12 |     print('usage: convert_dictionary.lua <dict.th7>')
13 |     os.exit(1)
14 | end
15 | if not paths.filep(arg[1]) then
16 |     print('error: file does not exist: ' .. arg[1])
17 |     os.exit(1)
18 | end
19 |
20 | dict = torch.load(arg[1])
21 | dst = paths.basename(arg[1]):gsub('.th7', '.txt')
22 | assert(dst:match('.txt$'))
23 |
24 | f = io.open(dst, 'w')
25 | for idx, symbol in ipairs(dict.index_to_symbol) do
26 |     if idx > dict.cutoff then
27 |         break
28 |     end
29 |     f:write(symbol)
30 |     f:write(' ')
31 |     f:write(dict.index_to_freq[idx])
32 |     f:write('\n')
33 | end
34 | f:close()
35 |
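Example (not part of the repository; the path is hypothetical): the converted .txt file is in fairseq's plain-text dictionary format, one 'symbol count' pair per line, so it can be read back with fairseq's Dictionary class:

    from fairseq.data import Dictionary

    d = Dictionary.load('dict.txt')  # expects one 'symbol count' pair per line
    print(len(d))                    # special symbols plus the converted entries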
9 | """ 10 | 11 | import argparse 12 | import gzip 13 | import sys 14 | 15 | import numpy as np 16 | 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser() 20 | parser.add_argument('input') 21 | parser.add_argument('--gzip', action='store_true') 22 | args = parser.parse_args() 23 | 24 | def gopen(): 25 | if args.gzip: 26 | return gzip.open(args.input, 'r') 27 | else: 28 | return open(args.input, 'r', encoding='utf-8') 29 | 30 | num_lines = [] 31 | num_toks = [] 32 | with gopen() as h: 33 | num_docs = 1 34 | num_lines_in_doc = 0 35 | num_toks_in_doc = 0 36 | for i, line in enumerate(h): 37 | if len(line.strip()) == 0: # empty line indicates new document 38 | num_docs += 1 39 | num_lines.append(num_lines_in_doc) 40 | num_toks.append(num_toks_in_doc) 41 | num_lines_in_doc = 0 42 | num_toks_in_doc = 0 43 | else: 44 | num_lines_in_doc += 1 45 | num_toks_in_doc += len(line.rstrip().split()) 46 | if i % 1000000 == 0: 47 | print(i, file=sys.stderr, end="", flush=True) 48 | elif i % 100000 == 0: 49 | print(".", file=sys.stderr, end="", flush=True) 50 | print(file=sys.stderr, flush=True) 51 | 52 | print("found {} docs".format(num_docs)) 53 | print("average num lines per doc: {}".format(np.mean(num_lines))) 54 | print("average num toks per doc: {}".format(np.mean(num_toks))) 55 | 56 | 57 | if __name__ == '__main__': 58 | main() 59 | -------------------------------------------------------------------------------- /Transformer_NMT/scripts/read_binarized.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) Facebook, Inc. and its affiliates. 3 | # 4 | # This source code is licensed under the MIT license found in the 5 | # LICENSE file in the root directory of this source tree. 6 | 7 | import argparse 8 | 9 | from fairseq.data import data_utils, Dictionary, indexed_dataset 10 | 11 | 12 | def get_parser(): 13 | parser = argparse.ArgumentParser( 14 | description='writes text from binarized file to stdout') 15 | # fmt: off 16 | parser.add_argument('--dataset-impl', help='dataset implementation', 17 | choices=indexed_dataset.get_available_dataset_impl()) 18 | parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None) 19 | parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read') 20 | # fmt: on 21 | 22 | return parser 23 | 24 | 25 | def main(): 26 | parser = get_parser() 27 | args = parser.parse_args() 28 | 29 | dictionary = Dictionary.load(args.dict) if args.dict is not None else None 30 | dataset = data_utils.load_indexed_dataset( 31 | args.input, 32 | dictionary, 33 | dataset_impl=args.dataset_impl, 34 | default='lazy', 35 | ) 36 | 37 | for tensor_line in dataset: 38 | if dictionary is None: 39 | line = ' '.join([str(int(x)) for x in tensor_line]) 40 | else: 41 | line = dictionary.string(tensor_line) 42 | 43 | print(line) 44 | 45 | 46 | if __name__ == '__main__': 47 | main() 48 | -------------------------------------------------------------------------------- /Transformer_NMT/scripts/sacrebleu_pregen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -ne 4 ]; then 4 | echo "usage: $0 TESTSET SRCLANG TGTLANG GEN" 5 | exit 1 6 | fi 7 | 8 | TESTSET=$1 9 | SRCLANG=$2 10 | TGTLANG=$3 11 | 12 | GEN=$4 13 | 14 | echo 'Cloning Moses github repository (for tokenization scripts)...' 
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/sacrebleu_pregen.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ $# -ne 4 ]; then
4 |     echo "usage: $0 TESTSET SRCLANG TGTLANG GEN"
5 |     exit 1
6 | fi
7 |
8 | TESTSET=$1
9 | SRCLANG=$2
10 | TGTLANG=$3
11 |
12 | GEN=$4
13 |
14 | echo 'Cloning Moses github repository (for tokenization scripts)...'
15 | git clone https://github.com/moses-smt/mosesdecoder.git
16 |
17 | SCRIPTS=mosesdecoder/scripts
18 | DETOKENIZER=$SCRIPTS/tokenizer/detokenizer.perl
19 |
20 | grep ^H $GEN \
21 |     | sed 's/^H\-//' \
22 |     | sort -n -k 1 \
23 |     | cut -f 3 \
24 |     | perl $DETOKENIZER -l $TGTLANG \
25 |     | sed "s/ - /-/g" \
26 |     > $GEN.sorted.detok
27 |
28 | sacrebleu --test-set $TESTSET --language-pair "${SRCLANG}-${TGTLANG}" < $GEN.sorted.detok
29 |
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/shard_docs.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | #
4 | # This source code is licensed under the MIT license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | """
7 | Split a large file into shards while respecting document boundaries. Documents
8 | should be separated by a single empty line.
9 | """
10 |
11 | import argparse
12 | import contextlib
13 |
14 |
15 | def main():
16 |     parser = argparse.ArgumentParser()
17 |     parser.add_argument('input')
18 |     parser.add_argument('--num-shards', type=int)
19 |     args = parser.parse_args()
20 |
21 |     assert args.num_shards is not None and args.num_shards > 1
22 |
23 |     with open(args.input, 'r', encoding='utf-8') as h:
24 |         with contextlib.ExitStack() as stack:
25 |             outputs = [
26 |                 stack.enter_context(open(args.input + ".shard" + str(i), "w", encoding="utf-8"))
27 |                 for i in range(args.num_shards)
28 |             ]
29 |
30 |             doc = []
31 |             first_doc = [True]*args.num_shards
32 |             def output_doc(i):
33 |                 if not first_doc[i]:
34 |                     outputs[i].write("\n")
35 |                 first_doc[i] = False
36 |                 for line in doc:
37 |                     outputs[i].write(line)
38 |                 doc.clear()
39 |
40 |             num_docs = 0
41 |             for line in h:
42 |                 if line.strip() == "":  # empty line indicates new document
43 |                     output_doc(num_docs % args.num_shards)
44 |                     num_docs += 1
45 |                 else:
46 |                     doc.append(line)
47 |             output_doc(num_docs % args.num_shards)
48 |
49 |
50 | if __name__ == '__main__':
51 |     main()
52 |
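Example (not part of the repository; the in-memory corpus is hypothetical): both count_docs.py and shard_docs.py rely on the same blank-line-per-document convention, illustrated here in miniature:

    # two documents separated by one empty line, as the scripts above expect
    corpus = ["doc1 line1\n", "doc1 line2\n", "\n", "doc2 line1\n"]
    docs, current = [], []
    for line in corpus:
        if line.strip() == "":      # empty line indicates a new document
            docs.append(current)
            current = []
        else:
            current.append(line)
    docs.append(current)            # flush the final document
    assert len(docs) == 2 and len(docs[0]) == 2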
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/spm_decode.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | from __future__ import absolute_import, division, print_function, unicode_literals
9 |
10 | import argparse
11 |
12 | import sentencepiece as spm
13 |
14 |
15 | def main():
16 |     parser = argparse.ArgumentParser()
17 |     parser.add_argument("--model", required=True,
18 |                         help="sentencepiece model to use for decoding")
19 |     parser.add_argument("--input", required=True, help="input file to decode")
20 |     parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
21 |     args = parser.parse_args()
22 |
23 |     sp = spm.SentencePieceProcessor()
24 |     sp.Load(args.model)
25 |
26 |     if args.input_format == "piece":
27 |         def decode(l):
28 |             return "".join(sp.DecodePieces(l))
29 |     elif args.input_format == "id":
30 |         def decode(l):
31 |             return "".join(sp.DecodeIds(l))
32 |     else:
33 |         raise NotImplementedError
34 |
35 |     def tok2int(tok):
36 |         # remap reference-side <unk> (represented as <<unk>>) to 0
37 |         return int(tok) if tok != "<<unk>>" else 0
38 |
39 |     with open(args.input, "r", encoding="utf-8") as h:
40 |         for line in h:
41 |             toks = line.rstrip().split()
42 |             if args.input_format == "id":
43 |                 # ids arrive as text: cast them, remapping <<unk>> to 0
44 |                 toks = list(map(tok2int, toks))
45 |             print(decode(toks))
46 |
47 |
48 | if __name__ == "__main__":
49 |     main()
50 |
-------------------------------------------------------------------------------- /Transformer_NMT/scripts/spm_train.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) Facebook, Inc. and its affiliates.
3 | # All rights reserved.
4 | #
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | from __future__ import absolute_import, division, print_function, unicode_literals
9 |
10 | import sys
11 |
12 | import sentencepiece as spm
13 |
14 |
15 | if __name__ == "__main__":
16 |     spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
17 |
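Example (not part of the repository; corpus path and vocabulary size are hypothetical): a minimal end-to-end sketch of the sentencepiece API that spm_train.py and spm_decode.py wrap:

    import sentencepiece as spm

    # train a small model, as spm_train.py does with its forwarded CLI string
    spm.SentencePieceTrainer.Train('--input=corpus.txt --model_prefix=demo --vocab_size=1000')

    sp = spm.SentencePieceProcessor()
    sp.Load('demo.model')
    pieces = sp.EncodeAsPieces('hello world')
    print(sp.DecodePieces(pieces))  # 'hello world'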
-------------------------------------------------------------------------------- /images/AdaM3.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/AdaM3.png
-------------------------------------------------------------------------------- /images/ETH.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/ETH.png
-------------------------------------------------------------------------------- /images/UCD.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/UCD.png
-------------------------------------------------------------------------------- /images/framework.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/framework.png
-------------------------------------------------------------------------------- /images/neu.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/neu.png
-------------------------------------------------------------------------------- /images/smile.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/wyzjack/AdaM3/e715cde47d7f77d0fdb4e083511dec03a6b0ff3c/images/smile.png
--------------------------------------------------------------------------------