├── .gitignore ├── LICENSE ├── README.md ├── apex ├── .gitignore ├── .gitmodules ├── .nojekyll ├── LICENSE ├── README.md ├── apex │ ├── RNN │ │ ├── README.md │ │ ├── RNNBackend.py │ │ ├── __init__.py │ │ ├── cells.py │ │ └── models.py │ ├── __init__.py │ ├── amp │ │ ├── README.md │ │ ├── __init__.py │ │ ├── __version__.py │ │ ├── _amp_state.py │ │ ├── _initialize.py │ │ ├── _process_optimizer.py │ │ ├── amp.py │ │ ├── compat.py │ │ ├── frontend.py │ │ ├── handle.py │ │ ├── lists │ │ │ ├── __init__.py │ │ │ ├── functional_overrides.py │ │ │ ├── tensor_overrides.py │ │ │ └── torch_overrides.py │ │ ├── opt.py │ │ ├── rnn_compat.py │ │ ├── scaler.py │ │ ├── utils.py │ │ └── wrap.py │ ├── contrib │ │ ├── __init__.py │ │ ├── bottleneck │ │ │ ├── __init__.py │ │ │ ├── bottleneck.py │ │ │ └── test.py │ │ ├── csrc │ │ │ ├── bottleneck │ │ │ │ └── bottleneck.cpp │ │ │ ├── fmha │ │ │ │ ├── fmha_api.cpp │ │ │ │ └── src │ │ │ │ │ ├── fmha.h │ │ │ │ │ ├── fmha │ │ │ │ │ ├── gemm.h │ │ │ │ │ ├── gmem_tile.h │ │ │ │ │ ├── kernel_traits.h │ │ │ │ │ ├── mask.h │ │ │ │ │ ├── smem_tile.h │ │ │ │ │ ├── softmax.h │ │ │ │ │ └── utils.h │ │ │ │ │ ├── fmha_dgrad_fp16_128_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_dgrad_fp16_256_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_dgrad_fp16_384_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_dgrad_fp16_512_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_dgrad_kernel_1xN_reload.h │ │ │ │ │ ├── fmha_dgrad_kernel_1xN_reload_nl.h │ │ │ │ │ ├── fmha_fprop_fp16_128_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_fprop_fp16_256_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_fprop_fp16_384_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_fprop_fp16_512_64_kernel.sm80.cu │ │ │ │ │ ├── fmha_fprop_kernel_1xN.h │ │ │ │ │ ├── fmha_fprop_kernel_1xN_nl.h │ │ │ │ │ ├── fmha_fprop_kernel_1xN_reload_v.h │ │ │ │ │ ├── fmha_kernel.h │ │ │ │ │ ├── fmha_noloop_reduce.cu │ │ │ │ │ └── fmha_utils.h │ │ │ ├── groupbn │ │ │ │ ├── batch_norm.cu │ │ │ │ ├── batch_norm.h │ │ │ │ ├── batch_norm_add_relu.cu │ │ │ │ ├── batch_norm_add_relu.h │ │ │ │ ├── cuda_utils.h │ │ │ │ ├── interface.cpp │ │ │ │ ├── ipc.cu │ │ │ │ └── nhwc_batch_norm_kernel.h │ │ │ ├── layer_norm │ │ │ │ ├── ln_api.cpp │ │ │ │ ├── ln_bwd_semi_cuda_kernel.cu │ │ │ │ ├── ln_fwd_cuda_kernel.cu │ │ │ │ ├── ln_kernel_traits.h │ │ │ │ └── utils.cuh │ │ │ ├── multihead_attn │ │ │ │ ├── additive_masked_softmax_dropout.cpp │ │ │ │ ├── additive_masked_softmax_dropout_cuda.cu │ │ │ │ ├── dropout.h │ │ │ │ ├── encdec_multihead_attn.cpp │ │ │ │ ├── encdec_multihead_attn_cuda.cu │ │ │ │ ├── encdec_multihead_attn_norm_add.cpp │ │ │ │ ├── encdec_multihead_attn_norm_add_cuda.cu │ │ │ │ ├── layer_norm.h │ │ │ │ ├── masked_softmax_dropout.cpp │ │ │ │ ├── masked_softmax_dropout_cuda.cu │ │ │ │ ├── philox.h │ │ │ │ ├── self_multihead_attn.cpp │ │ │ │ ├── self_multihead_attn_bias.cpp │ │ │ │ ├── self_multihead_attn_bias_additive_mask.cpp │ │ │ │ ├── self_multihead_attn_bias_additive_mask_cuda.cu │ │ │ │ ├── self_multihead_attn_bias_cuda.cu │ │ │ │ ├── self_multihead_attn_cuda.cu │ │ │ │ ├── self_multihead_attn_norm_add.cpp │ │ │ │ ├── self_multihead_attn_norm_add_cuda.cu │ │ │ │ ├── softmax.h │ │ │ │ └── strided_batched_gemm.h │ │ │ ├── optimizers │ │ │ │ ├── fused_adam_cuda.cpp │ │ │ │ ├── fused_adam_cuda_kernel.cu │ │ │ │ ├── fused_lamb_cuda.cpp │ │ │ │ ├── fused_lamb_cuda_kernel.cu │ │ │ │ ├── multi_tensor_distopt_adam.cpp │ │ │ │ ├── multi_tensor_distopt_adam_kernel.cu │ │ │ │ ├── multi_tensor_distopt_lamb.cpp │ │ │ │ └── multi_tensor_distopt_lamb_kernel.cu │ │ │ ├── transducer │ │ │ │ ├── transducer_joint.cpp │ │ │ │ 
├── transducer_joint_kernel.cu │ │ │ │ ├── transducer_loss.cpp │ │ │ │ └── transducer_loss_kernel.cu │ │ │ └── xentropy │ │ │ │ ├── interface.cpp │ │ │ │ └── xentropy_kernel.cu │ │ ├── examples │ │ │ └── multihead_attn │ │ │ │ ├── func_test_multihead_attn.py │ │ │ │ └── perf_test_multihead_attn.py │ │ ├── fmha │ │ │ ├── __init__.py │ │ │ └── fmha.py │ │ ├── groupbn │ │ │ ├── __init__.py │ │ │ └── batch_norm.py │ │ ├── layer_norm │ │ │ ├── __init__.py │ │ │ └── layer_norm.py │ │ ├── multihead_attn │ │ │ ├── MHA_bwd.png │ │ │ ├── MHA_fwd.png │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── encdec_multihead_attn.py │ │ │ ├── encdec_multihead_attn_func.py │ │ │ ├── fast_encdec_multihead_attn_func.py │ │ │ ├── fast_encdec_multihead_attn_norm_add_func.py │ │ │ ├── fast_self_multihead_attn_func.py │ │ │ ├── fast_self_multihead_attn_norm_add_func.py │ │ │ ├── mask_softmax_dropout_func.py │ │ │ ├── self_multihead_attn.py │ │ │ └── self_multihead_attn_func.py │ │ ├── optimizers │ │ │ ├── __init__.py │ │ │ ├── distributed_fused_adam.py │ │ │ ├── distributed_fused_adam_v2.py │ │ │ ├── distributed_fused_adam_v3.py │ │ │ ├── distributed_fused_lamb.py │ │ │ ├── fp16_optimizer.py │ │ │ ├── fused_adam.py │ │ │ ├── fused_lamb.py │ │ │ └── fused_sgd.py │ │ ├── sparsity │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── asp.py │ │ │ ├── sparse_masklib.py │ │ │ └── test │ │ │ │ ├── checkpointing_test_part1.py │ │ │ │ ├── checkpointing_test_part2.py │ │ │ │ ├── checkpointing_test_reference.py │ │ │ │ └── toy_problem.py │ │ ├── test │ │ │ ├── fmha │ │ │ │ └── test_fmha.py │ │ │ ├── layer_norm │ │ │ │ └── test_fast_layer_norm.py │ │ │ ├── multihead_attn │ │ │ │ ├── test_encdec_multihead_attn.py │ │ │ │ ├── test_encdec_multihead_attn_norm_add.py │ │ │ │ ├── test_fast_self_multihead_attn_bias.py │ │ │ │ ├── test_mha_fused_softmax.py │ │ │ │ ├── test_self_multihead_attn.py │ │ │ │ └── test_self_multihead_attn_norm_add.py │ │ │ ├── test_label_smoothing.py │ │ │ └── transducer │ │ │ │ ├── test_transducer_joint.py │ │ │ │ ├── test_transducer_loss.py │ │ │ │ └── transducer_ref.py │ │ ├── transducer │ │ │ ├── __init__.py │ │ │ └── transducer.py │ │ └── xentropy │ │ │ ├── __init__.py │ │ │ └── softmax_xentropy.py │ ├── fp16_utils │ │ ├── README.md │ │ ├── __init__.py │ │ ├── fp16_optimizer.py │ │ ├── fp16util.py │ │ └── loss_scaler.py │ ├── mlp │ │ ├── __init__.py │ │ └── mlp.py │ ├── multi_tensor_apply │ │ ├── __init__.py │ │ └── multi_tensor_apply.py │ ├── normalization │ │ ├── __init__.py │ │ └── fused_layer_norm.py │ ├── optimizers │ │ ├── __init__.py │ │ ├── fused_adagrad.py │ │ ├── fused_adam.py │ │ ├── fused_lamb.py │ │ ├── fused_novograd.py │ │ └── fused_sgd.py │ ├── parallel │ │ ├── LARC.py │ │ ├── README.md │ │ ├── __init__.py │ │ ├── distributed.py │ │ ├── multiproc.py │ │ ├── optimized_sync_batchnorm.py │ │ ├── optimized_sync_batchnorm_kernel.py │ │ ├── sync_batchnorm.py │ │ └── sync_batchnorm_kernel.py │ ├── pyprof │ │ ├── FAQs.md │ │ ├── README.md │ │ ├── __init__.py │ │ ├── examples │ │ │ ├── .gitignore │ │ │ ├── apex │ │ │ │ ├── README.md │ │ │ │ ├── fused_adam.py │ │ │ │ ├── fused_layer_norm.py │ │ │ │ └── test.sh │ │ │ ├── custom_func_module │ │ │ │ ├── README.md │ │ │ │ ├── custom_function.py │ │ │ │ ├── custom_module.py │ │ │ │ └── test.sh │ │ │ ├── imagenet │ │ │ │ ├── imagenet.py │ │ │ │ └── test.sh │ │ │ ├── jit │ │ │ │ ├── README.md │ │ │ │ ├── jit_script_function.py │ │ │ │ ├── jit_script_method.py │ │ │ │ ├── jit_trace_function.py │ │ │ │ ├── jit_trace_method.py │ │ │ │ └── test.sh │ │ │ ├── 
lenet.py │ │ │ ├── operators.py │ │ │ ├── simple.py │ │ │ └── user_annotation │ │ │ │ ├── README.md │ │ │ │ ├── resnet.py │ │ │ │ └── test.sh │ │ ├── nvtx │ │ │ ├── __init__.py │ │ │ └── nvmarker.py │ │ ├── parse │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ ├── db.py │ │ │ ├── kernel.py │ │ │ ├── nvvp.py │ │ │ └── parse.py │ │ └── prof │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ ├── activation.py │ │ │ ├── base.py │ │ │ ├── blas.py │ │ │ ├── conv.py │ │ │ ├── convert.py │ │ │ ├── data.py │ │ │ ├── dropout.py │ │ │ ├── embedding.py │ │ │ ├── index_slice_join_mutate.py │ │ │ ├── linear.py │ │ │ ├── loss.py │ │ │ ├── misc.py │ │ │ ├── normalization.py │ │ │ ├── optim.py │ │ │ ├── output.py │ │ │ ├── pointwise.py │ │ │ ├── pooling.py │ │ │ ├── prof.py │ │ │ ├── randomSample.py │ │ │ ├── recurrentCell.py │ │ │ ├── reduction.py │ │ │ ├── softmax.py │ │ │ ├── usage.py │ │ │ └── utility.py │ └── reparameterization │ │ ├── README.md │ │ ├── __init__.py │ │ ├── reparameterization.py │ │ └── weight_norm.py ├── csrc │ ├── amp_C_frontend.cpp │ ├── compat.h │ ├── flatten_unflatten.cpp │ ├── layer_norm_cuda.cpp │ ├── layer_norm_cuda_kernel.cu │ ├── mlp.cpp │ ├── mlp_cuda.cu │ ├── multi_tensor_adagrad.cu │ ├── multi_tensor_adam.cu │ ├── multi_tensor_apply.cuh │ ├── multi_tensor_axpby_kernel.cu │ ├── multi_tensor_l2norm_kernel.cu │ ├── multi_tensor_lamb.cu │ ├── multi_tensor_lamb_stage_1.cu │ ├── multi_tensor_lamb_stage_2.cu │ ├── multi_tensor_novograd.cu │ ├── multi_tensor_scale_kernel.cu │ ├── multi_tensor_sgd_kernel.cu │ ├── syncbn.cpp │ ├── type_shim.h │ └── welford.cu ├── docs │ ├── Makefile │ └── source │ │ ├── _static │ │ ├── css │ │ │ └── pytorch_theme.css │ │ └── img │ │ │ └── nv-pytorch2.png │ │ ├── _templates │ │ └── layout.html │ │ ├── advanced.rst │ │ ├── amp.rst │ │ ├── conf.py │ │ ├── fp16_utils.rst │ │ ├── index.rst │ │ ├── layernorm.rst │ │ ├── optimizers.rst │ │ └── parallel.rst ├── examples │ ├── README.md │ ├── dcgan │ │ ├── README.md │ │ └── main_amp.py │ ├── docker │ │ ├── Dockerfile │ │ └── README.md │ ├── imagenet │ │ ├── README.md │ │ └── main_amp.py │ └── simple │ │ └── distributed │ │ ├── README.md │ │ ├── distributed_data_parallel.py │ │ └── run.sh ├── requirements.txt ├── requirements_dev.txt ├── setup.py └── tests │ ├── L0 │ ├── run_amp │ │ ├── __init__.py │ │ ├── test_add_param_group.py │ │ ├── test_basic_casts.py │ │ ├── test_cache.py │ │ ├── test_checkpointing.py │ │ ├── test_fused_sgd.py │ │ ├── test_larc.py │ │ ├── test_multi_tensor_axpby.py │ │ ├── test_multi_tensor_l2norm.py │ │ ├── test_multi_tensor_scale.py │ │ ├── test_multiple_models_optimizers_losses.py │ │ ├── test_promotion.py │ │ ├── test_rnn.py │ │ └── utils.py │ ├── run_fp16util │ │ ├── __init__.py │ │ └── test_fp16util.py │ ├── run_fused_layer_norm │ │ └── test_fused_layer_norm.py │ ├── run_mlp │ │ └── test_mlp.py │ ├── run_optimizers │ │ ├── __init__.py │ │ ├── test_dist_adam.py │ │ ├── test_fused_novograd.py │ │ ├── test_fused_optimizer.py │ │ └── test_lamb.py │ ├── run_pyprof_data │ │ ├── __init__.py │ │ └── test_pyprof_data.py │ ├── run_pyprof_nvtx │ │ ├── __init__.py │ │ └── test_pyprof_nvtx.py │ └── run_test.py │ ├── L1 │ ├── common │ │ ├── compare.py │ │ ├── main_amp.py │ │ └── run_test.sh │ ├── cross_product │ │ └── run.sh │ └── cross_product_distributed │ │ └── run.sh │ ├── distributed │ ├── DDP │ │ ├── ddp_race_condition_test.py │ │ └── run_race_test.sh │ ├── amp_master_params │ │ ├── amp_master_params.py │ │ ├── compare.py │ │ └── run.sh │ └── synced_batchnorm │ │ ├── 
python_single_gpu_unit_test.py │ │ ├── single_gpu_unit_test.py │ │ ├── test_batchnorm1d.py │ │ ├── test_groups.py │ │ ├── two_gpu_test_different_batch_size.py │ │ ├── two_gpu_unit_test.py │ │ └── unit_test.sh │ └── docker_extension_builds │ └── run.sh ├── assets ├── demo.png ├── dog.jpg ├── git_fig.png └── logo.png ├── cocoapi ├── .gitignore ├── .travis.yml ├── LuaAPI │ ├── CocoApi.lua │ ├── MaskApi.lua │ ├── cocoDemo.lua │ ├── env.lua │ ├── init.lua │ └── rocks │ │ └── coco-scm-1.rockspec ├── MatlabAPI │ ├── CocoApi.m │ ├── CocoEval.m │ ├── CocoUtils.m │ ├── MaskApi.m │ ├── cocoDemo.m │ ├── evalDemo.m │ ├── gason.m │ └── private │ │ ├── gasonMex.cpp │ │ ├── gasonMex.mexa64 │ │ ├── gasonMex.mexmaci64 │ │ └── getPrmDflt.m ├── PythonAPI │ ├── Makefile │ ├── pycocoDemo.ipynb │ ├── pycocoEvalDemo.ipynb │ ├── pycocotools │ │ ├── __init__.py │ │ ├── _mask.pyx │ │ ├── coco.py │ │ ├── cocoeval.py │ │ └── mask.py │ └── setup.py ├── README.txt ├── common │ ├── gason.cpp │ ├── gason.h │ ├── maskApi.c │ └── maskApi.h └── license.txt ├── demo ├── MegEngine │ ├── cpp │ │ ├── README.md │ │ ├── build.sh │ │ └── yolox.cpp │ └── python │ │ ├── README.md │ │ ├── build.py │ │ ├── convert_weights.py │ │ ├── demo.py │ │ ├── dump.py │ │ └── models │ │ ├── __init__.py │ │ ├── darknet.py │ │ ├── network_blocks.py │ │ ├── yolo_fpn.py │ │ ├── yolo_head.py │ │ ├── yolo_pafpn.py │ │ └── yolox.py ├── ONNXRuntime │ ├── README.md │ └── onnx_inference.py ├── OpenVINO │ ├── README.md │ ├── cpp │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ └── yolox_openvino.cpp │ └── python │ │ ├── README.md │ │ └── openvino_inference.py ├── TensorRT │ ├── cpp │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ ├── logging.h │ │ └── yolox.cpp │ └── python │ │ └── README.md └── ncnn │ ├── android │ ├── README.md │ ├── app │ │ ├── build.gradle │ │ └── src │ │ │ └── main │ │ │ ├── AndroidManifest.xml │ │ │ ├── assets │ │ │ └── yolox.param │ │ │ ├── java │ │ │ └── com │ │ │ │ └── megvii │ │ │ │ └── yoloXncnn │ │ │ │ ├── MainActivity.java │ │ │ │ └── yoloXncnn.java │ │ │ ├── jni │ │ │ ├── CMakeLists.txt │ │ │ └── yoloXncnn_jni.cpp │ │ │ └── res │ │ │ ├── layout │ │ │ └── main.xml │ │ │ └── values │ │ │ └── strings.xml │ ├── build.gradle │ ├── gradle │ │ └── wrapper │ │ │ ├── gradle-wrapper.jar │ │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle │ └── cpp │ ├── README.md │ └── yolox.cpp ├── docs ├── .gitignore ├── Makefile ├── _static │ └── css │ │ └── custom.css ├── conf.py ├── demo │ ├── megengine_cpp_readme.md │ ├── megengine_py_readme.md │ ├── ncnn_android_readme.md │ ├── ncnn_cpp_readme.md │ ├── onnx_readme.md │ ├── openvino_cpp_readme.md │ ├── openvino_py_readme.md │ ├── trt_cpp_readme.md │ └── trt_py_readme.md ├── index.rst ├── model_zoo.md ├── quick_run.md ├── requirements-doc.txt └── train_custom_data.md ├── exps ├── default │ ├── nano.py │ ├── yolov3.py │ ├── yolox_l.py │ ├── yolox_m.py │ ├── yolox_s.py │ ├── yolox_tiny.py │ └── yolox_x.py └── example │ ├── custom │ ├── nano.py │ ├── yolox__vector_s.py │ ├── yolox_s.py │ ├── yolox_singing.py │ ├── yolox_singsong.py │ ├── yolox_singsong_l.py │ ├── yolox_singsong_s_automatic.py │ ├── yolox_singsong_x.py │ ├── yolox_singsong_x_renoise.py │ └── yolox_vector_s.py │ └── yolox_voc │ └── yolox_voc_s.py ├── requirements.txt ├── setup.py ├── tools ├── demo.py ├── eval.py ├── export_onnx.py ├── note_eval.py ├── predict.py ├── train.py ├── trt.py └── util │ ├── __init__.py │ ├── automatic_annotation.py │ ├── const_values.py │ ├── cut_image.py │ ├── do_spleeter.py │ 
├── get_res.py │ ├── labelme2coco.py │ ├── merge_res.py │ └── split_mst.py └── yolox ├── __init__.py ├── core ├── __init__.py ├── launch.py └── trainer.py ├── data ├── __init__.py ├── data_augment.py ├── data_prefetcher.py ├── dataloading.py ├── datasets │ ├── Singsong.py │ ├── __init__.py │ ├── coco.py │ ├── coco_classes.py │ ├── datasets_wrapper.py │ ├── mosaicdetection.py │ ├── singsong.py │ ├── vector.py │ ├── voc.py │ └── voc_classes.py └── samplers.py ├── evaluators ├── __init__.py ├── coco_evaluator.py ├── voc_eval.py └── voc_evaluator.py ├── exp ├── __init__.py ├── base_exp.py ├── build.py └── yolox_base.py ├── layers ├── __init__.py ├── csrc │ ├── cocoeval │ │ ├── cocoeval.cpp │ │ └── cocoeval.h │ └── vision.cpp └── fast_coco_eval_api.py ├── models ├── __init__.py ├── darknet.py ├── losses.py ├── network_blocks.py ├── yolo_fpn.py ├── yolo_head.py ├── yolo_pafpn.py └── yolox.py └── utils ├── __init__.py ├── allreduce_norm.py ├── boxes.py ├── checkpoint.py ├── demo_utils.py ├── dist.py ├── ema.py ├── logger.py ├── lr_scheduler.py ├── metric.py ├── model_utils.py ├── setup_env.py └── visualize.py /apex/.gitignore: -------------------------------------------------------------------------------- 1 | apex.egg-info 2 | dist 3 | build 4 | docs/build 5 | *~ 6 | __pycache__ 7 | -------------------------------------------------------------------------------- /apex/.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "apex/contrib/csrc/multihead_attn/cutlass"] 2 | path = apex/contrib/csrc/multihead_attn/cutlass 3 | url = https://github.com/NVIDIA/cutlass.git 4 | branch = v1.2.0 5 | [submodule "apex/contrib/csrc/cudnn-frontend"] 6 | path = apex/contrib/csrc/cudnn-frontend 7 | url = https://github.com/NVIDIA/cudnn-frontend.git 8 | -------------------------------------------------------------------------------- /apex/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/.nojekyll -------------------------------------------------------------------------------- /apex/LICENSE: -------------------------------------------------------------------------------- 1 | All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /apex/apex/RNN/README.md: -------------------------------------------------------------------------------- 1 | Under construction... 2 | -------------------------------------------------------------------------------- /apex/apex/RNN/__init__.py: -------------------------------------------------------------------------------- 1 | from .models import LSTM, GRU, ReLU, Tanh, mLSTM 2 | 3 | __all__ = ['models'] 4 | -------------------------------------------------------------------------------- /apex/apex/RNN/models.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell 4 | 5 | from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell 6 | from .cells import mLSTMRNNCell, mLSTMCell 7 | 8 | def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0): 9 | """ 10 | :class:`toRNNBackend` 11 | """ 12 | 13 | if bidirectional: 14 | return bidirectionalRNN(inputRNN, num_layers, dropout = dropout) 15 | else: 16 | return stackedRNN(inputRNN, num_layers, dropout = dropout) 17 | 18 | 19 | def LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None): 20 | """ 21 | :class:`LSTM` 22 | """ 23 | inputRNN = RNNCell(4, input_size, hidden_size, LSTMCell, 2, bias, output_size) 24 | return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout) 25 | 26 | def GRU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None): 27 | """ 28 | :class:`GRU` 29 | """ 30 | inputRNN = RNNCell(3, input_size, hidden_size, GRUCell, 1, bias, output_size) 31 | return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout) 32 | 33 | def ReLU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None): 34 | """ 35 | :class:`ReLU` 36 | """ 37 | inputRNN = RNNCell(1, input_size, hidden_size, RNNReLUCell, 1, bias, output_size) 38 | return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout) 39 | 40 | def Tanh(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None): 41 | """ 42 | :class:`Tanh` 43 | """ 44 | inputRNN = RNNCell(1, input_size, hidden_size, RNNTanhCell, 1, bias, output_size) 45 | return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout) 46 | 47 | def mLSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None): 48 | """ 49 | :class:`mLSTM` 50 | """ 51 | inputRNN = mLSTMRNNCell(input_size, hidden_size, bias=bias, output_size=output_size) 52 | return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout) 53 | 54 | 55 | -------------------------------------------------------------------------------- /apex/apex/__init__.py: 
-------------------------------------------------------------------------------- 1 | # May help avoid undefined symbol errors https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten 2 | import torch 3 | import warnings 4 | 5 | if torch.distributed.is_available(): 6 | from . import parallel 7 | 8 | from . import amp 9 | from . import fp16_utils 10 | 11 | # For optimizers and normalization there is no Python fallback. 12 | # Absence of cuda backend is a hard error. 13 | # I would like the errors from importing fused_adam_cuda or fused_layer_norm_cuda 14 | # to be triggered lazily, because if someone has installed with --cpp_ext and --cuda_ext 15 | # so they expect those backends to be available, but for some reason they actually aren't 16 | # available (for example because they built improperly in a way that isn't revealed until 17 | # load time) the error message is timely and visible. 18 | from . import optimizers 19 | from . import normalization 20 | from . import pyprof 21 | -------------------------------------------------------------------------------- /apex/apex/amp/README.md: -------------------------------------------------------------------------------- 1 | # amp: Automatic Mixed Precision 2 | 3 | ## Annotating User Functions 4 | 5 | Nearly all PyTorch user code needs nothing more than the two steps 6 | above to use amp. After all, custom layers are built out of simpler 7 | PyTorch components, and amp already can see those. 8 | 9 | However, any custom C++ or CUDA code is outside of amp's (default) 10 | view of things. For example, suppose I implemented a new recurrent 11 | cell called a "forgetful recurrent unit" that calls directly into a 12 | CUDA backend: 13 | 14 | ```python 15 | from backend import FRUBackend 16 | 17 | def fru(input, hidden, weight, bias): 18 | # call to CUDA code 19 | FRUBackend(input, hidden, weight, bias) 20 | ``` 21 | 22 | In this case, it is possible to get a runtime type mismatch. For 23 | example, you might have `input` in fp16, and `weight` in fp32, and amp 24 | doesn't have the visibility to insert an appropriate cast. 25 | 26 | amp exposes two ways to handle "invisible" backend code: function 27 | annotations and explicit registration. 28 | 29 | #### Function annotation 30 | 31 | The first way to handle backend code is a set of function annotations: 32 | 33 | - `@amp.half_function` 34 | - `@amp.float_function` 35 | - `@amp.promote_function` 36 | 37 | These correspond to: 38 | 39 | - Cast all arguments to fp16 40 | - Cast all argumnets fo fp32 41 | - If there are any type mismatches, cast everything to the widest type 42 | 43 | In our example, we believe that the FRU unit is fp16-safe and will get 44 | performance gains from casting its arguments to fp16, so we write: 45 | 46 | ```python 47 | @amp.half_function 48 | def fru(input, hidden, weight, bias): 49 | #... 50 | ``` 51 | 52 | #### Explicit registration 53 | 54 | The other way to handle backend code is with explicit function 55 | registration: 56 | 57 | - `amp.register_half_function(module, function_name)` 58 | - `amp.register_float_function(module, function_name)` 59 | - `amp.register_promote_function(module, function_name)` 60 | 61 | When using this API, `module` is the containing class or module for 62 | the function, and `function_name` is the _string_ name of the 63 | function. Note that the function must be registered before the call to 64 | `amp.initalize()`. 
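The ordering requirement matters in practice: registration has to happen before `amp.initialize()` applies its patches. A minimal setup sketch, reusing the hypothetical `backend`/`FRUBackend` names from the example above (the model and optimizer here are placeholders, not code from this repository), might look like:

```python
import torch
from apex import amp
import backend  # hypothetical module exposing the CUDA FRUBackend from the example above

# 1. Register custom backend functions first, so amp can patch them.
#    (This is the same call shown for the FRU unit just below.)
amp.register_half_function(backend, 'FRUBackend')

# 2. Only afterwards hand the model and optimizer to amp.
model = torch.nn.Linear(16, 16).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
```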
65 | 66 | For our FRU unit, we can register the backend function directly: 67 | 68 | ```python 69 | import backend 70 | 71 | amp.register_half_function(backend, 'FRUBackend') 72 | ``` 73 | -------------------------------------------------------------------------------- /apex/apex/amp/__init__.py: -------------------------------------------------------------------------------- 1 | from .amp import init, half_function, float_function, promote_function,\ 2 | register_half_function, register_float_function, register_promote_function 3 | from .handle import scale_loss, disable_casts 4 | from .frontend import initialize, state_dict, load_state_dict 5 | from ._amp_state import master_params, _amp_state 6 | -------------------------------------------------------------------------------- /apex/apex/amp/__version__.py: -------------------------------------------------------------------------------- 1 | VERSION = (0, 1, 0) 2 | __version__ = '.'.join(map(str, VERSION)) 3 | -------------------------------------------------------------------------------- /apex/apex/amp/_amp_state.py: -------------------------------------------------------------------------------- 1 | # This is a "header object" that allows different amp modules to communicate. 2 | # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. 3 | # But apparently it's ok: 4 | # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm 5 | import os 6 | import torch 7 | 8 | TORCH_MAJOR = int(torch.__version__.split('.')[0]) 9 | TORCH_MINOR = int(torch.__version__.split('.')[1]) 10 | 11 | 12 | if TORCH_MAJOR == 1 and TORCH_MINOR < 8: 13 | from torch._six import container_abcs 14 | else: 15 | import collections.abc as container_abcs 16 | 17 | 18 | class AmpState(object): 19 | def __init__(self): 20 | self.hard_override=False 21 | self.allow_incoming_model_not_fp32 = False 22 | self.verbosity=1 23 | 24 | 25 | # Attribute stash. Could also just stash things as global module attributes. 26 | _amp_state = AmpState() 27 | 28 | 29 | def warn_or_err(msg): 30 | if _amp_state.hard_override: 31 | print("Warning: " + msg) 32 | else: 33 | raise RuntimeError(msg) 34 | # I'm not sure if allowing hard_override is a good idea. 35 | # + " If you're sure you know what you're doing, supply " + 36 | # "hard_override=True to amp.initialize.") 37 | 38 | 39 | def maybe_print(msg, rank0=False): 40 | distributed = torch.distributed.is_available() and \ 41 | torch.distributed.is_initialized() and \ 42 | torch.distributed.get_world_size() > 1 43 | if _amp_state.verbosity > 0: 44 | if rank0: 45 | if distributed: 46 | if torch.distributed.get_rank() == 0: 47 | print(msg) 48 | else: 49 | print(msg) 50 | else: 51 | print(msg) 52 | 53 | 54 | # def iter_params(param_groups): 55 | # for group in param_groups: 56 | # for p in group['params']: 57 | # yield p 58 | 59 | 60 | def master_params(optimizer): 61 | """ 62 | Generator expression that iterates over the params owned by ``optimizer``. 63 | 64 | Args: 65 | optimizer: An optimizer previously returned from ``amp.initialize``. 66 | """ 67 | for group in optimizer.param_groups: 68 | for p in group['params']: 69 | yield p 70 | -------------------------------------------------------------------------------- /apex/apex/amp/compat.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # True for post-0.4, when Variables/Tensors merged. 
4 | def variable_is_tensor(): 5 | v = torch.autograd.Variable() 6 | return isinstance(v, torch.Tensor) 7 | 8 | def tensor_is_variable(): 9 | x = torch.Tensor() 10 | return type(x) == torch.autograd.Variable 11 | 12 | # False for post-0.4 13 | def tensor_is_float_tensor(): 14 | x = torch.Tensor() 15 | return type(x) == torch.FloatTensor 16 | 17 | # Akin to `torch.is_tensor`, but returns True for Variable 18 | # objects in pre-0.4. 19 | def is_tensor_like(x): 20 | return torch.is_tensor(x) or isinstance(x, torch.autograd.Variable) 21 | 22 | # Wraps `torch.is_floating_point` if present, otherwise checks 23 | # the suffix of `x.type()`. 24 | def is_floating_point(x): 25 | if hasattr(torch, 'is_floating_point'): 26 | return torch.is_floating_point(x) 27 | try: 28 | torch_type = x.type() 29 | return torch_type.endswith('FloatTensor') or \ 30 | torch_type.endswith('HalfTensor') or \ 31 | torch_type.endswith('DoubleTensor') 32 | except AttributeError: 33 | return False 34 | 35 | def scalar_python_val(x): 36 | if hasattr(x, 'item'): 37 | return x.item() 38 | else: 39 | if isinstance(x, torch.autograd.Variable): 40 | return x.data[0] 41 | else: 42 | return x[0] 43 | 44 | # Accounts for the possibility that some ops may be removed from a namespace. 45 | def filter_attrs(module, attrs): 46 | return list(attrname for attrname in attrs if hasattr(module, attrname)) 47 | -------------------------------------------------------------------------------- /apex/apex/amp/lists/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/apex/amp/lists/__init__.py -------------------------------------------------------------------------------- /apex/apex/amp/lists/functional_overrides.py: -------------------------------------------------------------------------------- 1 | 2 | # TODO: think about the following two. They do weird things. 3 | # - torch.nn.utils.clip_grad (but it should always be fp32 anyway) 4 | # - torch.nn.utils.weight_norm 5 | 6 | # Notes: 7 | # F.instance_norm uses batch_norm internally. Which correctly handles 8 | # fp16 in/out with fp32 weights. So we shouldn't do anything for 9 | # either of these. 10 | # F.normalize calls `input.norm()` internally, so it's redundant, but 11 | # kept here in case impl. changes. 12 | # F.cosine_similarity is same: calls `x.norm()` internally. 13 | 14 | import torch.nn.functional 15 | 16 | MODULE = torch.nn.functional 17 | 18 | FP16_FUNCS = [ 19 | 'conv1d', 20 | 'conv2d', 21 | 'conv3d', 22 | 'conv_transpose1d', 23 | 'conv_transpose2d', 24 | 'conv_transpose3d', 25 | 'conv_tbc', # Undocumented / maybe new? 26 | 'linear', 27 | ] 28 | 29 | FP32_FUNCS = [ 30 | 31 | # Interpolation/Upsampling TODO: Remove for 1.2 32 | 'interpolate', 33 | 'grid_sample', 34 | 35 | # Pointwise 36 | 'softplus', 37 | 'softmin', 38 | 'log_softmax', 39 | 'softmax', 40 | 'gelu', 41 | 42 | # Normalization 43 | 'layer_norm', 44 | 'group_norm', 45 | 'local_response_norm', 46 | 'normalize', 47 | 'cosine_similarity', 48 | 49 | # Loss functions 50 | # TODO: which of these can be fp16? 
51 | 'poisson_nll_loss', 52 | 'cosine_embedding_loss', 53 | 'cross_entropy', 54 | 'hinge_embedding_loss', 55 | 'kl_div', 56 | 'l1_loss', 57 | 'mse_loss', 58 | 'margin_ranking_loss', 59 | 'multilabel_margin_loss', 60 | 'multilabel_soft_margin_loss', 61 | 'multi_margin_loss', 62 | 'nll_loss', 63 | 'binary_cross_entropy_with_logits', 64 | 'smooth_l1_loss', 65 | 'soft_margin_loss', 66 | 'triplet_margin_loss', 67 | 'ctc_loss' 68 | ] 69 | 70 | BANNED_FUNCS = [ 71 | ('binary_cross_entropy', 72 | ("\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` " 73 | "It requires that the output of the previous function be already a FloatTensor. \n\n" 74 | "Most models have a Sigmoid right before BCELoss. In that case, you can use\n" 75 | " torch.nn.BCEWithLogitsLoss\nto combine Sigmoid+BCELoss into a single layer " 76 | "that is compatible with amp.\nAnother option is to add\n" 77 | " amp.register_float_function(torch, 'sigmoid')\nbefore calling `amp.init()`.\n" 78 | "If you _really_ know what you are doing, you can disable this warning by passing " 79 | "allow_banned=True to `amp.init()`.")) 80 | ] 81 | -------------------------------------------------------------------------------- /apex/apex/amp/lists/tensor_overrides.py: -------------------------------------------------------------------------------- 1 | from .. import compat 2 | from . import torch_overrides 3 | 4 | import importlib 5 | 6 | import torch 7 | 8 | # if compat.variable_is_tensor() and not compat.tensor_is_variable(): 9 | MODULE = torch.Tensor 10 | # else: 11 | # MODULE = torch.autograd.Variable 12 | 13 | 14 | FP16_FUNCS = compat.filter_attrs(MODULE, [ 15 | '__matmul__', 16 | ]) 17 | 18 | FP32_FUNCS = compat.filter_attrs(MODULE, [ 19 | '__ipow__', 20 | '__pow__', 21 | '__rpow__', 22 | 23 | # Cast to fp32 before transfer to CPU 24 | 'cpu', 25 | ]) 26 | 27 | CASTS = compat.filter_attrs(MODULE, [ 28 | '__add__', 29 | '__div__', 30 | '__eq__', 31 | '__ge__', 32 | '__gt__', 33 | '__iadd__', 34 | '__idiv__', 35 | '__imul__', 36 | '__isub__', 37 | '__itruediv__', 38 | '__le__', 39 | '__lt__', 40 | '__mul__', 41 | '__ne__', 42 | '__radd__', 43 | '__rdiv__', 44 | '__rmul__', 45 | '__rsub__', 46 | '__rtruediv__', 47 | '__sub__', 48 | '__truediv__', 49 | ]) 50 | 51 | # None of these, but here to make code cleaner. 52 | SEQUENCE_CASTS = [] 53 | 54 | # We need to grab all the methods from torch_overrides and add them to 55 | # the Tensor lists as well, as almost all methods are duplicated 56 | # between `torch` and `torch.Tensor` (and check with `hasattr`, 57 | # because a few random ones aren't defined on Tensor) 58 | _self_mod = importlib.import_module(__name__) 59 | for attrname in ['FP16_FUNCS', 'FP32_FUNCS', 'CASTS', 'SEQUENCE_CASTS']: 60 | lst = getattr(_self_mod, attrname) 61 | for fn in getattr(torch_overrides, attrname): 62 | if hasattr(MODULE, fn): 63 | lst.append(fn) 64 | -------------------------------------------------------------------------------- /apex/apex/amp/lists/torch_overrides.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | from .. import utils 4 | 5 | MODULE = torch 6 | 7 | FP16_FUNCS = [ 8 | # Low level functions wrapped by torch.nn layers. 9 | # The wrapper layers contain the weights which are then passed in as a parameter 10 | # to these functions. 
11 | 'conv1d', 12 | 'conv2d', 13 | 'conv3d', 14 | 'conv_transpose1d', 15 | 'conv_transpose2d', 16 | 'conv_transpose3d', 17 | 'conv_tbc', 18 | 'prelu', 19 | 20 | # BLAS 21 | 'addmm', 22 | 'addmv', 23 | 'addr', 24 | 'matmul', 25 | 'mm', 26 | 'mv', 27 | ] 28 | 29 | FP32_FUNCS = [ 30 | # Pointwise 31 | 'acos', 32 | 'asin', 33 | 'cosh', 34 | 'erfinv', 35 | 'exp', 36 | 'expm1', 37 | 'log', 38 | 'log10', 39 | 'log2', 40 | 'reciprocal', 41 | 'rsqrt', 42 | 'sinh', 43 | 'tan', 44 | 45 | # Other math 46 | 'pow', 47 | 48 | # Reduction 49 | 'cumprod', 50 | 'cumsum', 51 | 'dist', 52 | # 'mean', 53 | 'norm', 54 | 'prod', 55 | 'std', 56 | 'sum', 57 | 'var', 58 | 59 | # Misc 60 | 'renorm' 61 | ] 62 | 63 | version_strings = torch.__version__.split('.') 64 | version_major = version_strings[0] 65 | version_minor = version_strings[1] 66 | version_num = float(version_major + "." + version_minor) 67 | # Before torch 1.1, mean must be blacklisted. 68 | if version_num < 1.1: 69 | FP32_FUNCS.append('mean') 70 | 71 | # Before CUDA 9.1, batched matmul was missing fast FP16 kernels. We 72 | # check the CUDA version -- if at least 9.1, then put the bmm 73 | # functions on the fp16 list. Otherwise, put them on the fp32 list. 74 | _bmms = ['addbmm', 75 | 'baddbmm', 76 | 'bmm'] 77 | 78 | if utils.is_cuda_enabled(): 79 | # workaround https://github.com/facebookresearch/maskrcnn-benchmark/issues/802 80 | if utils.get_cuda_version() >= (9, 1, 0): 81 | FP16_FUNCS.extend(_bmms) 82 | else: 83 | FP32_FUNCS.extend(_bmms) 84 | 85 | # Multi-tensor fns that may need type promotion 86 | CASTS = [ 87 | # Multi-tensor math 88 | 'addcdiv', 89 | 'addcmul', 90 | 'atan2', 91 | 'cross', 92 | 'bilinear', 93 | 'dot', 94 | 95 | # Element-wise _or_ tensor-wise math 96 | 'add', 97 | 'div', 98 | 'mul', 99 | 100 | # Comparison 101 | 'eq', 102 | 'equal', 103 | 'ge', 104 | 'gt', 105 | 'le', 106 | 'lt', 107 | 'ne' 108 | ] 109 | 110 | # Functions that take sequence arguments. We need to inspect the whole 111 | # sequence and cast to the widest type. 112 | SEQUENCE_CASTS = [ 113 | 'cat', 114 | 'stack' 115 | ] 116 | -------------------------------------------------------------------------------- /apex/apex/amp/rnn_compat.py: -------------------------------------------------------------------------------- 1 | from . import utils, wrap 2 | 3 | import torch 4 | _VF = torch._C._VariableFunctions 5 | RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] 6 | 7 | def _gen_VF_wrapper(name): 8 | def wrapper(*args, **kwargs): 9 | return getattr(_VF, name)(*args, **kwargs) 10 | return wrapper 11 | 12 | # Some python magic to generate an object that has the rnn cell functions 13 | # defined on it, all of which call into corresponding _VF version. 14 | # Intended to patch torch.nn.modules.rnn._VF (aka, the ref named "_VF" 15 | # imported at module scope within torch.nn.modules.rnn). This should 16 | # not affect third-party importers of _VF.py. 
17 | class VariableFunctionsShim(object): 18 | def __init__(self): 19 | for name in RNN_NAMES: 20 | for suffix in ['', '_cell']: 21 | fn_name = name + suffix 22 | setattr(self, fn_name, _gen_VF_wrapper(fn_name)) 23 | 24 | def has_old_rnns(): 25 | try: 26 | torch.nn.backends.thnn.backend.LSTMCell 27 | return True 28 | except: 29 | return False 30 | 31 | def whitelist_rnn_cells(handle, verbose): 32 | # Different module + function names in old/new RNN cases 33 | if has_old_rnns(): 34 | fn_names = ['RNNReLUCell', 'RNNTanhCell', 'LSTMCell', 'GRUCell'] 35 | mod = torch.nn.backends.thnn.backend 36 | else: 37 | fn_names = [x + '_cell' for x in RNN_NAMES] 38 | mod = torch.nn.modules.rnn._VF 39 | assert isinstance(mod, VariableFunctionsShim) 40 | 41 | # Insert casts on cell functions 42 | for fn in fn_names: 43 | wrap.cached_cast(mod, fn, utils.maybe_half, handle, 44 | try_caching=True, verbose=verbose) 45 | 46 | if has_old_rnns(): 47 | # Special handling of `backward` for fused gru / lstm: 48 | # The `backward` method calls Tensor.sum() (blacklist) internally, 49 | # and then the resulting grad_input has the wrong type. 50 | # TODO: where else is this a problem? 51 | for rnn_type in ['GRUFused', 'LSTMFused']: 52 | mod = getattr(torch.nn._functions.thnn.rnnFusedPointwise, rnn_type) 53 | wrap.disable_casts(mod, 'backward', handle) 54 | -------------------------------------------------------------------------------- /apex/apex/contrib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/apex/contrib/__init__.py -------------------------------------------------------------------------------- /apex/apex/contrib/bottleneck/__init__.py: -------------------------------------------------------------------------------- 1 | from .bottleneck import Bottleneck 2 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/groupbn/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #include 2 | #ifndef CUDA_UTILS_H 3 | #define CUDA_UTILS_H 4 | 5 | namespace at { 6 | namespace cuda { 7 | 8 | namespace utils { 9 | 10 | static inline int MaxSharedMemoryPerMultiprocessor(int device_id) { 11 | return getDeviceProperties(device_id)->sharedMemPerMultiprocessor; 12 | } 13 | 14 | 15 | } 16 | } 17 | } 18 | 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/layer_norm/ln_kernel_traits.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | constexpr uint32_t THREADS_PER_WARP = 32; 4 | 5 | template 7 | struct Kernel_traits { 8 | enum { WARPS_M = WARPS_M_ }; 9 | enum { WARPS_N = WARPS_N_ }; 10 | enum { COLS = COLS_ }; 11 | enum { BYTES_PER_LDG = BYTES_PER_LDG_ }; 12 | 13 | using Vec = Vec; 14 | 15 | using vec_t = typename Vec::vec_t; 16 | using base_t = typename Vec::base_t; 17 | using packed_t = typename Vec::packed_t; 18 | using compute_t = typename Vec::compute_t; 19 | using packed_compute_t = typename Vec::packed_compute_t; 20 | 21 | enum { THREADS_PER_ROW = WARPS_N * THREADS_PER_WARP }; 22 | enum { THREADS_PER_CTA = WARPS_M * THREADS_PER_ROW }; 23 | enum { ROWS_PER_CTA = WARPS_M }; 24 | 25 | enum { BYTES_PER_ROW = COLS * sizeof(base_t) }; 26 | enum { BYTES_PER_ROW_PER_CTA = THREADS_PER_ROW * BYTES_PER_LDG }; 27 | enum {SMEM_BYTES = ROWS_PER_CTA * COLS * 
sizeof(compute_t)}; 28 | }; 29 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/optimizers/fused_lamb_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void multi_tensor_lamb_cuda( 4 | int chunk_size, 5 | at::Tensor noop_flag, 6 | std::vector> tensor_lists, 7 | const float lr, 8 | const float beta1, 9 | const float beta2, 10 | const float epsilon, 11 | const int step, 12 | const int bias_correction, 13 | const float weight_decay, 14 | const int grad_averaging, 15 | const int mode, 16 | const float global_grad_norm, 17 | const float max_grad_norm); 18 | 19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("lamb", &multi_tensor_lamb_cuda, "Computes and apply update for LAMB optimizer"); 21 | } 22 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/optimizers/multi_tensor_distopt_adam.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void multi_tensor_fused_adam_cuda( 4 | int chunk_size, 5 | at::Tensor noop_flag, 6 | std::vector> tensor_lists, 7 | at::Tensor per_tensor_beta1, 8 | at::Tensor per_tensor_beta2, 9 | at::Tensor per_tensor_bias_correction, 10 | at::Tensor per_tensor_eps, 11 | at::Tensor per_tensor_weight_decay, 12 | float lr, 13 | float grad_scale, 14 | int step, 15 | int mode); 16 | 17 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 18 | m.def("multi_tensor_fused_adam", &multi_tensor_fused_adam_cuda, 19 | "Multi tensor Adam optimized CUDA implementation."); 20 | } 21 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/optimizers/multi_tensor_distopt_lamb.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void multi_tensor_lamb_compute_update_term_cuda( 4 | int chunk_size, 5 | at::Tensor noop_flag, 6 | std::vector> tensor_lists, 7 | at::Tensor per_tensor_beta1, 8 | at::Tensor per_tensor_beta2, 9 | at::Tensor per_tensor_beta3, 10 | at::Tensor per_tensor_bias_correction, 11 | at::Tensor step, 12 | at::Tensor per_tensor_epsilon, 13 | const int mode, 14 | at::Tensor per_tensor_decay, 15 | at::Tensor global_scale, 16 | at::Tensor global_grad_norm, 17 | const float max_grad_norm); 18 | 19 | void multi_tensor_lamb_update_weights_cuda( 20 | int chunk_size, 21 | at::Tensor noop_flag, 22 | std::vector> tensor_lists, 23 | at::Tensor per_tensor_param_norm, 24 | at::Tensor per_tensor_update_norm, 25 | at::Tensor update_norm_offset, 26 | at::Tensor learning_rate, 27 | at::Tensor per_tensor_decay, 28 | at::Tensor global_grad_norm, 29 | bool use_nvlamb); 30 | 31 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 32 | m.def("multi_tensor_lamb_compute_update_term", &multi_tensor_lamb_compute_update_term_cuda, 33 | "Computes update term for LAMB optimizer"); 34 | m.def("multi_tensor_lamb_update_weights", &multi_tensor_lamb_update_weights_cuda, 35 | "Applies update term for LAMB optimizer"); 36 | } 37 | -------------------------------------------------------------------------------- /apex/apex/contrib/csrc/xentropy/interface.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // CUDA forward declarations 4 | 5 | std::vector softmax_xentropy_cuda( 6 | const at::Tensor &input, 7 | const at::Tensor &labels, 8 | const float smoothing, 9 | const bool half_to_float); 10 | 11 | at::Tensor softmax_xentropy_backward_cuda( 
12 | const at::Tensor &grad_loss, 13 | const at::Tensor &logits, 14 | const at::Tensor &max_log_sum_exp, 15 | const at::Tensor &labels, 16 | const float smoothing); 17 | 18 | // C++ interface 19 | 20 | #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") 21 | #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") 22 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) 23 | 24 | std::vector softmax_xentropy_forward( 25 | const at::Tensor &input, 26 | const at::Tensor &labels, 27 | const float smoothing, 28 | const bool half_to_float) { 29 | CHECK_CUDA(input); 30 | CHECK_INPUT(labels); 31 | 32 | return softmax_xentropy_cuda(input, labels, smoothing, half_to_float); 33 | } 34 | 35 | at::Tensor softmax_xentropy_backward( 36 | const at::Tensor &grad_loss, 37 | const at::Tensor &logits, 38 | const at::Tensor &max_log_sum_exp, 39 | const at::Tensor &labels, 40 | const float smoothing) { 41 | CHECK_CUDA(grad_loss); 42 | CHECK_CUDA(logits); 43 | CHECK_INPUT(max_log_sum_exp); 44 | CHECK_INPUT(labels); 45 | 46 | return softmax_xentropy_backward_cuda(grad_loss, logits, max_log_sum_exp, labels, smoothing); 47 | } 48 | 49 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 50 | m.def("forward", &softmax_xentropy_forward, "Softmax cross entropy loss with label smoothing forward (CUDA)"); 51 | m.def("backward", &softmax_xentropy_backward, "Softmax cross entropy loss with label smoothing backward (CUDA)"); 52 | } 53 | -------------------------------------------------------------------------------- /apex/apex/contrib/fmha/__init__.py: -------------------------------------------------------------------------------- 1 | from .fmha import FMHAFun 2 | -------------------------------------------------------------------------------- /apex/apex/contrib/groupbn/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import bnp 4 | from .batch_norm import BatchNorm2d_NHWC 5 | del torch 6 | del bnp 7 | del batch_norm 8 | except ImportError as err: 9 | print("apex was installed without --bnp flag, contrib.groupbn is not available") 10 | -------------------------------------------------------------------------------- /apex/apex/contrib/layer_norm/__init__.py: -------------------------------------------------------------------------------- 1 | from .layer_norm import FastLayerNorm 2 | -------------------------------------------------------------------------------- /apex/apex/contrib/layer_norm/layer_norm.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import init 3 | 4 | import fast_layer_norm 5 | 6 | class FastLayerNormFN(torch.autograd.Function): 7 | @staticmethod 8 | def forward(ctx, x, gamma, beta, epsilon): 9 | x = x.contiguous() 10 | gamma = gamma.contiguous() 11 | beta = beta.contiguous() 12 | hidden_size = gamma.numel() 13 | xmat = x.view((-1, hidden_size)) 14 | ymat, mu, rsigma = fast_layer_norm.ln_fwd(xmat, gamma, beta, epsilon) 15 | ctx.save_for_backward(x, gamma, mu, rsigma) 16 | return ymat.view(x.shape) 17 | 18 | @staticmethod 19 | def backward(ctx, dy): 20 | #assert dy.is_contiguous() 21 | dy = dy.contiguous() # this happens! 
22 | x, gamma, mu, rsigma = ctx.saved_tensors 23 | 24 | hidden_size = gamma.numel() 25 | xmat = x.view((-1, hidden_size)) 26 | dymat = dy.view(xmat.shape) 27 | dxmat, dgamma, dbeta = fast_layer_norm.ln_bwd(dymat, xmat, mu, rsigma, gamma) 28 | dx = dxmat.view(x.shape) 29 | return dx, dgamma, dbeta, None 30 | 31 | class FastLayerNorm(torch.nn.Module): 32 | def __init__(self, hidden_size, eps=1e-5): 33 | super(FastLayerNorm, self).__init__() 34 | self.epsilon = eps 35 | self.weight = torch.nn.Parameter(torch.Tensor(hidden_size)) 36 | self.bias = torch.nn.Parameter(torch.Tensor(hidden_size)) 37 | self.reset_parameters() 38 | 39 | def reset_parameters(self): 40 | init.ones_(self.weight) 41 | init.zeros_(self.bias) 42 | 43 | def forward(self, x): 44 | return FastLayerNormFN.apply(x, self.weight, self.bias, self.epsilon) 45 | -------------------------------------------------------------------------------- /apex/apex/contrib/multihead_attn/MHA_bwd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/apex/contrib/multihead_attn/MHA_bwd.png -------------------------------------------------------------------------------- /apex/apex/contrib/multihead_attn/MHA_fwd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/apex/contrib/multihead_attn/MHA_fwd.png -------------------------------------------------------------------------------- /apex/apex/contrib/multihead_attn/README.md: -------------------------------------------------------------------------------- 1 | # Fast Multihead Attention 2 | 3 | This implementation has two main features : 4 | * A C++ implementation to avoid the CPU overheads of Pytorch found with smaller batch sizes. 5 | * The removal of all copies and transposes found in standard implementations of Multihead Attention. 6 | 7 | | | Python Version | C++ Version | 8 | | :----------------------------------------- | :------------: | :---------: | 9 | | Layer Norm and Residual Add Variant | X | X | 10 | | Includes Linear Biases | X | | 11 | | Reduces CPU Overheads | | X | 12 | | Fuses masking with Softmax | | X | 13 | | Removes Transposes and Copies | X | X | 14 | | Includes Self and Encoder/Decoder Variants | X | X | 15 | 16 | ## How to Instantiate 17 | 18 | `SelfMultiheadAttn(` _hidden dim_, _heads_, _dropout=prob_, _bias=bool_, _include_norm_add=bool_, _impl='fast'_ `)` 19 | `EncdecMultiheadAttn(` _hidden dim_, _heads_, _dropout=prob_, _bias=bool_, _include_norm_add=bool_, _impl='fast'_ `)` 20 | 21 | `impl` has two options: 22 | * `fast` uses C++ Version 23 | * `default` uses Python Version 24 | 25 | ## Instructions to build on Linux 26 | 27 | ``` 28 | $ git clone https://github.com/NVIDIA/apex 29 | $ cd apex 30 | $ pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_multihead_attn" ./ 31 | ``` 32 | ## Try Performance Tests Yourself! 33 | Perf test script is found here! 
34 | ``` 35 | cd contrib/examples/multihead_attn 36 | ``` 37 | #### Fast Multihead Attention 38 | ``` 39 | python perf_test_multihead_attn.py --ref 40 | ``` 41 | #### Fast Multihead Attention with C++ Implementation 42 | ``` 43 | python perf_test_multihead_attn.py 44 | ``` 45 | #### Compare with `torch.nn.MultiheadAttn` 46 | ``` 47 | python perf_test_multihead_attn.py --native 48 | ``` 49 | #### Test your own range! 50 | ``` 51 | python perf_test_multihead_attn.py --seq-length 64 --num-seqs-start 10 --num-seqs-stop 120 --num-seqs-inc 5 52 | ``` 53 | 54 | ## Performance Comparisons 55 | 56 | * Performance was measured with 64 token sequence lengths on an NVIDIA TitanV card. 57 | * Time is measured across multiple layers to simulate an in model scenario. 58 | 59 | ![Multihead Attention Forward](MHA_fwd.png) 60 | ![Multihead Attention Backward](MHA_bwd.png) 61 | -------------------------------------------------------------------------------- /apex/apex/contrib/multihead_attn/__init__.py: -------------------------------------------------------------------------------- 1 | from .self_multihead_attn import SelfMultiheadAttn 2 | from .encdec_multihead_attn import EncdecMultiheadAttn 3 | from .mask_softmax_dropout_func import fast_mask_softmax_dropout_func 4 | -------------------------------------------------------------------------------- /apex/apex/contrib/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .fp16_optimizer import FP16_Optimizer 2 | from .fused_adam import FusedAdam 3 | from .fused_lamb import FusedLAMB 4 | -------------------------------------------------------------------------------- /apex/apex/contrib/sparsity/__init__.py: -------------------------------------------------------------------------------- 1 | from .sparse_masklib import create_mask 2 | from .asp import ASP 3 | -------------------------------------------------------------------------------- /apex/apex/contrib/test/multihead_attn/test_mha_fused_softmax.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import unittest 3 | import torch.nn.functional as F 4 | from apex.contrib.multihead_attn import fast_mask_softmax_dropout_func 5 | 6 | class FusedSoftmaxTest(unittest.TestCase): 7 | def setUp(self, seed=1234): 8 | torch.manual_seed(seed) 9 | torch.cuda.manual_seed_all(seed) 10 | 11 | self.seq_length = 80 12 | self.sequences = 10 13 | self.hidden_dim = 1024 14 | self.heads = 16 15 | self.dropout_prob = 0.0 16 | 17 | self.mask = (torch.randn(self.sequences,self.seq_length)>0).cuda() 18 | self.mask = self.mask.half()*-10000 19 | self.ref_inputs = torch.randn(self.heads * self.sequences, self.seq_length, self.seq_length, 20 | dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True) 21 | 22 | self.tst_inputs = self.ref_inputs.clone().detach().requires_grad_(True) 23 | 24 | def test_fused_softmax(self) : 25 | grads = torch.randn_like(self.tst_inputs) 26 | y_ref = self.ref_inputs.view(self.sequences, self.heads, self.seq_length, self.seq_length) 27 | y_ref = y_ref + self.mask.unsqueeze(1).unsqueeze(2) 28 | y_ref = y_ref.view(self.sequences*self.heads, self.seq_length, self.seq_length) 29 | y_ref = F.softmax(y_ref, dim=-1) 30 | y_ref = torch._fused_dropout(y_ref, 1.0) 31 | 32 | y_tst = fast_mask_softmax_dropout_func(True, self.heads, self.tst_inputs, self.mask, True, 0.0) 33 | y_ref[0].backward(grads) 34 | y_tst.backward(grads) 35 | 36 | self.assertTrue(torch.allclose(self.ref_inputs, 
self.tst_inputs, atol=1e-5, rtol=1e-5)) 37 | self.assertTrue(torch.allclose(y_ref[0], y_tst, atol=1e-3, rtol=1e-3)) 38 | self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3)) 39 | 40 | 41 | if __name__ == '__main__': 42 | unittest.main() 43 | -------------------------------------------------------------------------------- /apex/apex/contrib/transducer/__init__.py: -------------------------------------------------------------------------------- 1 | from .transducer import TransducerJoint 2 | from .transducer import TransducerLoss -------------------------------------------------------------------------------- /apex/apex/contrib/xentropy/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import torch 3 | import xentropy_cuda 4 | from .softmax_xentropy import SoftmaxCrossEntropyLoss 5 | del torch 6 | del xentropy_cuda 7 | del softmax_xentropy 8 | except ImportError as err: 9 | print("apex was installed without --xentropy flag, contrib.xentropy is not available") 10 | -------------------------------------------------------------------------------- /apex/apex/contrib/xentropy/softmax_xentropy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import xentropy_cuda 3 | 4 | class SoftmaxCrossEntropyLoss(torch.autograd.Function): 5 | @staticmethod 6 | def forward(ctx, logits, labels, smoothing=0.0, padding_idx=0, half_to_float=False): 7 | losses, max_log_sum_exp = xentropy_cuda.forward( 8 | logits, labels, smoothing, half_to_float) 9 | losses.masked_fill_(labels==padding_idx, 0) 10 | 11 | ctx.save_for_backward(logits, max_log_sum_exp, labels, 12 | torch.FloatTensor([smoothing]), 13 | torch.LongTensor([padding_idx])) 14 | 15 | return losses 16 | 17 | @staticmethod 18 | def backward(ctx, grad_loss): 19 | logits, max_log_sum_exp, labels, smoothing, padding_idx = ctx.saved_tensors 20 | 21 | if not grad_loss.is_contiguous(): 22 | grad_loss = grad_loss.contiguous() 23 | grad_loss.masked_fill_(labels==padding_idx.item(), 0) 24 | grad_logits = xentropy_cuda.backward( 25 | grad_loss.contiguous(), logits, max_log_sum_exp, 26 | labels, smoothing.item()) 27 | 28 | return grad_logits, None, None, None, None 29 | -------------------------------------------------------------------------------- /apex/apex/fp16_utils/README.md: -------------------------------------------------------------------------------- 1 | fp16_optimizer.py contains `FP16_Optimizer`, a Python class designed to wrap an existing Pytorch optimizer and automatically enable master parameters and loss scaling in a manner transparent to the user. To use `FP16_Optimizer`, only two lines of one's Python model need to change. 2 | 3 | #### [FP16_Optimizer API documentation](https://nvidia.github.io/apex/fp16_utils.html#automatic-management-of-master-params-loss-scaling) 4 | 5 | #### [Simple examples with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/FP16_Optimizer_simple) 6 | 7 | #### [Imagenet with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/imagenet) 8 | 9 | #### [word_language_model with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/word_language_model) 10 | 11 | 12 | fp16_util.py contains a number of utilities to manually manage master parameters and loss scaling, if the user chooses. 
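For the manual route just described, a minimal sketch using the helpers exported from this package might look like the following (the toy model, static loss scale, and training loop are placeholders, not code from this repository):

```python
import torch
from apex.fp16_utils import (network_to_half, prep_param_lists,
                             model_grads_to_master_grads, master_params_to_model_params)

model = network_to_half(torch.nn.Linear(32, 8).cuda())
model_params, master_params = prep_param_lists(model)   # fp32 master copies of the fp16 params
optimizer = torch.optim.SGD(master_params, lr=0.01)      # the optimizer updates the fp32 masters

loss_scale = 128.0  # assumed static loss scale
for _ in range(10):
    x = torch.randn(4, 32, device='cuda', dtype=torch.half)
    loss = model(x).float().sum()
    model.zero_grad()
    (loss * loss_scale).backward()
    model_grads_to_master_grads(model_params, master_params)
    for p in master_params:
        p.grad.data.mul_(1.0 / loss_scale)               # undo the loss scale before the update
    optimizer.step()
    master_params_to_model_params(model_params, master_params)
```

`FP16_Optimizer` performs essentially these steps automatically when it wraps the optimizer.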
13 | 14 | #### [Manual management documentation](https://nvidia.github.io/apex/fp16_utils.html#manual-master-parameter-management) 15 | 16 | The [Imagenet with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/imagenet) and [word_language_model with FP16_Optimizer](https://github.com/NVIDIA/apex/tree/master/examples/word_language_model) directories also contain `main.py` files that demonstrate manual management of master parameters and static loss scaling. These examples illustrate what sort of operations `FP16_Optimizer` is performing automatically. 17 | -------------------------------------------------------------------------------- /apex/apex/fp16_utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .fp16util import ( 2 | BN_convert_float, 3 | network_to_half, 4 | prep_param_lists, 5 | model_grads_to_master_grads, 6 | master_params_to_model_params, 7 | tofp16, 8 | to_python_float, 9 | clip_grad_norm, 10 | convert_module, 11 | convert_network, 12 | FP16Model, 13 | ) 14 | 15 | from .fp16_optimizer import FP16_Optimizer 16 | from .loss_scaler import LossScaler, DynamicLossScaler 17 | -------------------------------------------------------------------------------- /apex/apex/mlp/__init__.py: -------------------------------------------------------------------------------- 1 | from .mlp import * 2 | -------------------------------------------------------------------------------- /apex/apex/multi_tensor_apply/__init__.py: -------------------------------------------------------------------------------- 1 | from .multi_tensor_apply import MultiTensorApply 2 | 3 | multi_tensor_applier = MultiTensorApply(2048*32) 4 | 5 | -------------------------------------------------------------------------------- /apex/apex/multi_tensor_apply/multi_tensor_apply.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | class MultiTensorApply(object): 4 | available = False 5 | warned = False 6 | 7 | def __init__(self, chunk_size): 8 | try: 9 | import amp_C 10 | MultiTensorApply.available = True 11 | self.chunk_size = chunk_size 12 | except ImportError as err: 13 | MultiTensorApply.available = False 14 | MultiTensorApply.import_err = err 15 | 16 | def check_avail(self): 17 | if MultiTensorApply.available == False: 18 | raise RuntimeError( 19 | "Attempted to call MultiTensorApply method, but MultiTensorApply " 20 | "is not available, possibly because Apex was installed without " 21 | "--cpp_ext --cuda_ext. 
Original import error message:", 22 | MultiTensorApply.import_err) 23 | 24 | def __call__(self, op, noop_flag_buffer, tensor_lists, *args): 25 | self.check_avail() 26 | 27 | return op(self.chunk_size, 28 | noop_flag_buffer, 29 | tensor_lists, 30 | *args) 31 | -------------------------------------------------------------------------------- /apex/apex/normalization/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_layer_norm import FusedLayerNorm 2 | -------------------------------------------------------------------------------- /apex/apex/optimizers/__init__.py: -------------------------------------------------------------------------------- 1 | from .fused_sgd import FusedSGD 2 | from .fused_adam import FusedAdam 3 | from .fused_novograd import FusedNovoGrad 4 | from .fused_lamb import FusedLAMB 5 | from .fused_adagrad import FusedAdagrad -------------------------------------------------------------------------------- /apex/apex/parallel/multiproc.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import sys 3 | import subprocess 4 | 5 | def docstring_hack(): 6 | """ 7 | Multiproc file which will launch a set of processes locally for multi-gpu 8 | usage: python -m apex.parallel.multiproc main.py ... 9 | """ 10 | pass 11 | 12 | argslist = list(sys.argv)[1:] 13 | world_size = torch.cuda.device_count() 14 | 15 | if '--world-size' in argslist: 16 | world_size = int(argslist[argslist.index('--world-size')+1]) 17 | else: 18 | argslist.append('--world-size') 19 | argslist.append(str(world_size)) 20 | 21 | workers = [] 22 | 23 | for i in range(world_size): 24 | if '--rank' in argslist: 25 | argslist[argslist.index('--rank')+1] = str(i) 26 | else: 27 | argslist.append('--rank') 28 | argslist.append(str(i)) 29 | stdout = None if i == 0 else open("GPU_"+str(i)+".log", "w") 30 | print(argslist) 31 | p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) 32 | workers.append(p) 33 | 34 | for p in workers: 35 | p.wait() 36 | -------------------------------------------------------------------------------- /apex/apex/pyprof/FAQs.md: -------------------------------------------------------------------------------- 1 | 1. How do I intercept the Adam optimizer in APEX ? 2 | 3 | ```python 4 | from apex import pyprof 5 | import fused_adam_cuda 6 | pyprof.nvtx.wrap(fused_adam_cuda, 'adam') 7 | ``` 8 | 9 | 2. If you are using JIT and/or AMP, the correct initialization sequence is 10 | 1. Let any JIT to finish. 11 | 2. Initlialize pyprof `pyprof.nvtx.init()`. 12 | 3. Initialize AMP. 13 | 14 | 3. How do I profile with `torch.distributed.launch` ? 15 | 16 | ```python 17 | nvprof -f -o net%p.sql \ 18 | --profile-from-start off \ 19 | --profile-child-processes \ 20 | python -m torch.distributed.launch net.py 21 | ``` 22 | -------------------------------------------------------------------------------- /apex/apex/pyprof/__init__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from . 
import nvtx, prof 4 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.sql 3 | *.dict 4 | *.csv 5 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/apex/README.md: -------------------------------------------------------------------------------- 1 | This directory has examples of how to use `pyprof` with APEX extensions e.g. `fused_adam_cuda` and `fused_layer_norm_cuda`. 2 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/apex/fused_adam.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import fused_adam_cuda 3 | from apex.optimizers import FusedAdam, FP16_Optimizer 4 | from apex import pyprof 5 | 6 | pyprof.nvtx.init() 7 | pyprof.nvtx.wrap(fused_adam_cuda, 'adam') 8 | 9 | model = torch.nn.Linear(10, 20).cuda().half() 10 | criterion = torch.nn.CrossEntropyLoss().cuda() 11 | optimizer = FusedAdam(model.parameters()) 12 | optimizer = FP16_Optimizer(optimizer) 13 | 14 | x = torch.ones(32, 10).cuda().half() 15 | target = torch.empty(32, dtype=torch.long).random_(20).cuda() 16 | y = model(x) 17 | loss = criterion(y, target) 18 | optimizer.zero_grad() 19 | loss.backward() 20 | optimizer.step() 21 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/apex/fused_layer_norm.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import fused_layer_norm_cuda 3 | from apex.normalization import FusedLayerNorm 4 | from apex import pyprof 5 | 6 | pyprof.nvtx.init() 7 | pyprof.nvtx.wrap(fused_layer_norm_cuda, 'forward') 8 | pyprof.nvtx.wrap(fused_layer_norm_cuda, 'backward') 9 | pyprof.nvtx.wrap(fused_layer_norm_cuda, 'forward_affine') 10 | pyprof.nvtx.wrap(fused_layer_norm_cuda, 'backward_affine') 11 | 12 | input = torch.randn(20, 5, 10, 10).cuda() 13 | 14 | # With Learnable Parameters 15 | m = FusedLayerNorm(input.size()[1:]).cuda() 16 | output = m(input) 17 | 18 | # Without Learnable Parameters 19 | m = FusedLayerNorm(input.size()[1:], elementwise_affine=False).cuda() 20 | output = m(input) 21 | 22 | # Normalize over last two dimensions 23 | m = FusedLayerNorm([10, 10]).cuda() 24 | output = m(input) 25 | 26 | # Normalize over last dimension of size 10 27 | m = FusedLayerNorm(10).cuda() 28 | output = m(input) 29 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/apex/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=`realpath $0` 6 | SCRIPTPATH=`dirname $SCRIPT` 7 | PYPROF="$SCRIPTPATH/../.." 
8 | 9 | parse="python $PYPROF/parse/parse.py" 10 | prof="python $PYPROF/prof/prof.py" 11 | 12 | for f in *.py 13 | do 14 | base=`basename $f .py` 15 | sql=$base.sql 16 | dict=$base.dict 17 | 18 | #NVprof 19 | echo "nvprof -fo $sql python $f" 20 | nvprof -fo $sql python $f 21 | 22 | #Parse 23 | echo $parse $sql 24 | $parse $sql > $dict 25 | 26 | #Prof 27 | echo $prof $dict 28 | $prof -w 130 $dict 29 | \rm $sql $dict 30 | done 31 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/custom_func_module/README.md: -------------------------------------------------------------------------------- 1 | This directory has examples which show how to intercept (monkey patch) custom functions and modules with `pyprof`. No changes are required in `pyprof/parse`, however, users can add support for bytes and flops calculation for custom functions and modules in `pyprof/prof` by extending the `OperatorLayerBase` class. 2 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/custom_func_module/custom_function.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | #Initialize pyprof 7 | pyprof.nvtx.init() 8 | 9 | class Foo(torch.autograd.Function): 10 | @staticmethod 11 | def forward(ctx, in1, in2): 12 | out = in1 + in2 #This could be a custom C/C++ function. 13 | return out 14 | 15 | @staticmethod 16 | def backward(ctx, grad): 17 | in1_grad = grad #This could be a custom C/C++ function. 18 | in2_grad = grad #This could be a custom C/C++ function. 19 | return in1_grad, in2_grad 20 | 21 | #Hook the forward and backward functions to pyprof 22 | pyprof.nvtx.wrap(Foo, 'forward') 23 | pyprof.nvtx.wrap(Foo, 'backward') 24 | 25 | foo = Foo.apply 26 | 27 | x = torch.ones(4,4).cuda() 28 | y = torch.ones(4,4).cuda() 29 | 30 | with torch.autograd.profiler.emit_nvtx(): 31 | profiler.start() 32 | z = foo(x,y) 33 | profiler.stop() 34 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/custom_func_module/custom_module.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | pyprof.nvtx.init() 7 | 8 | class Foo(torch.nn.Module): 9 | def __init__(self, size): 10 | super(Foo, self).__init__() 11 | self.n = torch.nn.Parameter(torch.ones(size)) 12 | self.m = torch.nn.Parameter(torch.ones(size)) 13 | 14 | def forward(self, input): 15 | return self.n*input + self.m 16 | 17 | #Hook the forward function to pyprof 18 | pyprof.nvtx.wrap(Foo, 'forward') 19 | 20 | foo = Foo(4) 21 | foo.cuda() 22 | x = torch.ones(4).cuda() 23 | 24 | with torch.autograd.profiler.emit_nvtx(): 25 | profiler.start() 26 | z = foo(x) 27 | profiler.stop() 28 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/custom_func_module/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=`realpath $0` 6 | SCRIPTPATH=`dirname $SCRIPT` 7 | PYPROF="$SCRIPTPATH/../.." 
8 | 9 | parse="python $PYPROF/parse/parse.py" 10 | prof="python $PYPROF/prof/prof.py" 11 | 12 | for f in *.py 13 | do 14 | base=`basename $f .py` 15 | sql=$base.sql 16 | dict=$base.dict 17 | 18 | #NVprof 19 | echo "nvprof -fo $sql python $f" 20 | nvprof -fo $sql python $f 21 | 22 | #Parse 23 | echo $parse $sql 24 | $parse $sql > $dict 25 | 26 | #Prof 27 | echo $prof $dict 28 | $prof -w 130 $dict 29 | \rm $sql $dict 30 | done 31 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/imagenet/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=`realpath $0` 6 | SCRIPTPATH=`dirname $SCRIPT` 7 | PYPROF="$SCRIPTPATH/../.." 8 | 9 | parse="python -m apex.pyprof.parse" 10 | prof="python -m apex.pyprof.prof" 11 | 12 | for net in "resnet50" 13 | do 14 | for optim in adam sgd 15 | do 16 | for batch in 32 64 17 | do 18 | base="torchvision".$net.$optim.$batch 19 | sql=$base.sql 20 | dict=$base.dict 21 | 22 | #NVprof 23 | echo "nvprof -fo $sql --profile-from-start off python imagenet.py -m ${net} -o $optim -b $batch" 24 | nvprof -fo $sql --profile-from-start off python imagenet.py -m ${net} -o $optim -b $batch 25 | 26 | #Parse 27 | echo $parse $sql 28 | $parse $sql > $dict 29 | 30 | #Prof 31 | echo $prof $dict 32 | $prof -w 130 $dict 33 | # \rm $sql $dict 34 | done 35 | done 36 | done 37 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/README.md: -------------------------------------------------------------------------------- 1 | *As of this writing, these examples do not work 2 | because of changes being proposed in PyTorch.* 3 | 4 | There are two ways to use PyTorch JIT 5 | - Scripting 6 | - Tracing 7 | 8 | In addition, we can JIT a 9 | - Stand alone function 10 | - Class / class method 11 | 12 | This directory has an example for each of the 4 cases. 13 | Intercepting (monkey patching) JITted code has a few extra steps, 14 | which are explained through comments. 
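For quick reference, the sketch below condenses those extra steps for the traced-function case; it is distilled from `jit_trace_function.py` in this directory and is subject to the same caveat given at the top of this note.

```python
import torch
import torch.cuda.profiler as profiler
from apex import pyprof

def foo(x, y):
    return torch.sigmoid(x) + y

x = torch.zeros(4, 4).cuda()
y = torch.ones(4, 4).cuda()

# 1. JIT first: tracing returns a ScriptModule with a forward method.
traced_foo = torch.jit.trace(foo, (x, y))

# 2. Initialize pyprof only after the JIT step.
pyprof.nvtx.init()

# 3. Give the JITted object a readable name for the NVTX markers.
traced_foo.__dict__['__name__'] = "foo"

# 4. Hook the forward method up to pyprof.
pyprof.nvtx.wrap(traced_foo, 'forward')

with torch.autograd.profiler.emit_nvtx():
    profiler.start()
    z = traced_foo(x, y)
    profiler.stop()
```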
15 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/jit_script_function.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | 7 | #The following creates an object "foo" of type ScriptModule 8 | #The new object has a function called "forward" 9 | 10 | @torch.jit.script 11 | def foo(x, y): 12 | return torch.sigmoid(x) + y 13 | 14 | #Initialize pyprof after the JIT step 15 | pyprof.nvtx.init() 16 | 17 | #Assign a name to the object "foo" 18 | foo.__name__ = "foo" 19 | 20 | #Hook up the forward function to pyprof 21 | pyprof.nvtx.wrap(foo, 'forward') 22 | 23 | x = torch.zeros(4,4).cuda() 24 | y = torch.ones(4,4).cuda() 25 | 26 | with torch.autograd.profiler.emit_nvtx(): 27 | profiler.start() 28 | z = foo(x, y) 29 | profiler.stop() 30 | print(z) 31 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/jit_script_method.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | 7 | class Foo(torch.jit.ScriptModule): 8 | def __init__(self, size): 9 | super(Foo, self).__init__() 10 | self.n = torch.nn.Parameter(torch.ones(size)) 11 | self.m = torch.nn.Parameter(torch.ones(size)) 12 | 13 | @torch.jit.script_method 14 | def forward(self, input): 15 | return self.n*input + self.m 16 | 17 | #Initialize pyprof after the JIT step 18 | pyprof.nvtx.init() 19 | 20 | #Hook up the forward function to pyprof 21 | pyprof.nvtx.wrap(Foo, 'forward') 22 | 23 | foo = Foo(4) 24 | foo.cuda() 25 | x = torch.ones(4).cuda() 26 | 27 | with torch.autograd.profiler.emit_nvtx(): 28 | profiler.start() 29 | z = foo(x) 30 | profiler.stop() 31 | print(z) 32 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/jit_trace_function.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | 7 | def foo(x, y): 8 | return torch.sigmoid(x) + y 9 | 10 | x = torch.zeros(4,4).cuda() 11 | y = torch.ones(4,4).cuda() 12 | 13 | #JIT the function using tracing 14 | #This returns an object of type ScriptModule with a forward method. 
15 | traced_foo = torch.jit.trace(foo, (x,y)) 16 | 17 | #Initialize pyprof after the JIT step 18 | pyprof.nvtx.init() 19 | 20 | #Assign a name to the object "traced_foo" 21 | traced_foo.__dict__['__name__'] = "foo" 22 | 23 | #Hook up the forward function to pyprof 24 | pyprof.nvtx.wrap(traced_foo, 'forward') 25 | 26 | with torch.autograd.profiler.emit_nvtx(): 27 | profiler.start() 28 | z = traced_foo(x, y) 29 | profiler.stop() 30 | print(z) 31 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/jit_trace_method.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.cuda.profiler as profiler 5 | from apex import pyprof 6 | 7 | class Foo(torch.nn.Module): 8 | def __init__(self, size): 9 | super(Foo, self).__init__() 10 | self.n = torch.nn.Parameter(torch.ones(size)) 11 | self.m = torch.nn.Parameter(torch.ones(size)) 12 | 13 | def forward(self, input): 14 | return self.n*input + self.m 15 | 16 | foo = Foo(4) 17 | foo.cuda() 18 | x = torch.ones(4).cuda() 19 | 20 | #JIT the class using tracing 21 | traced_foo = torch.jit.trace(foo, x) 22 | 23 | #Initialize pyprof after the JIT step 24 | pyprof.nvtx.init() 25 | 26 | #Assign a name to the object "traced_foo" 27 | traced_foo.__dict__['__name__'] = "foo" 28 | 29 | #Hook up the forward function to pyprof 30 | pyprof.nvtx.wrap(traced_foo, 'forward') 31 | 32 | with torch.autograd.profiler.emit_nvtx(): 33 | profiler.start() 34 | z = traced_foo(x) 35 | profiler.stop() 36 | print(z) 37 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/jit/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=`realpath $0` 6 | SCRIPTPATH=`dirname $SCRIPT` 7 | PYPROF="$SCRIPTPATH/../.." 
8 | 9 | parse="python $PYPROF/parse/parse.py" 10 | prof="python $PYPROF/prof/prof.py" 11 | 12 | for f in *.py 13 | do 14 | base=`basename $f .py` 15 | sql=$base.sql 16 | dict=$base.dict 17 | 18 | #NVprof 19 | echo "nvprof -fo $sql python $f" 20 | nvprof -fo $sql python $f 21 | 22 | #Parse 23 | echo $parse $sql 24 | $parse $sql > $dict 25 | 26 | #Prof 27 | echo $prof $dict 28 | $prof -w 130 $dict 29 | \rm $sql $dict 30 | done 31 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/lenet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | import torch.cuda.profiler as profiler 7 | import torch.optim as optim 8 | 9 | from apex import pyprof 10 | pyprof.nvtx.init() 11 | 12 | class LeNet5(nn.Module): 13 | def __init__(self): 14 | super(LeNet5, self).__init__() 15 | # 1 input image channel, 6 output channels, 5x5 square convolution 16 | # kernel 17 | self.conv1 = nn.Conv2d(1, 6, 5) 18 | self.conv2 = nn.Conv2d(6, 16, 5) 19 | # an affine operation: y = Wx + b 20 | self.fc1 = nn.Linear(16 * 5 * 5, 120) 21 | self.fc2 = nn.Linear(120, 84) 22 | self.fc3 = nn.Linear(84, 10) 23 | 24 | def forward(self, x): 25 | # Max pooling over a (2, 2) window 26 | x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) 27 | # If the size is a square you can only specify a single number 28 | x = F.max_pool2d(F.relu(self.conv2(x)), 2) 29 | x = x.view(-1, self.num_flat_features(x)) 30 | x = F.relu(self.fc1(x)) 31 | x = F.relu(self.fc2(x)) 32 | x = self.fc3(x) 33 | return x 34 | 35 | def num_flat_features(self, x): 36 | size = x.size()[1:] # all dimensions except the batch dimension 37 | num_features = 1 38 | for s in size: 39 | num_features *= s 40 | return num_features 41 | 42 | with torch.autograd.profiler.emit_nvtx(): 43 | 44 | net = LeNet5().cuda() 45 | 46 | input = torch.randn(1, 1, 32, 32).cuda() 47 | out = net(input) 48 | 49 | target = torch.randn(10) # a dummy target, for example 50 | target = target.view(1, -1).cuda() # make it the same shape as output 51 | criterion = nn.MSELoss() 52 | 53 | # create your optimizer 54 | optimizer = optim.SGD(net.parameters(), lr=0.01) 55 | 56 | # in your training loop: 57 | optimizer.zero_grad() # zero the gradient buffers 58 | 59 | profiler.start() 60 | output = net(input) 61 | loss = criterion(output, target) 62 | loss.backward() 63 | optimizer.step() # Does the update 64 | profiler.stop() 65 | 66 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/simple.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | This simple file provides an example of how to 5 | - import the pyprof library and initialize it 6 | - use the emit_nvtx context manager 7 | - start and stop the profiler 8 | 9 | Only kernels within profiler.start and profiler.stop calls are profiled. 
10 | To profile 11 | $ nvprof -f -o simple.sql --profile-from-start off ./simple.py 12 | """ 13 | 14 | import sys 15 | import torch 16 | import torch.cuda.profiler as profiler 17 | 18 | #Import and initialize pyprof 19 | from apex import pyprof 20 | pyprof.nvtx.init() 21 | 22 | a = torch.randn(5, 5).cuda() 23 | b = torch.randn(5, 5).cuda() 24 | 25 | #Context manager 26 | with torch.autograd.profiler.emit_nvtx(): 27 | 28 | #Start profiler 29 | profiler.start() 30 | 31 | c = a + b 32 | c = torch.mul(a,b) 33 | c = torch.matmul(a,b) 34 | c = torch.argmax(a, dim=1) 35 | c = torch.nn.functional.pad(a, (1,1)) 36 | 37 | #Stop profiler 38 | profiler.stop() 39 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/user_annotation/README.md: -------------------------------------------------------------------------------- 1 | Nvidia NVTX range markers (https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm) 2 | are a useful tool to capture and observe events and code ranges etc. 3 | Using PyTorch APIs e.g, `torch.cuda.nvtx.range_push("xxx")` and `torch.cuda.nvtx.range_pop()` users can easily add their own NVTX range markers. These markers can then be observed in the Nvidia Visual Profiler (NVVP). 4 | 5 | While inserting NVTX markers (strings), if the users follow a specific string pattern `"layer:your_string_here"` e.g. `"layer:conv1"` or `"layer:encoder_layer_3_self_attention`, then `pyprof` will display the strings `conv1` and `encoder_layer_3_self_attention` next to the associated kernels in the output of `prof.py` when used with the `-c layer` option. 6 | 7 | NVTX range markers can be nested and if users follow the above string pattern, the output of `prof.py` will show all the markers associated with a kernel. 8 | 9 | The file `resnet.py` (a simplified version of the torchvision model) shows an example of how users can add (nested) NVTX markers with information which can greatly aid in understanding and analysis of networks. 10 | 11 | Note that the pattern `"layer:your_string_here"` was chosen to aid information extraction by `pyprof`. The tool will work seamlessly even if there are other markers or no markers at all. 12 | 13 | ### To run 14 | 15 | ```sh 16 | nvprof -fo resnet.sql --profile-from-start off python resnet.py 17 | parse.py resnet.sql > resnet.dict 18 | prof.py --csv -c idx,layer,dir,mod,op,kernel,params,sil resnet.dict 19 | ``` 20 | 21 | The file `resnet.sql` can also be opened with NVVP as usual. 22 | -------------------------------------------------------------------------------- /apex/apex/pyprof/examples/user_annotation/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=`realpath $0` 6 | SCRIPTPATH=`dirname $SCRIPT` 7 | PYPROF="$SCRIPTPATH/../.." 
8 | 9 | parse="python $PYPROF/parse/parse.py" 10 | prof="python $PYPROF/prof/prof.py" 11 | 12 | for f in *.py 13 | do 14 | base=`basename $f .py` 15 | sql=$base.sql 16 | dict=$base.dict 17 | 18 | #NVprof 19 | echo "nvprof -fo --profile-from-start off $sql python $f" 20 | nvprof -fo $sql --profile-from-start off python $f 21 | 22 | #Parse 23 | echo $parse $sql 24 | $parse $sql > $dict 25 | 26 | #Prof 27 | echo $prof $dict 28 | #$prof -w 130 $dict 29 | $prof --csv -c idx,layer,dir,mod,op,kernel,params,sil $dict 30 | \rm $sql $dict 31 | done 32 | -------------------------------------------------------------------------------- /apex/apex/pyprof/nvtx/__init__.py: -------------------------------------------------------------------------------- 1 | from .nvmarker import init 2 | from .nvmarker import add_wrapper as wrap 3 | -------------------------------------------------------------------------------- /apex/apex/pyprof/parse/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/apex/pyprof/parse/__init__.py -------------------------------------------------------------------------------- /apex/apex/pyprof/parse/__main__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | try: 4 | from .parse import main 5 | except ImportError as e: 6 | warnings.warn("Did you make sure to install PyProf dependencies by using the --pyprof flag during Apex installation?)") 7 | raise e 8 | 9 | if __name__ == '__main__': 10 | main() 11 | -------------------------------------------------------------------------------- /apex/apex/pyprof/parse/db.py: -------------------------------------------------------------------------------- 1 | import sys, sqlite3 2 | 3 | class DB(object): 4 | """ 5 | This class provides functions for DB operations 6 | with exception handling. 7 | """ 8 | 9 | def __init__(self, dbFile): 10 | try: 11 | conn = sqlite3.connect(dbFile) 12 | conn.row_factory = sqlite3.Row 13 | c = conn.cursor() 14 | except: 15 | print("Error opening {}".format(dbFile)) 16 | sys.exit(1) 17 | 18 | self.conn = conn 19 | self.c = c 20 | 21 | def select(self, cmd): 22 | try: 23 | self.c.execute(cmd) 24 | #rows = self.c.fetchall() 25 | rows = [dict(row) for row in self.c.fetchall()] 26 | except sqlite3.Error as e: 27 | print(e) 28 | sys.exit(1) 29 | except: 30 | print("Uncaught error in SQLite access while executing {}".format(cmd)) 31 | sys.exit(1) 32 | 33 | #print(rows) 34 | return rows 35 | 36 | def insert(self, cmd, data): 37 | try: 38 | self.c.execute(cmd, data) 39 | except sqlite3.Error as e: 40 | print(e) 41 | sys.exit(1) 42 | except: 43 | print("Uncaught error in SQLite access while executing {}".format(cmd)) 44 | sys.exit(1) 45 | 46 | def execute(self, cmd): 47 | try: 48 | self.c.execute(cmd) 49 | except sqlite3.Error as e: 50 | print(e) 51 | sys.exit(1) 52 | except: 53 | print("Uncaught error in SQLite access while executing {}".format(cmd)) 54 | sys.exit(1) 55 | 56 | def commit(self): 57 | self.conn.commit() 58 | 59 | def close(self): 60 | self.c.close() 61 | self.conn.close() 62 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import data, prof 2 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/__main__.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | try: 4 | from .prof import main 5 | except ImportError as e: 6 | warnings.warn("Did you make sure to install PyProf dependencies by using the --pyprof flag during Apex installation?") 7 | raise e 8 | 9 | if __name__ == '__main__': 10 | main() 11 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/activation.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class Activation(OperatorLayerBase): 6 | """ 7 | This class handles the various activation functions. 8 | """ 9 | 10 | ops = ["celu", "elu", "elu_", "hardshrink", "hardtanh", "hardtanh_", "leaky_relu", "leaky_relu_", "logsigmoid", "prelu", "relu", "relu_", "relu6", "rrelu", "rrelu_", "selu", "sigmoid", "softplus", "softshrink", "softsign", "tanh", "tanhshrink", "threshold", "threshold_"] 11 | 12 | def __init__(self, d): 13 | marker = eval(d.argMarker[0]) 14 | mod = marker['mod'] 15 | op = marker['op'] 16 | args = marker['args'] 17 | 18 | self.marker = marker 19 | self.mod_ = mod 20 | self.op_ = op 21 | self.args = args 22 | 23 | assert (mod in ["torch.nn.functional", "torch", "Tensor"]) 24 | 25 | #Filter out named parameters 26 | args = list(filter(lambda x : x['name'] == '', args)) 27 | 28 | assert (len(args) >= 1) 29 | arg = args[0] 30 | assert (arg['type'] == "tensor") 31 | 32 | self.i = arg 33 | self.dir = d.dir 34 | 35 | def params(self): 36 | p = OrderedDict([('T', self.i['shape']),('type', self.i['dtype'])]) 37 | return p 38 | 39 | def flops(self): 40 | direction = self.dir 41 | tensor = self.i['shape'] 42 | t = self.i['dtype'] 43 | 44 | # TODO: revise 45 | elems = Utility.numElems(tensor) 46 | return elems 47 | 48 | def bytes(self): 49 | direction = self.dir 50 | tensor = self.i['shape'] 51 | t = self.i['dtype'] 52 | 53 | elems = Utility.numElems(tensor) 54 | elems = elems * (2 if direction == "fprop" else 3) 55 | 56 | return elems * Utility.typeToBytes(t) 57 | 58 | def tc(self): 59 | return "-" 60 | 61 | def op(self): 62 | return self.op_ 63 | 64 | def mod(self): 65 | return self.mod_ 66 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | class OperatorLayerBase(ABC): 4 | """ 5 | Base class for all layers and operators. 6 | Every derived class should have the following functions. 7 | """ 8 | 9 | @abstractmethod 10 | def tc(self): 11 | """ 12 | Tensor core usage by the kernel. 13 | Return "1" (yes), "0" (no, but possible), "-" (not applicable) 14 | """ 15 | pass 16 | 17 | @abstractmethod 18 | def params(self): 19 | """ 20 | Kernel parameters to be printed. 21 | """ 22 | pass 23 | 24 | @abstractmethod 25 | def flops(self): 26 | """ 27 | Note that 1 FMA = 2 flops. 28 | """ 29 | pass 30 | 31 | @abstractmethod 32 | def bytes(self): 33 | pass 34 | 35 | @abstractmethod 36 | def mod(self): 37 | """ 38 | Name of the module/class e.g. torch.nn.functional. 39 | """ 40 | pass 41 | 42 | @abstractmethod 43 | def op(self): 44 | """ 45 | Name of the operator e.g. sigmoid. 
46 | """ 47 | pass 48 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/convert.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class Convert(OperatorLayerBase): 6 | """ 7 | Class to handle convert operations. 8 | """ 9 | ops = ["byte", "char", "double", "float", "half", "int", "long", "short", "to"] 10 | 11 | def __init__(self, d): 12 | marker = eval(d.argMarker[0]) 13 | mod = marker['mod'] 14 | op = marker['op'] 15 | args = marker['args'] 16 | 17 | self.marker = marker 18 | self.mod_ = mod 19 | self.op_ = op 20 | self.args = args 21 | 22 | assert (mod == "Tensor") 23 | assert (op in Convert.ops) 24 | assert (len(args) == 1) 25 | 26 | #The argument could be a tensor or scalar 27 | t = args[0] 28 | if t['type'] == "tensor": 29 | shape = t['shape'] 30 | stype = t['dtype'] 31 | else: 32 | shape = (1,) 33 | stype = t['type'] 34 | if self.op_ == "to": 35 | op = stype 36 | 37 | self.shape = shape 38 | self.stype = stype 39 | self.dtype = op 40 | 41 | def params(self): 42 | p = OrderedDict([('T', self.shape), ('stype', self.stype), ('dtype', self.dtype)]) 43 | return p 44 | 45 | def op(self): 46 | return self.op_ 47 | 48 | def mod(self): 49 | return self.mod_ 50 | 51 | def tc(self): 52 | return "-" 53 | 54 | def elems(self): 55 | return Utility.numElems(self.shape) 56 | 57 | def flops(self): 58 | return 0 59 | 60 | def bytes(self): 61 | b = self.elems() * (Utility.typeToBytes(self.stype) + Utility.typeToBytes(self.dtype)) 62 | return b 63 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/data.py: -------------------------------------------------------------------------------- 1 | from .utility import Utility 2 | 3 | class Data(object): 4 | """ 5 | Class to store all the data for every kernel e.g. name, bytes, flops, device, stream etc. 
6 | """ 7 | def __init__(self, kernel): 8 | #Available from NVprof 9 | self.tid = kernel['tid'] 10 | self.device = kernel['device'] 11 | self.stream = kernel['stream'] 12 | self.grid = str(kernel['grid']).replace(" ","").replace("(","").replace(")","") 13 | self.block = str(kernel['block']).replace(" ","").replace("(","").replace(")","") 14 | self.name = kernel['kShortName'].replace(" ","_") 15 | self.lName = kernel['kLongName'] 16 | self.sil = kernel['kDuration'] #units ns 17 | 18 | self.index = None 19 | 20 | #Markers 21 | self.argMarker = kernel['marker'] 22 | self.modMarker = kernel['reprMarkers'] 23 | self.seqMarker = kernel['seqMarker'] 24 | 25 | self.layer = kernel['layer'] 26 | self.trace = kernel['trace'] 27 | 28 | self.seqId = kernel['seqId'] 29 | self.altSeqId = kernel['altSeqId'] 30 | 31 | self.dir = kernel['dir'] 32 | self.sub = kernel['subSeqId'] 33 | 34 | self.mod = "na" 35 | self.op = "na" 36 | self.params = {"na":"na"} 37 | self.tc = "na" 38 | self.flops = 0 39 | self.bytes = 0 40 | 41 | def setParams(self, params): 42 | #Remove space from params 43 | qaz = "" 44 | for key,value in params.items(): 45 | if "type" not in key: 46 | qaz += "{}={},".format(key,value) 47 | else: 48 | if type(value) is str: 49 | qaz += "{},".format(Utility.typeToString(value)) 50 | else: 51 | qaz += "{}".format(value) 52 | 53 | self.params = qaz.replace(" ", "") 54 | 55 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/dropout.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class Dropout(OperatorLayerBase): 6 | 7 | def __init__(self, d): 8 | marker = eval(d.argMarker[0]) 9 | mod = marker['mod'] 10 | op = marker['op'] 11 | args = marker['args'] 12 | 13 | self.marker = marker 14 | self.mod_ = mod 15 | self.op_ = op 16 | self.args = args 17 | 18 | assert (mod == "torch.nn.functional") 19 | assert (op == "dropout") 20 | #assert (len(args) == 1) 21 | 22 | self.shape = args[0]['shape'] 23 | self.type = args[0]['dtype'] 24 | self.dir = d.dir 25 | 26 | return 27 | 28 | def params(self): 29 | p = OrderedDict([('T', self.shape), ('type', self.type)]) 30 | return p 31 | 32 | def op(self): 33 | return self.op_ 34 | 35 | def mod(self): 36 | return self.mod_ 37 | 38 | def tc(self): 39 | return "-" 40 | 41 | def elems(self): 42 | return Utility.numElems(self.shape) 43 | 44 | def bytes(self): 45 | #Ignoring the cost of writing and reading the mask 46 | return Utility.typeToBytes(self.type) * self.elems() * 2 47 | 48 | def flops(self): 49 | # Note: This is approximate and depends on the RNG 50 | return 5*self.elems() 51 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/embedding.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class Embedding(OperatorLayerBase): 6 | 7 | def __init__(self, d): 8 | marker = eval(d.argMarker[0]) 9 | mod = marker['mod'] 10 | op = marker['op'] 11 | args = marker['args'] 12 | 13 | self.marker = marker 14 | self.mod_ = mod 15 | self.op_ = op 16 | self.args = args 17 | 18 | assert (mod == "torch.nn.functional") 19 | assert (op == "embedding") 20 | 21 | self.ishape = args[0]['shape'] 22 | self.itype = args[0]['dtype'] 23 | 24 | self.eshape = args[1]['shape'] 25 | 
self.etype = args[1]['dtype'] 26 | 27 | assert (len(self.eshape) == 2) 28 | 29 | self.dir = d.dir 30 | self.sub = d.sub 31 | return 32 | 33 | def params(self): 34 | p = OrderedDict([('I', self.ishape), ('itype', self.itype), ('E', self.eshape), ('etype', self.etype)]) 35 | return p 36 | 37 | def op(self): 38 | return self.op_ 39 | 40 | def mod(self): 41 | return self.mod_ 42 | 43 | def tc(self): 44 | return "-" 45 | 46 | def bytes(self): 47 | ishape = self.ishape 48 | itype = self.itype 49 | eshape = self.eshape 50 | etype = self.etype 51 | 52 | ielems = Utility.numElems(ishape) 53 | 54 | b = 0 55 | if self.dir == "fprop": 56 | #indices 57 | b += ielems * Utility.typeToBytes(itype) 58 | #read and write the embedding matrix 59 | b += ielems * eshape[1] * 2 * Utility.typeToBytes(etype) 60 | else: 61 | #3 times the size of the incoming gradient 62 | b = ielems * eshape[1] * 3 * Utility.typeToBytes(etype) 63 | 64 | if self.sub > 0: 65 | b = 0 66 | 67 | return b 68 | 69 | def flops(self): 70 | # Note: not implemented yet 71 | return 0 72 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/loss.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | #TODO: Add support for additional loss functions. 6 | 7 | class MSELoss(OperatorLayerBase): 8 | 9 | def __init__(self, d): 10 | marker = eval(d.argMarker[0]) 11 | mod = marker['mod'] 12 | op = marker['op'] 13 | args = marker['args'] 14 | 15 | self.marker = marker 16 | self.mod_ = mod 17 | self.op_ = op 18 | self.args = args 19 | 20 | assert (mod == "torch.nn.functional") 21 | assert (op == "mse_loss") 22 | assert (len(args) == 3) 23 | 24 | #Get input, target and reduction 25 | if (args[0]['name'] == ""): 26 | x = args[0] 27 | else: 28 | x = list(filter(lambda x : x['name'] == "input", args))[0] 29 | 30 | if (args[1]['name'] == ""): 31 | y = args[1] 32 | else: 33 | y = list(filter(lambda x : x['name'] == "target", args))[0] 34 | 35 | if (args[2]['name'] == ""): 36 | r = args[2] 37 | else: 38 | r = list(filter(lambda x : x['name'] == "reduction", args))[0] 39 | 40 | assert (x['type'] == y['type'] == "tensor") 41 | assert (x['shape'] == y['shape']) 42 | assert (x['dtype'] == y['dtype']) 43 | assert (r['type'] == "str") 44 | assert (r['value'] in ["none", "mean", "sum"]) 45 | 46 | self.shape = x['shape'] 47 | self.type = x['dtype'] 48 | self.red = r['value'] 49 | self.dir = d.dir 50 | 51 | def params(self): 52 | p = OrderedDict([('T', self.shape), ('type', self.type), ('red', self.red)]) 53 | return p 54 | 55 | def elems(self): 56 | red = self.red 57 | e = Utility.numElems(self.shape) 58 | 59 | if self.dir == "fprop": 60 | if red == "none": 61 | e *= 3 62 | else: 63 | e *= 2 64 | else: 65 | if red == "none": 66 | e *= 4 67 | else: 68 | e *= 3 69 | return e 70 | 71 | def bytes(self): 72 | return self.elems() * Utility.typeToBytes(self.type) 73 | 74 | def flops(self): 75 | return self.elems() * 2 + 1 76 | 77 | def tc(self): 78 | return "-" 79 | 80 | def op(self): 81 | return self.op_ 82 | 83 | def mod(self): 84 | return self.mod_ 85 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/normalization.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | 
class BatchNorm(OperatorLayerBase): 6 | 7 | def __init__(self, d): 8 | marker = eval(d.argMarker[0]) 9 | mod = marker['mod'] 10 | op = marker['op'] 11 | args = marker['args'] 12 | 13 | self.marker = marker 14 | self.mod_ = mod 15 | self.op_ = op 16 | self.args = args 17 | 18 | assert (op == "batch_norm") 19 | assert (len(args) == 8) 20 | i = args[0] 21 | assert (i['type'] == "tensor") 22 | 23 | self.shape = i['shape'] 24 | self.type = i['dtype'] 25 | self.dir = d.dir 26 | 27 | def params(self): 28 | p = OrderedDict([('T', self.shape), ('type', self.type)]) 29 | return p 30 | 31 | def tc(self): 32 | return "-" 33 | 34 | def op(self): 35 | return self.op_ 36 | 37 | def mod(self): 38 | return self.mod_ 39 | 40 | def elems(self): 41 | return Utility.numElems(self.shape) 42 | 43 | def flops(self): 44 | # Variance algo-dependent, but this is a reasonable value. 45 | return self.elems() * 8 46 | 47 | def bytes(self): 48 | e = self.elems() 49 | if self.dir == "fprop": 50 | e *= 4 51 | else: 52 | e *= 5 53 | 54 | return e * Utility.typeToBytes(self.type) 55 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/optim.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | #TODO: Add support for other optimizers. 6 | 7 | class Adam(OperatorLayerBase): 8 | 9 | def __init__(self, d): 10 | marker = eval(d.argMarker[0]) 11 | mod = marker['mod'] 12 | op = marker['op'] 13 | args = marker['args'] 14 | 15 | self.marker = marker 16 | self.mod_ = mod 17 | self.op_ = op 18 | self.args = args 19 | 20 | assert(op == "adam") 21 | assert (len(args) == 12) or (len(args) == 14) 22 | w, hw, m, v, g = args[0:5] 23 | assert (w['shape'] == m['shape'] == v['shape'] == g['shape']) 24 | assert (hw['shape'] == w['shape']) or (hw['shape'] == (0,)) #hw could be null 25 | assert (w['type'] == m['type'] == v['type'] == g['type'] == hw['type'] == "tensor") 26 | assert (w['dtype'] == m['dtype'] == v['dtype'] == "float32") 27 | 28 | self.w = w 29 | self.g = g 30 | 31 | def params(self): 32 | p = OrderedDict([('T',self.w['shape']), ('wtype',self.w['dtype']), ('gtype',self.g['dtype'])]) 33 | return p 34 | 35 | def flops(self): 36 | return 0 37 | 38 | def bytes(self): 39 | wshape = self.w['shape'] 40 | wtype = self.w['dtype'] 41 | gtype = self.g['dtype'] 42 | b = 0 43 | 44 | elems = Utility.numElems(wshape) 45 | 46 | #Get time to stream read/write w, m, v 47 | b += 6 * elems * Utility.typeToBytes(wtype) 48 | 49 | #Get time to read "g" 50 | b += elems * Utility.typeToBytes(gtype) 51 | 52 | if wtype != gtype: #mixed precision 53 | #Get time to write "hw 54 | b += elems * Utility.typeToBytes(gtype) 55 | 56 | return b 57 | 58 | def tc(self): 59 | return "-" 60 | 61 | def op(self): 62 | return self.op_ 63 | 64 | def mod(self): 65 | return self.mod_ 66 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/pooling.py: -------------------------------------------------------------------------------- 1 | from .collections import OrderedDict 2 | from .utility import Utility 3 | 4 | # Work in progress. 
5 | 6 | #poolFuncs = ["max_pool2d_with_indices_forward", "max_pool2d_with_indices"] 7 | class MaxPool2d(object): 8 | 9 | def parse(marker): 10 | 11 | def convert2Tuple(arg): 12 | assert (arg['type'] in ["int", "tuple"]) 13 | if arg['type'] == "int": 14 | return (arg['value'], arg['value']) 15 | else: 16 | return arg['value'] 17 | 18 | mod = marker['mod'] 19 | op = marker['op'] 20 | args = marker['args'] 21 | assert (mod == "torch.nn.functional") 22 | assert (op == "max_pool2d") 23 | assert (len(args) >= 2) 24 | 25 | #input 26 | assert (args[0]['name'] == "") 27 | inp = args[0] 28 | assert (inp['type'] == "tensor") 29 | i = inp['shape'] 30 | t = inp['dtype'] 31 | assert (len(i) == 4) #nchw tensor 32 | 33 | #kernel 34 | if (args[1]['name'] == ""): 35 | k = args[1] 36 | else: 37 | k = list(filter(lambda x : x['name'] == "kernel_size", args))[0] 38 | k = convert2Tuple(k) 39 | 40 | #stride 41 | s = k #default value 42 | if ((len(args) >= 3) and args[2] == ""): 43 | s = args[2] 44 | s = convert2Tuple(s) 45 | elif any(x['name'] == "stride" for x in args): 46 | s = list(filter(lambda x : x['name'] == "stride", args))[0] 47 | s = convert2Tuple(s) 48 | 49 | #padding 50 | p = (0,0) 51 | if ((len(args) >= 4) and args[3] == ""): 52 | p = args[3] 53 | p = convert2Tuple(p) 54 | elif any(x['name'] == "padding" for x in args): 55 | p = list(filter(lambda x : x['name'] == "padding", args))[0] 56 | p = convert2Tuple(p) 57 | 58 | params = OrderedDict([('T', i), ('K', k), ('s',s), ('p',p), ('type', t)]) 59 | return params 60 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/randomSample.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class RandPerm(OperatorLayerBase): 6 | 7 | def __init__(self, d): 8 | marker = eval(d.argMarker[0]) 9 | mod = marker['mod'] 10 | op = marker['op'] 11 | args = marker['args'] 12 | 13 | self.marker = marker 14 | self.mod_ = mod 15 | self.op_ = op 16 | self.args = args 17 | 18 | assert (mod == "torch") 19 | assert (op == "randperm") 20 | assert (len(args) == 1) 21 | n = args[0] 22 | assert n['type'] == "int" 23 | self.n = n['value'] 24 | 25 | def params(self): 26 | p = OrderedDict([('N', self.n)]) 27 | return p 28 | 29 | def tc(self): 30 | return "-" 31 | 32 | def op(self): 33 | return self.op_ 34 | 35 | def mod(self): 36 | return self.mod_ 37 | 38 | def bytes(self): 39 | return self.n * Utility.typeToBytes("int64") 40 | 41 | def flops(self): 42 | # Depends on RNG but this is probably a reasonable assumption. 
43 | return self.n * 3 44 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/softmax.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from .utility import Utility 3 | from .base import OperatorLayerBase 4 | 5 | class Softmax(OperatorLayerBase): 6 | 7 | def __init__(self, d): 8 | marker = eval(d.argMarker[0]) 9 | mod = marker['mod'] 10 | op = marker['op'] 11 | args = marker['args'] 12 | 13 | self.marker = marker 14 | self.mod_ = mod 15 | self.op_ = op 16 | self.args = args 17 | 18 | assert (mod == "torch.nn.functional") 19 | assert (op == "softmax") 20 | 21 | #Filter out named parameters 22 | args = list(filter(lambda x : x['name'] == '', args)) 23 | 24 | assert (len(args) <= 2) 25 | self.shape = args[0]['shape'] 26 | self.type = args[0]['dtype'] 27 | self.dir = d.dir 28 | 29 | return 30 | 31 | def op(self): 32 | return self.op_ 33 | 34 | def mod(self): 35 | return self.mod_ 36 | 37 | def tc(self): 38 | return "-" 39 | 40 | def params(self): 41 | p = OrderedDict([('T', self.shape), ('type', self.type)]) 42 | return p 43 | 44 | def elems(self): 45 | return Utility.numElems(self.shape) 46 | 47 | def flops(self): 48 | # Note: exp, sum-reduce, divide 49 | #flops = elems * 3 50 | return 0 51 | 52 | def bytes(self): 53 | b = self.elems() * Utility.typeToBytes(self.type) 54 | b *= 3 if self.dir == "fprop" else 5 #verify 55 | return b 56 | 57 | class LogSoftmax(OperatorLayerBase): 58 | 59 | def __init__(self, d): 60 | marker = eval(d.argMarker[0]) 61 | mod = marker['mod'] 62 | op = marker['op'] 63 | args = marker['args'] 64 | 65 | self.marker = marker 66 | self.mod_ = mod 67 | self.op_ = op 68 | self.args = args 69 | 70 | assert (mod == "torch.nn.functional") 71 | assert (op == "log_softmax") 72 | 73 | #Filter out named parameters 74 | args = list(filter(lambda x : x['name'] == '', args)) 75 | 76 | assert (len(args) <= 2) 77 | 78 | #Get input 79 | if (args[0]['name'] == ""): 80 | i = args[0] 81 | else: 82 | i = list(filter(lambda x : x['name'] == "input", args))[0] 83 | 84 | t = i['dtype'] 85 | 86 | self.shape = i['shape'] 87 | self.type = i['dtype'] 88 | self.dir = d.dir 89 | return 90 | 91 | def op(self): 92 | return self.op_ 93 | 94 | def mod(self): 95 | return self.mod_ 96 | 97 | def tc(self): 98 | return "-" 99 | 100 | def params(self): 101 | p = OrderedDict([('T', self.shape), ('type', self.type)]) 102 | return p 103 | 104 | def elems(self): 105 | return Utility.numElems(self.shape) 106 | 107 | def flops(self): 108 | # Note: exp, sum-reduce, divide, log 109 | #flops = elems * 4 110 | return 0 111 | 112 | def bytes(self): 113 | b = self.elems() * Utility.typeToBytes(self.type) 114 | b *= 3 if self.dir == "fprop" else 5 #verify 115 | return b 116 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/usage.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | 4 | def parseArgs(): 5 | """ 6 | Print usage and parse arguments. 7 | """ 8 | 9 | def check_cols(value): 10 | valid = ["idx", "seq", "altseq", "tid", "layer", "trace", "dir", "sub", "mod", "op", "kernel", "params", "sil", "tc", "device", "stream", "grid", "block", "flops", "bytes"] 11 | cols = value.split(",") 12 | for col in cols: 13 | if col not in valid: 14 | raise argparse.ArgumentTypeError("{} is not a valid column name. 
Valid column names are {}.".format(col, ",".join(valid))) 15 | return cols 16 | 17 | def openFile(f): 18 | try: 19 | d = open(f, "r") 20 | return d 21 | except IOError: 22 | print("Error opening file {}. Exiting.".format(f), file=sys.stderr) 23 | sys.exit(1) 24 | 25 | parser = argparse.ArgumentParser(prog=sys.argv[0], description="PyTorch Profiler", formatter_class=argparse.RawTextHelpFormatter) 26 | parser.add_argument("file", 27 | nargs='?', 28 | type=str, 29 | default=None, 30 | help="Output of parse.py (Python dictionary).") 31 | 32 | parser.add_argument("-c", 33 | type=check_cols, 34 | default="idx,dir,sub,mod,op,kernel,params,sil", 35 | help='''Comma seperated names of columns to print. 36 | idx: Index 37 | seq: PyTorch Sequence Id 38 | altseq: PyTorch Alternate Sequence Id 39 | tid: Thread Id 40 | layer: User annotated NVTX string (can be nested) 41 | trace: Function Call Trace 42 | dir: Direction 43 | sub: Sub Sequence Id 44 | mod: Module 45 | op: Operattion 46 | kernel: Kernel Name 47 | params: Parameters 48 | sil: Silicon Time (in ns) 49 | tc: Tensor Core Usage 50 | device: GPU Device Id 51 | stream: Stream Id 52 | grid: Grid Dimensions 53 | block: Block Dimensions 54 | flops: Floating point ops (FMA = 2 FLOPs) 55 | bytes: Number of bytes in and out of DRAM 56 | e.g. -c idx,kernel,sil''') 57 | 58 | group = parser.add_mutually_exclusive_group() 59 | group.add_argument("--csv", 60 | action="store_true", 61 | default=False, 62 | help="Print a CSV output.") 63 | group.add_argument("-w", 64 | type=int, 65 | default=0, 66 | help="Width of columnated output.") 67 | 68 | args = parser.parse_args() 69 | if args.file is None: 70 | args.file = sys.stdin 71 | else: 72 | args.file = openFile(args.file) 73 | return args 74 | -------------------------------------------------------------------------------- /apex/apex/pyprof/prof/utility.py: -------------------------------------------------------------------------------- 1 | from functools import reduce 2 | 3 | class Utility(object): 4 | 5 | @staticmethod 6 | def numElems(shape): 7 | assert (type(shape) == tuple) 8 | return reduce(lambda x,y: x*y, shape, 1) 9 | 10 | @staticmethod 11 | def typeToBytes(t): 12 | if (t in ["uint8", "int8", "byte", "char", "bool"]): 13 | return 1 14 | elif (t in ["float16", "half", "int16", "short"]): 15 | return 2 16 | elif (t in ["float32", "float", "int32", "int"]): 17 | return 4 18 | elif (t in ["int64", "long", "float64", "double"]): 19 | return 8 20 | assert False 21 | 22 | @staticmethod 23 | def typeToString(t): 24 | if (t in ["uint8", "byte", "char",]): 25 | return "uint8" 26 | elif (t in ["int8",]): 27 | return "int8" 28 | elif (t in ["int16", "short",]): 29 | return "int16" 30 | elif (t in ["float16", "half"]): 31 | return "fp16" 32 | elif (t in ["float32", "float"]): 33 | return "fp32" 34 | elif (t in ["int32", "int",]): 35 | return "int32" 36 | elif (t in ["int64", "long"]): 37 | return "int64" 38 | elif (t in ["float64", "double",]): 39 | return "fp64" 40 | elif (t in ["bool",]): 41 | return "bool" 42 | assert False 43 | 44 | @staticmethod 45 | def hasNVTX(marker): 46 | if type(marker) is str: 47 | try: 48 | marker = eval(marker) 49 | except: 50 | return False 51 | 52 | if type(marker) is dict: 53 | keys = marker.keys() 54 | return ("mod" in keys) and ("op" in keys) and ("args" in keys) 55 | else: 56 | return False 57 | 58 | @staticmethod 59 | def isscalar(t): 60 | return (t in ["float", "int"]) 61 | -------------------------------------------------------------------------------- 
/apex/apex/reparameterization/README.md: -------------------------------------------------------------------------------- 1 | Under construction... 2 | -------------------------------------------------------------------------------- /apex/csrc/compat.h: -------------------------------------------------------------------------------- 1 | #ifndef TORCH_CHECK 2 | #define TORCH_CHECK AT_CHECK 3 | #endif 4 | 5 | #ifdef VERSION_GE_1_3 6 | #define DATA_PTR data_ptr 7 | #else 8 | #define DATA_PTR data 9 | #endif 10 | -------------------------------------------------------------------------------- /apex/csrc/flatten_unflatten.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | // https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h 4 | 5 | at::Tensor flatten(std::vector tensors) 6 | { 7 | return torch::utils::flatten_dense_tensors(tensors); 8 | } 9 | 10 | std::vector unflatten(at::Tensor flat, std::vector tensors) 11 | { 12 | return torch::utils::unflatten_dense_tensors(flat, tensors); 13 | } 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("flatten", &flatten, "Flatten dense tensors"); 17 | m.def("unflatten", &unflatten, "Unflatten dense tensors"); 18 | } 19 | -------------------------------------------------------------------------------- /apex/docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = NVIDIAAPEX 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | gh-pages: 16 | git checkout gh-pages 17 | rm -rf build 18 | rm -rf source 19 | git checkout master -- . 20 | make html 21 | rm -rf ../_modules ../_sources ../_static 22 | mv -fv build/html/* ../ 23 | rm -rf build 24 | git add -A 25 | git commit -m "Generated gh-pages for `git log master -1 --pretty=short --abbrev-commit`" && git push origin gh-pages ; git checkout master 26 | 27 | .PHONY: help Makefile 28 | 29 | # Catch-all target: route all unknown targets to Sphinx using the new 30 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 31 | %: Makefile 32 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 33 | -------------------------------------------------------------------------------- /apex/docs/source/_static/img/nv-pytorch2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/docs/source/_static/img/nv-pytorch2.png -------------------------------------------------------------------------------- /apex/docs/source/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | {% block sidebartitle %} {{ super() }} 3 | 4 | 32 | {% endblock %} 33 | 34 | {% block footer %} {{ super() }} 35 | 36 | 51 | {% endblock %} 52 | -------------------------------------------------------------------------------- /apex/docs/source/fp16_utils.rst: -------------------------------------------------------------------------------- 1 | .. 
role:: hidden 2 | :class: hidden-section 3 | 4 | apex.fp16_utils 5 | =================================== 6 | 7 | This submodule contains utilities designed to streamline the mixed precision training recipe 8 | presented by NVIDIA `on Parallel Forall`_ and in GTC 2018 Sessions 9 | `Training Neural Networks with Mixed Precision: Theory and Practice`_ and 10 | `Training Neural Networks with Mixed Precision: Real Examples`_. 11 | For Pytorch users, Real Examples in particular is recommended. 12 | 13 | Full runnable Python scripts demonstrating ``apex.fp16_utils`` 14 | can be found on the Github page: 15 | 16 | | `Simple FP16_Optimizer demos`_ 17 | | 18 | | `Distributed Mixed Precision Training with imagenet`_ 19 | | 20 | | `Mixed Precision Training with word_language_model`_ 21 | | 22 | | 23 | 24 | .. _`on Parallel Forall`: 25 | https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/ 26 | .. _`Training Neural Networks with Mixed Precision: Theory and Practice`: 27 | http://on-demand.gputechconf.com/gtc/2018/video/S8923/ 28 | .. _`Training Neural Networks with Mixed Precision: Real Examples`: 29 | http://on-demand.gputechconf.com/gtc/2018/video/S81012/ 30 | .. _`Simple FP16_Optimizer demos`: 31 | https://github.com/NVIDIA/apex/tree/master/examples/FP16_Optimizer_simple 32 | .. _`Distributed Mixed Precision Training with imagenet`: 33 | https://github.com/NVIDIA/apex/tree/master/examples/imagenet 34 | .. _`Mixed Precision Training with word_language_model`: 35 | https://github.com/NVIDIA/apex/tree/master/examples/word_language_model 36 | 37 | .. automodule:: apex.fp16_utils 38 | .. currentmodule:: apex.fp16_utils 39 | 40 | Automatic management of master params + loss scaling 41 | ---------------------------------------------------- 42 | 43 | .. autoclass:: FP16_Optimizer 44 | :members: 45 | 46 | .. autoclass:: LossScaler 47 | :members: 48 | 49 | .. autoclass:: DynamicLossScaler 50 | :members: 51 | 52 | Manual master parameter management 53 | ---------------------------------- 54 | 55 | .. autofunction:: prep_param_lists 56 | 57 | .. autofunction:: master_params_to_model_params 58 | 59 | .. autofunction:: model_grads_to_master_grads 60 | -------------------------------------------------------------------------------- /apex/docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. PyTorch documentation master file, created by 2 | sphinx-quickstart on Fri Dec 23 13:31:47 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | :github_url: https://github.com/nvidia/apex 7 | 8 | Apex (A PyTorch Extension) 9 | =================================== 10 | 11 | This site contains the API documentation for Apex (https://github.com/nvidia/apex), 12 | a Pytorch extension with NVIDIA-maintained utilities to streamline mixed precision and distributed training. Some of the code here will be included in upstream Pytorch eventually. The intention of Apex is to make up-to-date utilities available to users as quickly as possible. 13 | 14 | Installation instructions can be found here: https://github.com/NVIDIA/apex#quick-start. 15 | 16 | Some other useful material, including GTC 2019 and Pytorch DevCon 2019 Slides, can be found here: https://github.com/mcarilli/mixed_precision_references. 17 | 18 | .. toctree:: 19 | :maxdepth: 1 20 | :caption: AMP: Automatic Mixed Precision 21 | 22 | amp 23 | 24 | .. 
toctree:: 25 | :maxdepth: 1 26 | :caption: Distributed Training 27 | 28 | parallel 29 | 30 | .. toctree:: 31 | :maxdepth: 1 32 | :caption: Fused Optimizers 33 | 34 | optimizers 35 | 36 | .. toctree:: 37 | :maxdepth: 1 38 | :caption: Fused Layer Norm 39 | 40 | layernorm 41 | 42 | .. .. toctree:: 43 | :maxdepth: 1 44 | :caption: Deprecated mixed precision API 45 | fp16_util 46 | 47 | .. reparameterization 48 | .. RNN 49 | 50 | Indices and tables 51 | ================== 52 | 53 | * :ref:`genindex` 54 | * :ref:`modindex` 55 | -------------------------------------------------------------------------------- /apex/docs/source/layernorm.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.normalization.fused_layer_norm 5 | =================================== 6 | 7 | .. automodule:: apex.normalization 8 | .. currentmodule:: apex.normalization 9 | 10 | .. FusedAdam 11 | ---------- 12 | 13 | .. autoclass:: FusedLayerNorm 14 | :members: 15 | -------------------------------------------------------------------------------- /apex/docs/source/optimizers.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.optimizers 5 | =================================== 6 | 7 | .. automodule:: apex.optimizers 8 | .. currentmodule:: apex.optimizers 9 | 10 | .. FusedAdam 11 | ---------- 12 | 13 | .. autoclass:: FusedAdam 14 | :members: 15 | 16 | .. autoclass:: FusedLAMB 17 | :members: 18 | 19 | .. autoclass:: FusedNovoGrad 20 | :members: 21 | 22 | .. autoclass:: FusedSGD 23 | :members: 24 | -------------------------------------------------------------------------------- /apex/docs/source/parallel.rst: -------------------------------------------------------------------------------- 1 | .. role:: hidden 2 | :class: hidden-section 3 | 4 | apex.parallel 5 | =================================== 6 | 7 | .. automodule:: apex.parallel 8 | .. currentmodule:: apex.parallel 9 | 10 | .. DistributedDataParallel 11 | ---------- 12 | 13 | .. autoclass:: DistributedDataParallel 14 | :members: 15 | 16 | .. autoclass:: Reducer 17 | :members: 18 | 19 | .. autoclass:: SyncBatchNorm 20 | :members: 21 | 22 | Utility functions 23 | ---------------------------------- 24 | 25 | .. autofunction:: convert_syncbn_model 26 | -------------------------------------------------------------------------------- /apex/examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains examples illustrating Apex mixed precision and distributed tools. 2 | 3 | **Note for users of the pre-unification API**: 4 | `deprecated_api` contains examples illustrating the old (pre-unified) APIs. These APIs will be removed soon, and users are strongly encouraged to switch. The separate mixed precision tools called `Amp` and `FP16_Optimizer` in the old API are exposed via different flags/optimization levels in the new API. 5 | -------------------------------------------------------------------------------- /apex/examples/dcgan/README.md: -------------------------------------------------------------------------------- 1 | # Mixed Precision DCGAN Training in PyTorch 2 | 3 | `main_amp.py` is based on [https://github.com/pytorch/examples/tree/master/dcgan](https://github.com/pytorch/examples/tree/master/dcgan). 4 | It implements Automatic Mixed Precision (Amp) training of the DCGAN example for different datasets. 
Command-line flags forwarded to `amp.initialize` are used to easily manipulate and switch between various pure and mixed precision "optimization levels" or `opt_level`s. For a detailed explanation of `opt_level`s, see the [updated API guide](https://nvidia.github.io/apex/amp.html). 5 | 6 | We introduce these changes to the PyTorch DCGAN example as described in the [Multiple models/optimizers/losses](https://nvidia.github.io/apex/advanced.html#multiple-models-optimizers-losses) section of the documentation:: 7 | ``` 8 | # Added after models and optimizers construction 9 | [netD, netG], [optimizerD, optimizerG] = amp.initialize( 10 | [netD, netG], [optimizerD, optimizerG], opt_level=opt.opt_level, num_losses=3) 11 | ... 12 | # loss.backward() changed to: 13 | with amp.scale_loss(errD_real, optimizerD, loss_id=0) as errD_real_scaled: 14 | errD_real_scaled.backward() 15 | ... 16 | with amp.scale_loss(errD_fake, optimizerD, loss_id=1) as errD_fake_scaled: 17 | errD_fake_scaled.backward() 18 | ... 19 | with amp.scale_loss(errG, optimizerG, loss_id=2) as errG_scaled: 20 | errG_scaled.backward() 21 | ``` 22 | 23 | Note that we use different `loss_scalers` for each computed loss. 24 | Using a separate loss scaler per loss is [optional, not required](https://nvidia.github.io/apex/advanced.html#optionally-have-amp-use-a-different-loss-scaler-per-loss). 25 | 26 | To improve the numerical stability, we swapped `nn.Sigmoid() + nn.BCELoss()` to `nn.BCEWithLogitsLoss()`. 27 | 28 | With the new Amp API **you never need to explicitly convert your model, or the input data, to half().** 29 | 30 | "Pure FP32" training: 31 | ``` 32 | $ python main_amp.py --opt_level O0 33 | ``` 34 | Recommended mixed precision training: 35 | ``` 36 | $ python main_amp.py --opt_level O1 37 | ``` 38 | 39 | Have a look at the original [DCGAN example](https://github.com/pytorch/examples/tree/master/dcgan) for more information about the used arguments. 40 | 41 | To enable mixed precision training, we introduce the `--opt_level` argument. 42 | -------------------------------------------------------------------------------- /apex/examples/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base image must at least have pytorch and CUDA installed. 2 | ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:19.07-py3 3 | FROM $BASE_IMAGE 4 | ARG BASE_IMAGE 5 | RUN echo "Installing Apex on top of ${BASE_IMAGE}" 6 | # make sure we don't overwrite some existing directory called "apex" 7 | WORKDIR /tmp/unique_for_apex 8 | # uninstall Apex if present, twice to make absolutely sure :) 9 | RUN pip uninstall -y apex || : 10 | RUN pip uninstall -y apex || : 11 | # SHA is something the user can touch to force recreation of this Docker layer, 12 | # and therefore force cloning of the latest version of Apex 13 | RUN SHA=ToUcHMe git clone https://github.com/NVIDIA/apex.git 14 | WORKDIR /tmp/unique_for_apex/apex 15 | RUN pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . 16 | WORKDIR /workspace 17 | -------------------------------------------------------------------------------- /apex/examples/docker/README.md: -------------------------------------------------------------------------------- 1 | ## Option 1: Create a new container with Apex 2 | 3 | **Dockerfile** installs the latest Apex on top of an existing image. Run 4 | ``` 5 | docker build -t new_image_with_apex . 
6 | ``` 7 | By default, **Dockerfile** uses NVIDIA's Pytorch container as the base image, 8 | which requires an NVIDIA GPU Cloud (NGC) account. If you don't have an NGC account, you can sign up for free by following the instructions [here](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html#generating-api-key). 9 | 10 | Alternatively, you can supply your own base image via the `BASE_IMAGE` build-arg. 11 | `BASE_IMAGE` must have Pytorch and Cuda installed. For example, any 12 | `-devel` image for Pytorch 1.0 and later from the 13 | [official Pytorch Dockerhub](https://hub.docker.com/r/pytorch/pytorch) may be used: 14 | ``` 15 | docker build --build-arg BASE_IMAGE=1.3-cuda10.1-cudnn7-devel -t new_image_with_apex . 16 | ``` 17 | 18 | If you want to rebuild your image, and force the latest Apex to be cloned and installed, make any small change to the `SHA` variable in **Dockerfile**. 19 | 20 | **Warning:** 21 | Currently, the non-`-devel` images on Pytorch Dockerhub do not contain the Cuda compiler `nvcc`. Therefore, 22 | images whose name does not contain `-devel` are not eligible candidates for `BASE_IMAGE`. 23 | 24 | ### Running your Apex container 25 | 26 | Like any Cuda-enabled Pytorch container, a container with Apex should be run via [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), for example: 27 | ``` 28 | docker run --runtime=nvidia -it --rm --ipc=host new_image_with_apex 29 | ``` 30 | 31 | ## Option 2: Install Apex in a running container 32 | 33 | Instead of building a new container, it is also a viable option to `git clone https://github.com/NVIDIA/apex.git` on bare metal, mount the Apex repo into your container at launch by running, for example, 34 | ``` 35 | docker run --runtime=nvidia -it --rm --ipc=host -v /bare/metal/apex:/apex/in/container 36 | ``` 37 | then go to /apex/in/container within the running container and 38 | ``` 39 | pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" . 40 | ``` 41 | -------------------------------------------------------------------------------- /apex/examples/simple/distributed/README.md: -------------------------------------------------------------------------------- 1 | **distributed_data_parallel.py** and **run.sh** show an example using Amp with 2 | [apex.parallel.DistributedDataParallel](https://nvidia.github.io/apex/parallel.html) or 3 | [torch.nn.parallel.DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#distributeddataparallel) 4 | and the Pytorch multiprocess launcher script, 5 | [torch.distributed.launch](https://pytorch.org/docs/master/distributed.html#launch-utility). 6 | The use of `Amp` with DistributedDataParallel does not need to change from ordinary 7 | single-process use. The only gotcha is that wrapping your model with `DistributedDataParallel` must 8 | come after the call to `amp.initialize`. 
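A minimal sketch of that ordering (the tiny model and optimizer below are stand-ins, not part of this example's code):

```python
# Sketch only: illustrate that amp.initialize is called before the DDP wrap.
import argparse
import torch
from apex import amp
from apex.parallel import DistributedDataParallel as DDP

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)  # supplied by torch.distributed.launch
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")

model = torch.nn.Linear(10, 10).cuda()                    # stand-in for a real model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)   # stand-in optimizer

# Initialize Amp first...
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
# ...and only then wrap the returned model with DistributedDataParallel.
model = DDP(model)
```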
Test via 9 | ```bash 10 | bash run.sh 11 | ``` 12 | 13 | **This is intended purely as an instructional example, not a performance showcase.** 14 | -------------------------------------------------------------------------------- /apex/examples/simple/distributed/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python -m torch.distributed.launch --nproc_per_node=2 distributed_data_parallel.py 3 | -------------------------------------------------------------------------------- /apex/requirements.txt: -------------------------------------------------------------------------------- 1 | cxxfilt>=0.2.0 2 | tqdm>=4.28.1 3 | numpy>=1.15.3 4 | PyYAML>=5.1 5 | pytest>=3.5.1 6 | -------------------------------------------------------------------------------- /apex/requirements_dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | flake8>=3.7.9 3 | Sphinx>=3.0.3 -------------------------------------------------------------------------------- /apex/tests/L0/run_amp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/tests/L0/run_amp/__init__.py -------------------------------------------------------------------------------- /apex/tests/L0/run_amp/test_larc.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | from torch import nn 5 | from torch.nn import Parameter 6 | 7 | from apex import amp 8 | from apex.parallel.LARC import LARC 9 | from utils import common_init 10 | 11 | 12 | class MyModel(torch.nn.Module): 13 | def __init__(self, unique): 14 | super(MyModel, self).__init__() 15 | self.weight0 = Parameter( 16 | unique + torch.arange(2, device="cuda", dtype=torch.float32) 17 | ) 18 | 19 | def forward(self, input): 20 | return (input * self.weight0).sum() 21 | 22 | 23 | class TestLARC(unittest.TestCase): 24 | def setUp(self): 25 | self.x = torch.ones((2), device="cuda", dtype=torch.float32) 26 | common_init(self) 27 | 28 | def tearDown(self): 29 | pass 30 | 31 | def test_larc_mixed_precision(self): 32 | for opt_level in ["O0", "O1", "O2", "O3"]: 33 | model = MyModel(1) 34 | 35 | optimizer = LARC( 36 | torch.optim.SGD( 37 | [{"params": model.parameters(), "lr": 0.25}], momentum=0.125 38 | ) 39 | ) 40 | 41 | model, optimizer = amp.initialize( 42 | model, optimizer, opt_level=opt_level, verbosity=0 43 | ) 44 | 45 | optimizer.zero_grad() 46 | loss = model(self.x) 47 | with amp.scale_loss(loss, optimizer) as scaled_loss: 48 | scaled_loss.backward() 49 | optimizer.step() 50 | 51 | 52 | if __name__ == "__main__": 53 | unittest.main() 54 | -------------------------------------------------------------------------------- /apex/tests/L0/run_amp/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | HALF = 'torch.cuda.HalfTensor' 4 | FLOAT = 'torch.cuda.FloatTensor' 5 | 6 | DTYPES = [torch.half, torch.float] 7 | 8 | ALWAYS_HALF = {torch.float: HALF, 9 | torch.half: HALF} 10 | ALWAYS_FLOAT = {torch.float: FLOAT, 11 | torch.half: FLOAT} 12 | MATCH_INPUT = {torch.float: FLOAT, 13 | torch.half: HALF} 14 | 15 | def common_init(test_case): 16 | test_case.h = 64 17 | test_case.b = 16 18 | test_case.c = 16 19 | test_case.k = 3 20 | test_case.t = 10 21 | torch.set_default_tensor_type(torch.cuda.FloatTensor) 22 | 
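For reference, a hypothetical test built on these helpers might look like the following (this is not one of the repository's actual tests; it only illustrates how `common_init` and the dtype maps are meant to be used):

```python
# Hypothetical usage of tests/L0/run_amp/utils.py: assert that an op's output
# tensor type matches the map entry for its input dtype.
import unittest
import torch
from utils import MATCH_INPUT, common_init  # resolved relative to tests/L0/run_amp/

class TestCastExample(unittest.TestCase):
    def setUp(self):
        common_init(self)  # sets self.b, self.h, etc. and the default tensor type

    def test_matmul_keeps_input_type(self):
        for dtype in (torch.float, torch.half):
            x = torch.randn((self.b, self.h), device="cuda").to(dtype)
            y = torch.randn((self.h, self.h), device="cuda").to(dtype)
            self.assertEqual((x @ y).type(), MATCH_INPUT[dtype])

if __name__ == "__main__":
    unittest.main()
```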
-------------------------------------------------------------------------------- /apex/tests/L0/run_fp16util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/tests/L0/run_fp16util/__init__.py -------------------------------------------------------------------------------- /apex/tests/L0/run_fp16util/test_fp16util.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import torch 4 | import torch.nn as nn 5 | 6 | from apex.fp16_utils import FP16Model 7 | 8 | 9 | class DummyBlock(nn.Module): 10 | def __init__(self): 11 | super(DummyBlock, self).__init__() 12 | 13 | self.conv = nn.Conv2d(10, 10, 2) 14 | self.bn = nn.BatchNorm2d(10, affine=True) 15 | 16 | def forward(self, x): 17 | return self.conv(self.bn(x)) 18 | 19 | 20 | class DummyNet(nn.Module): 21 | def __init__(self): 22 | super(DummyNet, self).__init__() 23 | 24 | self.conv1 = nn.Conv2d(3, 10, 2) 25 | self.bn1 = nn.BatchNorm2d(10, affine=False) 26 | self.db1 = DummyBlock() 27 | self.db2 = DummyBlock() 28 | 29 | def forward(self, x): 30 | out = x 31 | out = self.conv1(out) 32 | out = self.bn1(out) 33 | out = self.db1(out) 34 | out = self.db2(out) 35 | return out 36 | 37 | 38 | class DummyNetWrapper(nn.Module): 39 | def __init__(self): 40 | super(DummyNetWrapper, self).__init__() 41 | 42 | self.bn = nn.BatchNorm2d(3, affine=True) 43 | self.dn = DummyNet() 44 | 45 | def forward(self, x): 46 | return self.dn(self.bn(x)) 47 | 48 | 49 | class TestFP16Model(unittest.TestCase): 50 | def setUp(self): 51 | self.N = 64 52 | self.C_in = 3 53 | self.H_in = 16 54 | self.W_in = 32 55 | self.in_tensor = torch.randn((self.N, self.C_in, self.H_in, self.W_in)).cuda() 56 | self.orig_model = DummyNetWrapper().cuda() 57 | self.fp16_model = FP16Model(self.orig_model) 58 | 59 | def test_params_and_buffers(self): 60 | exempted_modules = [ 61 | self.fp16_model.network.bn, 62 | self.fp16_model.network.dn.db1.bn, 63 | self.fp16_model.network.dn.db2.bn, 64 | ] 65 | for m in self.fp16_model.modules(): 66 | expected_dtype = torch.float if (m in exempted_modules) else torch.half 67 | for p in m.parameters(recurse=False): 68 | assert p.dtype == expected_dtype 69 | for b in m.buffers(recurse=False): 70 | assert b.dtype in (expected_dtype, torch.int64) 71 | 72 | def test_output_is_half(self): 73 | out_tensor = self.fp16_model(self.in_tensor) 74 | assert out_tensor.dtype == torch.half 75 | 76 | -------------------------------------------------------------------------------- /apex/tests/L0/run_fused_layer_norm/test_fused_layer_norm.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | import random 4 | 5 | import torch 6 | import apex 7 | from torch.autograd import Variable 8 | 9 | 10 | class TestFusedLayerNorm(unittest.TestCase): 11 | def setUp(self): 12 | # bias and weight are set to 0 and 1 respectively, so no need to copy parameters from cpu module to the gpu one 13 | self.module_cpu_ = apex.normalization.FusedLayerNorm(normalized_shape=[32, 16], elementwise_affine=False).cpu() 14 | self.module_cuda_ = apex.normalization.FusedLayerNorm(normalized_shape=[32, 16], elementwise_affine=False).cuda() 15 | 16 | def _test_same_output(self, batch_size): 17 | torch.cuda.manual_seed(42) 18 | self.input_ = torch.randn((batch_size, *self.module_cpu_.normalized_shape), device="cpu").requires_grad_(True) 19 | 
self.input_cuda_ = self.input_.cuda().detach().requires_grad_(True) 20 | out_cpu_ = self.module_cpu_(self.input_) 21 | gO = torch.rand_like(out_cpu_) 22 | out_cpu_.backward(gO) 23 | out_cuda_ = self.module_cuda_(self.input_cuda_) 24 | gO = gO.cuda() 25 | out_cuda_.backward(gO) 26 | assert out_cpu_.is_cuda == False 27 | assert out_cuda_.is_cuda == True 28 | torch.testing.assert_allclose(out_cpu_, out_cuda_.cpu()) 29 | torch.testing.assert_allclose(self.input_.grad, self.input_cuda_.grad.cpu()) 30 | 31 | def test_layer_norm(self): 32 | self._test_same_output(16) 33 | 34 | def test_large_batch(self): 35 | self._test_same_output(65536) 36 | 37 | 38 | class TestFusedLayerNormElemWise(TestFusedLayerNorm): 39 | def setUp(self): 40 | self.module_cpu_ = apex.normalization.FusedLayerNorm(normalized_shape=[32, 16], elementwise_affine=True).cpu() 41 | self.module_cuda_ = apex.normalization.FusedLayerNorm(normalized_shape=[32, 16], elementwise_affine=True).cuda() 42 | 43 | -------------------------------------------------------------------------------- /apex/tests/L0/run_optimizers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/tests/L0/run_optimizers/__init__.py -------------------------------------------------------------------------------- /apex/tests/L0/run_pyprof_data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/apex/tests/L0/run_pyprof_data/__init__.py -------------------------------------------------------------------------------- /apex/tests/L0/run_pyprof_nvtx/__init__.py: -------------------------------------------------------------------------------- 1 | import test_pyprof_nvtx.TestPyProfNvtx as TestPyProfNvtx 2 | -------------------------------------------------------------------------------- /apex/tests/L0/run_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | 4 | test_dirs = ["run_amp", "run_fp16util", "run_optimizers", "run_fused_layer_norm", "run_pyprof_nvtx", "run_pyprof_data", "run_mlp"] 5 | 6 | runner = unittest.TextTestRunner(verbosity=2) 7 | 8 | errcode = 0 9 | 10 | for test_dir in test_dirs: 11 | suite = unittest.TestLoader().discover(test_dir) 12 | 13 | print("\nExecuting tests from " + test_dir) 14 | 15 | result = runner.run(suite) 16 | 17 | if not result.wasSuccessful(): 18 | errcode = 1 19 | 20 | sys.exit(errcode) 21 | -------------------------------------------------------------------------------- /apex/tests/L1/common/compare.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | 4 | parser = argparse.ArgumentParser(description='Compare') 5 | parser.add_argument('--opt-level', type=str) 6 | parser.add_argument('--keep-batchnorm-fp32', type=str, default=None) 7 | parser.add_argument('--loss-scale', type=str, default=None) 8 | parser.add_argument('--fused-adam', action='store_true') 9 | parser.add_argument('--use_baseline', action='store_true') 10 | args = parser.parse_args() 11 | 12 | base_file = str(args.opt_level) + "_" +\ 13 | str(args.loss_scale) + "_" +\ 14 | str(args.keep_batchnorm_fp32) + "_" +\ 15 | str(args.fused_adam) 16 | 17 | file_e = "True_" + base_file 18 | file_p = "False_" + base_file 19 | if args.use_baseline: 20 | file_b = 
"baselines/True_" + base_file 21 | 22 | dict_e = torch.load(file_e) 23 | dict_p = torch.load(file_p) 24 | if args.use_baseline: 25 | dict_b = torch.load(file_b) 26 | 27 | torch.set_printoptions(precision=10) 28 | 29 | print(file_e) 30 | print(file_p) 31 | if args.use_baseline: 32 | print(file_b) 33 | 34 | # ugly duplication here... 35 | if not args.use_baseline: 36 | for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])): 37 | assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p) 38 | 39 | loss_e = dict_e["Loss"][n] 40 | loss_p = dict_p["Loss"][n] 41 | assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p) 42 | print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format( 43 | i_e, 44 | loss_e, 45 | loss_p, 46 | dict_e["Speed"][n], 47 | dict_p["Speed"][n])) 48 | else: 49 | for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])): 50 | assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p) 51 | 52 | loss_e = dict_e["Loss"][n] 53 | loss_p = dict_p["Loss"][n] 54 | loss_b = dict_b["Loss"][n] 55 | assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p) 56 | assert loss_e == loss_b, "Iteration {}, loss_e = {}, loss_b = {}".format(i_e, loss_e, loss_b) 57 | print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format( 58 | i_e, 59 | loss_b, 60 | loss_e, 61 | loss_p, 62 | dict_b["Speed"][n], 63 | dict_e["Speed"][n], 64 | dict_p["Speed"][n])) 65 | -------------------------------------------------------------------------------- /apex/tests/L1/cross_product/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # DATADIR="/home/mcarilli/Desktop/pt18data/apex_stale/examples/imagenet/bare_metal_train_val/" 4 | # DATADIR="/opt/home/apex/examples/imagenet/" 5 | cp ../common/* . 6 | bash run_test.sh single_gpu $1 7 | -------------------------------------------------------------------------------- /apex/tests/L1/cross_product_distributed/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cp ../common/* . 
4 | bash run_test.sh distributed $1 5 | -------------------------------------------------------------------------------- /apex/tests/distributed/DDP/ddp_race_condition_test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.distributed as dist 3 | from torch.nn import Parameter 4 | from torch.nn import Module 5 | from apex.parallel import DistributedDataParallel as DDP 6 | import argparse 7 | import os 8 | 9 | 10 | parser = argparse.ArgumentParser(description='allreduce hook example') 11 | parser.add_argument("--local_rank", default=0, type=int) 12 | args = parser.parse_args() 13 | 14 | args.distributed = False 15 | if 'WORLD_SIZE' in os.environ: 16 | args.distributed = int(os.environ['WORLD_SIZE']) > 1 17 | 18 | if args.distributed: 19 | args.gpu = args.local_rank % torch.cuda.device_count() 20 | torch.cuda.set_device(args.gpu) 21 | torch.distributed.init_process_group(backend='nccl', 22 | init_method='env://') 23 | args.world_size = torch.distributed.get_world_size() 24 | 25 | torch.set_printoptions(precision=10) 26 | torch.manual_seed(args.local_rank) 27 | 28 | class Model(Module): 29 | def __init__(self): 30 | super(Model, self).__init__() 31 | self.a = Parameter(torch.cuda.FloatTensor(4096*4096).fill_(1.0)) 32 | self.b = Parameter(torch.cuda.FloatTensor(4096*4096).fill_(2.0)) 33 | def forward(self, input): 34 | return (input*self.a)*self.b 35 | 36 | model = Model() 37 | # model = DDP(model, message_size=1, gradient_predivide_factor=8.0) 38 | # model = DDP(model, delay_allreduce=True) 39 | # model = DDP(model, message_size=1, allreduce_trigger_params=[model.b]) 40 | model = DDP(model, message_size=1, allreduce_trigger_params=[model.b], num_allreduce_streams=3) 41 | 42 | x = torch.cuda.FloatTensor(4096*4096) 43 | 44 | passed = True 45 | torch.cuda.cudart().cudaProfilerStart() 46 | for i in range(10): 47 | x.fill_(i + args.local_rank) # fill x with new values every iteration for sanity 48 | model.zero_grad() 49 | out = model(x) 50 | loss = out.sum() 51 | # torch.cuda.nvtx.range_push("backward") 52 | loss.backward() 53 | # torch.cuda.nvtx.range_pop() 54 | 55 | # torch.cuda.nvtx.range_push("synchronize() + info") 56 | # torch.cuda.synchronize() 57 | print("i = {}".format(i)) 58 | def info(name, param, val): 59 | expected = val*4096*4096*(2.*i+1)/2. 
60 | actual = param.grad.data.sum().item() 61 | print(name+": grad.data_ptr() = {}, expected sum {}, got {}".format( 62 | param.grad.data_ptr(), expected, actual)) 63 | return (expected == actual) 64 | if not info("model.a", model.module.a, 2.): passed = False 65 | if not info("model.b", model.module.b, 1.): passed = False 66 | # torch.cuda.nvtx.range_pop() 67 | torch.cuda.cudart().cudaProfilerStop() 68 | 69 | print("passed = ", passed) 70 | -------------------------------------------------------------------------------- /apex/tests/distributed/DDP/run_race_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 ddp_race_condition_test.py 4 | -------------------------------------------------------------------------------- /apex/tests/distributed/amp_master_params/compare.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | model_params_rank0 = torch.load("rank0model.pth", 4 | map_location = lambda storage, loc: storage.cuda(0)) 5 | model_params_rank1 = torch.load("rank1model.pth", 6 | map_location = lambda storage, loc: storage.cuda(0)) 7 | master_params_rank0 = torch.load("rank0master.pth", 8 | map_location = lambda storage, loc: storage.cuda(0)) 9 | master_params_rank1 = torch.load("rank1master.pth", 10 | map_location = lambda storage, loc: storage.cuda(0)) 11 | 12 | for model_rank0, model_rank1, master_rank0, master_rank1 in zip( 13 | model_params_rank0, 14 | model_params_rank1, 15 | master_params_rank0, 16 | master_params_rank1): 17 | assert torch.allclose(model_rank0, model_rank1), "Model param mismatch" 18 | assert torch.allclose(master_rank0, master_rank1), "Master param mismatch" 19 | # Some debugging/investigation assistance code: 20 | # maxval, maxind = torch.max(((torch.abs(model_rank0).float())/torch.abs(master_rank0)).view(-1), 0) 21 | # offending_val_half = model_rank0.view(-1)[maxind.item()] 22 | # offending_val_float = master_rank0.view(-1)[maxind.item()] 23 | # print(maxval.item(), maxind.item(), offending_val_half.item(), offending_val_float.item(), 24 | # offending_val_float.half().item()) 25 | # rtol needs to be > 2^-11 because of denormals... 
26 | assert torch.allclose(model_rank0, master_rank0.half(), rtol=.005), "Model-master mismatch" 27 | 28 | print("OK: Model and master params match across ranks.") 29 | -------------------------------------------------------------------------------- /apex/tests/distributed/amp_master_params/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | python -m torch.distributed.launch --nproc_per_node=2 amp_master_params.py 3 | 4 | python compare.py 5 | -------------------------------------------------------------------------------- /apex/tests/distributed/synced_batchnorm/test_batchnorm1d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import apex 3 | 4 | model = apex.parallel.SyncBatchNorm(4).cuda() 5 | model.weight.data.uniform_() 6 | model.bias.data.uniform_() 7 | data = torch.rand((8,4)).cuda() 8 | 9 | model_ref = torch.nn.BatchNorm1d(4).cuda() 10 | model_ref.load_state_dict(model.state_dict()) 11 | data_ref = data.clone() 12 | 13 | output = model(data) 14 | output_ref = model_ref(data_ref) 15 | 16 | assert(output.allclose(output_ref)) 17 | assert(model.running_mean.allclose(model_ref.running_mean)) 18 | assert(model.running_var.allclose(model_ref.running_var)) 19 | -------------------------------------------------------------------------------- /apex/tests/distributed/synced_batchnorm/unit_test.sh: -------------------------------------------------------------------------------- 1 | python python_single_gpu_unit_test.py 2 | python single_gpu_unit_test.py 3 | python test_batchnorm1d.py 4 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_unit_test.py 5 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_unit_test.py --fp16 6 | python -m torch.distributed.launch --nproc_per_node=2 two_gpu_test_different_batch_size.py --apex 7 | #beware, you need a system with at least 4 gpus to test group_size= 5.1", 17 | "torch >= 7.0", 18 | "lua-cjson" 19 | } 20 | 21 | build = { 22 | type = "builtin", 23 | modules = { 24 | ["coco.env"] = "LuaAPI/env.lua", 25 | ["coco.init"] = "LuaAPI/init.lua", 26 | ["coco.MaskApi"] = "LuaAPI/MaskApi.lua", 27 | ["coco.CocoApi"] = "LuaAPI/CocoApi.lua", 28 | libmaskapi = { 29 | sources = { "common/maskApi.c" }, 30 | incdirs = { "common/" } 31 | } 32 | } 33 | } 34 | 35 | -- luarocks make LuaAPI/rocks/coco-scm-1.rockspec 36 | -- https://github.com/pdollar/coco/raw/master/LuaAPI/rocks/coco-scm-1.rockspec 37 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/cocoDemo.m: -------------------------------------------------------------------------------- 1 | %% Demo for the CocoApi (see CocoApi.m) 2 | 3 | %% initialize COCO api (please specify dataType/annType below) 4 | annTypes = { 'instances', 'captions', 'person_keypoints' }; 5 | dataType='val2014'; annType=annTypes{1}; % specify dataType/annType 6 | annFile=sprintf('../annotations/%s_%s.json',annType,dataType); 7 | coco=CocoApi(annFile); 8 | 9 | %% display COCO categories and supercategories 10 | if( ~strcmp(annType,'captions') ) 11 | cats = coco.loadCats(coco.getCatIds()); 12 | nms={cats.name}; fprintf('COCO categories: '); 13 | fprintf('%s, ',nms{:}); fprintf('\n'); 14 | nms=unique({cats.supercategory}); fprintf('COCO supercategories: '); 15 | fprintf('%s, ',nms{:}); fprintf('\n'); 16 | end 17 | 18 | %% get all images containing given categories, select one at random 19 | catIds = coco.getCatIds('catNms',{'person','dog','skateboard'}); 20 | 
imgIds = coco.getImgIds('catIds',catIds); 21 | imgId = imgIds(randi(length(imgIds))); 22 | 23 | %% load and display image 24 | img = coco.loadImgs(imgId); 25 | I = imread(sprintf('../images/%s/%s',dataType,img.file_name)); 26 | figure(1); imagesc(I); axis('image'); set(gca,'XTick',[],'YTick',[]) 27 | 28 | %% load and display annotations 29 | annIds = coco.getAnnIds('imgIds',imgId,'catIds',catIds,'iscrowd',[]); 30 | anns = coco.loadAnns(annIds); coco.showAnns(anns); 31 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/evalDemo.m: -------------------------------------------------------------------------------- 1 | %% Demo demonstrating the algorithm result formats for COCO 2 | 3 | %% select results type for demo (either bbox or segm) 4 | type = {'segm','bbox','keypoints'}; type = type{1}; % specify type here 5 | fprintf('Running demo for *%s* results.\n\n',type); 6 | 7 | %% initialize COCO ground truth api 8 | dataDir='../'; prefix='instances'; dataType='val2014'; 9 | if(strcmp(type,'keypoints')), prefix='person_keypoints'; end 10 | annFile=sprintf('%s/annotations/%s_%s.json',dataDir,prefix,dataType); 11 | cocoGt=CocoApi(annFile); 12 | 13 | %% initialize COCO detections api 14 | resFile='%s/results/%s_%s_fake%s100_results.json'; 15 | resFile=sprintf(resFile,dataDir,prefix,dataType,type); 16 | cocoDt=cocoGt.loadRes(resFile); 17 | 18 | %% visialuze gt and dt side by side 19 | imgIds=sort(cocoGt.getImgIds()); imgIds=imgIds(1:100); 20 | imgId = imgIds(randi(100)); img = cocoGt.loadImgs(imgId); 21 | I = imread(sprintf('%s/images/val2014/%s',dataDir,img.file_name)); 22 | figure(1); subplot(1,2,1); imagesc(I); axis('image'); axis off; 23 | annIds = cocoGt.getAnnIds('imgIds',imgId); title('ground truth') 24 | anns = cocoGt.loadAnns(annIds); cocoGt.showAnns(anns); 25 | figure(1); subplot(1,2,2); imagesc(I); axis('image'); axis off; 26 | annIds = cocoDt.getAnnIds('imgIds',imgId); title('results') 27 | anns = cocoDt.loadAnns(annIds); cocoDt.showAnns(anns); 28 | 29 | %% load raw JSON and show exact format for results 30 | fprintf('results structure have the following format:\n'); 31 | res = gason(fileread(resFile)); disp(res) 32 | 33 | %% the following command can be used to save the results back to disk 34 | if(0), f=fopen(resFile,'w'); fwrite(f,gason(res)); fclose(f); end 35 | 36 | %% run COCO evaluation code (see CocoEval.m) 37 | cocoEval=CocoEval(cocoGt,cocoDt,type); 38 | cocoEval.params.imgIds=imgIds; 39 | cocoEval.evaluate(); 40 | cocoEval.accumulate(); 41 | cocoEval.summarize(); 42 | 43 | %% generate Derek Hoiem style analyis of false positives (slow) 44 | if(0), cocoEval.analyze(); end 45 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/gason.m: -------------------------------------------------------------------------------- 1 | function out = gason( in ) 2 | % Convert between JSON strings and corresponding JSON objects. 3 | % 4 | % This parser is based on Gason written and maintained by Ivan Vashchaev: 5 | % https://github.com/vivkin/gason 6 | % Gason is a "lightweight and fast JSON parser for C++". Please see the 7 | % above link for license information and additional details about Gason. 8 | % 9 | % Given a JSON string, gason calls the C++ parser and converts the output 10 | % into an appropriate Matlab structure. As the parsing is performed in mex 11 | % the resulting parser is blazingly fast. 
Large JSON structs (100MB+) take 12 | % only a few seconds to parse (compared to hours for pure Matlab parsers). 13 | % 14 | % Given a JSON object, gason calls the C++ encoder to convert the object 15 | % back into a JSON string representation. Nearly any Matlab struct, cell 16 | % array, or numeric array represent a valid JSON object. Note that gason() 17 | % can be used to go both from JSON string to JSON object and back. 18 | % 19 | % Gason requires C++11 to compile (for GCC this requires version 4.7 or 20 | % later). The following command compiles the parser (may require tweaking): 21 | % mex('CXXFLAGS=\$CXXFLAGS -std=c++11 -Wall','-largeArrayDims',... 22 | % 'private/gasonMex.cpp','../common/gason.cpp',... 23 | % '-I../common/','-outdir','private'); 24 | % Note the use of the "-std=c++11" flag. A number of precompiled binaries 25 | % are included, please do not contact us for help with compiling. If needed 26 | % you can specify a compiler by adding the option 'CXX="/usr/bin/g++"'. 27 | % 28 | % Note that by default JSON arrays that contain only numbers are stored as 29 | % regular Matlab arrays. Likewise, JSON arrays that contain only objects of 30 | % the same type are stored as Matlab struct arrays. This is much faster and 31 | % can use considerably less memory than always using Matlab cell arrays. 32 | % 33 | % USAGE 34 | % object = gason( string ) 35 | % string = gason( object ) 36 | % 37 | % INPUTS/OUTPUTS 38 | % string - JSON string 39 | % object - JSON object 40 | % 41 | % EXAMPLE 42 | % o = struct('first',{'piotr','ty'},'last',{'dollar','lin'}) 43 | % s = gason( o ) % convert JSON object -> JSON string 44 | % p = gason( s ) % convert JSON string -> JSON object 45 | % 46 | % See also 47 | % 48 | % Microsoft COCO Toolbox. version 2.0 49 | % Data, paper, and tutorials available at: http://mscoco.org/ 50 | % Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
51 | % Licensed under the Simplified BSD License [see coco/license.txt] 52 | 53 | out = gasonMex( 'convert', in ); 54 | -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/gasonMex.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/cocoapi/MatlabAPI/private/gasonMex.mexa64 -------------------------------------------------------------------------------- /cocoapi/MatlabAPI/private/gasonMex.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/itec-hust/MusicYOLO/d980a0c0a3723a6c25772c2f7150a82baa1a4ec3/cocoapi/MatlabAPI/private/gasonMex.mexmaci64 -------------------------------------------------------------------------------- /cocoapi/PythonAPI/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | # install pycocotools locally 3 | python setup.py build_ext --inplace 4 | rm -rf build 5 | 6 | install: 7 | # install pycocotools to the Python site-packages 8 | python setup.py build_ext install 9 | rm -rf build -------------------------------------------------------------------------------- /cocoapi/PythonAPI/pycocotools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'tylin' 2 | -------------------------------------------------------------------------------- /cocoapi/PythonAPI/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, Extension 2 | import numpy as np 3 | 4 | # To compile and install locally run "python setup.py build_ext --inplace" 5 | # To install library to Python site-packages run "python setup.py build_ext install" 6 | 7 | ext_modules = [ 8 | Extension( 9 | 'pycocotools._mask', 10 | sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'], 11 | include_dirs = [np.get_include(), '../common'], 12 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], 13 | ) 14 | ] 15 | 16 | setup( 17 | name='pycocotools', 18 | packages=['pycocotools'], 19 | package_dir = {'pycocotools': 'pycocotools'}, 20 | install_requires=[ 21 | 'setuptools>=18.0', 22 | 'cython>=0.27.3', 23 | 'matplotlib>=2.1.0' 24 | ], 25 | version='2.0', 26 | ext_modules= ext_modules 27 | ) 28 | -------------------------------------------------------------------------------- /cocoapi/README.txt: -------------------------------------------------------------------------------- 1 | COCO API - http://cocodataset.org/ 2 | 3 | COCO is a large image dataset designed for object detection, segmentation, person keypoints detection, stuff segmentation, and caption generation. This package provides Matlab, Python, and Lua APIs that assists in loading, parsing, and visualizing the annotations in COCO. Please visit http://cocodataset.org/ for more information on COCO, including for the data, paper, and tutorials. The exact format of the annotations is also described on the COCO website. The Matlab and Python APIs are complete, the Lua API provides only basic functionality. 4 | 5 | In addition to this API, please download both the COCO images and annotations in order to run the demos and use the API. Both are available on the project website. 
6 | -Please download, unzip, and place the images in: coco/images/ 7 | -Please download and place the annotations in: coco/annotations/ 8 | For substantially more details on the API please see http://cocodataset.org/#download. 9 | 10 | After downloading the images and annotations, run the Matlab, Python, or Lua demos for example usage. 11 | 12 | To install: 13 | -For Matlab, add coco/MatlabApi to the Matlab path (OSX/Linux binaries provided) 14 | -For Python, run "make" under coco/PythonAPI 15 | -For Lua, run “luarocks make LuaAPI/rocks/coco-scm-1.rockspec” under coco/ 16 | -------------------------------------------------------------------------------- /cocoapi/common/maskApi.h: -------------------------------------------------------------------------------- 1 | /************************************************************************** 2 | * Microsoft COCO Toolbox. version 2.0 3 | * Data, paper, and tutorials available at: http://mscoco.org/ 4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 5 | * Licensed under the Simplified BSD License [see coco/license.txt] 6 | **************************************************************************/ 7 | #pragma once 8 | 9 | typedef unsigned int uint; 10 | typedef unsigned long siz; 11 | typedef unsigned char byte; 12 | typedef double* BB; 13 | typedef struct { siz h, w, m; uint *cnts; } RLE; 14 | 15 | /* Initialize/destroy RLE. */ 16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ); 17 | void rleFree( RLE *R ); 18 | 19 | /* Initialize/destroy RLE array. */ 20 | void rlesInit( RLE **R, siz n ); 21 | void rlesFree( RLE **R, siz n ); 22 | 23 | /* Encode binary masks using RLE. */ 24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n ); 25 | 26 | /* Decode binary masks encoded via RLE. */ 27 | void rleDecode( const RLE *R, byte *mask, siz n ); 28 | 29 | /* Compute union or intersection of encoded masks. */ 30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect ); 31 | 32 | /* Compute area of encoded masks. */ 33 | void rleArea( const RLE *R, siz n, uint *a ); 34 | 35 | /* Compute intersection over union between masks. */ 36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ); 37 | 38 | /* Compute non-maximum suppression between bounding masks */ 39 | void rleNms( RLE *dt, siz n, uint *keep, double thr ); 40 | 41 | /* Compute intersection over union between bounding boxes. */ 42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ); 43 | 44 | /* Compute non-maximum suppression between bounding boxes */ 45 | void bbNms( BB dt, siz n, uint *keep, double thr ); 46 | 47 | /* Get bounding boxes surrounding encoded masks. */ 48 | void rleToBbox( const RLE *R, BB bb, siz n ); 49 | 50 | /* Convert bounding boxes to encoded masks. */ 51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ); 52 | 53 | /* Convert polygon to encoded mask. */ 54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ); 55 | 56 | /* Get compressed string representation of encoded mask. */ 57 | char* rleToString( const RLE *R ); 58 | 59 | /* Convert from compressed string representation of encoded mask. */ 60 | void rleFrString( RLE *R, char *s, siz h, siz w ); 61 | -------------------------------------------------------------------------------- /cocoapi/license.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin 2 | All rights reserved. 
3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 17 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 18 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 19 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 20 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 22 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | 24 | The views and conclusions contained in the software and documentation are those 25 | of the authors and should not be interpreted as representing official policies, 26 | either expressed or implied, of the FreeBSD Project. 27 | -------------------------------------------------------------------------------- /demo/MegEngine/cpp/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | if [ -z $CXX ];then 5 | echo "please export you c++ toolchain to CXX" 6 | echo "for example:" 7 | echo "build for host: export CXX=g++" 8 | echo "cross build for aarch64-android(always locate in NDK): export CXX=aarch64-linux-android21-clang++" 9 | echo "cross build for aarch64-linux: export CXX=aarch64-linux-gnu-g++" 10 | exit -1 11 | fi 12 | 13 | if [ -z $MGE_INSTALL_PATH ];then 14 | echo "please refsi ./README.md to init MGE_INSTALL_PATH env" 15 | exit -1 16 | fi 17 | 18 | if [ -z $OPENCV_INSTALL_INCLUDE_PATH ];then 19 | echo "please refs ./README.md to init OPENCV_INSTALL_INCLUDE_PATH env" 20 | exit -1 21 | fi 22 | 23 | if [ -z $OPENCV_INSTALL_LIB_PATH ];then 24 | echo "please refs ./README.md to init OPENCV_INSTALL_LIB_PATH env" 25 | exit -1 26 | fi 27 | 28 | INCLUDE_FLAG="-I$MGE_INSTALL_PATH/include -I$OPENCV_INSTALL_INCLUDE_PATH" 29 | LINK_FLAG="-L$MGE_INSTALL_PATH/lib/ -lmegengine -L$OPENCV_INSTALL_LIB_PATH -lopencv_core -lopencv_highgui -lopencv_imgproc -lopencv_imgcodecs" 30 | BUILD_FLAG="-static-libstdc++ -O3 -pie -fPIE -g" 31 | 32 | if [[ $CXX =~ "android" ]]; then 33 | LINK_FLAG="${LINK_FLAG} -llog -lz" 34 | fi 35 | 36 | echo "CXX: $CXX" 37 | echo "MGE_INSTALL_PATH: $MGE_INSTALL_PATH" 38 | echo "INCLUDE_FLAG: $INCLUDE_FLAG" 39 | echo "LINK_FLAG: $LINK_FLAG" 40 | echo "BUILD_FLAG: $BUILD_FLAG" 41 | 42 | echo "[" > compile_commands.json 43 | echo "{" >> compile_commands.json 44 | echo "\"directory\": \"$PWD\"," >> compile_commands.json 45 | echo "\"command\": \"$CXX yolox.cpp -o yolox ${INCLUDE_FLAG} ${LINK_FLAG}\"," >> compile_commands.json 46 | echo "\"file\": \"$PWD/yolox.cpp\"," >> compile_commands.json 47 | echo "}," >> compile_commands.json 48 | echo "]" >> compile_commands.json 49 | $CXX yolox.cpp -o yolox 
${INCLUDE_FLAG} ${LINK_FLAG} ${BUILD_FLAG} 50 | 51 | echo "build success, output file: yolox" 52 | if [[ $CXX =~ "android" ]]; then 53 | echo "try command to run:" 54 | echo "adb push/scp $MGE_INSTALL_PATH/lib/libmegengine.so android_phone" 55 | echo "adb push/scp $OPENCV_INSTALL_LIB_PATH/*.so android_phone" 56 | echo "adb push/scp ./yolox yolox_s.mge android_phone" 57 | echo "adb push/scp ../../../assets/dog.jpg android_phone" 58 | echo "adb/ssh to android_phone, then run: LD_LIBRARY_PATH=. ./yolox yolox_s.mge dog.jpg cpu/multithread " 59 | else 60 | echo "try command to run: LD_LIBRARY_PATH=$MGE_INSTALL_PATH/lib/:$OPENCV_INSTALL_LIB_PATH ./yolox yolox_s.mge ../../../assets/dog.jpg cuda/cpu/multithread " 61 | fi 62 | -------------------------------------------------------------------------------- /demo/MegEngine/python/README.md: -------------------------------------------------------------------------------- 1 | # YOLOX-Python-MegEngine 2 | 3 | Python version of YOLOX object detection base on [MegEngine](https://github.com/MegEngine/MegEngine). 4 | 5 | ## Tutorial 6 | 7 | ### Step1: install requirements 8 | 9 | ``` 10 | python3 -m pip install megengine -f https://megengine.org.cn/whl/mge.html 11 | ``` 12 | 13 | ### Step2: convert checkpoint weights from torch's path file 14 | 15 | ``` 16 | python3 convert_weights.py -w yolox_s.pth -o yolox_s_mge.pkl 17 | ``` 18 | 19 | ### Step3: run demo 20 | 21 | This part is the same as torch's python demo, but no need to specify device. 22 | 23 | ``` 24 | python3 demo.py image -n yolox-s -c yolox_s_mge.pkl --path ../../../assets/dog.jpg --conf 0.25 --nms 0.45 --tsize 640 --save_result 25 | ``` 26 | 27 | ### [Optional]Step4: dump model for cpp inference 28 | 29 | > **Note**: result model is dumped with `optimize_for_inference` and `enable_fuse_conv_bias_nonlinearity`. 
30 | 31 | ``` 32 | python3 dump.py -n yolox-s -c yolox_s_mge.pkl --dump_path yolox_s.mge 33 | ``` 34 | -------------------------------------------------------------------------------- /demo/MegEngine/python/build.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | 4 | import megengine as mge 5 | import megengine.module as M 6 | 7 | from models.yolo_fpn import YOLOFPN 8 | from models.yolo_head import YOLOXHead 9 | from models.yolo_pafpn import YOLOPAFPN 10 | from models.yolox import YOLOX 11 | 12 | 13 | def build_yolox(name="yolox-s"): 14 | num_classes = 80 15 | 16 | # value meaning: depth, width 17 | param_dict = { 18 | "yolox-nano": (0.33, 0.25), 19 | "yolox-tiny": (0.33, 0.375), 20 | "yolox-s": (0.33, 0.50), 21 | "yolox-m": (0.67, 0.75), 22 | "yolox-l": (1.0, 1.0), 23 | "yolox-x": (1.33, 1.25), 24 | } 25 | if name == "yolov3": 26 | depth = 1.0 27 | width = 1.0 28 | backbone = YOLOFPN() 29 | head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu") 30 | model = YOLOX(backbone, head) 31 | else: 32 | assert name in param_dict 33 | kwargs = {} 34 | depth, width = param_dict[name] 35 | if name == "yolox-nano": 36 | kwargs["depthwise"] = True 37 | in_channels = [256, 512, 1024] 38 | backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs) 39 | head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs) 40 | model = YOLOX(backbone, head) 41 | 42 | for m in model.modules(): 43 | if isinstance(m, M.BatchNorm2d): 44 | m.eps = 1e-3 45 | 46 | return model 47 | 48 | 49 | def build_and_load(weight_file, name="yolox-s"): 50 | model = build_yolox(name) 51 | model_weights = mge.load(weight_file) 52 | model.load_state_dict(model_weights, strict=False) 53 | return model 54 | -------------------------------------------------------------------------------- /demo/MegEngine/python/convert_weights.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | import argparse 4 | from collections import OrderedDict 5 | 6 | import megengine as mge 7 | import torch 8 | 9 | 10 | def make_parser(): 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument("-w", "--weights", type=str, help="path of weight file") 13 | parser.add_argument( 14 | "-o", 15 | "--output", 16 | default="weight_mge.pkl", 17 | type=str, 18 | help="path of weight file", 19 | ) 20 | return parser 21 | 22 | 23 | def numpy_weights(weight_file): 24 | torch_weights = torch.load(weight_file, map_location="cpu") 25 | if "model" in torch_weights: 26 | torch_weights = torch_weights["model"] 27 | new_dict = OrderedDict() 28 | for k, v in torch_weights.items(): 29 | new_dict[k] = v.cpu().numpy() 30 | return new_dict 31 | 32 | 33 | def map_weights(weight_file, output_file): 34 | torch_weights = numpy_weights(weight_file) 35 | 36 | new_dict = OrderedDict() 37 | for k, v in torch_weights.items(): 38 | if "num_batches_tracked" in k: 39 | print("drop: {}".format(k)) 40 | continue 41 | if k.endswith("bias"): 42 | print("bias key: {}".format(k)) 43 | v = v.reshape(1, -1, 1, 1) 44 | new_dict[k] = v 45 | elif "dconv" in k and "conv.weight" in k: 46 | print("depthwise conv key: {}".format(k)) 47 | cout, cin, k1, k2 = v.shape 48 | v = v.reshape(cout, 1, cin, k1, k2) 49 | new_dict[k] = v 50 | else: 51 | new_dict[k] = v 52 | 53 | mge.save(new_dict, output_file) 54 | print("save weights to {}".format(output_file)) 55 | 56 | 57 | def main(): 58 | 
parser = make_parser() 59 | args = parser.parse_args() 60 | map_weights(args.weights, args.output) 61 | 62 | 63 | if __name__ == "__main__": 64 | main() 65 | -------------------------------------------------------------------------------- /demo/MegEngine/python/dump.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) Megvii, Inc. and its affiliates. 4 | 5 | import argparse 6 | 7 | import megengine as mge 8 | import numpy as np 9 | from megengine import jit 10 | 11 | from build import build_and_load 12 | 13 | 14 | def make_parser(): 15 | parser = argparse.ArgumentParser("YOLOX Demo Dump") 16 | parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name") 17 | parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval") 18 | parser.add_argument( 19 | "--dump_path", default="model.mge", help="path to save the dumped model" 20 | ) 21 | return parser 22 | 23 | 24 | def dump_static_graph(model, graph_name="model.mge"): 25 | model.eval() 26 | model.head.decode_in_inference = False 27 | 28 | data = mge.Tensor(np.random.random((1, 3, 640, 640))) 29 | 30 | @jit.trace(capture_as_const=True) 31 | def pred_func(data): 32 | outputs = model(data) 33 | return outputs 34 | 35 | pred_func(data) 36 | pred_func.dump( 37 | graph_name, 38 | arg_names=["data"], 39 | optimize_for_inference=True, 40 | enable_fuse_conv_bias_nonlinearity=True, 41 | ) 42 | 43 | 44 | def main(args): 45 | model = build_and_load(args.ckpt, name=args.name) 46 | dump_static_graph(model, args.dump_path) 47 | 48 | 49 | if __name__ == "__main__": 50 | args = make_parser().parse_args() 51 | main(args) 52 | -------------------------------------------------------------------------------- /demo/MegEngine/python/models/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding:utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | from .darknet import CSPDarknet, Darknet 6 | from .yolo_fpn import YOLOFPN 7 | from .yolo_head import YOLOXHead 8 | from .yolo_pafpn import YOLOPAFPN 9 | from .yolox import YOLOX 10 | -------------------------------------------------------------------------------- /demo/MegEngine/python/models/yolo_fpn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import megengine.functional as F 6 | import megengine.module as M 7 | 8 | from .darknet import Darknet 9 | from .network_blocks import BaseConv, UpSample 10 | 11 | 12 | class YOLOFPN(M.Module): 13 | """ 14 | YOLOFPN module. Darknet 53 is the default backbone of this model. 
15 | """ 16 | 17 | def __init__( 18 | self, depth=53, in_features=["dark3", "dark4", "dark5"], 19 | ): 20 | super().__init__() 21 | 22 | self.backbone = Darknet(depth) 23 | self.in_features = in_features 24 | 25 | # out 1 26 | self.out1_cbl = self._make_cbl(512, 256, 1) 27 | self.out1 = self._make_embedding([256, 512], 512 + 256) 28 | 29 | # out 2 30 | self.out2_cbl = self._make_cbl(256, 128, 1) 31 | self.out2 = self._make_embedding([128, 256], 256 + 128) 32 | 33 | # upsample 34 | self.upsample = UpSample(scale_factor=2, mode="bilinear") 35 | 36 | def _make_cbl(self, _in, _out, ks): 37 | return BaseConv(_in, _out, ks, stride=1, act="lrelu") 38 | 39 | def _make_embedding(self, filters_list, in_filters): 40 | m = M.Sequential( 41 | *[ 42 | self._make_cbl(in_filters, filters_list[0], 1), 43 | self._make_cbl(filters_list[0], filters_list[1], 3), 44 | 45 | self._make_cbl(filters_list[1], filters_list[0], 1), 46 | 47 | self._make_cbl(filters_list[0], filters_list[1], 3), 48 | self._make_cbl(filters_list[1], filters_list[0], 1), 49 | ] 50 | ) 51 | return m 52 | 53 | def forward(self, inputs): 54 | """ 55 | Args: 56 | inputs (Tensor): input image. 57 | 58 | Returns: 59 | Tuple[Tensor]: FPN output features.. 60 | """ 61 | # backbone 62 | out_features = self.backbone(inputs) 63 | x2, x1, x0 = [out_features[f] for f in self.in_features] 64 | 65 | # yolo branch 1 66 | x1_in = self.out1_cbl(x0) 67 | x1_in = self.upsample(x1_in) 68 | x1_in = F.concat([x1_in, x1], 1) 69 | out_dark4 = self.out1(x1_in) 70 | 71 | # yolo branch 2 72 | x2_in = self.out2_cbl(out_dark4) 73 | x2_in = self.upsample(x2_in) 74 | x2_in = F.concat([x2_in, x2], 1) 75 | out_dark3 = self.out2(x2_in) 76 | 77 | outputs = (out_dark3, out_dark4, x0) 78 | return outputs 79 | -------------------------------------------------------------------------------- /demo/MegEngine/python/models/yolox.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- encoding: utf-8 -*- 3 | # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 4 | 5 | import megengine.module as M 6 | 7 | from .yolo_head import YOLOXHead 8 | from .yolo_pafpn import YOLOPAFPN 9 | 10 | 11 | class YOLOX(M.Module): 12 | """ 13 | YOLOX model module. The module list is defined by create_yolov3_modules function. 14 | The network returns loss values from three YOLO layers during training 15 | and detection results during test. 
16 | """ 17 | 18 | def __init__(self, backbone=None, head=None): 19 | super().__init__() 20 | if backbone is None: 21 | backbone = YOLOPAFPN() 22 | if head is None: 23 | head = YOLOXHead(80) 24 | 25 | self.backbone = backbone 26 | self.head = head 27 | 28 | def forward(self, x): 29 | # fpn output content features of [dark3, dark4, dark5] 30 | fpn_outs = self.backbone(x) 31 | assert not self.training 32 | outputs = self.head(fpn_outs) 33 | 34 | return outputs 35 | -------------------------------------------------------------------------------- /demo/OpenVINO/README.md: -------------------------------------------------------------------------------- 1 | ## YOLOX for OpenVINO 2 | 3 | * [C++ Demo](./cpp) 4 | * [Python Demo](./python) -------------------------------------------------------------------------------- /demo/OpenVINO/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.4.1) 2 | set(CMAKE_CXX_STANDARD 14) 3 | 4 | project(yolox_openvino_demo) 5 | 6 | find_package(OpenCV REQUIRED) 7 | find_package(InferenceEngine REQUIRED) 8 | find_package(ngraph REQUIRED) 9 | 10 | include_directories( 11 | ${OpenCV_INCLUDE_DIRS} 12 | ${CMAKE_CURRENT_SOURCE_DIR} 13 | ${CMAKE_CURRENT_BINARY_DIR} 14 | ) 15 | 16 | add_executable(yolox_openvino yolox_openvino.cpp) 17 | 18 | target_link_libraries( 19 | yolox_openvino 20 | ${InferenceEngine_LIBRARIES} 21 | ${NGRAPH_LIBRARIES} 22 | ${OpenCV_LIBS} 23 | ) -------------------------------------------------------------------------------- /demo/TensorRT/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.6) 2 | 3 | project(yolox) 4 | 5 | add_definitions(-std=c++11) 6 | 7 | option(CUDA_USE_STATIC_CUDA_RUNTIME OFF) 8 | set(CMAKE_CXX_STANDARD 11) 9 | set(CMAKE_BUILD_TYPE Debug) 10 | 11 | find_package(CUDA REQUIRED) 12 | 13 | include_directories(${PROJECT_SOURCE_DIR}/include) 14 | # include and link dirs of cuda and tensorrt, you need to adapt them if yours are different 15 | # cuda 16 | include_directories(/data/cuda/cuda-10.2/cuda/include) 17 | link_directories(/data/cuda/cuda-10.2/cuda/lib64) 18 | # cudnn 19 | include_directories(/data/cuda/cuda-10.2/cudnn/v8.0.4/include) 20 | link_directories(/data/cuda/cuda-10.2/cudnn/v8.0.4/lib64) 21 | # tensorrt 22 | include_directories(/data/cuda/cuda-10.2/TensorRT/v7.2.1.6/include) 23 | link_directories(/data/cuda/cuda-10.2/TensorRT/v7.2.1.6/lib) 24 | 25 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED") 26 | 27 | find_package(OpenCV) 28 | include_directories(${OpenCV_INCLUDE_DIRS}) 29 | 30 | add_executable(yolox ${PROJECT_SOURCE_DIR}/yolox.cpp) 31 | target_link_libraries(yolox nvinfer) 32 | target_link_libraries(yolox cudart) 33 | target_link_libraries(yolox ${OpenCV_LIBS}) 34 | 35 | add_definitions(-O2 -pthread) 36 | 37 | -------------------------------------------------------------------------------- /demo/TensorRT/cpp/README.md: -------------------------------------------------------------------------------- 1 | # YOLOX-TensorRT in C++ 2 | 3 | As YOLOX models are easy to convert to TensorRT using the [torch2trt](https://github.com/NVIDIA-AI-IOT/torch2trt) repo, 4 | our C++ demo does not include model conversion or engine construction, unlike other TensorRT demos.
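For context, the snippet below is a minimal sketch of what that torch2trt conversion looks like when done by hand; the repository's `tools/trt.py` (described in the Python README further down) wraps these steps for you, and the checkpoint path, `fp16_mode` flag, and workspace size used here are illustrative assumptions rather than the tool's exact settings.

```python
# Hedged sketch: convert a YOLOX model with torch2trt and save the serialized
# engine that this C++ demo consumes. Paths, flags, and sizes are assumptions;
# prefer tools/trt.py for the supported conversion flow.
import torch
from torch2trt import torch2trt

from yolox.exp import get_exp

exp = get_exp(exp_name="yolox-s")           # or get_exp(exp_file="path/to/your_exp.py")
model = exp.get_model().eval().cuda()

ckpt = torch.load("your_ckpt.pth", map_location="cpu")   # assumed checkpoint path
model.load_state_dict(ckpt["model"])
model.head.decode_in_inference = False      # the demo decodes raw outputs itself

x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
model_trt = torch2trt(model, [x], fp16_mode=True, max_workspace_size=1 << 32)

# The C++ demo deserializes this raw engine file at startup.
with open("model_trt.engine", "wb") as f:
    f.write(model_trt.engine.serialize())
```

Either way, the resulting `model_trt.engine` file is what Step 1 below refers to.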
5 | 6 | 7 | ## Step 1: Prepare serialized engine file 8 | 9 | Follow the TensorRT [python demo README](https://github.com/Megvii-BaseDetection/YOLOX/demo/TensorRT/python/README.md) to convert and save the serialized engine file. 10 | 11 | Check the 'model_trt.engine' file generated from Step 1, which will be automatically saved in the current demo dir. 12 | 13 | 14 | ## Step 2: build the demo 15 | 16 | Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) to install TensorRT. 17 | 18 | You should also set the TensorRT path and CUDA path in CMakeLists.txt. 19 | 20 | If you trained on a custom dataset, you may need to modify the value of `num_class`. 21 | 22 | ```c++ 23 | const int num_class = 80; 24 | ``` 25 | 26 | Install OpenCV with ```sudo apt-get install libopencv-dev``` (a higher version of OpenCV such as v3.3+ is not required). 27 | 28 | Build the demo: 29 | 30 | ```shell 31 | mkdir build 32 | cd build 33 | cmake .. 34 | make 35 | ``` 36 | 37 | Then run the demo: 38 | 39 | ```shell 40 | ./yolox ../model_trt.engine -i ../../../../assets/dog.jpg 41 | ``` 42 | 43 | or 44 | 45 | ```shell 46 | ./yolox <path/to/your/engine_file> -i <path/to/image> 47 | ``` 48 | 49 | -------------------------------------------------------------------------------- /demo/TensorRT/python/README.md: -------------------------------------------------------------------------------- 1 | # YOLOX-TensorRT in Python 2 | 3 | This tutorial includes a Python demo for TensorRT. 4 | 5 | ## Install TensorRT Toolkit 6 | 7 | Please follow the [TensorRT Installation Guide](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html) and the [torch2trt repo](https://github.com/NVIDIA-AI-IOT/torch2trt) to install TensorRT and torch2trt. 8 | 9 | ## Convert model 10 | 11 | YOLOX models can be easily converted to TensorRT models using torch2trt. 12 | 13 | If you want to convert our model, use the flag -n to specify a model name: 14 | ```shell 15 | python tools/trt.py -n <MODEL_NAME> -c <CHECKPOINT_FILE> 16 | ``` 17 | For example: 18 | ```shell 19 | python tools/trt.py -n yolox-s -c your_ckpt.pth 20 | ``` 21 | <MODEL_NAME> can be: yolox-nano, yolox-tiny, yolox-s, yolox-m, yolox-l, yolox-x. 22 | 23 | If you want to convert your customized model, use the flag -f to specify your exp file: 24 | ```shell 25 | python tools/trt.py -f <EXP_FILE> -c <CHECKPOINT_FILE> 26 | ``` 27 | For example: 28 | ```shell 29 | python tools/trt.py -f /path/to/your/yolox/exps/yolox_s.py -c your_ckpt.pth 30 | ``` 31 | *yolox_s.py* can be any exp file you have modified. 32 | 33 | The converted model and the serialized engine file (for the C++ demo) will be saved in your experiment output dir. 34 | 35 | ## Demo 36 | 37 | The TensorRT Python demo is merged into our PyTorch demo file, so you can run the PyTorch demo command with ```--trt```. 38 | 39 | ```shell 40 | python tools/demo.py image -n yolox-s --trt --save_result 41 | ``` 42 | or 43 | ```shell 44 | python tools/demo.py image -f exps/default/yolox_s.py --trt --save_result 45 | ``` 46 | 47 | -------------------------------------------------------------------------------- /demo/ncnn/android/README.md: -------------------------------------------------------------------------------- 1 | # YOLOX-Android-ncnn 2 | 3 | Android app of YOLOX object detection based on [ncnn](https://github.com/Tencent/ncnn) 4 | 5 | 6 | ## Tutorial 7 | 8 | ### Step1 9 | 10 | Download ncnn-android-vulkan.zip from [releases of ncnn](https://github.com/Tencent/ncnn/releases).
This repo uses 11 | [20210525 release](https://github.com/Tencent/ncnn/releases/download/20210525/ncnn-20210525-android-vulkan.zip) for building. 12 | 13 | ### Step2 14 | 15 | After downloading, please extract your zip file. Then, there are two ways to finish this step: 16 | * put your extracted directory into **app/src/main/jni** 17 | * change the **ncnn_DIR** path in **app/src/main/jni/CMakeLists.txt** to your extracted directory 18 | 19 | ### Step3 20 | Download example param and bin file from [onedrive](https://megvii-my.sharepoint.cn/:u:/g/personal/gezheng_megvii_com/ESXBH_GSSmFMszWJ6YG2VkQB5cWDfqVWXgk0D996jH0rpQ?e=qzEqUh) or [github](https://github.com/Megvii-BaseDetection/storage/releases/download/0.0.1/yolox_s_ncnn.tar.gz). Unzip the file to **app/src/main/assets**. 21 | 22 | ### Step4 23 | Open this project with Android Studio, build it and enjoy! 24 | 25 | ## Reference 26 | 27 | * [ncnn-android-yolov5](https://github.com/nihui/ncnn-android-yolov5) 28 | -------------------------------------------------------------------------------- /demo/ncnn/android/app/build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: 'com.android.application' 2 | 3 | android { 4 | compileSdkVersion 24 5 | buildToolsVersion "29.0.2" 6 | 7 | defaultConfig { 8 | applicationId "com.megvii.yoloXncnn" 9 | archivesBaseName = "$applicationId" 10 | 11 | ndk { 12 | moduleName "ncnn" 13 | abiFilters "armeabi-v7a", "arm64-v8a" 14 | } 15 | minSdkVersion 24 16 | } 17 | 18 | externalNativeBuild { 19 | cmake { 20 | version "3.10.2" 21 | path file('src/main/jni/CMakeLists.txt') 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /demo/ncnn/android/app/src/main/AndroidManifest.xml: -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /demo/ncnn/android/app/src/main/java/com/megvii/yoloXncnn/yoloXncnn.java: -------------------------------------------------------------------------------- 1 | // Copyright (C) Megvii, Inc. and its affiliates. All rights reserved.
2 | 3 | package com.megvii.yoloXncnn; 4 | 5 | import android.content.res.AssetManager; 6 | import android.graphics.Bitmap; 7 | 8 | public class YOLOXncnn 9 | { 10 | public native boolean Init(AssetManager mgr); 11 | 12 | public class Obj 13 | { 14 | public float x; 15 | public float y; 16 | public float w; 17 | public float h; 18 | public String label; 19 | public float prob; 20 | } 21 | 22 | public native Obj[] Detect(Bitmap bitmap, boolean use_gpu); 23 | 24 | static { 25 | System.loadLibrary("yoloXncnn"); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /demo/ncnn/android/app/src/main/jni/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(yoloXncnn) 2 | 3 | cmake_minimum_required(VERSION 3.4.1) 4 | 5 | set(ncnn_DIR ${CMAKE_SOURCE_DIR}/ncnn-20210525-android-vulkan/${ANDROID_ABI}/lib/cmake/ncnn) 6 | find_package(ncnn REQUIRED) 7 | 8 | add_library(yoloXncnn SHARED yoloXncnn_jni.cpp) 9 | 10 | target_link_libraries(yoloXncnn 11 | ncnn 12 | 13 | jnigraphics 14 | ) 15 | -------------------------------------------------------------------------------- /demo/ncnn/android/app/src/main/res/layout/main.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 11 | 12 |