├── .gitmodules ├── CMakeLists.txt ├── Dockerfile ├── LICENSE ├── README.md ├── benchmark ├── README.md ├── energy.py ├── native_scripting.py ├── run_ablation1.sh ├── run_ablation2.sh ├── run_baseline_a.sh ├── run_baseline_b.sh └── run_spmm_spatha.sh ├── cmake ├── Cuda.cmake └── Dependencies.cmake ├── end2end ├── bert_pytorch.py ├── gpt2_pytorch.py ├── gpt3_pytorch.py ├── grouped_nmv_tensor.py ├── install.sh ├── install_v64.sh ├── native_scripting.py ├── run_inference.sh ├── run_inference_profile.sh ├── setup.py ├── setup_v64.py ├── spatha_mod │ ├── .gitignore │ ├── README.md │ └── block_sparse │ │ ├── CMakeLists.txt │ │ ├── Makefile │ │ ├── api │ │ ├── CMakeLists.txt │ │ └── spatha.cu │ │ ├── benchmark │ │ ├── CMakeLists.txt │ │ ├── Makefile │ │ ├── argparse_util.h │ │ ├── bench.py │ │ ├── benchmark.spmm_nm.cu │ │ ├── get_best.py │ │ └── timing_util.h │ │ ├── cmake │ │ ├── Cuda.cmake │ │ └── Dependencies.cmake │ │ ├── common │ │ ├── base.h │ │ ├── epilogue.h │ │ ├── library_util.h │ │ ├── memcpy.h │ │ ├── mma.h │ │ ├── swizzle.h │ │ └── vector.h │ │ ├── cuda_array.h │ │ ├── cuda_error.h │ │ ├── spmm │ │ ├── CMakeLists.txt │ │ ├── blockwise_format.h │ │ ├── blockwise_kernel.h │ │ ├── blockwise_library.cu │ │ ├── blockwise_library_v64.cu │ │ ├── blockwise_op.h │ │ ├── spmm_library_decl.h │ │ └── spmm_op.h │ │ └── util │ │ ├── CMakeLists.txt │ │ ├── random_mask.cc │ │ └── random_mask.h └── tests.ipynb ├── include └── spatha │ ├── .gitignore │ ├── README.md │ └── block_sparse │ ├── CMakeLists.txt │ ├── benchmark │ ├── CMakeLists.txt │ ├── argparse_util.h │ ├── benchmark.spmm_nm.cu │ └── timing_util.h │ ├── cmake │ ├── Cuda.cmake │ └── Dependencies.cmake │ ├── common │ ├── base.h │ ├── epilogue.h │ ├── library_util.h │ ├── memcpy.h │ ├── mma.h │ ├── swizzle.h │ └── vector.h │ ├── cuda_array.h │ ├── cuda_error.h │ ├── spmm │ ├── CMakeLists.txt │ ├── blockwise_format.h │ ├── blockwise_format_origin.h │ ├── blockwise_kernel.h │ ├── blockwise_library.cu │ ├── blockwise_op.h │ ├── spmm_library_decl.h │ └── spmm_op.h │ └── util │ ├── CMakeLists.txt │ ├── random_mask.cc │ └── random_mask.h ├── plot ├── energy.py ├── run_ablation1.py ├── run_ablation2.py ├── run_baseline_a.py ├── run_baseline_b.py ├── run_inference.py ├── run_inference_profile.py └── run_spmm_spatha.py ├── result ├── ablation1.csv ├── ablation2.csv ├── baseline_a.csv ├── baseline_b.csv ├── inference.csv └── spmm_bert_spatha.csv ├── sparseml ├── .MAINTAINERS ├── .github │ ├── .gitkeep │ ├── ISSUE_TEMPLATE │ │ ├── bug_report.md │ │ ├── doc-edit.md │ │ └── feature_request.md │ └── workflows │ │ ├── Integrations-post-merge-check.yaml │ │ ├── integrations-check.yaml │ │ ├── quality-check.yaml │ │ └── test-check.yaml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── DEVELOPING.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── NOTICE ├── README.md ├── docker │ ├── Dockerfile │ └── README.md ├── docs │ ├── _static │ │ └── css │ │ │ └── nm-theme-adjustment.css │ ├── _templates │ │ └── versions.html │ ├── api │ │ ├── modules.rst │ │ ├── sparseml.benchmark.rst │ │ ├── sparseml.deepsparse.framework.rst │ │ ├── sparseml.deepsparse.rst │ │ ├── sparseml.deepsparse.sparsification.rst │ │ ├── sparseml.framework.rst │ │ ├── sparseml.keras.datasets.classification.rst │ │ ├── sparseml.keras.datasets.rst │ │ ├── sparseml.keras.framework.rst │ │ ├── sparseml.keras.models.classification.rst │ │ ├── sparseml.keras.models.external.rst │ │ ├── sparseml.keras.models.rst │ │ ├── sparseml.keras.optim.rst │ │ ├── sparseml.keras.rst │ │ ├── 
sparseml.keras.sparsification.rst │ │ ├── sparseml.keras.utils.rst │ │ ├── sparseml.onnx.benchmark.rst │ │ ├── sparseml.onnx.framework.rst │ │ ├── sparseml.onnx.optim.quantization.rst │ │ ├── sparseml.onnx.optim.rst │ │ ├── sparseml.onnx.rst │ │ ├── sparseml.onnx.sparsification.rst │ │ ├── sparseml.onnx.utils.rst │ │ ├── sparseml.optim.rst │ │ ├── sparseml.pytorch.datasets.classification.rst │ │ ├── sparseml.pytorch.datasets.detection.rst │ │ ├── sparseml.pytorch.datasets.recommendation.rst │ │ ├── sparseml.pytorch.datasets.rst │ │ ├── sparseml.pytorch.datasets.video.rst │ │ ├── sparseml.pytorch.framework.rst │ │ ├── sparseml.pytorch.models.classification.rst │ │ ├── sparseml.pytorch.models.detection.rst │ │ ├── sparseml.pytorch.models.external.rst │ │ ├── sparseml.pytorch.models.recommendation.rst │ │ ├── sparseml.pytorch.models.rst │ │ ├── sparseml.pytorch.nn.rst │ │ ├── sparseml.pytorch.optim.rst │ │ ├── sparseml.pytorch.rst │ │ ├── sparseml.pytorch.sparsification.rst │ │ ├── sparseml.pytorch.utils.quantization.rst │ │ ├── sparseml.pytorch.utils.rst │ │ ├── sparseml.rst │ │ ├── sparseml.sparsification.rst │ │ ├── sparseml.tensorflow_v1.datasets.classification.rst │ │ ├── sparseml.tensorflow_v1.datasets.rst │ │ ├── sparseml.tensorflow_v1.framework.rst │ │ ├── sparseml.tensorflow_v1.models.classification.rst │ │ ├── sparseml.tensorflow_v1.models.rst │ │ ├── sparseml.tensorflow_v1.nn.rst │ │ ├── sparseml.tensorflow_v1.optim.rst │ │ ├── sparseml.tensorflow_v1.rst │ │ ├── sparseml.tensorflow_v1.sparsification.rst │ │ ├── sparseml.tensorflow_v1.utils.rst │ │ ├── sparseml.utils.datasets.rst │ │ └── sparseml.utils.rst │ ├── conf.py │ ├── create_recipe_template.mdx │ ├── favicon.ico │ ├── index.rst │ └── source │ │ ├── code.md │ │ ├── icon-sparseml.png │ │ ├── installation.md │ │ ├── onnx_export.md │ │ └── recipes.md ├── integrations │ ├── README.md │ ├── dbolya-yolact │ │ ├── README.md │ │ ├── recipes │ │ │ ├── yolact.pruned.md │ │ │ ├── yolact.pruned_quant.md │ │ │ └── yolact.quantized.md │ │ └── tutorials │ │ │ └── sparsifying_yolact_using_recipes.md │ ├── huggingface-transformers │ │ ├── README.md │ │ ├── recipes │ │ │ ├── 30epochs_8block75_squad.yaml │ │ │ ├── 30epochs_8block875_squad.yaml │ │ │ ├── bert-base-12layers_prune80.md │ │ │ ├── bert-base-12layers_prune90.md │ │ │ ├── bert-base-12layers_prune95.md │ │ │ ├── bert-base-12layers_prune_quant70.md │ │ │ ├── bert-base-12layers_prune_quant90.md │ │ │ ├── bert-base-3layers_prune70.md │ │ │ ├── bert-base-3layers_prune80.md │ │ │ ├── bert-base-3layers_prune90.md │ │ │ ├── bert-base-3layers_prune95.md │ │ │ ├── bert-base-6layers_prune70.md │ │ │ ├── bert-base-6layers_prune80.md │ │ │ ├── bert-base-6layers_prune90.md │ │ │ ├── bert-base-6layers_prune95.md │ │ │ ├── bert-base-6layers_prune_quant70.md │ │ │ ├── bert-base-6layers_prune_quant90.md │ │ │ ├── oBERT_216_gradual.yaml │ │ │ ├── oBERT_216v128_gradual.yaml │ │ │ ├── oBERT_216v64_gradual.yaml │ │ │ ├── oBERT_28_gradual.yaml │ │ │ ├── oBERT_28v128_gradual.yaml │ │ │ ├── oBERT_28v64_gradual.yaml │ │ │ ├── obs_216_gradual.yaml │ │ │ ├── obs_216_gradual_pair.yaml │ │ │ ├── obs_216v128_gradual_pair.yaml │ │ │ ├── obs_216v64_gradual_pair.yaml │ │ │ ├── obs_28_gradual.yaml │ │ │ ├── obs_28_gradual_pair.yaml │ │ │ ├── obs_28v128_gradual_pair.yaml │ │ │ ├── obs_28v64_gradual_pair.yaml │ │ │ ├── oneshot_oBERT_216.yaml │ │ │ ├── oneshot_oBERT_216v128.yaml │ │ │ ├── oneshot_oBERT_216v64.yaml │ │ │ ├── oneshot_oBERT_28.yaml │ │ │ ├── oneshot_oBERT_28v128.yaml │ │ │ └── oneshot_oBERT_28v64.yaml │ │ 
├── scripts │ │ │ ├── 30epochs_gradual_pruning_squad_block8.sh │ │ │ ├── 30epochs_gradual_pruning_squad_block8_875.sh │ │ │ ├── oBERT216_squad.sh │ │ │ ├── oBERT216_squad_gradual.sh │ │ │ ├── oBERT216v128_squad_gradual.sh │ │ │ ├── oBERT216v64_squad_gradual.sh │ │ │ ├── oBERT28_squad.sh │ │ │ ├── oBERT28_squad_gradual.sh │ │ │ ├── oBERT28v128_squad_gradual.sh │ │ │ ├── oBERT28v64_squad_gradual.sh │ │ │ ├── oBERTnm16v128_squad.sh │ │ │ ├── oBERTnm16v64_squad.sh │ │ │ ├── oBERTnm8v128_squad.sh │ │ │ ├── oBERTnm8v64_squad.sh │ │ │ ├── obs216_squad_gradual_pair.sh │ │ │ ├── obs216v128_squad_gradual_pair.sh │ │ │ ├── obs216v64_squad_gradual_pair.sh │ │ │ ├── obs28_squad_gradual_pair.sh │ │ │ ├── obs28v128_squad_gradual_pair.sh │ │ │ └── obs28v64_squad_gradual_pair.sh │ │ └── tutorials │ │ │ ├── Sparse_Transfer_HF_Dense_Model.ipynb │ │ │ ├── bert_sparse_transfer_learning.md │ │ │ ├── images │ │ │ ├── bert_12_6_layers_EM.png │ │ │ └── bert_12_6_layers_F1.png │ │ │ └── sparsifying_bert_using_recipes.md │ ├── keras │ │ ├── README.md │ │ ├── classification.py │ │ ├── notebooks │ │ │ └── classification.ipynb │ │ ├── prune_resnet20.py │ │ ├── recipes │ │ │ └── .gitkeep │ │ └── tutorials │ │ │ └── .gitkeep │ ├── pytorch │ │ ├── README.md │ │ ├── argparser_ │ │ │ ├── nm_argparser_.py │ │ │ └── test_nmargparser.py │ │ ├── export.py │ │ ├── lr_analysis.py │ │ ├── notebooks │ │ │ ├── classification.ipynb │ │ │ ├── detection.ipynb │ │ │ ├── sparse_quantized_transfer_learning.ipynb │ │ │ └── torchvision.ipynb │ │ ├── pr_sensitivity.py │ │ ├── recipes │ │ │ ├── .gitkeep │ │ │ ├── classification.transfer_learn_pruned.md │ │ │ ├── classification.transfer_learn_pruned_quantized.md │ │ │ ├── mobilenet-pruned.md │ │ │ ├── mobilenet-pruned_quant.md │ │ │ ├── resnet50-imagenette-pruned.md │ │ │ ├── resnet50-pruned.md │ │ │ ├── resnet50-pruned_quant-transfer_learn.md │ │ │ └── resnet50-pruned_quant.md │ │ ├── torchvision_sparsification.py │ │ ├── train.py │ │ ├── tutorials │ │ │ ├── .gitkeep │ │ │ ├── classification_sparse_transfer_learning_tutorial.md │ │ │ └── sparsifying_pytorch_models_using_recipes.md │ │ ├── utils.py │ │ └── vision.py │ ├── rwightman-timm │ │ ├── README.md │ │ ├── recipes │ │ │ └── vit_base.85.recal.config.yaml │ │ └── setup_integration.sh │ ├── tensorflow_v1 │ │ ├── README.md │ │ ├── classification.py │ │ ├── notebooks │ │ │ └── classification.ipynb │ │ ├── recipes │ │ │ └── .gitkeep │ │ └── tutorials │ │ │ └── .gitkeep │ ├── ultralytics-yolov3 │ │ ├── README.md │ │ ├── data │ │ │ ├── hyp.pruned.yaml │ │ │ └── hyp.pruned_quantized.yaml │ │ ├── models │ │ │ └── yolov3-spp.lrelu.yaml │ │ ├── recipes │ │ │ ├── yolov3-spp.pruned.md │ │ │ ├── yolov3-spp.pruned.short.md │ │ │ ├── yolov3-spp.pruned_quantized.md │ │ │ ├── yolov3-spp.pruned_quantized.short.md │ │ │ ├── yolov3-spp.test.md │ │ │ ├── yolov3-spp.transfer_learn_pruned.md │ │ │ └── yolov3-spp.transfer_learn_pruned_quantized.md │ │ ├── setup_integration.sh │ │ └── tutorials │ │ │ ├── images │ │ │ ├── pruned-quantized-result.jpeg │ │ │ ├── pruned-quantized-wandb-chart.png │ │ │ └── transfer-learning-wandb-chart.png │ │ │ ├── sparsifying_yolov3_using_recipes.md │ │ │ └── yolov3_sparse_transfer_learning.md │ └── ultralytics-yolov5 │ │ ├── README.md │ │ ├── recipes │ │ ├── yolov5.transfer_learn_pruned.md │ │ ├── yolov5.transfer_learn_pruned_quantized.md │ │ ├── yolov5l.pruned.md │ │ ├── yolov5l.pruned_quantized.md │ │ ├── yolov5s.pruned.md │ │ └── yolov5s.pruned_quantized.md │ │ └── tutorials │ │ ├── images │ │ └── 
transfer-learning-wandb-chart.png │ │ ├── sparsifying_yolov5_using_recipes.md │ │ └── yolov5_sparse_transfer_learning.md ├── pyproject.toml ├── research │ ├── README.md │ ├── information_retrieval │ │ ├── DPR │ │ │ ├── README.md │ │ │ ├── conf │ │ │ │ ├── README.md │ │ │ │ ├── biencoder_train_cfg.yaml │ │ │ │ ├── ctx_sources │ │ │ │ │ └── default_sources.yaml │ │ │ │ ├── datasets │ │ │ │ │ ├── encoder_train_default.yaml │ │ │ │ │ └── retriever_default.yaml │ │ │ │ ├── dense_retriever.yaml │ │ │ │ ├── encoder │ │ │ │ │ └── hf_bert.yaml │ │ │ │ ├── extractive_reader_train_cfg.yaml │ │ │ │ ├── gen_embs.yaml │ │ │ │ └── train │ │ │ │ │ ├── biencoder_default.yaml │ │ │ │ │ ├── biencoder_local.yaml │ │ │ │ │ ├── biencoder_nq.yaml │ │ │ │ │ └── extractive_reader_default.yaml │ │ │ ├── dense_retriever.py │ │ │ ├── download_data.py │ │ │ ├── dpr │ │ │ │ ├── __init__.py │ │ │ │ ├── data │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── biencoder_data.py │ │ │ │ │ ├── download_data.py │ │ │ │ │ ├── qa_validation.py │ │ │ │ │ ├── reader_data.py │ │ │ │ │ ├── retriever_data.py │ │ │ │ │ └── tables.py │ │ │ │ ├── indexer │ │ │ │ │ └── faiss_indexers.py │ │ │ │ ├── models │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── biencoder.py │ │ │ │ │ ├── fairseq_models.py │ │ │ │ │ ├── hf_models.py │ │ │ │ │ ├── pytext_models.py │ │ │ │ │ └── reader.py │ │ │ │ ├── options.py │ │ │ │ └── utils │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── conf_utils.py │ │ │ │ │ ├── data_utils.py │ │ │ │ │ ├── dist_utils.py │ │ │ │ │ ├── model_utils.py │ │ │ │ │ └── tokenizers.py │ │ │ ├── generate_dense_embeddings.py │ │ │ ├── model_config.yml │ │ │ ├── ms_marco_eval.py │ │ │ ├── requirements.txt │ │ │ ├── train_config.yml │ │ │ └── train_dense_encoder.py │ │ ├── README.md │ │ ├── doc2query │ │ │ ├── README.md │ │ │ ├── indexes │ │ │ │ └── init.txt │ │ │ ├── outputs │ │ │ │ ├── bm25_baseline.txt │ │ │ │ └── init.txt │ │ │ ├── recipes │ │ │ │ ├── 90sparse.yaml │ │ │ │ └── noprune.yaml │ │ │ ├── requirements.txt │ │ │ ├── sparseml_utils.py │ │ │ └── src │ │ │ │ ├── augment_collection.py │ │ │ │ ├── convert_doc_collection_to_jsonl.py │ │ │ │ ├── distill_doc2query.py │ │ │ │ ├── make_doc2query_data.py │ │ │ │ ├── msmarco_passage_eval.py │ │ │ │ ├── run_doc2query.py │ │ │ │ └── sparseml_utils.py │ │ └── elastic_integration │ │ │ ├── README.md │ │ │ ├── chunker.py │ │ │ ├── dense_document.py │ │ │ ├── dense_ranking.py │ │ │ ├── requirements.txt │ │ │ └── run_ranker.py │ ├── mfac │ │ ├── README.md │ │ ├── recipes │ │ │ ├── pruning-mnistnet-one_shot-magnitude.md │ │ │ ├── pruning-mnistnet-one_shot-mfac.md │ │ │ ├── pruning-mobilenet-imagenette-magnitude-short-95.md │ │ │ └── pruning-mobilenet-imagenette-mfac-short-95.md │ │ └── tutorials │ │ │ ├── gradual_pruning_with_mfac.md │ │ │ └── one_shot_pruning_with_mfac.md │ ├── optimal_BERT_surgeon_oBERT │ │ ├── README.md │ │ ├── recipes │ │ │ ├── 30epochs_4block80_squad.yaml │ │ │ ├── 30epochs_4block90_squad.yaml │ │ │ ├── 30epochs_dense_squad.yaml │ │ │ ├── 30epochs_init30_4block80_squad.yaml │ │ │ ├── 30epochs_init30_4block90_squad.yaml │ │ │ ├── 30epochs_init30_unstructured80_squad.yaml │ │ │ ├── 30epochs_init30_unstructured90_squad.yaml │ │ │ ├── 30epochs_unstructured80_mnli.yaml │ │ │ ├── 30epochs_unstructured80_qqp.yaml │ │ │ ├── 30epochs_unstructured80_squad.yaml │ │ │ ├── 30epochs_unstructured90_mnli.yaml │ │ │ ├── 30epochs_unstructured90_qqp.yaml │ │ │ ├── 30epochs_unstructured90_squad.yaml │ │ │ ├── 30epochs_unstructured97_mnli.yaml │ │ │ ├── 30epochs_unstructured97_qqp.yaml │ │ │ ├── 
30epochs_unstructured97_squad.yaml │ │ │ ├── 3epochs_unstructured90_mlm.yaml │ │ │ ├── 3epochs_unstructured97_mlm.yaml │ │ │ ├── 8epochs_sparse_transfer_mnli.yaml │ │ │ ├── 8epochs_sparse_transfer_qqp.yaml │ │ │ └── 8epochs_sparse_transfer_squad.yaml │ │ ├── scripts │ │ │ ├── 30epochs_gradual_pruning_mnli_qqp.sh │ │ │ ├── 30epochs_gradual_pruning_squad.sh │ │ │ ├── 3epochs_gradual_pruning_mlm.sh │ │ │ ├── 8epochs_sparse_transfer_mnli_qqp.sh │ │ │ └── 8epochs_sparse_transfer_squad.sh │ │ └── tutorials │ │ │ └── oBERT_demo.ipynb │ └── optimal_lobotomizing │ │ ├── README.md │ │ ├── data │ │ └── init.txt │ │ ├── scripts │ │ └── init.sh │ │ └── src │ │ └── init.py ├── setup.cfg ├── setup.py ├── sparseml.yml ├── sparseml_SS1.sh ├── sparseml_SS2.sh ├── sparseml_SS3.sh ├── src │ ├── README.md │ ├── __init__.py │ └── sparseml │ │ ├── __init__.py │ │ ├── base.py │ │ ├── benchmark │ │ ├── __init__.py │ │ ├── info.py │ │ └── serialization.py │ │ ├── deepsparse │ │ ├── __init__.py │ │ ├── base.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── info.py │ │ └── sparsification │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── exporters │ │ ├── __init__.py │ │ ├── base_exporter.py │ │ ├── onnx_to_deepsparse.py │ │ └── transforms │ │ │ ├── __init__.py │ │ │ ├── base_transform.py │ │ │ ├── constants_to_initializers.py │ │ │ ├── conv_to_convinteger_add_cast_mul.py │ │ │ ├── conv_to_qlinearconv.py │ │ │ ├── delete_repeated_qdq.py │ │ │ ├── delete_trivial_onnx_adds.py │ │ │ ├── flatten_qparams.py │ │ │ ├── fold_conv_div_bn.py │ │ │ ├── fold_identity_initializers.py │ │ │ ├── fold_relu_quants.py │ │ │ ├── gemm_to_matmulinteger_add_cast_mul.py │ │ │ ├── gemm_to_qlinearmatmul.py │ │ │ ├── initializers_to_uint8.py │ │ │ ├── matmul_to_matmulinteger_add_cast_mul.py │ │ │ ├── matmul_to_qlinearmatmul.py │ │ │ ├── onnx_transform.py │ │ │ ├── propagate_embedding_quantization.py │ │ │ ├── quantize_qat_embedding.py │ │ │ ├── quantize_residuals.py │ │ │ ├── remove_duplicate_qconv_weights.py │ │ │ ├── remove_duplicate_quantize_ops.py │ │ │ ├── skip_input_quantize.py │ │ │ ├── unwrap_batchnorms.py │ │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── add_quantized_conv_matmul_add_ops.py │ │ │ ├── helpers.py │ │ │ └── matching.py │ │ ├── framework │ │ ├── __init__.py │ │ └── info.py │ │ ├── keras │ │ ├── __init__.py │ │ ├── base.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── imagefolder.py │ │ │ │ ├── imagenet.py │ │ │ │ └── imagenette.py │ │ │ ├── dataset.py │ │ │ ├── helpers.py │ │ │ └── registry.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ └── resnet.py │ │ │ ├── external │ │ │ │ ├── __init__.py │ │ │ │ └── keras_applications.py │ │ │ └── registry.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── manager.py │ │ │ ├── mask_pruning.py │ │ │ ├── mask_pruning_creator.py │ │ │ ├── modifier.py │ │ │ ├── modifier_epoch.py │ │ │ ├── modifier_lr.py │ │ │ ├── modifier_params.py │ │ │ ├── modifier_pruning.py │ │ │ └── utils.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ └── info.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── callbacks.py │ │ │ ├── compat.py │ │ │ ├── exporter.py │ │ │ ├── logger.py │ │ │ └── model.py │ │ ├── log.py │ │ ├── onnx │ │ ├── __init__.py │ │ ├── base.py │ │ ├── benchmark │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── analyzer_model.py │ │ │ ├── 
quantization │ │ │ │ ├── __init__.py │ │ │ │ ├── calibration.py │ │ │ │ ├── quantize.py │ │ │ │ └── quantize_model_post_training.py │ │ │ ├── sensitivity_pruning.py │ │ │ └── structured_pruning.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ ├── analyzer.py │ │ │ ├── info.py │ │ │ └── model_info.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── graph_editor.py │ │ │ ├── graph_optimizer.py │ │ │ ├── helpers.py │ │ │ ├── loss.py │ │ │ ├── model.py │ │ │ └── sparse_tensor.py │ │ ├── openpifpaf │ │ ├── README.md │ │ ├── __init__.py │ │ ├── export.py │ │ ├── train.py │ │ └── trainer.py │ │ ├── optim │ │ ├── __init__.py │ │ ├── analyzer.py │ │ ├── helpers.py │ │ ├── manager.py │ │ ├── modifier.py │ │ └── sensitivity.py │ │ ├── pytorch │ │ ├── __init__.py │ │ ├── base.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── cifar.py │ │ │ │ ├── imagefolder.py │ │ │ │ ├── imagenet.py │ │ │ │ ├── imagenette.py │ │ │ │ └── mnist.py │ │ │ ├── detection │ │ │ │ ├── __init__.py │ │ │ │ ├── coco.py │ │ │ │ ├── helpers.py │ │ │ │ └── voc.py │ │ │ ├── generic.py │ │ │ ├── image_classification │ │ │ │ ├── __init__.py │ │ │ │ ├── ffcv.md │ │ │ │ └── ffcv_dataset.py │ │ │ ├── recommendation │ │ │ │ └── __init__.py │ │ │ ├── registry.py │ │ │ └── video │ │ │ │ └── __init__.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── image_classification │ │ │ ├── README_image_classification.md │ │ │ ├── __init__.py │ │ │ ├── export.py │ │ │ ├── lr_analysis.py │ │ │ ├── pr_sensitivity.py │ │ │ ├── train.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── cli_helpers.py │ │ │ │ ├── constants.py │ │ │ │ ├── helpers.py │ │ │ │ └── trainer.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── darknet.py │ │ │ │ ├── efficientnet.py │ │ │ │ ├── inception_v3.py │ │ │ │ ├── mnist.py │ │ │ │ ├── mobilenet.py │ │ │ │ ├── mobilenet_v2.py │ │ │ │ ├── resnet.py │ │ │ │ └── vgg.py │ │ │ ├── detection │ │ │ │ ├── __init__.py │ │ │ │ ├── ssd.py │ │ │ │ ├── ssd_lite.py │ │ │ │ ├── ssd_mobilenet.py │ │ │ │ ├── ssd_resnet.py │ │ │ │ └── yolo_v3.py │ │ │ ├── external │ │ │ │ ├── __init__.py │ │ │ │ └── torchvision.py │ │ │ ├── recommendation │ │ │ │ └── __init__.py │ │ │ └── registry.py │ │ ├── nn │ │ │ ├── __init__.py │ │ │ ├── activations.py │ │ │ ├── fatrelu.py │ │ │ ├── identity.py │ │ │ └── se.py │ │ ├── opset.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── analyzer_as.py │ │ │ ├── analyzer_module.py │ │ │ ├── analyzer_pruning.py │ │ │ ├── manager.py │ │ │ ├── mask_creator_pruning.py │ │ │ ├── mask_pruning.py │ │ │ ├── mask_pruning_scorer.py │ │ │ ├── optimizer.py │ │ │ ├── sensitivity_as.py │ │ │ ├── sensitivity_lr.py │ │ │ └── sensitivity_pruning.py │ │ ├── recipe_template │ │ │ ├── __init__.py │ │ │ ├── cli.py │ │ │ ├── description.py │ │ │ └── main.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ ├── distillation │ │ │ │ ├── __init__.py │ │ │ │ ├── modifier_distillation.py │ │ │ │ └── modifier_distillation_base.py │ │ │ ├── info.py │ │ │ ├── modifier.py │ │ │ ├── modifier_thinning.py │ │ │ ├── pruning │ │ │ │ ├── __init__.py │ │ │ │ ├── mask_creator.py │ │ │ │ ├── mask_params.py │ │ │ │ ├── modifier_as.py │ │ │ │ ├── modifier_pruning_216_pairwise_obs.py │ │ │ │ ├── modifier_pruning_216_pairwise_obs_v128.py │ │ │ │ ├── modifier_pruning_216_pairwise_obs_v64.py │ │ │ │ ├── modifier_pruning_28_pairwise_obs.py │ │ │ │ ├── modifier_pruning_28_pairwise_obs_v128.py │ │ │ │ ├── 
modifier_pruning_28_pairwise_obs_v64.py │ │ │ │ ├── modifier_pruning_acdc.py │ │ │ │ ├── modifier_pruning_base.py │ │ │ │ ├── modifier_pruning_constant.py │ │ │ │ ├── modifier_pruning_layer.py │ │ │ │ ├── modifier_pruning_magnitude.py │ │ │ │ ├── modifier_pruning_mfac.py │ │ │ │ ├── modifier_pruning_movement.py │ │ │ │ ├── modifier_pruning_nm_pairwise_obs_v_MP.py │ │ │ │ ├── modifier_pruning_obs.py │ │ │ │ ├── modifier_pruning_obs_216.py │ │ │ │ ├── modifier_pruning_obs_216_gradual.py │ │ │ │ ├── modifier_pruning_obs_216v128.py │ │ │ │ ├── modifier_pruning_obs_216v128_gradual.py │ │ │ │ ├── modifier_pruning_obs_216v64.py │ │ │ │ ├── modifier_pruning_obs_216v64_gradual.py │ │ │ │ ├── modifier_pruning_obs_28.py │ │ │ │ ├── modifier_pruning_obs_28_gradual.py │ │ │ │ ├── modifier_pruning_obs_28v128.py │ │ │ │ ├── modifier_pruning_obs_28v128_gradual.py │ │ │ │ ├── modifier_pruning_obs_28v64.py │ │ │ │ ├── modifier_pruning_obs_28v64_gradual.py │ │ │ │ ├── modifier_pruning_structured.py │ │ │ │ └── scorer.py │ │ │ ├── quantization │ │ │ │ ├── __init__.py │ │ │ │ ├── constants.py │ │ │ │ ├── helpers.py │ │ │ │ ├── legacy_modifier_quantization.py │ │ │ │ ├── modifier_quantization.py │ │ │ │ ├── quantization_scheme.py │ │ │ │ ├── quantize.py │ │ │ │ └── quantize_qat_export.py │ │ │ └── training │ │ │ │ ├── __init__.py │ │ │ │ ├── modifier_epoch.py │ │ │ │ ├── modifier_lr.py │ │ │ │ ├── modifier_params.py │ │ │ │ └── modifier_regularizer.py │ │ ├── torch_to_onnx_exporter.py │ │ ├── torchvision │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── export_onnx.py │ │ │ ├── presets.py │ │ │ ├── sampler.py │ │ │ ├── train.py │ │ │ ├── transforms.py │ │ │ └── utils.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── benchmarker.py │ │ │ ├── callbacks.py │ │ │ ├── distributed.py │ │ │ ├── exporter.py │ │ │ ├── helpers.py │ │ │ ├── logger.py │ │ │ ├── loss.py │ │ │ ├── model.py │ │ │ ├── module.py │ │ │ ├── sparsification.py │ │ │ ├── ssd_helpers.py │ │ │ └── yolo_helpers.py │ │ ├── recipe_template │ │ ├── __init__.py │ │ └── utils.py │ │ ├── sparsification │ │ ├── __init__.py │ │ ├── analyzer.py │ │ ├── info.py │ │ ├── model_info.py │ │ ├── modifier_epoch.py │ │ ├── modifier_lr.py │ │ ├── modifier_params.py │ │ ├── modifier_pruning.py │ │ ├── oracle.py │ │ ├── recipe_builder.py │ │ ├── recipe_editor.py │ │ └── types.py │ │ ├── tensorflow_v1 │ │ ├── __init__.py │ │ ├── base.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── cifar.py │ │ │ │ ├── imagefolder.py │ │ │ │ ├── imagenet.py │ │ │ │ └── imagenette.py │ │ │ ├── dataset.py │ │ │ ├── helpers.py │ │ │ └── registry.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── info.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── mnist.py │ │ │ │ ├── mobilenet.py │ │ │ │ ├── mobilenet_v2.py │ │ │ │ ├── resnet.py │ │ │ │ └── vgg.py │ │ │ ├── estimator.py │ │ │ └── registry.py │ │ ├── nn │ │ │ ├── __init__.py │ │ │ └── layers.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── analyzer_module.py │ │ │ ├── manager.py │ │ │ ├── mask_creator_pruning.py │ │ │ ├── mask_pruning.py │ │ │ ├── modifier.py │ │ │ ├── modifier_epoch.py │ │ │ ├── modifier_lr.py │ │ │ ├── modifier_params.py │ │ │ ├── modifier_pruning.py │ │ │ ├── schedule_lr.py │ │ │ └── sensitivity_pruning.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ └── info.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── exporter.py │ │ │ ├── helpers.py │ │ │ ├── loss.py │ │ │ ├── nets_utils.py │ │ │ ├── summary.py │ │ │ 
└── variable.py │ │ ├── transformers │ │ ├── __init__.py │ │ ├── docs │ │ │ ├── QUESTION_ANSWERING.md │ │ │ ├── TEXT_CLASSIFICATION.md │ │ │ └── TOKEN_CLASSIFICATION.md │ │ ├── export.py │ │ ├── masked_language_modeling.py │ │ ├── question_answering.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ ├── question_answering.py │ │ │ ├── trainer.py │ │ │ └── training_args.py │ │ ├── text_classification.py │ │ ├── token_classification.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── helpers.py │ │ │ ├── metrics.py │ │ │ └── model.py │ │ ├── utils │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── cifar.py │ │ │ ├── coco.py │ │ │ ├── helpers.py │ │ │ ├── imagenet.py │ │ │ ├── imagenette.py │ │ │ └── voc.py │ │ ├── frameworks.py │ │ ├── helpers.py │ │ ├── restricted_eval.py │ │ ├── singleton.py │ │ ├── worker.py │ │ └── wrapper.py │ │ ├── version.py │ │ ├── yolact │ │ ├── COCO.sh │ │ ├── COCO_test.sh │ │ ├── __init__.py │ │ └── scripts.py │ │ └── yolov5 │ │ ├── __init__.py │ │ ├── data │ │ ├── Argoverse.yaml │ │ ├── GlobalWheat2020.yaml │ │ ├── Objects365.yaml │ │ ├── SKU-110K.yaml │ │ ├── VOC.yaml │ │ ├── VisDrone.yaml │ │ ├── coco.yaml │ │ ├── coco128.yaml │ │ ├── hyps │ │ │ ├── hyp.Objects365.yaml │ │ │ ├── hyp.VOC.yaml │ │ │ ├── hyp.finetune.yaml │ │ │ ├── hyp.scratch-high.yaml │ │ │ ├── hyp.scratch-low.yaml │ │ │ ├── hyp.scratch-med.yaml │ │ │ └── hyp.scratch.yaml │ │ ├── images │ │ │ ├── bus.jpg │ │ │ └── zidane.jpg │ │ ├── scripts │ │ │ ├── download_weights.sh │ │ │ ├── get_coco.sh │ │ │ └── get_coco128.sh │ │ └── xView.yaml │ │ ├── helpers.py │ │ ├── models │ │ ├── hub │ │ │ ├── anchors.yaml │ │ │ ├── yolov3-spp.yaml │ │ │ ├── yolov3-tiny.yaml │ │ │ ├── yolov3.yaml │ │ │ ├── yolov5-bifpn.yaml │ │ │ ├── yolov5-fpn.yaml │ │ │ ├── yolov5-p2.yaml │ │ │ ├── yolov5-p34.yaml │ │ │ ├── yolov5-p6.yaml │ │ │ ├── yolov5-p7.yaml │ │ │ ├── yolov5-panet.yaml │ │ │ ├── yolov5l6.yaml │ │ │ ├── yolov5m6.yaml │ │ │ ├── yolov5n6.yaml │ │ │ ├── yolov5s-ghost.yaml │ │ │ ├── yolov5s-transformer.yaml │ │ │ ├── yolov5s6.yaml │ │ │ └── yolov5x6.yaml │ │ ├── yolov5l.yaml │ │ ├── yolov5m.yaml │ │ ├── yolov5n.yaml │ │ ├── yolov5s.yaml │ │ └── yolov5x.yaml │ │ ├── models_v5.0 │ │ ├── yolov5l.yaml │ │ ├── yolov5m.yaml │ │ ├── yolov5s.yaml │ │ └── yolov5x.yaml │ │ └── scripts.py ├── tests │ ├── __init__.py │ ├── integrations │ │ ├── README.md │ │ ├── __init__.py │ │ ├── base_tester.py │ │ ├── config.py │ │ ├── helpers.py │ │ ├── image_classification │ │ │ ├── __init__.py │ │ │ ├── args.py │ │ │ ├── configs │ │ │ │ ├── commit │ │ │ │ │ ├── recipe_short_prune_quant.md │ │ │ │ │ └── test_ended_to_end_pruned_quantized_mobilenet_imagenette.yaml │ │ │ │ └── pre-commit │ │ │ │ │ ├── recipe_test_cli.md │ │ │ │ │ └── test_cli.yaml │ │ │ └── test_image_classification.py │ │ ├── transformers │ │ │ ├── __init__.py │ │ │ ├── args.py │ │ │ ├── configs │ │ │ │ ├── commit │ │ │ │ │ ├── recipe_short_prune_quant_distil.md │ │ │ │ │ └── test_end_to_end_quantized_question_answering.yaml │ │ │ │ └── pre-commit │ │ │ │ │ ├── test_cli_question_answering.yaml │ │ │ │ │ └── test_cli_text_classification.yaml │ │ │ └── test_transformers.py │ │ └── yolov5 │ │ │ ├── __init__.py │ │ │ ├── args.py │ │ │ ├── configs │ │ │ ├── commit │ │ │ │ ├── recipe_short_prune_quant.md │ │ │ │ └── test_ended_to_end_pruned_quantized.yaml │ │ │ └── pre-commit │ │ │ │ └── test_cli.yaml │ │ │ └── test_yolov5.py │ └── sparseml │ │ ├── __init__.py │ │ ├── benchmark │ │ ├── __init__.py │ │ └── test_serialization.py │ │ ├── conftest.py │ │ ├── 
deepsparse │ │ ├── __init__.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── test_base.py │ │ └── test_imports.py │ │ ├── exporters │ │ ├── __init__.py │ │ └── transforms │ │ │ ├── __init__.py │ │ │ ├── test_constants_to_initializers.py │ │ │ ├── test_conv_to_convinteger_add_cast_mul.py │ │ │ ├── test_conv_to_qlinearconv.py │ │ │ ├── test_delete_repeated_qdq.py │ │ │ ├── test_delete_trivial_onnx_adds.py │ │ │ ├── test_flatten_qparams.py │ │ │ ├── test_fold_conv_div_bn.py │ │ │ ├── test_fold_identity_initializer.py │ │ │ ├── test_fold_relu_quants.py │ │ │ ├── test_gemm_to_matmulinteger_add_cast_mul.py │ │ │ ├── test_gemm_to_qlinearmatmul.py │ │ │ ├── test_initializers_to_uint8.py │ │ │ ├── test_matmul_to_matmulinteger_add_cast_mul.py │ │ │ ├── test_matmul_to_qlinearmatmul.py │ │ │ ├── test_onnx_transform.py │ │ │ ├── test_propagate_embedding_quantization.py │ │ │ ├── test_quantize_qat_embedding.py │ │ │ ├── test_quantize_residuals.py │ │ │ ├── test_remove_duplicate_qconv_weights.py │ │ │ ├── test_remove_duplicate_quantize_ops.py │ │ │ ├── test_skip_input_quantize.py │ │ │ ├── test_unwrap_batchnorms.py │ │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── test_matching.py │ │ ├── framework │ │ ├── __init__.py │ │ └── test_info.py │ │ ├── keras │ │ ├── __init__.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── mock.py │ │ │ ├── test_mask_pruning.py │ │ │ ├── test_modifier.py │ │ │ ├── test_modifier_epoch.py │ │ │ └── test_modifier_pruning.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── test_base.py │ │ └── test_imports.py │ │ ├── onnx │ │ ├── __init__.py │ │ ├── benchmark │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── helpers.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── quantization │ │ │ │ ├── __init__.py │ │ │ │ ├── helpers.py │ │ │ │ ├── test_calibration.py │ │ │ │ └── test_quantize_model_post_training.py │ │ │ ├── test_analyzer_model.py │ │ │ ├── test_analyzer_model_data │ │ │ │ ├── mobilenet-p.json │ │ │ │ ├── resnet50-pq.json │ │ │ │ ├── resnet50pytorch.json │ │ │ │ └── vggtensorflow.json │ │ │ ├── test_sensitivity_ks.py │ │ │ └── test_sensitivity_ks_data │ │ │ │ ├── mobilenet_loss_approx.json │ │ │ │ ├── mobilenet_loss_one_shot.json │ │ │ │ ├── mobilenet_perf.json │ │ │ │ ├── resnet50_loss_approx.json │ │ │ │ ├── resnet50_loss_one_shot.json │ │ │ │ └── resnet50_perf.json │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ ├── test_analyzer.py │ │ │ ├── test_analyzer_data │ │ │ │ ├── mobilenet_pruning_loss_magnitude.json │ │ │ │ ├── mobilenet_pruning_perf.json │ │ │ │ ├── resnet50_pruning_loss_magnitude.json │ │ │ │ └── resnet50_pruning_perf.json │ │ │ ├── test_info.py │ │ │ └── test_model_info.py │ │ ├── test_base.py │ │ ├── test_imports.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── test_data.py │ │ │ ├── test_extract_node_shape_data │ │ │ ├── mobilenet-p.json │ │ │ └── resnet50-pq.json │ │ │ ├── test_graph_editor.py │ │ │ ├── test_graph_optimizer.py │ │ │ ├── test_helpers.py │ │ │ ├── test_loss.py │ │ │ ├── test_models.py │ │ │ └── test_sparse_tensor.py │ │ ├── optim │ │ ├── __init__.py │ │ ├── test_analyzer.py │ │ ├── test_helpers.py │ │ ├── test_manager.py │ │ ├── test_modifier.py │ │ └── test_sensitivity.py │ │ ├── pytorch │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ └── classification │ │ │ │ ├── __init__.py │ 
│ │ │ ├── test_cifar.py │ │ │ │ ├── test_imagenette.py │ │ │ │ └── test_mnist.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── helpers.py │ │ ├── image_classification │ │ │ ├── __init__.py │ │ │ ├── resnet_test_recipe.yaml │ │ │ ├── test_export.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ └── test_helpers.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── test_darknet.py │ │ │ │ ├── test_efficientnet.py │ │ │ │ ├── test_inception_v3.py │ │ │ │ ├── test_mnist.py │ │ │ │ ├── test_mobilenet.py │ │ │ │ ├── test_mobilenetv2.py │ │ │ │ ├── test_resnet.py │ │ │ │ └── test_vgg.py │ │ │ ├── detection │ │ │ │ ├── __init__.py │ │ │ │ ├── test_ssd_mobilenet.py │ │ │ │ ├── test_ssd_resnet.py │ │ │ │ └── test_yolo_v3.py │ │ │ ├── external │ │ │ │ ├── __init__.py │ │ │ │ └── test_torchvision.py │ │ │ └── utils.py │ │ ├── nn │ │ │ ├── __init__.py │ │ │ ├── test_activations.py │ │ │ ├── test_fatrelu.py │ │ │ └── test_se.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── test_analyzer_as.py │ │ │ ├── test_analyzer_ks.py │ │ │ ├── test_analyzer_module.py │ │ │ ├── test_manager.py │ │ │ ├── test_mask_creator_pruning.py │ │ │ ├── test_mask_pruning.py │ │ │ ├── test_mask_pruning_scorer.py │ │ │ ├── test_modifier_optimizer.py │ │ │ ├── test_optimizer.py │ │ │ ├── test_sensitivity_ks.py │ │ │ └── test_sensitivity_lr.py │ │ ├── recipe_template │ │ │ ├── test_end_to_end.py │ │ │ └── test_main.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ ├── distillation │ │ │ │ ├── __init__.py │ │ │ │ └── test_modifier_distillation.py │ │ │ ├── pruning │ │ │ │ ├── __init__.py │ │ │ │ ├── helpers.py │ │ │ │ ├── test_mask_creator.py │ │ │ │ ├── test_mask_params.py │ │ │ │ ├── test_mfac_inverse.py │ │ │ │ ├── test_modifier_pruning_acdc.py │ │ │ │ ├── test_modifier_pruning_constant.py │ │ │ │ ├── test_modifier_pruning_layer.py │ │ │ │ ├── test_modifier_pruning_magnitude.py │ │ │ │ ├── test_modifier_pruning_mfac.py │ │ │ │ ├── test_modifier_pruning_movement.py │ │ │ │ ├── test_modifier_pruning_obs.py │ │ │ │ └── test_modifier_pruning_structured.py │ │ │ ├── quantization │ │ │ │ ├── __init__.py │ │ │ │ ├── test_helpers.py │ │ │ │ ├── test_legacy_modifier_quantization.py │ │ │ │ ├── test_modifier_quantization.py │ │ │ │ ├── test_quantization_scheme.py │ │ │ │ ├── test_quantize_qat_export.py │ │ │ │ └── test_yaml_load_quantization_modifiers.py │ │ │ ├── test_info.py │ │ │ ├── test_modifier.py │ │ │ ├── test_modifier_thinning.py │ │ │ └── training │ │ │ │ ├── __init__.py │ │ │ │ ├── test_modifier_epoch.py │ │ │ │ ├── test_modifier_lr.py │ │ │ │ └── test_modifier_params.py │ │ ├── test_base.py │ │ ├── test_imports.py │ │ ├── test_torch_to_onnx_exporter.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── test_benchmarker.py │ │ │ ├── test_exporter.py │ │ │ ├── test_helpers.py │ │ │ ├── test_logger.py │ │ │ ├── test_loss.py │ │ │ ├── test_module.py │ │ │ └── test_ssd_helpers.py │ │ ├── sparsification │ │ ├── __init__.py │ │ ├── recipes │ │ │ ├── mobilenet.yaml │ │ │ └── resnet18.yaml │ │ ├── test_info.py │ │ ├── test_model_info.py │ │ ├── test_oracle.py │ │ └── test_recipe_builder.py │ │ ├── tensorflow_v1 │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ └── classification │ │ │ │ ├── __init__.py │ │ │ │ └── test_imagenette.py │ │ ├── framework │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── helpers.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ └── classification │ │ │ │ ├── __init__.py │ │ │ │ ├── test_mnist.py │ │ │ │ ├── 
test_mobilenet.py │ │ │ │ ├── test_mobilenetv2.py │ │ │ │ ├── test_resnet.py │ │ │ │ └── test_vgg.py │ │ ├── optim │ │ │ ├── __init__.py │ │ │ ├── test_analyzer_module.py │ │ │ ├── test_manager.py │ │ │ ├── test_mask_creator_pruning.py │ │ │ ├── test_mask_pruning.py │ │ │ ├── test_modifier.py │ │ │ ├── test_modifier_epoch.py │ │ │ ├── test_modifier_lr.py │ │ │ ├── test_modifier_params.py │ │ │ ├── test_modifier_pruning.py │ │ │ ├── test_schedule_lr.py │ │ │ └── test_sensitivity_ks.py │ │ ├── sparsification │ │ │ ├── __init__.py │ │ │ └── test_info.py │ │ ├── test_base.py │ │ ├── test_imports.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── test_helpers.py │ │ │ └── test_variable.py │ │ ├── test_base.py │ │ ├── test_imports.py │ │ ├── test_version.py │ │ ├── transformers │ │ └── utils │ │ │ ├── __init__.py │ │ │ └── test_helpers.py │ │ ├── utils │ │ ├── __init__.py │ │ ├── test_helpers.py │ │ └── test_restricted_eval.py │ │ └── yolov5 │ │ ├── __init__.py │ │ └── helpers.py └── utils │ ├── conftest.py │ ├── copyright.py │ ├── docs_builder.py │ └── pytorch2keras.py └── src ├── CMakeLists.txt ├── benchmark_spmm.cpp ├── cuda_utils.h ├── dataset ├── CMakeLists.txt ├── dataset.cpp └── dataset.hpp ├── format ├── CMakeLists.txt ├── format.cpp ├── format.hpp ├── format_csr.cpp ├── format_csr.hpp ├── format_cvs.cpp ├── format_cvs.hpp ├── format_cxx.cpp ├── format_cxx.hpp ├── format_sr_nm.cpp └── format_sr_nm.hpp ├── gemm ├── CMakeLists.txt ├── cublasLt_gemm.cpp ├── cublasLt_gemm.hpp ├── cublas_gemm.cpp ├── cublas_gemm.hpp ├── gemm.cpp └── gemm.hpp ├── spmm ├── CMakeLists.txt ├── clasp_spmm.cpp ├── clasp_spmm.hpp ├── cusparseLt_spmm.cpp ├── cusparseLt_spmm.hpp ├── spatha_spmm.cpp ├── spatha_spmm.hpp ├── spmm.cpp ├── spmm.hpp ├── sputnik_spmm.cpp └── sputnik_spmm.hpp └── util ├── argparse.h ├── matrix_utils.h └── utils.h /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "include/sputnik"] 2 | path = include/sputnik 3 | url = https://github.com/google-research/sputnik.git 4 | [submodule "end2end/sten"] 5 | path = end2end/sten 6 | url = https://github.com/spcl/sten.git 7 | 8 | [submodule "include/CLASP"] 9 | path = include/CLASP 10 | url = https://github.com/UDC-GAC/CLASP.git 11 | -------------------------------------------------------------------------------- /benchmark/README.md: -------------------------------------------------------------------------------- 1 | # The Spatha Library 2 | 3 | (see /include/spatha/block_sparse/spmm) 4 | 5 | In order to use the N:M version, keep default ```blockwise_kernel.h``` file in the compilation 6 | 7 | To use the baseline kernel, use instead ```blockwise_kernel_baseline.h``` 8 | 9 | To simulate an ideal scenario where no accesses to column-loc, compile spatha with ```blockwise_kernel_ideal.h``` instead 10 | 11 | Finally, in order to evaluate Spatha with 32-bit STS instructions, compile with ```blockwise_kernel_32b.h``` and ```epilogue_32b.h``` (located in /include/spatha/block_sparse/common) -------------------------------------------------------------------------------- /cmake/Cuda.cmake: -------------------------------------------------------------------------------- 1 | function(cuda_find_library out_path lib_name) 2 | find_library(${out_path} ${lib_name} PATHS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES} 3 | PATH_SUFFIXES lib lib64 REQUIRED) 4 | endfunction() 5 | 6 | function(create_cuda_gencode_flags out archs_args) 7 | set(archs ${archs_args} ${ARGN}) 8 | set(tmp "") 9 | foreach(arch IN LISTS archs) 10 | 
set(tmp "${tmp} -gencode arch=compute_${arch},code=sm_${arch} -cubin -Xlinker=--whole-archive \ 11 | -Xlinker=--no-whole-archiv") 12 | endforeach(arch) 13 | set(${out} ${tmp} PARENT_SCOPE) 14 | endfunction() 15 | -------------------------------------------------------------------------------- /cmake/Dependencies.cmake: -------------------------------------------------------------------------------- 1 | include(cmake/Cuda.cmake) 2 | 3 | cuda_find_library(CUDART_LIBRARY cudart_static) 4 | cuda_find_library(CUSPARSE_LIBRARY cusparse_static) 5 | set(CUBLASLT_LIBRARY cublasLt) 6 | list(APPEND SpMM_CNN_LIBS "cudart_static;cusparse_static;cublas_static") 7 | #list(APPEND SpMM_CNN_LIBS "cudart_static;cublas_static") 8 | 9 | function(create_cuda_gencode_flags out archs_args) 10 | set(archs ${archs_args} ${ARGN}) 11 | set(tmp "") 12 | foreach(arch IN LISTS archs) 13 | set(tmp "${tmp} -m64 -lineinfo -gencode arch=compute_${arch},code=sm_${arch} -Xlinker=--whole-archive \ 14 | -I ${CUSPARSELT_DIR}/include \ 15 | -Xlinker=${CUSPARSELT_DIR}/lib/libcusparseLt_static.a \ 16 | -Xlinker=--no-whole-archiv") 17 | endforeach(arch) 18 | set(${out} ${tmp} PARENT_SCOPE) 19 | endfunction() -------------------------------------------------------------------------------- /end2end/install.sh: -------------------------------------------------------------------------------- 1 | pip uninstall -y spatha 2 | rm -rf build 3 | rm -rf dist 4 | rm -rf spatha.egg-info 5 | python3 -W ignore setup.py build 6 | python3 -W ignore setup.py install -------------------------------------------------------------------------------- /end2end/install_v64.sh: -------------------------------------------------------------------------------- 1 | pip uninstall -y spatha 2 | rm -rf build 3 | rm -rf dist 4 | rm -rf spatha.egg-info 5 | python3 -W ignore setup_v64.py build 6 | python3 -W ignore setup_v64.py install -------------------------------------------------------------------------------- /end2end/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages, Extension 2 | from torch.utils.cpp_extension import CppExtension, BuildExtension, CUDAExtension 3 | from pybind11.setup_helpers import Pybind11Extension 4 | import sys 5 | 6 | setup( 7 | name='spatha', 8 | version='0.0.1', 9 | description='Custom library for Sparse Tensor Cores', 10 | author='Roberto L. Castro', 11 | author_email='roberto.lopez.castro@udc.es', 12 | ext_modules=[ 13 | CUDAExtension('spatha', 14 | ['spatha_mod/block_sparse/api/spatha.cu'], 15 | extra_compile_args={'cxx':[], 'nvcc':['-arch=sm_86', '--ptxas-options=-v', '-lineinfo']}) 16 | ], 17 | cmdclass={'build_ext': BuildExtension}, 18 | install_requires=['torch'] 19 | ) -------------------------------------------------------------------------------- /end2end/setup_v64.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages, Extension 2 | from torch.utils.cpp_extension import CppExtension, BuildExtension, CUDAExtension 3 | from pybind11.setup_helpers import Pybind11Extension 4 | import sys 5 | 6 | setup( 7 | name='spatha', 8 | version='0.0.1', 9 | description='Custom library for Sparse Tensor Cores', 10 | author='Roberto L. 
Castro', 11 | author_email='roberto.lopez.castro@udc.es', 12 | ext_modules=[ 13 | CUDAExtension('spatha', 14 | ['spatha_mod/block_sparse/api/spatha.cu'], 15 | extra_compile_args={'cxx':[], 'nvcc':['-arch=sm_86', '--ptxas-options=-v', '-lineinfo', '-DV_64']}) 16 | ], 17 | cmdclass={'build_ext': BuildExtension}, 18 | install_requires=['torch'] 19 | ) -------------------------------------------------------------------------------- /end2end/spatha_mod/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | *.o 3 | *.d 4 | *.a 5 | *.out 6 | libcusparse_lt 7 | .vscode -------------------------------------------------------------------------------- /end2end/spatha_mod/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/end2end/spatha_mod/README.md -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/Makefile: -------------------------------------------------------------------------------- 1 | LIB = libblock_sparse.a 2 | 3 | LIB_OBJS =\ 4 | spmm/blockwise_library.cu.o \ 5 | util/random_mask.cc.o 6 | 7 | BENCHMARK =\ 8 | benchmark/benchmark.spmm_nm.out 9 | 10 | TEST =\ 11 | test/test.spmm.blockwise.out 12 | 13 | CC = g++ 14 | CC_INC = -I.. -I/usr/local/cuda/include 15 | CC_FLAG = -O2 -std=c++11 16 | 17 | NVCC = nvcc 18 | NVCC_INC = -I.. 19 | NVCC_FLAG = -O3 \ 20 | -std=c++17 \ 21 | -arch=sm_$(GPU_CC) \ 22 | -DGPU_CC=$(GPU_CC) 23 | 24 | all: lib benchmark test 25 | 26 | lib: $(LIB) 27 | 28 | $(LIB): $(LIB_OBJS) 29 | ar cr $@ $^ 30 | ranlib $@ 31 | 32 | benchmark: $(BENCHMARK) 33 | 34 | test: $(TEST) 35 | 36 | %.out: %.cu.o $(LIB) 37 | $(NVCC) $^ -o $@ 38 | 39 | %.cu.o: %.cu 40 | $(NVCC) $(NVCC_INC) $(NVCC_FLAG) -c $< -o $@ 41 | 42 | %.cc.o: %.cc 43 | $(CC) $(CC_INC) $(CC_FLAG) -c $< -o $@ 44 | 45 | clean: 46 | rm -f $(LIB) $(BENCHMARK) $(LIB_OBJS) $(TEST) 47 | -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/api/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_OPENSPARSELT_BENCH_SOURCES 2 | ./argparse_util.h 3 | ./timing_util.h 4 | ./benchmark.spmm_nm.cu 5 | ) 6 | 7 | set_source_files_properties(${SPMM_OPENSPARSELT_BENCH_SOURCES} PROPERTIES LANGUAGE CUDA) 8 | 9 | #add_library(spmm SHARED 10 | # ${SPMM_SOURCES}) 11 | 12 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 13 | add_library(openSparseLt_bench STATIC 14 | ${SPMM_OPENSPARSELT_BENCH_SOURCES}) 15 | 16 | target_link_libraries(openSparseLt_bench 17 | PUBLIC cudart nvToolsExt 18 | ) 19 | 20 | target_include_directories(openSparseLt_bench 21 | PUBLIC .) -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_SPATHA_BENCH_SOURCES 2 | ./argparse_util.h 3 | ./timing_util.h 4 | ./benchmark.spmm_nm.cu 5 | ) 6 | 7 | set_source_files_properties(${SPMM_SPATHA_BENCH_SOURCES} PROPERTIES LANGUAGE CUDA) 8 | 9 | #add_library(spmm SHARED 10 | # ${SPMM_SOURCES}) 11 | 12 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 13 | add_library(spatha_bench STATIC 14 | ${SPMM_SPATHA_BENCH_SOURCES}) 15 | 16 | target_link_libraries(spatha_bench 17 | PUBLIC cudart nvToolsExt 18 | ) 19 | 20 | target_include_directories(spatha_bench 21 | PUBLIC .) 
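A note on the two build paths above: `setup.py` and `setup_v64.py` are identical except that the latter passes the extra `-DV_64` define to nvcc (and the block_sparse Makefile similarly injects `-DGPU_CC=$(GPU_CC)`). As a minimal, illustrative sketch of how such compile-time switches are typically consumed — the actual guards inside Spatha's sources are not reproduced in this listing, so apart from the `V_64` and `GPU_CC` names everything below is an assumption:

```cpp
// Illustrative only: consuming the -DGPU_CC=<arch> and -DV_64 switches that
// the Makefile and setup_v64.py pass to the compiler.  The real guards used
// inside Spatha are not shown in this listing.
#include <cstdio>

#ifndef GPU_CC
#define GPU_CC 86  // fallback so the sketch also compiles stand-alone
#endif

int main() {
#ifdef V_64
    std::printf("V=64 variant, compiled for sm_%d\n", GPU_CC);
#else
    std::printf("default variant, compiled for sm_%d\n", GPU_CC);
#endif
    return 0;
}
```

Running `install.sh` versus `install_v64.sh` therefore installs one or the other specialization under the same `spatha` module name.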
-------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/benchmark/get_best.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from math import log10, floor 3 | 4 | 5 | def round_to_1(x): 6 | return round(x, -int(floor(log10(abs(x))))) 7 | 8 | df = pd.read_csv('/tmp/transformer.csv', names=['arch', 'm', 'k', 'vrow', 'vcol', 'density', 'seed', 'order', 'n', 'bm', 'bn', 'bk', 'wm', 'wn', 'wk', 'mm', 'mn', 'mk', 'nstage', 'time', 'tflops']) 9 | df['density'] = df['density'].apply(round_to_1) 10 | df = df.groupby(['arch','m','k','vrow','vcol','density','order','n','bm','bn','bk','wm','wk','wn','mm','mn','mk','nstage']).mean().reset_index() 11 | 12 | cfg = df.sort_values(by='time') 13 | cfg = cfg.groupby(['arch','m','k','vrow','vcol','density','n']).apply(pd.DataFrame.head, n=1) 14 | #cfg = cfg.groupby(['arch','m','k','density','n']).apply(pd.DataFrame.head, n=1) 15 | cfg = cfg.drop(['order', 'seed'], axis=1) 16 | 17 | pd.set_option('display.max_rows', 50000) 18 | print(cfg[cfg.vrow==16]) -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/benchmark/timing_util.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | struct GpuTimer 4 | { 5 | cudaEvent_t startEvent; 6 | cudaEvent_t stopEvent; 7 | 8 | GpuTimer() { 9 | cudaEventCreate(&startEvent); 10 | cudaEventCreate(&stopEvent); 11 | } 12 | 13 | ~GpuTimer() 14 | { 15 | cudaEventDestroy(startEvent); 16 | cudaEventDestroy(stopEvent); 17 | } 18 | 19 | void start() 20 | { 21 | cudaEventRecord(startEvent, 0); 22 | } 23 | 24 | void stop() 25 | { 26 | cudaEventRecord(stopEvent, 0); 27 | cudaEventSynchronize(stopEvent); 28 | } 29 | 30 | float elapsed_msecs() 31 | { 32 | float elapsed; 33 | cudaEventElapsedTime(&elapsed, startEvent, stopEvent); 34 | return elapsed; 35 | } 36 | }; 37 | -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/cmake/Cuda.cmake: -------------------------------------------------------------------------------- 1 | # Helper to find CUDA libraries. 2 | function(cuda_find_library out_path lib_name) 3 | find_library(${out_path} ${lib_name} PATHS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES} 4 | PATH_SUFFIXES lib lib64 REQUIRED) 5 | endfunction() 6 | 7 | # Helper to create CUDA gencode flags. 8 | function(create_cuda_gencode_flags out archs_args) 9 | set(archs ${archs_args} ${ARGN}) 10 | set(tmp "") 11 | foreach(arch IN LISTS archs) 12 | set(tmp "${tmp} -gencode arch=compute_${arch},code=sm_${arch}") 13 | endforeach(arch) 14 | set(${out} ${tmp} PARENT_SCOPE) 15 | endfunction() 16 | -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/cmake/Dependencies.cmake: -------------------------------------------------------------------------------- 1 | include(cmake/Cuda.cmake) 2 | 3 | cuda_find_library(CUDART_LIBRARY cudart_static) 4 | #cuda_find_library(CUBLAS_LIBRARY cublas_static) 5 | cuda_find_library(CUSPARSE_LIBRARY cusparse_static) 6 | list(APPEND SpMM_CNN_LIBS "cudart_static;cusparse_static;cublas_static;cusparse_static") 7 | 8 | # Helper to create CUDA gencode flags. 
9 | function(create_cuda_gencode_flags out archs_args) 10 | set(archs ${archs_args} ${ARGN}) 11 | set(tmp "") 12 | foreach(arch IN LISTS archs) 13 | set(tmp "${tmp} -m64 -lineinfo -gencode arch=compute_${arch},code=sm_${arch}") 14 | endforeach(arch) 15 | set(${out} ${tmp} PARENT_SCOPE) 16 | endfunction() -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/common/base.h: -------------------------------------------------------------------------------- 1 | // Lots of small util functions and definitions. 2 | 3 | #pragma once 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace spatha { 14 | // *** math utilities *** 15 | 16 | #define CEIL(x, y) (((x) + (y) -1)/(y)) 17 | #define ROUND_UP(x, y) ((CEIL((x), (y)))*(y)) 18 | 19 | // macro to declare a device-side function 20 | #define DEVICE_INLINE __device__ __forceinline__ 21 | 22 | // *** type for storing a 3D shape *** 23 | template struct ShapeBase { 24 | static constexpr int M = M_, N = N_, K = K_; 25 | }; 26 | 27 | } 28 | -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/common/library_util.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace spatha { 4 | 5 | #define NAME_FUNC(type, fn, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE)\ 6 | type##_##BM##x##BLOCK_N##x##BLOCK_K##_##WARP_M##x##WARP_N##x##WARP_K##_##MMA_M##x##MMA_N##x##MMA_K##_##NSTAGE##_##fn##Fn 7 | 8 | 9 | #define DECL_FUNC(type, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE) \ 10 | extern type##InitFn_t NAME_FUNC(type, Init, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE); \ 11 | extern type##ExecFn_t NAME_FUNC(type, Exec, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE); 12 | 13 | } -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/common/swizzle.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "base.h" 3 | 4 | namespace spatha { 5 | 6 | struct SwizzleIdentity { 7 | DEVICE_INLINE 8 | int operator()(int offset) { 9 | return offset; 10 | } 11 | }; 12 | 13 | struct Swizzle8BWiseXor { 14 | DEVICE_INLINE 15 | int operator()(int offset) { 16 | return (offset ^ 17 | ((offset & (7<<6))>>3)); 18 | } 19 | }; 20 | 21 | } -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/spmm/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_SPATHA_SOURCES 2 | ./blockwise_library.cu 3 | ) 4 | 5 | #cmake_policy(SET CMP0104 OLD) 6 | 7 | set_source_files_properties(${SPMM_SPATHA_SOURCES} PROPERTIES LANGUAGE CUDA) 8 | 9 | include(GNUInstallDirs) 10 | 11 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 12 | 13 | add_library(spatha 14 | STATIC ${SPMM_SPATHA_SOURCES}) 15 | 16 | target_include_directories(spatha 17 | PRIVATE ${PYTHON_INCLUDE_DIRS} 18 | ) 19 | 20 | message(STATUS "TORCH LIBRARIES ENV VARIABLE: ${TORCH_LIBRARIES}") 21 | message(STATUS "TORCH DIRS ENV VARIABLE: ${TORCH_INCLUDE_DIRS}") 22 | 23 | target_link_libraries(spatha 24 | PUBLIC 25 | Python::Python 26 | ${CMAKE_TORCH_LIBRARIES} 27 | ) 28 | 29 | target_include_directories(spatha 30 | PUBLIC ${CMAKE_TORCH_DIRS}) 31 | 
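The `Swizzle8BWiseXor` functor in `common/swizzle.h` above is easy to misread, so here is a small host-side re-creation of what its XOR expression computes. The interpretation in the comments (rows of 64 elements, permuted in 8-element chunks) is an inference from the bit pattern, not something stated in the source:

```cpp
// Host-side sketch of spatha::Swizzle8BWiseXor from common/swizzle.h.
// offset ^ ((offset & (7 << 6)) >> 3) takes bits [8:6] of the offset (the
// row index when rows hold 64 elements) and XORs them into bits [5:3]
// (the 8-element chunk within the row).
#include <cassert>
#include <cstdio>

int swizzle_8b_xor(int offset) {
    return offset ^ ((offset & (7 << 6)) >> 3);
}

int main() {
    assert(swizzle_8b_xor(0)   == 0);    // row 0 is left untouched
    assert(swizzle_8b_xor(8)   == 8);
    assert(swizzle_8b_xor(64)  == 72);   // row 1: chunk 0 -> chunk 1
    assert(swizzle_8b_xor(72)  == 64);   // row 1: chunk 1 -> chunk 0
    assert(swizzle_8b_xor(128) == 144);  // row 2: chunk 0 -> chunk 2
    std::puts("swizzle pattern as expected");
    return 0;
}
```

Permuting each row's 8-element chunks by a row-dependent XOR is the usual trick for spreading column accesses across shared-memory banks, which is presumably the motivation here.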
-------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/spmm/spmm_op.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "blockwise_op.h" 4 | -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/util/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_SPATHA_SOURCES 2 | ./random_mask.h 3 | ./random_mask.cc 4 | ) 5 | 6 | set(CMAKE_CXX_STANDARD 11) 7 | 8 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 9 | add_library(spatha_util STATIC 10 | ${SPMM_SPATHA_SOURCES}) 11 | 12 | target_include_directories(spatha_util 13 | PUBLIC .) -------------------------------------------------------------------------------- /end2end/spatha_mod/block_sparse/util/random_mask.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | namespace spatha { 5 | 6 | void random_mask(int nrow, int ncol, int brow, int bcol, float block_density, 7 | int &nnzb, int &padded_nnzb, 8 | std::vector &indptr, std::vector &indices, 9 | int block_divisor, int block_padding, unsigned seed) ; 10 | 11 | void random_mask(int nrow, int ncol, int brow, int bcol, float block_density, 12 | int &nnzb, std::vector &indptr, std::vector &indices, unsigned seed); 13 | 14 | } 15 | -------------------------------------------------------------------------------- /include/spatha/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | *.o 3 | *.d 4 | *.a 5 | *.out 6 | libcusparse_lt 7 | .vscode -------------------------------------------------------------------------------- /include/spatha/README.md: -------------------------------------------------------------------------------- 1 | # The Spatha Library 🗡️ 2 | 3 | Spatha is a high-performance CUDA library oriented to general matrix-matrix operations where the left operand is sparse -------------------------------------------------------------------------------- /include/spatha/block_sparse/benchmark/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_SPATHA_BENCH_SOURCES 2 | ./argparse_util.h 3 | ./timing_util.h 4 | ./benchmark.spmm_nm.cu 5 | ) 6 | 7 | set_source_files_properties(${SPMM_SPATHA_BENCH_SOURCES} PROPERTIES LANGUAGE CUDA) 8 | 9 | #add_library(spmm SHARED 10 | # ${SPMM_SOURCES}) 11 | 12 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 13 | add_library(spatha_bench STATIC 14 | ${SPMM_SPATHA_BENCH_SOURCES}) 15 | 16 | target_link_libraries(spatha_bench 17 | PUBLIC cudart nvToolsExt 18 | ) 19 | 20 | target_include_directories(spatha_bench 21 | PUBLIC .) 
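`util/random_mask.h`, listed a few files up, declares the helpers Spatha uses to generate random block-sparse mask patterns. Below is a minimal sketch of calling the simpler overload — the element type of the index vectors is not visible in this listing, and the block-CSR reading of `indptr`/`indices` is likewise an assumption, so treat the `std::vector<int>` types, the include path, and the printed interpretation as illustrative only:

```cpp
// Sketch: generating a random block-sparse pattern with the simpler
// random_mask overload declared in util/random_mask.h.  Vector element
// types and the meaning of indptr/indices are assumptions.
#include <cstdio>
#include <vector>

#include "random_mask.h"  // include path per the util target's PUBLIC "." dir

int main() {
    const int nrow = 1024, ncol = 1024;  // mask dimensions
    const int brow = 64, bcol = 64;      // block size
    const float block_density = 0.25f;   // fraction of blocks kept non-zero
    const unsigned seed = 2023;

    int nnzb = 0;                        // number of non-zero blocks (output)
    std::vector<int> indptr, indices;    // assumed block-CSR row ptrs / col ids

    spatha::random_mask(nrow, ncol, brow, bcol, block_density,
                        nnzb, indptr, indices, seed);

    std::printf("%d non-zero blocks over a %dx%d block grid\n",
                nnzb, nrow / brow, ncol / bcol);
    return 0;
}
```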
-------------------------------------------------------------------------------- /include/spatha/block_sparse/benchmark/timing_util.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | struct GpuTimer 4 | { 5 | cudaEvent_t startEvent; 6 | cudaEvent_t stopEvent; 7 | 8 | GpuTimer() { 9 | cudaEventCreate(&startEvent); 10 | cudaEventCreate(&stopEvent); 11 | } 12 | 13 | ~GpuTimer() 14 | { 15 | cudaEventDestroy(startEvent); 16 | cudaEventDestroy(stopEvent); 17 | } 18 | 19 | void start() 20 | { 21 | cudaEventRecord(startEvent, 0); 22 | } 23 | 24 | void stop() 25 | { 26 | cudaEventRecord(stopEvent, 0); 27 | cudaEventSynchronize(stopEvent); 28 | } 29 | 30 | float elapsed_msecs() 31 | { 32 | float elapsed; 33 | cudaEventElapsedTime(&elapsed, startEvent, stopEvent); 34 | return elapsed; 35 | } 36 | }; 37 | -------------------------------------------------------------------------------- /include/spatha/block_sparse/cmake/Cuda.cmake: -------------------------------------------------------------------------------- 1 | # Helper to find CUDA libraries. 2 | function(cuda_find_library out_path lib_name) 3 | find_library(${out_path} ${lib_name} PATHS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES} 4 | PATH_SUFFIXES lib lib64 REQUIRED) 5 | endfunction() 6 | 7 | # Helper to create CUDA gencode flags. 8 | function(create_cuda_gencode_flags out archs_args) 9 | set(archs ${archs_args} ${ARGN}) 10 | set(tmp "") 11 | foreach(arch IN LISTS archs) 12 | set(tmp "${tmp} -gencode arch=compute_${arch},code=sm_${arch}") 13 | endforeach(arch) 14 | set(${out} ${tmp} PARENT_SCOPE) 15 | endfunction() 16 | -------------------------------------------------------------------------------- /include/spatha/block_sparse/cmake/Dependencies.cmake: -------------------------------------------------------------------------------- 1 | include(cmake/Cuda.cmake) 2 | 3 | cuda_find_library(CUDART_LIBRARY cudart_static) 4 | #cuda_find_library(CUBLAS_LIBRARY cublas_static) 5 | cuda_find_library(CUSPARSE_LIBRARY cusparse_static) 6 | list(APPEND SpMM_CNN_LIBS "cudart_static;cusparse_static;cublas_static;cusparse_static") 7 | 8 | # Helper to create CUDA gencode flags. 9 | function(create_cuda_gencode_flags out archs_args) 10 | set(archs ${archs_args} ${ARGN}) 11 | set(tmp "") 12 | foreach(arch IN LISTS archs) 13 | set(tmp "${tmp} -m64 -lineinfo -gencode arch=compute_${arch},code=sm_${arch}") 14 | endforeach(arch) 15 | set(${out} ${tmp} PARENT_SCOPE) 16 | endfunction() -------------------------------------------------------------------------------- /include/spatha/block_sparse/common/base.h: -------------------------------------------------------------------------------- 1 | // Lots of small util functions and definitions. 
2 | 3 | #pragma once 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | namespace spatha { 14 | // *** math utilities *** 15 | 16 | #define CEIL(x, y) (((x) + (y) -1)/(y)) 17 | #define ROUND_UP(x, y) ((CEIL((x), (y)))*(y)) 18 | 19 | // macro to declare a device-side function 20 | #define DEVICE_INLINE __device__ __forceinline__ 21 | 22 | // *** type for storing a 3D shape *** 23 | template struct ShapeBase { 24 | static constexpr int M = M_, N = N_, K = K_; 25 | }; 26 | 27 | } 28 | -------------------------------------------------------------------------------- /include/spatha/block_sparse/common/library_util.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | namespace spatha { 4 | 5 | #define NAME_FUNC(type, fn, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE)\ 6 | type##_##BM##x##BLOCK_N##x##BLOCK_K##_##WARP_M##x##WARP_N##x##WARP_K##_##MMA_M##x##MMA_N##x##MMA_K##_##NSTAGE##_##fn##Fn 7 | 8 | 9 | #define DECL_FUNC(type, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE) \ 10 | extern type##InitFn_t NAME_FUNC(type, Init, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE); \ 11 | extern type##ExecFn_t NAME_FUNC(type, Exec, BM, BLOCK_N, BLOCK_K, WARP_M, WARP_N, WARP_K, MMA_M, MMA_N, MMA_K, NSTAGE); 12 | 13 | } -------------------------------------------------------------------------------- /include/spatha/block_sparse/common/swizzle.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "base.h" 3 | 4 | namespace spatha { 5 | 6 | struct SwizzleIdentity { 7 | DEVICE_INLINE 8 | int operator()(int offset) { 9 | return offset; 10 | } 11 | }; 12 | 13 | struct Swizzle8BWiseXor { 14 | DEVICE_INLINE 15 | int operator()(int offset) { 16 | return (offset ^ 17 | ((offset & (7<<6))>>3)); 18 | } 19 | }; 20 | 21 | } -------------------------------------------------------------------------------- /include/spatha/block_sparse/spmm/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_OPENSPARSELT_SOURCES 2 | ./blockwise_library.cu 3 | ) 4 | 5 | #cmake_policy(SET CMP0104 OLD) 6 | 7 | set_source_files_properties(${SPMM_OPENSPARSELT_SOURCES} PROPERTIES LANGUAGE CUDA) 8 | 9 | include(GNUInstallDirs) 10 | 11 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 12 | 13 | add_library(spatha 14 | STATIC ${SPMM_OPENSPARSELT_SOURCES}) 15 | -------------------------------------------------------------------------------- /include/spatha/block_sparse/spmm/spmm_op.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 Roberto Lopez Castro (roberto.lopez.castro@udc.es). All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | 17 | #pragma once 18 | 19 | #include "blockwise_op.h" 20 | -------------------------------------------------------------------------------- /include/spatha/block_sparse/util/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(SPMM_OPENSPARSELT_SOURCES 2 | ./random_mask.h 3 | ./random_mask.cc 4 | ) 5 | 6 | set(CMAKE_CXX_STANDARD 11) 7 | 8 | add_definitions(-DGPU_CC=${CUDA_ARCHS}) 9 | add_library(spatha_util STATIC 10 | ${SPMM_OPENSPARSELT_SOURCES}) 11 | 12 | target_include_directories(spatha_util 13 | PUBLIC .) -------------------------------------------------------------------------------- /sparseml/.MAINTAINERS: -------------------------------------------------------------------------------- 1 | # list of active maintainers 2 | # uncommented maintainers will be included in code review triage 3 | # markurtz 4 | # mgoin 5 | # natuan 6 | bfineran 7 | # spacemanidol 8 | rahul-tuli 9 | KSGulin 10 | dbogunowicz 11 | # anmarques 12 | kylesayrs 13 | # eldarkurtic 14 | corey-nm 15 | # chibukach 16 | # shubhra 17 | -------------------------------------------------------------------------------- /sparseml/.github/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/.github/.gitkeep -------------------------------------------------------------------------------- /sparseml/.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | labels: bug 5 | 6 | --- 7 | 8 | **Describe the bug** 9 | A clear and concise description of what the bug is. 10 | 11 | **Expected behavior** 12 | A clear and concise description of what you expected to happen. 13 | 14 | **Environment** 15 | Include all relevant environment information: 16 | 1. OS [e.g. Ubuntu 18.04]: 17 | 2. Python version [e.g. 3.7]: 18 | 3. SparseML version or commit hash [e.g. 0.1.0, `f7245c8`]: 19 | 4. ML framework version(s) [e.g. torch 1.7.1]: 20 | 5. Other Python package versions [e.g. SparseZoo, DeepSparse, numpy, ONNX]: 21 | 6. Other relevant environment information [e.g. hardware, CUDA version]: 22 | 23 | **To Reproduce** 24 | Exact steps to reproduce the behavior: 25 | 26 | 27 | **Errors** 28 | If applicable, add a full print-out of any errors or exceptions that are raised or include screenshots to help explain your problem. 29 | 30 | **Additional context** 31 | Add any other context about the problem here. Also include any relevant files. 32 | -------------------------------------------------------------------------------- /sparseml/.github/ISSUE_TEMPLATE/doc-edit.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Doc edit 3 | about: Propose changes to project documentation 4 | labels: documentation 5 | 6 | --- 7 | 8 | **What is the URL, file, or UI containing proposed doc change** 9 | Where does one find the original content or where would this change go? 10 | 11 | **What is the current content or situation in question** 12 | Copy/paste the source content or describe gap. 13 | 14 | **What is the proposed change** 15 | Add new content. 16 | 17 | **Additional context** 18 | Add any other context about the change here. Also include any relevant files or URLs. 
19 | -------------------------------------------------------------------------------- /sparseml/.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | labels: enhancement 5 | 6 | --- 7 | 8 | **Is your feature request related to a problem? Please describe.** 9 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 10 | 11 | **Describe the solution you'd like** 12 | A clear and concise description of what you want to happen. 13 | 14 | **Describe alternatives you've considered** 15 | A clear and concise description of any alternative solutions or features you've considered. 16 | 17 | **Additional context** 18 | Add any other context or screenshots about the feature request here. 19 | -------------------------------------------------------------------------------- /sparseml/.github/workflows/quality-check.yaml: -------------------------------------------------------------------------------- 1 | name: Quality Checks 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - 'release/*' 7 | pull_request: 8 | branches: 9 | - main 10 | - 'release/*' 11 | jobs: 12 | quality-check: 13 | runs-on: ubuntu-22.04 14 | steps: 15 | - uses: actions/setup-python@v4 16 | with: 17 | python-version: '3.9' 18 | - uses: actions/checkout@v2 19 | - uses: actions/checkout@v2 20 | with: 21 | repository: "neuralmagic/sparsezoo" 22 | path: "sparsezoo" 23 | ref: ${{needs.test-setup.outputs.branch}} 24 | - name: "⚙️ Install sparsezoo dependencies" 25 | run: pip3 install sparsezoo/ 26 | - name: "Clean sparsezoo directory" 27 | run: rm -r sparsezoo/ 28 | - name: "⚙️ Install dependencies" 29 | run: pip3 install .[dev] 30 | - name: "🧹 Running quality checks" 31 | run: make quality 32 | -------------------------------------------------------------------------------- /sparseml/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | recursive-include src/sparseml/yolov5 *.yaml *.sh 3 | recursive-include src/sparseml/yolact *.sh 4 | -------------------------------------------------------------------------------- /sparseml/docs/_templates/versions.html: -------------------------------------------------------------------------------- 1 | {%- if current_version %} 2 |
3 | 4 | Other Versions 5 | v: {{ current_version.name }} 6 | 7 | 8 | 9 | {%- if versions.tags %} 10 | 11 | Tags 12 | {%- for item in versions.tags %} 13 | {{ item.name }} 14 | {%- endfor %} 15 | 16 | {%- endif %} 17 | {%- if versions.branches %} 18 | 19 | Branches 20 | {%- for item in versions.branches %} 21 | {{ item.name }} 22 | {%- endfor %} 23 | 24 | {%- endif %} 25 | 26 |
27 | {%- endif %} -------------------------------------------------------------------------------- /sparseml/docs/api/modules.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml 17 | ======== 18 | 19 | .. toctree:: 20 | :maxdepth: 4 21 | 22 | sparseml 23 | -------------------------------------------------------------------------------- /sparseml/docs/api/sparseml.framework.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml.framework package 17 | ========================== 18 | 19 | Submodules 20 | ---------- 21 | 22 | sparseml.framework.info module 23 | ------------------------------ 24 | 25 | .. automodule:: sparseml.framework.info 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | Module contents 31 | --------------- 32 | 33 | .. automodule:: sparseml.framework 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | -------------------------------------------------------------------------------- /sparseml/docs/api/sparseml.onnx.benchmark.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml.onnx.benchmark package 17 | =============================== 18 | 19 | Submodules 20 | ---------- 21 | 22 | sparseml.onnx.benchmark.info module 23 | ----------------------------------- 24 | 25 | .. automodule:: sparseml.onnx.benchmark.info 26 | :members: 27 | :undoc-members: 28 | :show-inheritance: 29 | 30 | Module contents 31 | --------------- 32 | 33 | .. 
automodule:: sparseml.onnx.benchmark 34 | :members: 35 | :undoc-members: 36 | :show-inheritance: 37 | -------------------------------------------------------------------------------- /sparseml/docs/api/sparseml.pytorch.datasets.recommendation.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml.pytorch.datasets.recommendation package 17 | ================================================ 18 | 19 | Module contents 20 | --------------- 21 | 22 | .. automodule:: sparseml.pytorch.datasets.recommendation 23 | :members: 24 | :undoc-members: 25 | :show-inheritance: 26 | -------------------------------------------------------------------------------- /sparseml/docs/api/sparseml.pytorch.datasets.video.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml.pytorch.datasets.video package 17 | ======================================= 18 | 19 | Module contents 20 | --------------- 21 | 22 | .. automodule:: sparseml.pytorch.datasets.video 23 | :members: 24 | :undoc-members: 25 | :show-inheritance: 26 | -------------------------------------------------------------------------------- /sparseml/docs/api/sparseml.pytorch.models.recommendation.rst: -------------------------------------------------------------------------------- 1 | .. 2 | Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, 11 | software distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | sparseml.pytorch.models.recommendation package 17 | ============================================== 18 | 19 | Module contents 20 | --------------- 21 | 22 | .. 
automodule:: sparseml.pytorch.models.recommendation 23 | :members: 24 | :undoc-members: 25 | :show-inheritance: 26 | -------------------------------------------------------------------------------- /sparseml/docs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/docs/favicon.ico -------------------------------------------------------------------------------- /sparseml/docs/source/icon-sparseml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/docs/source/icon-sparseml.png -------------------------------------------------------------------------------- /sparseml/integrations/README.md: -------------------------------------------------------------------------------- 1 | 16 | 17 | # Integrations 18 | 19 | This directory contains self-documented examples of SparseML workflows and integrations 20 | with other libraries. Open a Pull Request to 21 | [contribute](https://github.com/neuralmagic/sparseml/blob/main/CONTRIBUTING.md) 22 | your own. 23 | -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/oBERT216_squad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=oneshot_oBERT_216 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 1 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 1.5e-4 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --num_train_epochs 30 \ 26 | --seed 42 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/oneshot_obert216 \ 32 | -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/oBERT216_squad_gradual.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=oBERT_216_gradual 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 2 \ 19 | 
--per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --output_dir integrations/huggingface-transformers/output_dir/test_obert716_gradual \ 29 | --overwrite_output_dir \ 30 | --skip_memory_metrics true \ 31 | --report_to none \ -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/oBERT28_squad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=oneshot_oBERT_28 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 1 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 1.5e-4 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --num_train_epochs 30 \ 26 | --seed 42 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/oneshot_obert28 \ 32 | -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/oBERT28_squad_gradual.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=oBERT_28_gradual 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 2 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --output_dir integrations/huggingface-transformers/output_dir/test_obert128_gradual \ 29 | --overwrite_output_dir \ 30 | --skip_memory_metrics true \ 31 | --report_to none \ -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs216_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_216_gradual_pair 4 | export 
RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_216_pairwise_716 -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs216v128_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_216v128_gradual_pair 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_216v128_pairwise -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs216v64_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_216v64_gradual_pair 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 
24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_216v128_pairwise -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs28_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_28_gradual_pair 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_28_pairwise_58 -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs28v128_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_28v128_gradual_pair 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_28v128_pairwise -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/scripts/obs28v64_squad_gradual_pair.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export YAML_NAME=obs_28v64_gradual_pair 4 | export RECIPE=integrations/huggingface-transformers/recipes/${YAML_NAME}.yaml 5 | 6 | #uncomment to run on a single-gpu 7 | #CUDA_VISIBLE_DEVICES=0 python3.10 
src/sparseml/transformers/question_answering.py \ 8 | python3.10 -m torch.distributed.launch --nproc_per_node=3 src/sparseml/transformers/question_answering.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-squadv1 \ 10 | --model_name_or_path bert-base-uncased \ 11 | --dataset_name squad \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 3 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 16 \ 21 | --learning_rate 8e-5 \ 22 | --max_seq_length 384 \ 23 | --doc_stride 128 \ 24 | --preprocessing_num_workers 8 \ 25 | --seed 42 \ 26 | --num_train_epochs 50 \ 27 | --recipe ${RECIPE} \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to none \ 31 | --output_dir integrations/huggingface-transformers/output_dir/test_OBS_28v64_pairwise -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/tutorials/images/bert_12_6_layers_EM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/huggingface-transformers/tutorials/images/bert_12_6_layers_EM.png -------------------------------------------------------------------------------- /sparseml/integrations/huggingface-transformers/tutorials/images/bert_12_6_layers_F1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/huggingface-transformers/tutorials/images/bert_12_6_layers_F1.png -------------------------------------------------------------------------------- /sparseml/integrations/keras/recipes/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/keras/recipes/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/keras/tutorials/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/keras/tutorials/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/pytorch/recipes/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/pytorch/recipes/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/pytorch/tutorials/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/pytorch/tutorials/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/rwightman-timm/setup_integration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Integration setup command to setup the folder so it is ready to train and sparsify models. 
4 | # Creates a pytorch-image-models folder next to this script with all required dependencies from the rwightman/pytorch-image-models repository. 5 | # Command: `bash setup_integration.sh` 6 | 7 | git clone https://github.com/neuralmagic/pytorch-image-models.git 8 | cd pytorch-image-models 9 | git checkout release/1.1 10 | pip install -r requirements.txt 11 | pip install sparseml[torch] 12 | -------------------------------------------------------------------------------- /sparseml/integrations/tensorflow_v1/recipes/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/tensorflow_v1/recipes/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/tensorflow_v1/tutorials/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/tensorflow_v1/tutorials/.gitkeep -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/data/hyp.pruned.yaml: -------------------------------------------------------------------------------- 1 | lr0: 0.005 2 | lrf: 0.1 3 | momentum: 0.843 4 | weight_decay: 0.00036 5 | warmup_epochs: 40.0 6 | warmup_momentum: 0.5 7 | warmup_bias_lr: 0.05 8 | box: 0.0296 9 | cls: 0.243 10 | cls_pw: 0.631 11 | obj: 0.301 12 | obj_pw: 0.911 13 | iou_t: 0.2 14 | anchor_t: 2.91 15 | fl_gamma: 0.0 16 | hsv_h: 0.0138 17 | hsv_s: 0.664 18 | hsv_v: 0.464 19 | degrees: 0.373 20 | translate: 0.245 21 | scale: 0.898 22 | shear: 0.602 23 | perspective: 0.0 24 | flipud: 0.00856 25 | fliplr: 0.5 26 | mosaic: 1.0 27 | mixup: 0.243 -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/data/hyp.pruned_quantized.yaml: -------------------------------------------------------------------------------- 1 | lr0: 0.0 2 | lrf: 0.0 3 | momentum: 0.843 4 | weight_decay: 0.00036 5 | warmup_epochs: 40.0 6 | warmup_momentum: 0.5 7 | warmup_bias_lr: 0.05 8 | box: 0.0296 9 | cls: 0.243 10 | cls_pw: 0.631 11 | obj: 0.301 12 | obj_pw: 0.911 13 | iou_t: 0.2 14 | anchor_t: 2.91 15 | fl_gamma: 0.0 16 | hsv_h: 0.0138 17 | hsv_s: 0.664 18 | hsv_v: 0.464 19 | degrees: 0.373 20 | translate: 0.245 21 | scale: 0.898 22 | shear: 0.602 23 | perspective: 0.0 24 | flipud: 0.00856 25 | fliplr: 0.5 26 | mosaic: 1.0 27 | mixup: 0.243 -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/setup_integration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Integration setup command to setup the folder so it is ready to train and sparsify models. 4 | # Creates a yolov3 folder next to this script with all required dependencies from the ultralytics/yolov3 repository. 
5 | # Command: `bash setup_integration.sh` 6 | 7 | git clone https://github.com/neuralmagic/yolov3.git 8 | cd yolov3 9 | git checkout release/1.1 10 | pip install -r requirements.txt 11 | -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/tutorials/images/pruned-quantized-result.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/ultralytics-yolov3/tutorials/images/pruned-quantized-result.jpeg -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/tutorials/images/pruned-quantized-wandb-chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/ultralytics-yolov3/tutorials/images/pruned-quantized-wandb-chart.png -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov3/tutorials/images/transfer-learning-wandb-chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/ultralytics-yolov3/tutorials/images/transfer-learning-wandb-chart.png -------------------------------------------------------------------------------- /sparseml/integrations/ultralytics-yolov5/tutorials/images/transfer-learning-wandb-chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/integrations/ultralytics-yolov5/tutorials/images/transfer-learning-wandb-chart.png -------------------------------------------------------------------------------- /sparseml/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 88 3 | target-version = ['py36'] -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/ctx_sources/default_sources.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | dpr_wiki: 4 | _target_: dpr.data.retriever_data.CsvCtxSrc 5 | file: data.wikipedia_split.psgs_w100 6 | id_prefix: 'wiki:' 7 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/datasets/retriever_default.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | nq_test: 4 | _target_: dpr.data.retriever_data.CsvQASrc 5 | file: data.retriever.qas.nq-test 6 | 7 | nq_train: 8 | _target_: dpr.data.retriever_data.CsvQASrc 9 | file: data.retriever.qas.nq-train 10 | 11 | nq_dev: 12 | _target_: dpr.data.retriever_data.CsvQASrc 13 | file: data.retriever.qas.nq-dev 14 | 15 | trivia_test: 16 | _target_: dpr.data.retriever_data.CsvQASrc 17 | file: data.retriever.qas.trivia-test 18 | 19 | trivia_train: 20 | _target_: dpr.data.retriever_data.CsvQASrc 21 | file: data.retriever.qas.trivia-train 22 | 23 | trivia_dev: 24 | _target_: dpr.data.retriever_data.CsvQASrc 25 | file: data.retriever.qas.trivia-dev 26 | 27 | webq_test: 28 | _target_: 
dpr.data.retriever_data.CsvQASrc 29 | file: data.retriever.qas.webq-test 30 | 31 | curatedtrec_test: 32 | _target_: dpr.data.retriever_data.CsvQASrc 33 | file: data.retriever.qas.curatedtrec-test 34 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/encoder/hf_bert.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | # model type. One of [hf_bert, pytext_bert, fairseq_roberta] 4 | encoder_model_type: hf_bert 5 | 6 | # HuggingFace's config name for model initialization 7 | pretrained_model_cfg: bert-base-uncased 8 | 9 | # Some encoders need to be initialized from a file 10 | pretrained_file: 11 | 12 | # Extra linear layer on top of standard bert/roberta encoder 13 | projection_dim: 0 14 | 15 | # Max length of the encoder input sequence 16 | sequence_length: 256 17 | 18 | dropout: 0.1 19 | 20 | # whether to fix (don't update) context encoder during training or not 21 | fix_ctx_encoder: False 22 | 23 | # if False, the model won't load pre-trained BERT weights 24 | pretrained: True -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/train/biencoder_default.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | batch_size: 4 4 | dev_batch_size: 4 5 | adam_eps: 1e-8 6 | adam_betas: (0.9, 0.999) 7 | max_grad_norm: 1.0 8 | log_batch_step: 100 9 | train_rolling_loss_step: 100 10 | weight_decay: 0.0 11 | learning_rate: 1e-5 12 | 13 | # Linear warmup over warmup_steps. 14 | warmup_steps: 100 15 | 16 | # Number of updates steps to accumulate before performing a backward/update pass. 17 | gradient_accumulation_steps: 1 18 | 19 | # Total number of training epochs to perform. 20 | num_train_epochs: 40 21 | eval_per_epoch: 1 22 | hard_negatives: 1 23 | other_negatives: 0 24 | val_av_rank_hard_neg: 30 25 | val_av_rank_other_neg: 30 26 | val_av_rank_bsz: 128 27 | val_av_rank_max_qs: 10000 28 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/train/biencoder_local.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | batch_size: 4 4 | dev_batch_size: 16 5 | adam_eps: 1e-8 6 | adam_betas: (0.9, 0.999) 7 | max_grad_norm: 2.0 8 | log_batch_step: 1 9 | train_rolling_loss_step: 100 10 | weight_decay: 0.0 11 | learning_rate: 2e-5 12 | 13 | # Linear warmup over warmup_steps. 14 | warmup_steps: 1237 15 | 16 | # Number of updates steps to accumulate before performing a backward/update pass. 17 | gradient_accumulation_steps: 1 18 | 19 | # Total number of training epochs to perform. 
20 | num_train_epochs: 40 21 | eval_per_epoch: 1 22 | hard_negatives: 1 23 | other_negatives: 0 24 | val_av_rank_hard_neg: 30 25 | val_av_rank_other_neg: 30 26 | val_av_rank_bsz: 128 27 | val_av_rank_max_qs: 10000 28 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/train/biencoder_nq.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | batch_size: 4 4 | dev_batch_size: 64 5 | adam_eps: 1e-8 6 | adam_betas: (0.9, 0.999) 7 | max_grad_norm: 2.0 8 | log_batch_step: 100 9 | train_rolling_loss_step: 100 10 | weight_decay: 0.0 11 | learning_rate: 2e-5 12 | 13 | # Linear warmup over warmup_steps. 14 | warmup_steps: 1237 15 | 16 | # Number of updates steps to accumulate before performing a backward/update pass. 17 | gradient_accumulation_steps: 1 18 | 19 | # Total number of training epochs to perform. 20 | num_train_epochs: 40 21 | eval_per_epoch: 1 22 | hard_negatives: 1 23 | other_negatives: 0 24 | val_av_rank_hard_neg: 30 25 | val_av_rank_other_neg: 30 26 | val_av_rank_bsz: 128 27 | val_av_rank_max_qs: 10000 28 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/conf/train/extractive_reader_default.yaml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | eval_step: 2000 4 | batch_size: 16 5 | dev_batch_size: 72 6 | adam_eps: 1e-8 7 | adam_betas: (0.9, 0.999) 8 | max_grad_norm: 1.0 9 | log_batch_step: 100 10 | train_rolling_loss_step: 100 11 | weight_decay: 0.0 12 | learning_rate: 1e-5 13 | 14 | # Linear warmup over warmup_steps. 15 | warmup_steps: 0 16 | 17 | # Number of updates steps to accumulate before performing a backward/update pass. 18 | gradient_accumulation_steps: 1 19 | 20 | # Total number of training epochs to perform. 
21 | num_train_epochs: 100000 22 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/dpr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/information_retrieval/DPR/dpr/__init__.py -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/dpr/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/information_retrieval/DPR/dpr/data/__init__.py -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/dpr/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/information_retrieval/DPR/dpr/utils/__init__.py -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/dpr/utils/conf_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import hydra 4 | from omegaconf import DictConfig 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | class BiencoderDatasetsCfg(object): 10 | def __init__(self, cfg: DictConfig): 11 | datasets = cfg.datasets 12 | self.train_datasets_names = cfg.train_datasets 13 | logger.info("train_datasets: %s", self.train_datasets_names) 14 | if self.train_datasets_names: 15 | self.train_datasets = [ 16 | hydra.utils.instantiate(datasets[ds_name]) 17 | for ds_name in self.train_datasets_names 18 | ] 19 | else: 20 | self.train_datasets = [] 21 | if cfg.dev_datasets: 22 | self.dev_datasets_names = cfg.dev_datasets 23 | logger.info("dev_datasets: %s", self.dev_datasets_names) 24 | self.dev_datasets = [ 25 | hydra.utils.instantiate(datasets[ds_name]) 26 | for ds_name in self.dev_datasets_names 27 | ] 28 | self.sampling_rates = cfg.train_sampling_rates 29 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/model_config.yml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | # model type. 
One of [hf_bert, pytext_bert, fairseq_roberta] 4 | encoder_model_type: hf_bert 5 | 6 | # HuggingFace's config name for model initialization 7 | pretrained_model_cfg: bert-base-uncased 8 | 9 | # Some encoders need to be initialized from a file 10 | pretrained_file: 11 | 12 | # Extra linear layer on top of standard bert/roberta encoder 13 | projection_dim: 0 14 | 15 | # Max length of the encoder input sequence 16 | sequence_length: 256 17 | 18 | dropout: 0.1 19 | 20 | # whether to fix (don't update) context encoder during training or not 21 | fix_ctx_encoder: False 22 | 23 | # if False, the model won't load pre-trained BERT weights 24 | pretrained: True -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers 2 | torch 3 | faiss 4 | tqdm 5 | elasticsearch 6 | streamlit 7 | requests 8 | sparseml 9 | faiss-cpu 10 | filelock 11 | numpy 12 | regex 13 | spacy 14 | sparseml -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/DPR/train_config.yml: -------------------------------------------------------------------------------- 1 | # @package _group_ 2 | 3 | batch_size: 4 4 | dev_batch_size: 16 5 | adam_eps: 1e-8 6 | adam_betas: (0.9, 0.999) 7 | max_grad_norm: 2.0 8 | log_batch_step: 1 9 | train_rolling_loss_step: 100 10 | weight_decay: 0.0 11 | learning_rate: 2e-5 12 | 13 | # Linear warmup over warmup_steps. 14 | warmup_steps: 1237 15 | 16 | # Number of updates steps to accumulate before performing a backward/update pass. 17 | gradient_accumulation_steps: 1 18 | 19 | # Total number of training epochs to perform. 20 | num_train_epochs: 40 21 | eval_per_epoch: 1 22 | hard_negatives: 1 23 | other_negatives: 0 24 | val_av_rank_hard_neg: 30 25 | val_av_rank_other_neg: 30 26 | val_av_rank_bsz: 128 27 | val_av_rank_max_qs: 10000 28 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/README.md: -------------------------------------------------------------------------------- 1 | # Compressing Neural Methods for Information Retrieval 2 | Author: @spacemanidol 3 | 4 | Neural methods for information retrieval have shown tremendous promise. Leveraging language models like BERT and T5, single-stage and multi-stage systems have exploded and in some cases are able to outperform traditional sparse search (BM25) by 2-3x. 5 | Despite the improvement in quality, neural methods prove tricky to use in production. Large model size makes index generation difficult and expensive and requires large GPU clusters. In this folder we explore how compression methods like structured pruning, unstructured pruning and distillation can be used with the Neural Magic framework to bridge the gap from research to production. 6 | 7 | We experiment with compressing and optimizing sparse search by doing query prediction and expansion with a T5 model, and dense retrieval using BERT-based bi-encoders. 
The goal of these experiments is to determine if neural models can be made deployable for any type of workload without using GPUs 8 | 9 | ### Doc2Query 10 | Fill out when project is done 11 | 12 | ### DPR 13 | 14 | ### Elastic Search Implementation 15 | 16 | ## Results 17 | 18 | 19 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/doc2query/indexes/init.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/information_retrieval/doc2query/indexes/init.txt -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/doc2query/outputs/bm25_baseline.txt: -------------------------------------------------------------------------------- 1 | ##################### 2 | MRR @10: 0.18741227770955543 3 | QueriesRanked: 6980 4 | ##################### 5 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/doc2query/outputs/init.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/information_retrieval/doc2query/outputs/init.txt -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/doc2query/recipes/noprune.yaml: -------------------------------------------------------------------------------- 1 | version: 1.1.0 2 | 3 | modifiers: 4 | - !EpochRangeModifier 5 | end_epoch: 5 6 | start_epoch: 0.0 7 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/doc2query/src/convert_doc_collection_to_jsonl.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | def convert_collection(args): 6 | with open(args.output_path, 'w', encoding='utf-8') as w: 7 | with open(args.collection_path, encoding='utf-8') as f: 8 | for i, line in enumerate(f): 9 | id, body = line.split('\t') 10 | output_dict = {'id': id, 'contents': body} 11 | w.write(json.dumps(output_dict) + '\n') 12 | 13 | 14 | if __name__ == '__main__': 15 | parser = argparse.ArgumentParser(description='Convert MSMARCO tsv passage collection into jsonl files for Anserini.') 16 | parser.add_argument('--collection_path', required=True, help='Path to MS MARCO tsv collection.') 17 | parser.add_argument('--output_path', required=True, help='Output filename.') 18 | args = parser.parse_args() 19 | convert_collection(args) 20 | print('Done!') 21 | -------------------------------------------------------------------------------- /sparseml/research/information_retrieval/elastic_integration/requirements.txt: -------------------------------------------------------------------------------- 1 | transformers 2 | torch 3 | faiss 4 | tqdm 5 | elasticsearch 6 | streamlit 7 | requests 8 | sparseml -------------------------------------------------------------------------------- /sparseml/research/mfac/recipes/pruning-mnistnet-one_shot-magnitude.md: -------------------------------------------------------------------------------- 1 | 16 | 17 | --- 18 | pruning_modifiers: 19 | - !GMPruningModifier 20 | params: __ALL_PRUNABLE__ 21 | init_sparsity: 0.0 22 | final_sparsity: 0.35 23 | start_epoch: 
0.0 24 | end_epoch: 1.0 25 | update_frequency: 1.0 26 | --- 27 | 28 | # Pruning MNISTNet with Magnitude 29 | This recipe prunes a model to 35% sparsity using magnitude pruning. 30 | This recipe is intended for use with one-shot pruning and is for example 31 | purposes only. -------------------------------------------------------------------------------- /sparseml/research/optimal_BERT_surgeon_oBERT/recipes/30epochs_dense_squad.yaml: -------------------------------------------------------------------------------- 1 | modifiers: 2 | - !EpochRangeModifier 3 | start_epoch: 0 4 | end_epoch: 30 5 | 6 | training_modifiers: 7 | - !LearningRateFunctionModifier 8 | start_epoch: 0.0 9 | end_epoch: 2.0 10 | lr_func: linear 11 | init_lr: 8e-5 12 | final_lr: 8e-6 13 | - !LearningRateFunctionModifier 14 | start_epoch: 2.0 15 | end_epoch: 30.0 16 | lr_func: cyclic_linear 17 | cycle_epochs: 4.0 18 | init_lr: 8e-5 19 | final_lr: 8e-6 20 | 21 | distillation_modifiers: 22 | - !DistillationModifier 23 | hardness: 1.0 24 | temperature: 2.0 25 | distill_output_keys: [start_logits, end_logits] 26 | -------------------------------------------------------------------------------- /sparseml/research/optimal_BERT_surgeon_oBERT/recipes/8epochs_sparse_transfer_mnli.yaml: -------------------------------------------------------------------------------- 1 | training_modifiers: 2 | - !EpochRangeModifier 3 | start_epoch: 0 4 | end_epoch: 8 5 | 6 | - !LearningRateFunctionModifier 7 | start_epoch: 0 8 | end_epoch: 8 9 | lr_func: linear 10 | init_lr: 1.5e-4 11 | final_lr: 1.5e-6 12 | 13 | pruning_modifiers: 14 | - !ConstantPruningModifier 15 | start_epoch: 0 16 | end_epoch: 8 17 | params: 18 | - re:bert.encoder.layer.*.attention.self.query.weight 19 | - re:bert.encoder.layer.*.attention.self.key.weight 20 | - re:bert.encoder.layer.*.attention.self.value.weight 21 | - re:bert.encoder.layer.*.attention.output.dense.weight 22 | - re:bert.encoder.layer.*.intermediate.dense.weight 23 | - re:bert.encoder.layer.*.output.dense.weight 24 | 25 | distillation_modifiers: 26 | - !DistillationModifier 27 | hardness: 1.0 28 | temperature: 5.5 29 | distill_output_keys: [logits] -------------------------------------------------------------------------------- /sparseml/research/optimal_BERT_surgeon_oBERT/recipes/8epochs_sparse_transfer_qqp.yaml: -------------------------------------------------------------------------------- 1 | training_modifiers: 2 | - !EpochRangeModifier 3 | start_epoch: 0 4 | end_epoch: 8 5 | 6 | - !LearningRateFunctionModifier 7 | start_epoch: 0 8 | end_epoch: 8 9 | lr_func: linear 10 | init_lr: 1.5e-4 11 | final_lr: 1.5e-6 12 | 13 | pruning_modifiers: 14 | - !ConstantPruningModifier 15 | start_epoch: 0 16 | end_epoch: 8 17 | params: 18 | - re:bert.encoder.layer.*.attention.self.query.weight 19 | - re:bert.encoder.layer.*.attention.self.key.weight 20 | - re:bert.encoder.layer.*.attention.self.value.weight 21 | - re:bert.encoder.layer.*.attention.output.dense.weight 22 | - re:bert.encoder.layer.*.intermediate.dense.weight 23 | - re:bert.encoder.layer.*.output.dense.weight 24 | 25 | distillation_modifiers: 26 | - !DistillationModifier 27 | hardness: 1.0 28 | temperature: 5.5 29 | distill_output_keys: [logits] -------------------------------------------------------------------------------- /sparseml/research/optimal_BERT_surgeon_oBERT/recipes/8epochs_sparse_transfer_squad.yaml: -------------------------------------------------------------------------------- 1 | training_modifiers: 2 | - !EpochRangeModifier 3 | start_epoch: 0 
4 | end_epoch: 8 5 | 6 | - !LearningRateFunctionModifier 7 | start_epoch: 0 8 | end_epoch: 8 9 | lr_func: linear 10 | init_lr: 1.5e-4 11 | final_lr: 1.5e-6 12 | 13 | pruning_modifiers: 14 | - !ConstantPruningModifier 15 | start_epoch: 0 16 | end_epoch: 8 17 | params: 18 | - re:bert.encoder.layer.*.attention.self.query.weight 19 | - re:bert.encoder.layer.*.attention.self.key.weight 20 | - re:bert.encoder.layer.*.attention.self.value.weight 21 | - re:bert.encoder.layer.*.attention.output.dense.weight 22 | - re:bert.encoder.layer.*.intermediate.dense.weight 23 | - re:bert.encoder.layer.*.output.dense.weight 24 | 25 | distillation_modifiers: 26 | - !DistillationModifier 27 | hardness: 1.0 28 | temperature: 5.5 29 | distill_output_keys: [start_logits, end_logits] -------------------------------------------------------------------------------- /sparseml/research/optimal_BERT_surgeon_oBERT/scripts/30epochs_gradual_pruning_mnli_qqp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RECIPE=research/optimal_BERT_surgeon_oBERT/recipes/MY_COOL_RECIPE_NAME.yaml 4 | # TASK can be either mnli or qqp 5 | export TASK=mnli 6 | # export TASK=qqp 7 | 8 | CUDA_VISIBLE_DEVICES=0 python src/sparseml/transformers/text_classification.py \ 9 | --distill_teacher neuralmagic/oBERT-teacher-${TASK} \ 10 | --model_name_or_path bert-base-uncased \ 11 | --task_name ${TASK} \ 12 | --do_train \ 13 | --fp16 \ 14 | --do_eval \ 15 | --optim adamw_torch \ 16 | --evaluation_strategy epoch \ 17 | --save_strategy epoch \ 18 | --save_total_limit 1 \ 19 | --per_device_train_batch_size 32 \ 20 | --per_device_eval_batch_size 32 \ 21 | --learning_rate 5e-5 \ 22 | --max_seq_length 128 \ 23 | --preprocessing_num_workers 8 \ 24 | --seed 42 \ 25 | --num_train_epochs 30 \ 26 | --recipe ${RECIPE} \ 27 | --output_dir transformers_output_dir \ 28 | --overwrite_output_dir \ 29 | --skip_memory_metrics true \ 30 | --report_to wandb -------------------------------------------------------------------------------- /sparseml/research/optimal_lobotomizing/README.md: -------------------------------------------------------------------------------- 1 | # Optimal Lobotomizing: Exploring the effects of model compression on memorization in language models 2 | Author: @spacemanidol 3 | 4 | Language models have proven to be incredibly effective methods for language understanding and generation. As they are trained on massive textual datasets, they memorize portions of their training data. 5 | 6 | ### Method 7 | 8 | ### Prep and Data Gen 9 | 1. Find datasets that focus on memorization for decoder and encoder models (GPT-NEO) 10 | ### Experiments 11 | 1. Train models 12 | 2. Prune attention heads 13 | 3. Prune layers 14 | 4.
Unstructured pruning 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /sparseml/research/optimal_lobotomizing/data/init.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/optimal_lobotomizing/data/init.txt -------------------------------------------------------------------------------- /sparseml/research/optimal_lobotomizing/scripts/init.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/optimal_lobotomizing/scripts/init.sh -------------------------------------------------------------------------------- /sparseml/research/optimal_lobotomizing/src/init.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/research/optimal_lobotomizing/src/init.py -------------------------------------------------------------------------------- /sparseml/setup.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | profile = black 3 | default_section = FIRSTPARTY 4 | ensure_newline_before_comments = True 5 | force_grid_wrap = 0 6 | include_trailing_comma = True 7 | known_first_party = sparseml,sparsezoo,tests 8 | known_third_party = bs4,requests,packaging,yaml,pydantic,tqdm,numpy,onnx,onnxruntime,pandas,PIL,psutil,scipy,toposort,pytest,torch,torchvision,keras,tensorflow,merge-args,cv2,transformers,datasets,sklearn,seqeval 9 | sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 10 | 11 | line_length = 88 12 | lines_after_imports = 2 13 | multi_line_output = 3 14 | use_parentheses = True 15 | 16 | [flake8] 17 | ignore = E203, E251, E701, W503 18 | max-line-length = 88 -------------------------------------------------------------------------------- /sparseml/sparseml_SS1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH --job-name=sparsifier 4 | #SBATCH --mem=128GB 5 | 6 | #SBATCH --time=4-00:00:00 7 | #SBATCH --partition=amdrtx 8 | 9 | module load cuda/11.7.1 10 | source activate sparseml_artf 11 | 12 | echo "SLURM_JOB_NUM_NODES $SLURM_JOB_NUM_NODES" 13 | echo "HOSTNAME $HOSTNAME" 14 | 15 | echo "Args train.sh: $@" 16 | 17 | # vw_8 18 | srun integrations/huggingface-transformers/scripts/30epochs_gradual_pruning_squad_block8_875.sh 19 | 20 | # 128:2:16 (pair-wise) 21 | srun integrations/huggingface-transformers/scripts/obs216v128_squad_gradual_pair.sh 22 | 23 | # 1:2:16 (pair-wise) 24 | srun integrations/huggingface-transformers/scripts/obs216_squad_gradual_pair.sh -------------------------------------------------------------------------------- /sparseml/src/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to running benchmarks across ML frameworks. 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .info import * 22 | from .serialization import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/deepsparse/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality for working with and sparsifying Models in the DeepSparse framework 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .base import * 22 | from .framework import detect_framework, framework_info, is_supported 23 | from .sparsification import sparsification_info 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/deepsparse/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to integrating with, detecting, and getting information for 17 | support and sparsification in the DeepSparse framework. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .info import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/deepsparse/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | Functionality related to applying, describing, and supporting sparsification 19 | algorithms to models within the DeepSparse framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .info import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/exporters/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/exporters/transforms/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | # flake8: noqa 16 | 17 | from .add_quantized_conv_matmul_add_ops import * 18 | from .helpers import * 19 | from .matching import * 20 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to integrating with, detecting, and getting information for 17 | support and sparsification in ML frameworks. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .info import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality for working with and sparsifying Models in the Keras framework 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .base import * 22 | from .framework import detect_framework, framework_info, is_supported 23 | from .sparsification import sparsification_info 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """ 16 | Code for creating and loading datasets in Keras 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_keras_install as _check_keras_install 22 | from .classification import * 23 | from .dataset import * 24 | from .helpers import * 25 | from .registry import * 26 | 27 | 28 | _check_keras_install() # TODO: remove once files within package load without installs 29 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/datasets/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Datasets related to image classification field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .imagefolder import * 22 | from .imagenet import * 23 | from .imagenette import * 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # flake8: noqa 16 | 17 | """ 18 | Functionality related to detecting and getting information for 19 | support and sparsification in the Keras framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .info import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """ 16 | Code for creating and loading datasets in Keras 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_keras_install as _check_keras_install 22 | from .classification import * 23 | from .external import * 24 | from .registry import * 25 | 26 | 27 | _check_keras_install() # TODO: remove once files within package load without installs 28 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/models/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # flake8: noqa 16 | 17 | from .resnet import * 18 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/models/external/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Integrations with external model repos to register 17 | with the sparseml.keras model registry. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .keras_applications import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | Functionality related to applying, describing, and supporting sparsification 19 | algorithms to models within in the Keras framework. 
20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .info import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Generic code used as utilities and helpers for Keras 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_keras_install as _check_keras_install 22 | from .callbacks import * 23 | from .compat import * 24 | from .exporter import * 25 | from .logger import * 26 | from .model import * 27 | 28 | 29 | _check_keras_install() # TODO: remove once files within package load without installs 30 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/keras/utils/compat.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | try: 16 | import keras as native_keras 17 | except ModuleNotFoundError: 18 | native_keras = None 19 | 20 | import tensorflow 21 | 22 | 23 | __all__ = [ 24 | "assign", 25 | "keras", 26 | ] 27 | 28 | 29 | keras = native_keras if native_keras is not None else tensorflow.keras 30 | 31 | 32 | def assign(lhs, rhs, name=None): 33 | if hasattr(tensorflow, "assign"): 34 | return tensorflow.assign(lhs, rhs, name=name) 35 | else: 36 | return lhs.assign(rhs, name=name) 37 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality for working with and sparsifying Models in the ONNX/ONNXRuntime framework 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .base import * 22 | from .benchmark import * 23 | from .framework import detect_framework, framework_info, is_supported 24 | from .sparsification import ModelInfo, get_analyzer_impls, sparsification_info 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/benchmark/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to running benchmarks in the ONNX/ONNXruntime 17 | frameworks. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .info import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # flake8: noqa 16 | 17 | """ 18 | Functionality related to integrating with, detecting, and getting information for 19 | support and sparsification in the ONNX/ONNXRuntime framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .info import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Recalibration code for the ONNX framework. 17 | Handles things like model pruning. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .analyzer_model import * 23 | from .sensitivity_pruning import * 24 | from .structured_pruning import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/optim/quantization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Post training quantization tools for quantizing and calibrating onnx models. 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .calibration import * 22 | from .quantize import * 23 | from .quantize_model_post_training import * 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | Functionality related to applying, describing, and supporting sparsification 19 | algorithms to models within the ONNX/ONNXRuntime framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .analyzer import * 25 | from .info import * 26 | from .model_info import * 27 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/onnx/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Generic code used as utilities and helpers for ONNX 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .data import * 22 | from .graph_editor import * 23 | from .graph_optimizer import * 24 | from .helpers import * 25 | from .loss import * 26 | from .model import * 27 | from .sparse_tensor import * 28 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/openpifpaf/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/optim/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Recalibration code shared across ML frameworks. 17 | Handles things like model pruning and increasing activation sparsity. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .analyzer import * 23 | from .helpers import * 24 | from .manager import * 25 | from .modifier import * 26 | from .sensitivity import * 27 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """ 16 | Code for creating and loading datasets in PyTorch 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_torch_install as _check_torch_install 22 | from .classification import * 23 | from .detection import * 24 | from .generic import * 25 | from .recommendation import * 26 | from .registry import * 27 | from .video import * 28 | 29 | 30 | _check_torch_install() # TODO: remove once files within package load without installs 31 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Datasets related to image classification field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .cifar import * 22 | from .imagefolder import * 23 | from .imagenet import * 24 | from .imagenette import * 25 | from .mnist import * 26 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/detection/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Datasets related to object detection field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .coco import * 22 | from .helpers import * 23 | from .voc import * 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/image_classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/recommendation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Datasets related to recommendations field 17 | """ 18 | 19 | # flake8: noqa 20 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/datasets/video/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Datasets related to video field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # flake8: noqa 16 | 17 | """ 18 | Functionality related to integrating with, detecting, and getting information for 19 | support and sparsification in the PyTorch framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .info import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/image_classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. 
All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/image_classification/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from .constants import * 18 | from .trainer import * 19 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Code for creating and loading models in PyTorch 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_torch_install as _check_torch_install 22 | from .classification import * 23 | from .detection import * 24 | from .external import * 25 | from .recommendation import * 26 | from .registry import * 27 | 28 | 29 | _check_torch_install() # TODO: remove once files within package load without installs 30 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/models/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Models related to image classification field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .darknet import * 22 | from .efficientnet import * 23 | from .inception_v3 import * 24 | from .mnist import * 25 | from .mobilenet import * 26 | from .mobilenet_v2 import * 27 | from .resnet import * 28 | from .vgg import * 29 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/models/detection/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Models related to object detection field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .ssd import * 22 | from .ssd_lite import * 23 | from .ssd_mobilenet import * 24 | from .ssd_resnet import * 25 | from .yolo_v3 import * 26 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/models/external/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Integrations with external model repos to register 17 | with the sparseml.pytorch model registry. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .torchvision import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/models/recommendation/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Models related to recommendations 17 | """ 18 | 19 | # flake8: noqa 20 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Layers / operators for PyTorch models 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_torch_install as _check_torch_install 22 | from .activations import * 23 | from .fatrelu import * 24 | from .identity import * 25 | from .se import * 26 | 27 | 28 | _check_torch_install() # TODO: remove once files within package load without installs 29 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/opset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import torch 16 | from packaging import version 17 | 18 | 19 | def _default_opset() -> int: 20 | torch_version = version.parse(torch.__version__) 21 | if torch_version < version.parse("1.3"): 22 | return 9 23 | if torch_version < version.parse("1.10.0"): 24 | return 11 25 | return 13 26 | 27 | 28 | TORCH_DEFAULT_ONNX_OPSET = _default_opset() 29 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/recipe_template/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # flake8: noqa 16 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | Functionality related to applying, describing, and supporting sparsification 19 | algorithms to models within the PyTorch framework. 20 | """ 21 | 22 | # flake8: noqa 23 | 24 | from .distillation import * 25 | from .info import * 26 | from .modifier import * 27 | from .modifier_thinning import * 28 | from .pruning import * 29 | from .quantization.modifier_quantization import * 30 | from .training import * 31 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/sparsification/distillation/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from .modifier_distillation import * 18 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/sparsification/quantization/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from .constants import * 18 | from .helpers import * 19 | from .modifier_quantization import * 20 | from .quantization_scheme import * 21 | from .quantize import * 22 | from .quantize_qat_export import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/sparsification/training/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from .modifier_epoch import * 18 | from .modifier_lr import * 19 | from .modifier_params import * 20 | from .modifier_regularizer import * 21 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/pytorch/torchvision/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/recipe_template/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, 12 | # software distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | from .utils import * 18 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality for working with and sparsifying Models in the TensorFlow 1.x framework 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .base import * 22 | from .framework import detect_framework, framework_info, is_supported 23 | from .sparsification import sparsification_info 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Code for creating and loading datasets in TensorFlow 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_tensorflow_install as _check_tensorflow_install 22 | from .classification import * 23 | from .dataset import * 24 | from .registry import * 25 | 26 | 27 | _check_tensorflow_install() # TODO: remove once files load without installs 28 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/datasets/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """ 16 | Datasets related to image classification field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .cifar import * 22 | from .imagefolder import * 23 | from .imagenet import * 24 | from .imagenette import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/framework/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to integrating with, detecting, and getting information for 17 | support and sparsification in the TensorFlow 1.x framework. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .info import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Code for creating and loading models in TensorFlow 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_tensorflow_install as _check_tensorflow_install 22 | from .classification import * 23 | from .estimator import * 24 | from .registry import * 25 | 26 | 27 | _check_tensorflow_install() # TODO: remove once files load without installs 28 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/models/classification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | """ 16 | Models related to image classification field in computer vision 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .mnist import * 22 | from .mobilenet import * 23 | from .mobilenet_v2 import * 24 | from .resnet import * 25 | from .vgg import * 26 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/nn/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Neural Network layers and ops for TensorFlow V1 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_tensorflow_install as _check_tensorflow_install 22 | from .layers import * 23 | 24 | 25 | _check_tensorflow_install() # TODO: remove once files load without installs 26 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Functionality related to applying, describing, and supporting sparsification 17 | algorithms to models within the TensorFlow 1.x framework. 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .info import * 23 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | 15 | """ 16 | Generic code used as utilities and helpers for TensorFlow 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from ..base import check_tensorflow_install as _check_tensorflow_install 22 | from .exporter import * 23 | from .helpers import * 24 | from .loss import * 25 | from .summary import * 26 | from .variable import * 27 | 28 | 29 | _check_tensorflow_install() # TODO: remove once files load without installs 30 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/tensorflow_v1/utils/helpers.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import tensorflow as tf 16 | 17 | 18 | __all__ = [ 19 | "tf_compat", 20 | "tf_compat_div", 21 | ] 22 | 23 | 24 | tf_compat = ( 25 | tf 26 | if not hasattr(tf, "compat") or not hasattr(getattr(tf, "compat"), "v1") 27 | else tf.compat.v1 28 | ) # type: tf 29 | tf_compat_div = ( 30 | tf_compat.div 31 | if not hasattr(tf_compat, "math") 32 | or not hasattr(getattr(tf_compat, "math"), "divide") 33 | else tf_compat.math.divide 34 | ) 35 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/transformers/sparsification/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Objects, classes, and methods for applying sparsification algorithms to 17 | Hugging Face transformers flows 18 | """ 19 | 20 | # flake8: noqa 21 | 22 | from .question_answering import * 23 | from .trainer import * 24 | from .training_args import * 25 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/transformers/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | Utilities for applying sparsification algorithms to Hugging Face transformers flows 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .helpers import * 22 | from .metrics import * 23 | from .model import * 24 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | General utility functions used throughout sparseml 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .frameworks import * 22 | from .helpers import * 23 | from .restricted_eval import * 24 | from .singleton import * 25 | from .worker import * 26 | from .wrapper import * 27 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/utils/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """ 16 | General utilities for datasets in sparseml 17 | """ 18 | 19 | # flake8: noqa 20 | 21 | from .cifar import * 22 | from .coco import * 23 | from .helpers import * 24 | from .imagenet import * 25 | from .imagenette import * 26 | from .voc import * 27 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/utils/datasets/cifar.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | __all__ = ["CIFAR_10_CLASSES"] 16 | 17 | CIFAR_10_CLASSES = { 18 | 0: "airplane", 19 | 1: "automobile", 20 | 2: "bird", 21 | 3: "cat", 22 | 4: "deer", 23 | 5: "dog", 24 | 6: "frog", 25 | 7: "horse", 26 | 8: "ship", 27 | 9: "truck", 28 | } 29 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/utils/datasets/voc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | __all__ = ["VOC_CLASSES"] 16 | 17 | 18 | VOC_CLASSES = { 19 | 0: "aeroplane", 20 | 1: "bicycle", 21 | 2: "bird", 22 | 3: "boat", 23 | 4: "bottle", 24 | 5: "bus", 25 | 6: "car", 26 | 7: "cat", 27 | 8: "chair", 28 | 9: "cow", 29 | 10: "diningtable", 30 | 11: "dog", 31 | 12: "horse", 32 | 13: "motorbike", 33 | 14: "person", 34 | 15: "pottedplant", 35 | 16: "sheep", 36 | 17: "sofa", 37 | 18: "train", 38 | 19: "tvmonitor", 39 | } 40 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/utils/frameworks.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, 10 | # software distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """ 16 | ML framework tokens 17 | """ 18 | 19 | __all__ = [ 20 | "ONNX_FRAMEWORK", 21 | "KERAS_FRAMEWORK", 22 | "PYTORCH_FRAMEWORK", 23 | "TENSORFLOW_V1_FRAMEWORK", 24 | ] 25 | 26 | 27 | ONNX_FRAMEWORK = "onnx" 28 | KERAS_FRAMEWORK = "keras" 29 | PYTORCH_FRAMEWORK = "pytorch" 30 | TENSORFLOW_V1_FRAMEWORK = "tensorflow_v1" 31 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/yolov5/data/hyps/hyp.Objects365.yaml: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | # Hyperparameters for Objects365 training 3 | # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve 4 | # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials 5 | 6 | lr0: 0.00258 7 | lrf: 0.17 8 | momentum: 0.779 9 | weight_decay: 0.00058 10 | warmup_epochs: 1.33 11 | warmup_momentum: 0.86 12 | warmup_bias_lr: 0.0711 13 | box: 0.0539 14 | cls: 0.299 15 | cls_pw: 0.825 16 | obj: 0.632 17 | obj_pw: 1.0 18 | iou_t: 0.2 19 | anchor_t: 3.44 20 | anchors: 3.2 21 | fl_gamma: 0.0 22 | hsv_h: 0.0188 23 | hsv_s: 0.704 24 | hsv_v: 0.36 25 | degrees: 0.0 26 | translate: 0.0902 27 | scale: 0.491 28 | shear: 0.0 29 | perspective: 0.0 30 | flipud: 0.0 31 | fliplr: 0.5 32 | mosaic: 1.0 33 | mixup: 0.0 34 | copy_paste: 0.0 35 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/yolov5/data/hyps/hyp.finetune.yaml: -------------------------------------------------------------------------------- 1 | # Hyperparameters for VOC finetuning 2 | # python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 3 | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials 4 | 5 | 6 | # Hyperparameter Evolution Results 7 | # Generations: 306 8 | # P R mAP.5 mAP.5:.95 box obj cls 9 | # Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 10 | 11 | lr0: 0.0032 12 | lrf: 0.12 13 | momentum: 0.843 14 | weight_decay: 0.00036 15 | warmup_epochs: 2.0 16 | warmup_momentum: 0.5 17 | warmup_bias_lr: 0.05 18 | box: 0.0296 19 | cls: 0.243 20 | cls_pw: 0.631 21 | obj: 0.301 22 | obj_pw: 0.911 23 | iou_t: 0.2 24 | anchor_t: 2.91 25 | # anchors: 3.63 26 | fl_gamma: 0.0 27 | hsv_h: 0.0138 28 | hsv_s: 0.664 29 | hsv_v: 0.464 30 | degrees: 0.373 31 | translate: 0.245 32 | scale: 0.898 33 | shear: 0.602 34 | perspective: 0.0 35 | flipud: 0.00856 36 | fliplr: 0.5 37 | mosaic: 1.0 38 | mixup: 0.243 39 | copy_paste: 0.0 40 | -------------------------------------------------------------------------------- /sparseml/src/sparseml/yolov5/data/images/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/src/sparseml/yolov5/data/images/bus.jpg -------------------------------------------------------------------------------- /sparseml/src/sparseml/yolov5/data/images/zidane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UDC-GAC/venom/8ddaf38ef918d4aedfea12696fed60348c5ccd10/sparseml/src/sparseml/yolov5/data/images/zidane.jpg -------------------------------------------------------------------------------- /sparseml/src/sparseml/yolov5/data/scripts/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 
YOLOv5 🚀 by Ultralytics, GPL-3.0 license 3 | # Download latest models from https://github.com/ultralytics/yolov5/releases 4 | # Example usage: bash path/to/download_weights.sh 5 | # parent 6 | # └── yolov5 7 | # ├── yolov5s.pt ← downloads here 8 | # ├── yolov5m.pt 9 | # └── ... 10 | 11 | python - <