├── CONTRIBUTING.md ├── README.md ├── caffe ├── README.md ├── clean.sh ├── env.sh ├── load-image-dev.sh ├── run-container-dev.sh ├── tools │ ├── addTitleNumber4MDFile.py │ ├── genOfflineModels.sh │ ├── getFileList.sh │ └── getPicRandomDir2Dir.py ├── yolov3-416 │ ├── README.md │ ├── clean.sh │ ├── datasets │ │ ├── 000000000785.jpg │ │ ├── 000000001000.jpg │ │ ├── 000000001584.jpg │ │ ├── 000000002006.jpg │ │ ├── 000000002149.jpg │ │ ├── 000000002592.jpg │ │ ├── 000000003501.jpg │ │ ├── 000000004134.jpg │ │ ├── 000000005477.jpg │ │ ├── 000000007108.jpg │ │ ├── 000000007281.jpg │ │ ├── 000000008211.jpg │ │ ├── 000000013348.jpg │ │ ├── 000000015254.jpg │ │ ├── 000000015272.jpg │ │ ├── 000000017905.jpg │ │ ├── 000000018380.jpg │ │ ├── 000000018837.jpg │ │ ├── 000000020333.jpg │ │ ├── 000000024919.jpg │ │ ├── 000000026204.jpg │ │ ├── 000000026926.jpg │ │ ├── 000000038829.jpg │ │ ├── 000000039670.jpg │ │ ├── 000000044279.jpg │ │ ├── 000000044699.jpg │ │ ├── 000000050896.jpg │ │ ├── 000000067616.jpg │ │ ├── 000000147725.jpg │ │ ├── 000000147745.jpg │ │ ├── 000000148719.jpg │ │ └── 000000155341.jpg │ ├── env.sh │ ├── file_list_2014 │ ├── label_map_coco.txt │ ├── models │ │ ├── download_weights.sh │ │ └── yolov3-416.cfg │ ├── res │ │ ├── map50blue.png │ │ ├── performance-on-coo.png │ │ ├── sayit.jpg │ │ ├── yolov3_000000000785.jpg │ │ ├── yolov3_000000007108.jpg │ │ ├── yolov3_000000013348.jpg │ │ └── yolov3_000000018380.jpg │ ├── test │ │ ├── clean.sh │ │ ├── yolov3_offline_multicore │ │ │ └── clean.sh │ │ ├── yolov3_online_multicore_mfus │ │ │ └── clean.sh │ │ └── yolov3_online_multicore_mlu │ │ │ └── clean.sh │ ├── yolov3_file_list_coco │ ├── yolov3_offline_multicore.sh │ ├── yolov3_online_multicore_mfus.sh │ ├── yolov3_online_multicore_mlu.sh │ ├── yolov3_quantized.ini │ └── yolov3_quantized.sh └── yolov4-mish-416 │ ├── README.md │ ├── clean.sh │ ├── datasets │ ├── 000000000785.jpg │ ├── 000000001000.jpg │ ├── 000000001584.jpg │ ├── 000000002006.jpg │ ├── 000000002149.jpg │ ├── 000000002592.jpg │ ├── 000000003501.jpg │ ├── 000000004134.jpg │ ├── 000000005477.jpg │ ├── 000000007108.jpg │ ├── 000000007281.jpg │ ├── 000000008211.jpg │ ├── 000000013348.jpg │ ├── 000000015254.jpg │ ├── 000000015272.jpg │ ├── 000000017905.jpg │ ├── 000000018380.jpg │ ├── 000000018837.jpg │ ├── 000000020333.jpg │ ├── 000000024919.jpg │ ├── 000000026204.jpg │ ├── 000000026926.jpg │ ├── 000000038829.jpg │ ├── 000000039670.jpg │ ├── 000000044279.jpg │ ├── 000000044699.jpg │ ├── 000000050896.jpg │ ├── 000000067616.jpg │ ├── 000000147725.jpg │ ├── 000000147745.jpg │ ├── 000000148719.jpg │ └── 000000155341.jpg │ ├── env.sh │ ├── file_list_2014 │ ├── label_map_coco.txt │ ├── models │ ├── yolov4-leaky-416.cfg │ └── yolov4-mish-416.cfg │ ├── res │ ├── map50blue.png │ ├── performance-on-coo.png │ ├── sayit.jpg │ ├── yolov3_000000000785.jpg │ ├── yolov3_000000007108.jpg │ ├── yolov3_000000013348.jpg │ └── yolov3_000000018380.jpg │ ├── results.json │ ├── test │ ├── clean.sh │ ├── yolov4_offline_multicore │ │ └── clean.sh │ ├── yolov4_online_multicore_mfus │ │ └── clean.sh │ └── yolov4_online_multicore_mlu │ │ └── clean.sh │ ├── yolov4_file_list_coco │ ├── yolov4_offline_multicore.sh │ ├── yolov4_online_multicore_mfus.sh │ ├── yolov4_online_multicore_mlu.sh │ ├── yolov4_quantized.ini │ └── yolov4_quantized.sh ├── data ├── README.md └── bus.jpg ├── datasets ├── docker ├── Dockerfile.16.04 ├── Dockerfile.18.04 ├── clean.sh ├── pip.conf ├── pre_packages.sh ├── pre_packages18.04.sh ├── requirements.txt ├── sources_16.04.list └── 
sources_18.04.list ├── edge ├── README.md ├── clean.sh ├── cross_compile │ ├── README4M100-CNStream.md │ ├── build-bsp-all.sh │ ├── build-cnstream-ce3226.sh │ ├── clean.sh │ ├── env-ce3226.sh │ └── update-os.sh ├── dependent_files │ ├── README.md │ └── clean.sh ├── env.sh ├── load-image-dev.sh └── run-container-dev.sh ├── how_to_contribute ├── CONTRIBUTING_ATTENTION.md └── README_TEMPLATE.md ├── mm ├── README.md ├── clean.sh ├── cnvs │ ├── README.md │ ├── clean.sh │ ├── cnmon.sh │ ├── config │ │ ├── cnvs.example.yml │ │ ├── cnvs_matmul_performance_float.yml │ │ ├── cnvs_memory_bandwidth_512M.yml │ │ ├── cnvs_mlu_stress.yml │ │ ├── cnvs_mlulink.yml │ │ ├── cnvs_pcie_16M.yml │ │ ├── cnvs_peak_performance_bfloat16.yml │ │ ├── cnvs_peak_performance_float.yml │ │ ├── cnvs_peak_performance_half.yml │ │ ├── cnvs_peak_performance_int16.yml │ │ ├── cnvs_peak_performance_int8.yml │ │ ├── cnvs_targeted_power.yml │ │ └── cnvs_targeted_stress.yml │ ├── env.sh │ ├── init-cnvs.sh │ ├── matmul-test.sh │ ├── memory-test.sh │ ├── mlu_stress-test.sh │ ├── mlulink-test.sh │ ├── pcie-test.sh │ ├── peak-test.sh │ ├── run-all-test.sh │ ├── targeted_power-test.sh │ └── targeted_stress-test.sh ├── dependent_files │ └── README.md ├── env.sh ├── eval │ ├── README.md │ ├── clean.sh │ ├── env.sh │ └── mm_eval.sh ├── lib │ ├── base.sh │ ├── log.sh │ └── time.sh ├── load-image-dev.sh ├── mm_build │ └── README.md ├── mm_run │ └── README.md ├── perf │ ├── README.md │ ├── clean.sh │ ├── env.sh │ └── mm_perf.sh ├── run-container-dev.sh └── save-image-dev.sh ├── models ├── pytorch ├── README.md ├── clean.sh ├── cnml │ └── README.md ├── deepsort │ ├── 6.png │ ├── README.md │ ├── clean.sh │ ├── create_feature_extract.py │ ├── forward_feature_extract.py │ ├── genoff.py-example │ ├── quantize_feature_extract.py │ ├── run_convertmodel.sh │ └── run_offline.sh ├── env.sh ├── load-image-dev.sh ├── mask-rcnn │ └── README.md ├── run-container-dev.sh ├── tools │ ├── eval.py │ ├── getFileList.sh │ ├── getPicRandomDir2Dir.py │ ├── pytorch_forward.py │ └── run_all_offline_mc.sh ├── yolov4-tiny │ ├── README.md │ ├── load-image-ubuntu18.04-pytorch.sh │ ├── offline │ │ ├── clean.sh │ │ └── genoff │ │ │ └── genoff.py │ ├── online │ │ ├── common_utils.py │ │ └── yolov4 │ │ │ ├── clean.sh │ │ │ ├── data │ │ │ ├── coco.names │ │ │ ├── dog.jpg │ │ │ ├── meat.jpg │ │ │ └── voc.names │ │ │ ├── eval.py │ │ │ ├── model │ │ │ └── download_weights.sh │ │ │ ├── predictions.jpg │ │ │ ├── results.json │ │ │ ├── run_quant.sh │ │ │ └── tool │ │ │ ├── COCO_eval.py │ │ │ ├── __init__.py │ │ │ ├── change_cat_id.py │ │ │ ├── coco_annotation.py │ │ │ ├── config.py │ │ │ ├── darknet2pytorch.py │ │ │ ├── region_loss.py │ │ │ ├── torch_utils.py │ │ │ ├── utils.py │ │ │ └── yolo_layer.py │ ├── pytorch_models │ │ ├── int16 │ │ │ └── checkpoints │ │ │ │ └── clean.sh │ │ ├── int8 │ │ │ └── checkpoints │ │ │ │ └── clean.sh │ │ └── origin │ │ │ └── checkpoints │ │ │ └── clean.sh │ └── run-container-ubuntu18.04-pytorch.sh ├── yolov4 │ ├── README.md │ ├── clean.sh │ ├── datasets │ │ ├── 000000001993.jpg │ │ ├── 000000046252.jpg │ │ ├── 000000065288.jpg │ │ ├── 000000079588.jpg │ │ ├── 000000147729.jpg │ │ ├── 000000170613.jpg │ │ ├── 000000250758.jpg │ │ ├── 000000283520.jpg │ │ ├── 000000303908.jpg │ │ ├── 000000363784.jpg │ │ ├── 000000419408.jpg │ │ ├── 000000460379.jpg │ │ ├── 000000463802.jpg │ │ ├── 000000479155.jpg │ │ ├── 000000526751.jpg │ │ └── 000000565607.jpg │ ├── file_list_datasets │ ├── models │ │ ├── config.ini │ │ ├── download_weights.sh │ │ ├── mlu │ │ │ 
└── clean.sh │ │ └── yolov4.cfg │ ├── res │ │ └── yolov4.png │ └── test │ │ ├── clean.sh │ │ ├── offline │ │ └── clean.sh │ │ └── online │ │ └── clean.sh └── yolov5 │ ├── README.md │ ├── aligntorch │ ├── README.md │ ├── aligntorch.sh │ ├── build-image-yolov5-align.sh │ ├── clean.sh │ ├── dependent_files │ │ └── README.md │ ├── docker │ │ ├── Dockerfile.18.04 │ │ ├── clean.sh │ │ ├── pip.conf │ │ ├── pre_packages.sh │ │ ├── requirements.txt │ │ └── sources_18.04.list │ ├── env.sh │ ├── load-image-yolov5-align.sh │ ├── run-container-yolov5-align.sh │ ├── save-image-yolov5-align.sh │ ├── sync.sh │ ├── tools │ │ ├── aligntorch.py │ │ └── download-yolov5.sh │ └── weights │ │ ├── clean.sh │ │ └── download-yolov5-weights.sh │ ├── clean.sh │ ├── convertmodel │ ├── README.md │ ├── clean.sh │ ├── convertmodel.py │ ├── data │ │ └── bus.jpg │ ├── detect_mark_layer.py │ ├── detect_mark_qua.py │ ├── genoff.py │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-35.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── __init__.cpython-37.pyc │ │ │ ├── common.cpython-35.pyc │ │ │ ├── common.cpython-36.pyc │ │ │ ├── common.cpython-37.pyc │ │ │ ├── experimental.cpython-35.pyc │ │ │ ├── experimental.cpython-36.pyc │ │ │ ├── experimental.cpython-37.pyc │ │ │ ├── yolo.cpython-35.pyc │ │ │ ├── yolo.cpython-36.pyc │ │ │ └── yolo.cpython-37.pyc │ │ ├── common.py │ │ ├── experimental.py │ │ ├── export.py │ │ ├── hub │ │ │ ├── anchors.yaml │ │ │ ├── yolov3-spp.yaml │ │ │ ├── yolov3-tiny.yaml │ │ │ ├── yolov3.yaml │ │ │ ├── yolov5-fpn.yaml │ │ │ ├── yolov5-p2.yaml │ │ │ ├── yolov5-p6.yaml │ │ │ ├── yolov5-p7.yaml │ │ │ └── yolov5-panet.yaml │ │ ├── yolo.py │ │ ├── yolov5l.yaml │ │ ├── yolov5m.yaml │ │ ├── yolov5s.yaml │ │ ├── yolov5s.yaml-bak │ │ └── yolov5x.yaml │ ├── quant.py │ ├── run_convertmodel.sh │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-35.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── __init__.cpython-37.pyc │ │ ├── activations.cpython-35.pyc │ │ ├── activations.cpython-36.pyc │ │ ├── activations.cpython-37.pyc │ │ ├── autoanchor.cpython-35.pyc │ │ ├── autoanchor.cpython-36.pyc │ │ ├── autoanchor.cpython-37.pyc │ │ ├── datasets.cpython-35.pyc │ │ ├── datasets.cpython-36.pyc │ │ ├── datasets.cpython-37.pyc │ │ ├── general.cpython-35.pyc │ │ ├── general.cpython-36.pyc │ │ ├── general.cpython-37.pyc │ │ ├── google_utils.cpython-35.pyc │ │ ├── google_utils.cpython-36.pyc │ │ ├── google_utils.cpython-37.pyc │ │ ├── loss.cpython-37.pyc │ │ ├── metrics.cpython-35.pyc │ │ ├── metrics.cpython-36.pyc │ │ ├── metrics.cpython-37.pyc │ │ ├── plots.cpython-35.pyc │ │ ├── plots.cpython-36.pyc │ │ ├── plots.cpython-37.pyc │ │ ├── torch_utils.cpython-35.pyc │ │ ├── torch_utils.cpython-36.pyc │ │ └── torch_utils.cpython-37.pyc │ │ ├── activations.py │ │ ├── autoanchor.py │ │ ├── datasets.py │ │ ├── general.py │ │ ├── google_app_engine │ │ ├── Dockerfile │ │ ├── additional_requirements.txt │ │ └── app.yaml │ │ ├── google_utils.py │ │ ├── loss.py │ │ ├── metrics.py │ │ ├── plots.py │ │ └── torch_utils.py │ ├── offline │ ├── Makefile │ ├── README.md │ ├── clean.sh │ ├── image.jpg │ ├── main.cpp │ └── run_offline.sh │ └── res │ ├── clean.sh │ ├── yolov5-performance.jpg │ └── yolov5.jpg ├── pytorch1.13 ├── README.md ├── benchmark │ └── chatglm2 │ │ ├── README.md │ │ └── tools │ │ └── modeling_chatglm.py ├── chatglm3 │ ├── README.md │ └── tools │ │ ├── accelerator.py │ │ ├── demo.py │ │ ├── dependency_versions_table.py │ │ ├── deploy_env.sh │ │ ├── finetune_pt_multiturn.sh │ │ 
├── imports.py │ │ ├── modeling_chatglm_mlu_infer.py │ │ └── modeling_chatglm_mlu_training.py ├── clean.sh ├── env.sh ├── load-image-dev.sh ├── run-container-dev.sh └── save-image-dev.sh ├── pytorch1.9 ├── README.md ├── baichuan │ ├── README.md │ ├── README4OfflineImages.md │ ├── clean.sh │ ├── res │ │ └── baichuan-7B.jpg │ ├── tools │ │ ├── env.sh │ │ ├── inference.py │ │ └── modeling_utils.py │ └── torch_gpu2mlu.py ├── chatglm │ ├── README.md │ ├── README4OfflineImages.md │ ├── clean.sh │ ├── res │ │ ├── aiknight_mlu_chatglm.gif │ │ ├── aiknight_mlu_chatglm_train_cnmon.gif │ │ └── chatglm-6b.jpg │ ├── tools │ │ ├── api.py │ │ ├── cli_demo.py │ │ ├── ds_train_finetune_mlu.sh │ │ ├── inference.py │ │ ├── main.py │ │ ├── main_parallel.py │ │ ├── modeling_chatglm.py │ │ ├── train_mlu.sh │ │ ├── train_parallel_mlu.sh │ │ ├── trainer.py │ │ ├── version.py │ │ └── web_demo.py │ └── torch_gpu2mlu.py ├── chatglm2 │ ├── README.md │ ├── clean.sh │ ├── res │ │ ├── aiknight_mlu_chatglm2.gif │ │ ├── aiknight_mlu_chatglm2_train_cnmon.gif │ │ └── chatglm2-6b.jpg │ └── tools │ │ ├── api.py │ │ ├── cli_demo.py │ │ ├── demo.py │ │ ├── deploy_env.sh │ │ ├── modeling_chatglm.py │ │ ├── profiler_demo.py │ │ └── web_demo.py ├── chinese-llama-alpaca-2 │ ├── README.md │ ├── clean.sh │ ├── res │ │ ├── README.md │ │ ├── chinese-llama-alpaca-2-sys-s.gif │ │ └── chinese-llama-alpaca-2.gif │ └── tools │ │ ├── deploy_env.sh │ │ ├── env.sh │ │ ├── merge_pretrain_model_13b.sh │ │ ├── merge_trainmodel_13b.sh │ │ ├── openai_api_server.py │ │ ├── run_finetuning_13b.sh │ │ ├── run_inference_13b.sh │ │ ├── run_inference_13b_2.sh │ │ ├── run_inference_13b_2_pretrain.sh │ │ ├── run_mlu_eval.sh │ │ ├── run_mlu_eval_2.sh │ │ ├── run_mlu_eval_2_pretrain.sh │ │ └── run_pretraining_13b.sh ├── clean.sh ├── docker │ └── clean.sh ├── env.sh ├── load-image-dev.sh ├── openbiomed │ ├── README.md │ ├── clean.sh │ ├── res │ │ ├── aiknight_openbiomed_cnmon.gif │ │ └── openbiomed.gif │ └── tools │ │ └── sparse.py ├── run-container-dev.sh ├── save-image-dev.sh ├── stdc-seg │ ├── README_MLU.md │ ├── clean.sh │ └── torch_gpu2mlu.py └── yolact │ ├── README_MLU.md │ ├── clean.sh │ └── torch_gpu2mlu.py ├── res ├── aiknight_cars_6_20.gif ├── aiknight_cnmon_3226_20.gif ├── aiknight_wechat_344.jpg ├── cambricon.jpg ├── dev-env-ubuntu-1.png └── note.gif ├── tensorflow ├── README.md ├── clean.sh ├── env.sh ├── load-image-dev.sh └── run-container-dev.sh ├── test └── cnrtexec │ ├── Makefile │ ├── clean.sh │ ├── cnrtexec │ ├── cnrtexec-readme.pdf │ ├── cnrtexec.cpp │ ├── cnrtexec.h │ ├── cnrtexec.o │ ├── main.cpp │ └── main.o └── tools ├── add-branch4github.sh ├── addTitleNumber4MDFile.py ├── cleanDocker.sh ├── getFileList.sh ├── getPicRandomDir2Dir.py ├── killMLUAllPID.sh ├── rmFileGit.sh ├── test-cnmon.sh ├── update_sources.list-ali.sh └── update_sources.list-tuna.sh /caffe/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #Dockerfile(16.04/18.04/CentOS) 5 | #OSVer="16.04" 6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi 7 | # 1.Source env 8 | source ./env.sh $OSVer 9 | 10 | # 2.rm docker container 11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 13 | if [ $num_container ]; then sudo docker stop $num_container;fi 14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | if [ $num_container ]; then sudo 
docker rm $num_container;fi 16 | 17 | # 3.rmi docker image 18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 21 | -------------------------------------------------------------------------------- /caffe/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2021/08/04 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-dev.sh 8 | # Depends: caffe-5.4.602-ubuntu18.04.tar 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | # Source env 12 | source "./env.sh" 13 | #################### main #################### 14 | # 0.Check param 15 | if [[ $# -eq 0 ]];then 16 | echo -e "${yellow}WARNING: Loading image(${FULLNAME_IMAGE}) by default. ${none}" 17 | else 18 | FULLNAME_IMAGE="${1}" 19 | fi 20 | # 0.Check File Images 21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 22 | echo -e "${red}ERROR: Image(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 23 | fi 24 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 25 | echo -e "${red}ERROR: Image(${FULLNAME_IMAGE}) does not match the expected file name (${FILENAME_IMAGE})! ${none}" && exit -1 26 | fi 27 | # 0.Check Docker Images 28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 29 | echo $num 30 | echo $NAME_IMAGE 31 | 32 | # 1.Load Docker Images 33 | if [ 0 -eq $num ];then 34 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 35 | #load image 36 | sudo docker load < ${FULLNAME_IMAGE} 37 | else 38 | echo "The image($NAME_IMAGE) is already loaded!" 39 | fi 40 | 41 | #echo "All image information:" 42 | #sudo docker images 43 | echo "The image($NAME_IMAGE) information:" 44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 45 |
-------------------------------------------------------------------------------- /caffe/tools/genOfflineModels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #source env.sh 3 | 4 | usage() 5 | { 6 | echo "Usage:" 7 | echo " $0 [prototxt] [caffemodel] [offline_name] [core_number] [batch_size]" 8 | echo "" 9 | echo " Parameter description:" 10 | echo " [1]: prototxt file" 11 | echo " [2]: caffemodel file" 12 | echo " [3]: offline file name" 13 | echo " [4]: core number" 14 | echo " [5]: batch size (optional, default 1)" 15 | } 16 | if [ $# -lt 4 ]; then 17 | echo "[ERROR] Missing parameters." 18 | usage 19 | exit 1 20 | fi 21 | 22 | coreN=4 23 | btsize=1 24 | 25 | if [ $# -eq 4 ]; then 26 | coreN=$4 27 | fi 28 | if [ $# -eq 5 ]; then 29 | coreN=$4 30 | btsize=$5 31 | fi 32 | 33 | $CAFFE_HOME/src/caffe/build/tools/caffe genoff --model $1 \ 34 | --weights $2 \ 35 | --mname ${3}_${btsize}b${coreN}c"_simple" \ 36 | --mcore MLU270 \ 37 | --simple_compile 1 \ 38 | --core_number $coreN \ 39 | --batchsize $btsize 40 | 41 | echo "genoffline models(${3}_${btsize}b${coreN}c_simple.cambricon) done!!" 42 | echo "param number: $#" 43 | echo "core number: $coreN" 44 | echo "batch size: $btsize" 45 | ls -la ${3}_${btsize}b${coreN}c"_simple".cambricon* 46 |
-------------------------------------------------------------------------------- /caffe/tools/getFileList.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | usage() 5 | { 6 | echo "Usage:" 7 | echo " $0 [pathdir] [filelist]" 8 | echo "" 9 | echo " Parameter description:" 10 | echo " [pathdir]: Relative path of the directory" 11 | echo " [filelist]: File name for the generated result" 12 | echo " EG: ./getFileList.sh ../data/00015 file.list" 13 | } 14 | 15 | if [ $# -lt 2 ]; then 16 | echo "[ERROR] Missing parameters." 17 | usage 18 | exit 1 19 | fi 20 | 21 | pathdir=$1 22 | filelist=$2 23 | 24 | function getdir(){ 25 | #echo $1 26 | for file in $1/* 27 | do 28 | if test -f $file 29 | then 30 | #echo $file 31 | arr=(${arr[*]} $file) 32 | else 33 | getdir $file 34 | fi 35 | done 36 | } 37 | 38 | # Recursively call the function: save the path of all files in the folder to the array 39 | getdir $pathdir 40 | # Print the path of all files to file.list 41 | if [ -f "$filelist" ];then 42 | rm -f $filelist 43 | fi 44 | length=${#arr[@]} 45 | for((a=0;a<$length;a++)) 46 | do 47 | echo ${arr[$a]} >> $filelist 48 | done 49 | 50 | #Display filelist 51 | cat $filelist 52 | echo "[pathdir]: $pathdir" 53 | echo "[filelist]: $filelist" 54 |
-------------------------------------------------------------------------------- /caffe/tools/getPicRandomDir2Dir.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf-8 3 | 4 | import os, random, shutil 5 | 6 | rate=0.1 # Sampling ratio, e.g. 0.1 means picking 10 images out of 100 7 | picknumber=16 8 | 9 | def moveFile(fileDir): 10 | pathDir = os.listdir(fileDir) # List the source image directory 11 | filenumber=len(pathDir) 12 | #picknumber=int(filenumber*rate) # Pick a number of images from the folder according to rate 13 | sample = random.sample(pathDir, picknumber) # Randomly select picknumber sample images 14 | print (sample) 15 | for name in sample: 16 | shutil.move(fileDir+name, tarDir+name) 17 | return 18 | 19 | if __name__ == '__main__': 20 | fileDir = "/data/datasets/COCO/val2017/" # Source image folder path 21 | tarDir = '/data/test/easy-deploy-mlu/caffe/datasets/COCO/val2017/' # Destination folder path 22 | moveFile(fileDir) 23 |
-------------------------------------------------------------------------------- /caffe/yolov3-416/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # Clean up temporarily generated files 4 | # Usage: 5 | # $ bash clean.sh all/test/models 6 | 7 | usage() 8 | { 9 | echo "Usage:" 10 | echo " $0 all/test/models" 11 | } 12 | 13 | clean_all() { 14 | #clean test 15 | cd ./test 16 | ./clean.sh 17 | cd - 18 | 19 | #clean models 20 | rm -vf ./models/mlu/* 21 | rm -vf ./models/*.weights 22 | } 23 | 24 | if [[ $# -eq 0 ]];then 25 | usage 26 | elif [[ $# -eq 1 ]];then 27 | if [[ "$1" == "all" ]];then 28 | #clean all 29 | clean_all 30 | elif [[ "$1" == "test" ]];then 31 | #clean test 32 | cd ./test && ./clean.sh && cd - 33 | elif [[ "$1" == "models" ]];then 34 | #clean models 35 | rm -vrf ./models/mlu/* && rm -vrf ./models/*.weights 36 | else 37 | usage 38 | fi 39 | else 40 | usage 41 | fi 42 | 43 | -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000000785.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000000785.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000001000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000001000.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000001584.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000001584.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000002006.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000002006.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000002149.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000002149.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000002592.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000002592.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000003501.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000003501.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000004134.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000004134.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000005477.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000005477.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000007108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000007108.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000007281.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000007281.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000008211.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000008211.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000013348.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000013348.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000015254.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000015254.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000015272.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000015272.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000017905.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000017905.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000018380.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000018380.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000018837.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000018837.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000020333.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000020333.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000024919.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000024919.jpg -------------------------------------------------------------------------------- 
/caffe/yolov3-416/datasets/000000026204.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000026204.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000026926.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000026926.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000038829.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000038829.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000039670.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000039670.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000044279.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000044279.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000044699.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000044699.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000050896.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000050896.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000067616.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000067616.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000147725.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000147725.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000147745.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000147745.jpg 
-------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000148719.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000148719.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/datasets/000000155341.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/datasets/000000155341.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #声明环境变量(该操作每次进入docker都需要进行) 4 | cd $CAFFE_HOME/../ 5 | source env_caffe.sh 6 | cd - 7 | #设置以下操作步骤中用到的全局变量(请保证在进行以下各个步骤之前设置) 8 | export PATH_NETWORK="${PWD}" 9 | export PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | export PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | -------------------------------------------------------------------------------- /caffe/yolov3-416/label_map_coco.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic_light 11 | fire_hydrant 12 | stop_sign 13 | parking_meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports_ball 34 | kite 35 | baseball_bat 36 | baseball_glove 37 | skateboard 38 | surfboard 39 | tennis_racket 40 | bottle 41 | wine_glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot_dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell_phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy_bear 79 | hair_drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /caffe/yolov3-416/models/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download latest models from https://pjreddie.com/media/files/yolov3.weights 3 | # Usage: 4 | # $ bash download_weights.sh 5 | 6 | #下载yolov3.weights 7 | wget https://pjreddie.com/media/files/yolov3.weights 8 | #回显确认 9 | ls -la -------------------------------------------------------------------------------- /caffe/yolov3-416/res/map50blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/map50blue.png -------------------------------------------------------------------------------- /caffe/yolov3-416/res/performance-on-coo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/performance-on-coo.png -------------------------------------------------------------------------------- /caffe/yolov3-416/res/sayit.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/sayit.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/res/yolov3_000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/yolov3_000000000785.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/res/yolov3_000000007108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/yolov3_000000007108.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/res/yolov3_000000013348.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/yolov3_000000013348.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/res/yolov3_000000018380.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov3-416/res/yolov3_000000018380.jpg -------------------------------------------------------------------------------- /caffe/yolov3-416/test/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | for dir in $(ls) 6 | do 7 | if [ -d "./${dir}" ]; then #先判断是否是目录,然后再执行clean 8 | echo $dir && cd $dir && ./clean.sh && cd - 9 | fi 10 | done -------------------------------------------------------------------------------- /caffe/yolov3-416/test/yolov3_offline_multicore/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov3-416/test/yolov3_online_multicore_mfus/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov3-416/test/yolov3_online_multicore_mlu/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov3-416/yolov3_offline_multicore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 基于SDK-Demo 
offline inference 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_offline_multicore 4 | # Usage: 5 | # $ bash yolov3_offline_multicore.sh 6 | 7 | #Set the global variables used in the following steps (make sure they are set before running each step) 8 | #PATH_NETWORK="/home/share/yolov3-416" 9 | #PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | PATH_TEST_NETWORK="${PATH_NETWORK}/test/yolov3_offline_multicore" 13 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 14 | cd ${PATH_TEST_NETWORK} 15 | #Offline inference based on the SDK demo 16 | /opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_offline_multicore \ 17 | -offlinemodel ${PATH_NETWORK_MODELS_MLU}/yolov3_1b4c_simple.cambricon \ 18 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 19 | -images ${PATH_NETWORK}/yolov3_file_list_coco \ 20 | -preprocess_option 4 21 | #ls -la ${PATH_TEST_NETWORK} 22 | echo "PATH_TEST_NETWORK: ${PATH_TEST_NETWORK}"
-------------------------------------------------------------------------------- /caffe/yolov3-416/yolov3_online_multicore_mfus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Online fused inference based on the SDK demo (MFUS mode) 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_online_multicore 4 | # Usage: 5 | # $ bash yolov3_online_multicore_mfus.sh 6 | 7 | #Set the global variables used in the following steps (make sure they are set before running each step) 8 | #PATH_NETWORK="/home/share/yolov3-416" 9 | #PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | PATH_TEST_NETWORK="${PATH_NETWORK}/test/yolov3_online_multicore_mfus" 13 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 14 | cd ${PATH_TEST_NETWORK} 15 | #Online fused inference based on the SDK demo 16 | /opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_online_multicore \ 17 | -model ${PATH_NETWORK_MODELS_MLU}/yolov3_int8.prototxt \ 18 | -weights ${PATH_NETWORK_MODELS_MLU}/yolov3.caffemodel \ 19 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 20 | -images ${PATH_NETWORK}/yolov3_file_list_coco \ 21 | -mcore MLU270 \ 22 | -mmode MFUS \ 23 | -preprocess_option 4 24 | #ls -la ${PATH_TEST_NETWORK} 25 | echo "PATH_TEST_NETWORK: ${PATH_TEST_NETWORK}" 26 |
-------------------------------------------------------------------------------- /caffe/yolov3-416/yolov3_online_multicore_mlu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Online layer-by-layer inference based on the SDK demo (MLU mode) 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_online_multicore 4 | # Usage: 5 | # $ bash yolov3_online_multicore_mlu.sh 6 | 7 | #Set the global variables used in the following steps (make sure they are set before running each step) 8 | #PATH_NETWORK="/home/share/yolov3-416" 9 | #PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | PATH_TEST_NETWORK="${PATH_NETWORK}/test/yolov3_online_multicore_mlu" 13 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 14 | cd ${PATH_TEST_NETWORK} 15 | #Online layer-by-layer inference based on the SDK demo 16 | /opt/cambricon/caffe/src/caffe/build/examples/yolo_v3/yolov3_online_multicore \ 17 | -model ${PATH_NETWORK_MODELS_MLU}/yolov3_int8.prototxt \ 18 | -weights ${PATH_NETWORK_MODELS_MLU}/yolov3.caffemodel \ 19 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 20 | -images ${PATH_NETWORK}/yolov3_file_list_coco \ 21 | -mcore MLU270 \ 22 | -mmode MLU \ 23 | -preprocess_option 4 24 | #ls -la ${PATH_TEST_NETWORK} 25 | echo "PATH_TEST_NETWORK: ${PATH_TEST_NETWORK}" 26 |
-------------------------------------------------------------------------------- /caffe/yolov3-416/yolov3_quantized.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | ;the two entries below can be lists, depending on the framework 3 | original_models_path = ./models/mlu/yolov3.prototxt 4 | save_model_path = ./models/mlu/yolov3_int8.prototxt 5 | 6 | [data] 7 | ;only one of the two below should be set 8 | images_list_path = ./yolov3_file_list_coco 9 | used_images_num = 4 10 | 11 | [weights] 12 | original_weights_path = ./models/mlu/yolov3.caffemodel 13 | 14 | [preprocess] 15 | mean = 0,0,0 16 | std = 0.00392157 17 | scale = 416,416 18 | crop = 416,416 19 | 20 | [config] 21 | quantize_op_list = Conv, FC, LRN 22 | use_firstconv = 1 23 |
-------------------------------------------------------------------------------- /caffe/yolov3-416/yolov3_quantized.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Model quantization 3 | # Usage: 4 | # $ bash yolov3_quantized.sh 5 | 6 | #1.Generate the image list yolov3_file_list_coco 7 | /home/share/tools/getFileList.sh ${PATH_NETWORK}/datasets yolov3_file_list_coco 8 | #2.generate_quantized_pt:/opt/cambricon/caffe/tools/generate_quantized_pt 9 | cd ${PATH_NETWORK} 10 | /opt/cambricon/caffe/tools/generate_quantized_pt -ini_file ${PATH_NETWORK}/yolov3_quantized.ini 11 | ls -la ${PATH_NETWORK_MODELS_MLU}/yolov3_int8.prototxt 12 | #/opt/cambricon/caffe/tools/generate_quantized_pt -ini_file ${PATH_NETWORK}/yolov3_quantized.ini -blobs_dtype INT16 -top_dtype FLOAT32 -outputmodel ${PATH_NETWORK_MODELS_MLU}/yolov3_int16.prototxt 13 | #/opt/cambricon/caffe/tools/generate_quantized_pt -blobs_dtype INT8 \ 14 | # -ini_file ${PATH_NETWORK}/yolov3_quantized.ini \ 15 | # -mode common \ 16 | # -model ${PATH_NETWORK_MODELS_MLU}/yolov3.prototxt \ 17 | # -weights ${PATH_NETWORK_MODELS_MLU}/yolov3.caffemodel \ 18 | # -outputmodel ${PATH_NETWORK_MODELS_MLU}/yolov3_int8.prototxt \ 19 | # -top_dtype FLOAT16 -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # Clean up temporarily generated files 4 | # Usage: 5 | # $ bash clean.sh all/test/models 6 | 7 | usage() 8 | { 9 | echo "Usage:" 10 | echo " $0 all/test/models" 11 | } 12 | 13 | clean_all() { 14 | #clean test 15 | cd ./test 16 | ./clean.sh 17 | cd - 18 | 19 | #clean models 20 | rm -vf ./models/mlu/*.cambricon* 21 | rm -vf ./models/*.weights 22 | } 23 | 24 | if [[ $# -eq 0 ]];then 25 | usage 26 | elif [[ $# -eq 1 ]];then 27 | if [[ "$1" == "all" ]];then 28 | #clean all 29 | clean_all 30 | elif [[ "$1" == "test" ]];then 31 | #clean test 32 | cd ./test && ./clean.sh && cd - 33 | elif [[ "$1" == "models" ]];then 34 | #clean models 35 | rm -vrf ./models/mlu/*.cambricon* && rm -vrf ./models/*.weights 36 | else 37 | usage 38 | fi 39 | else 40 | usage 41 | fi 42 |
43 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000000785.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000001000.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000001000.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000001584.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000001584.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000002006.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000002006.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000002149.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000002149.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000002592.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000002592.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000003501.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000003501.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000004134.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000004134.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000005477.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000005477.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000007108.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000007108.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000007281.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000007281.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000008211.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000008211.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000013348.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000013348.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000015254.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000015254.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000015272.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000015272.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000017905.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000017905.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000018380.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000018380.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000018837.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000018837.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000020333.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000020333.jpg -------------------------------------------------------------------------------- 
/caffe/yolov4-mish-416/datasets/000000024919.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000024919.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000026204.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000026204.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000026926.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000026926.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000038829.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000038829.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000039670.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000039670.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000044279.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000044279.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000044699.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000044699.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000050896.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000050896.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000067616.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000067616.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000147725.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000147725.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000147745.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000147745.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000148719.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000148719.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/datasets/000000155341.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/datasets/000000155341.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #声明环境变量(该操作每次进入docker都需要进行) 4 | cd $CAFFE_HOME/../ 5 | source env_caffe.sh 6 | cd - 7 | #设置以下操作步骤中用到的全局变量(请保证在进行以下各个步骤之前设置) 8 | export PATH_NETWORK="${PWD}" 9 | export PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | export PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/label_map_coco.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic_light 11 | fire_hydrant 12 | stop_sign 13 | parking_meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports_ball 34 | kite 35 | baseball_bat 36 | baseball_glove 37 | skateboard 38 | surfboard 39 | tennis_racket 40 | bottle 41 | wine_glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot_dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell_phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy_bear 79 | hair_drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/map50blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/map50blue.png -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/performance-on-coo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/performance-on-coo.png -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/sayit.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/sayit.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/yolov3_000000000785.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/yolov3_000000000785.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/yolov3_000000007108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/yolov3_000000007108.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/yolov3_000000013348.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/yolov3_000000013348.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/res/yolov3_000000018380.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/caffe/yolov4-mish-416/res/yolov3_000000018380.jpg -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/results.json: -------------------------------------------------------------------------------- 1 | [ -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/test/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | for dir in $(ls) 6 | do 7 | if [ -d "./${dir}" ]; then #先判断是否是目录,然后再执行clean 8 | echo $dir && cd $dir && ./clean.sh && cd - 9 | fi 10 | done -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/test/yolov4_offline_multicore/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/test/yolov4_online_multicore_mfus/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/test/yolov4_online_multicore_mlu/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm 
-vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/yolov4_offline_multicore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 基于SDK-Demo 离线推理 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v4/yolov4_offline_multicore 4 | # Usage: 5 | # $ bash yolov4_offline_multicore.sh 6 | 7 | #设置以下操作步骤中用到的全局变量(请保证在进行以下各个步骤之前设置) 8 | #export PATH_NETWORK="/home/share/caffe/yolov4-mish-416" 9 | #export PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #export PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | #基于SDK-Demo 离线推理 13 | PATH_TEST_NETWORK=${PATH_NETWORK}/test/yolov4_offline_multicore 14 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 15 | cd ${PATH_TEST_NETWORK} 16 | #yolov4_1b4c_simple 17 | $CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_offline_multicore \ 18 | -offlinemodel ${PATH_NETWORK_MODELS_MLU}/yolov4_1b4c_simple.cambricon \ 19 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 20 | -images ${PATH_NETWORK}/yolov4_file_list_coco \ 21 | -preprocess_option 4 \ 22 | -outputdir . \ 23 | -simple_compile 1 \ 24 | -dump 1 25 | #yolov4_4b4c_simple 26 | $CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_offline_multicore \ 27 | -offlinemodel ${PATH_NETWORK_MODELS_MLU}/yolov4_4b4c_simple.cambricon \ 28 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 29 | -images ${PATH_NETWORK}/yolov4_file_list_coco \ 30 | -preprocess_option 4 \ 31 | -outputdir . \ 32 | -simple_compile 1 \ 33 | -dump 1 -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/yolov4_online_multicore_mfus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 基于SDK-Demo 在线逐层推理 MFUS模式 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v4/yolov4_online_multicore 4 | # Usage: 5 | # $ bash yolov4_online_multicore_mfus.sh 6 | 7 | #设置以下操作步骤中用到的全局变量(请保证在进行以下各个步骤之前设置) 8 | #export PATH_NETWORK="/home/share/caffe/yolov4-mish-416" 9 | #export PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #export PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | #2、基于SDK-Demo 在线融合推理 13 | #$CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_online_multicore 14 | PATH_TEST_NETWORK=${PATH_NETWORK}/test/yolov4_online_multicore_mfus 15 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 16 | cd ${PATH_TEST_NETWORK} 17 | $CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_online_multicore \ 18 | -model ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416_int8.prototxt \ 19 | -weights ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416.caffemodel \ 20 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 21 | -images ${PATH_NETWORK}/yolov4_file_list_coco \ 22 | -mmode MFUS \ 23 | -mcore MLU270 \ 24 | -outputdir . 
\ 25 | -output_dtype FLOAT16 \ 26 | -preprocess_option 4 \ 27 | -dump 1 \ 28 | -simple_compile 1 29 | #ls -la ${PATH_TEST_NETWORK} 30 | echo "PATH_TEST_NETWORK: ${PATH_TEST_NETWORK}" 31 | -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/yolov4_online_multicore_mlu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 基于SDK-Demo 在线逐层推理 MLU模式 3 | #/opt/cambricon/caffe/src/caffe/build/examples/yolo_v4/yolov4_online_multicore 4 | # Usage: 5 | # $ bash yolov4_online_multicore_mlu.sh 6 | 7 | #设置以下操作步骤中用到的全局变量(请保证在进行以下各个步骤之前设置) 8 | #export PATH_NETWORK="/home/share/caffe/yolov4-mish-416" 9 | #export PATH_NETWORK_MODELS="${PATH_NETWORK}/models" 10 | #export PATH_NETWORK_MODELS_MLU="${PATH_NETWORK_MODELS}/mlu" 11 | 12 | #基于SDK-Demo 在线逐层推理 13 | #$CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_online_multicore 14 | PATH_TEST_NETWORK=${PATH_NETWORK}/test/yolov4_online_multicore_mlu 15 | if [ ! -d ${PATH_TEST_NETWORK} ];then mkdir -p ${PATH_TEST_NETWORK};fi 16 | cd ${PATH_TEST_NETWORK} 17 | $CAFFE_HOME/src/caffe/build/examples/yolo_v4/yolov4_online_multicore \ 18 | -model ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416_int8.prototxt \ 19 | -weights ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416.caffemodel \ 20 | -labels ${PATH_NETWORK}/label_map_coco.txt \ 21 | -images ${PATH_NETWORK}/yolov4_file_list_coco \ 22 | -mmode MLU -mcore MLU270 \ 23 | -outputdir . \ 24 | -output_dtype FLOAT16 \ 25 | -preprocess_option 4 \ 26 | -dump 1 \ 27 | -simple_compile 1 28 | #ls -la ${PATH_TEST_NETWORK} 29 | echo "PATH_TEST_NETWORK: ${PATH_TEST_NETWORK}" -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/yolov4_quantized.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | ;blow two are lists, depending on framework 3 | original_models_path = ./models/mlu/yolov4-mish-416.prototxt 4 | save_model_path = ./models/mlu/yolov4-mish-416_int8.prototxt 5 | 6 | [data] 7 | ;only one should be set for below two 8 | images_list_path = ./yolov4_file_list_coco 9 | used_images_num = 4 10 | 11 | [weights] 12 | original_weights_path = ./models/mlu/yolov4-mish-416.caffemodel 13 | 14 | [preprocess] 15 | #mean = 0,0,0 16 | std = 0.00392 17 | scale = 416,416 18 | crop = 416,416 19 | 20 | [config] 21 | quantize_op_list = Conv, FC, LRN 22 | use_firstconv = 1 23 | 24 | ;customer configuration 25 | [custom] 26 | use_custom_preprocess = 0 27 | ;only support ARGB, ABGR, BGRA, RGBA 28 | ;input_format = ARGB 29 | ;only support BGR, RGB 30 | ;filter_format = BGR -------------------------------------------------------------------------------- /caffe/yolov4-mish-416/yolov4_quantized.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 模型量化 3 | # Usage: 4 | # $ bash yolov4_quantized.sh 5 | 6 | #1.生成图片列表 yolov4_file_list_coco 7 | cd ${PATH_NETWORK} 8 | /home/share/tools/getFileList.sh ${PATH_NETWORK}/datasets yolov4_file_list_coco 9 | #2.generate_quantized_pt:/opt/cambricon/caffe/tools/generate_quantized_pt 10 | cd ${PATH_NETWORK} 11 | /opt/cambricon/caffe/tools/generate_quantized_pt -ini_file ${PATH_NETWORK}/yolov4_quantized.ini 12 | ls -la ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416_int8.prototxt 13 | #/opt/cambricon/caffe/tools/generate_quantized_pt -ini_file ${PATH_NETWORK}/yolov4_quantized.ini -blobs_dtype INT16 -top_dtype FLOAT32 -outputmodel 
${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416_int16.prototxt 14 | #/opt/cambricon/caffe/tools/generate_quantized_pt -blobs_dtype INT8 \ 15 | # -ini_file ${PATH_NETWORK}/yolov4_quantized.ini \ 16 | # -mode common \ 17 | # -model ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416.prototxt \ 18 | # -weights ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416.caffemodel \ 19 | # -outputmodel ${PATH_NETWORK_MODELS_MLU}/yolov4-mish-416_int8.prototxt \ 20 | # -top_dtype FLOAT16 -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /data/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/data/bus.jpg -------------------------------------------------------------------------------- /datasets: -------------------------------------------------------------------------------- 1 | /data/datasets/ -------------------------------------------------------------------------------- /docker/Dockerfile.16.04: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------- 2 | # Filename: Dockerfile 3 | # UpdateDate: 2021/02/23 4 | # Description: Build docker images for cambricon-caffe. 5 | # Example: 6 | # Depends: Based on ubuntu:16.04 7 | # Notes: 8 | # ------------------------------------------------------------------------------- 9 | # 0.Start FROM ubuntu:16.04 image 10 | FROM ubuntu:16.04 11 | #FROM ubuntu:18.04 12 | 13 | MAINTAINER kang 14 | 15 | # 1.Sync files 16 | COPY . /temp/ 17 | WORKDIR /temp/ 18 | RUN mkdir -p /root/.pip/ && \ 19 | cp pip.conf /root/.pip/ && \ 20 | cp sources_16.04.list /etc/apt/sources.list 21 | 22 | # 2.Pre-installed software 23 | ENV DEBIAN_FRONTEND=noninteractive 24 | RUN bash ./pre_packages.sh 25 | 26 | # 3.Set ENV && Clean 27 | ENV LANG C.UTF-8 28 | ENV TIME_ZONE Asia/Shanghai 29 | RUN echo "${TIME_ZONE}" > /etc/timezone && \ 30 | ln -sf /usr/share/zoneinfo/${TIME_ZONE} /etc/localtime && \ 31 | rm -rf /temp/ && rm -rf /var/lib/apt/lists/* && \ 32 | apt-get clean 33 | 34 | # 4.Set WorkDir 35 | WORKDIR /opt/work/ 36 | -------------------------------------------------------------------------------- /docker/Dockerfile.18.04: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------- 2 | # Filename: Dockerfile 3 | # UpdateDate: 2021/09/27 4 | # Description: Build docker images for cambricon-caffe. 5 | # Example: 6 | # Depends: Based on ubuntu:18.04 7 | # Notes: 8 | # ------------------------------------------------------------------------------- 9 | # 0.Start FROM ubuntu:18.04 image 10 | FROM ubuntu:18.04 11 | 12 | MAINTAINER kang 13 | 14 | # 1.Sync files 15 | COPY . 
/temp/ 16 | WORKDIR /temp/ 17 | RUN mkdir -p /root/.pip/ && \ 18 | cp pip.conf /root/.pip/ && \ 19 | cp sources_18.04.list /etc/apt/sources.list 20 | 21 | # 2.Pre-installed software 22 | ENV DEBIAN_FRONTEND=noninteractive 23 | RUN bash ./pre_packages18.04.sh 24 | 25 | # 3.Set ENV && Clean 26 | ENV LANG C.UTF-8 27 | ENV TIME_ZONE Asia/Shanghai 28 | RUN echo "${TIME_ZONE}" > /etc/timezone && \ 29 | ln -sf /usr/share/zoneinfo/${TIME_ZONE} /etc/localtime && \ 30 | rm -rf /temp/ && rm -rf /var/lib/apt/lists/* && \ 31 | apt-get clean 32 | 33 | # 4.Set WorkDir 34 | WORKDIR /opt/work/ 35 | -------------------------------------------------------------------------------- /docker/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # 1.del tar.gz.eg: image-ubuntu16.04-caffe-v1.6.0.tar.gz 5 | sudo rm -vf image-*.tar.gz 6 | -------------------------------------------------------------------------------- /docker/pip.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | index-url = https://pypi.doubanio.com/simple 3 | -------------------------------------------------------------------------------- /docker/requirements.txt: -------------------------------------------------------------------------------- 1 | decorator==4.3.0; python_version <= '2.7' 2 | pytest==3.4.0 3 | scipy==1.1.0 4 | munch==2.2.0 5 | opencv-python==3.4.2.17 6 | matplotlib==2.2.2 7 | scikit-image==0.14.2 8 | pillow==5.2.0 9 | sacred==0.7.2 10 | tqdm==4.19.5 11 | shapely==1.7.0 12 | lanms==1.0.2; python_version >= '3.4' 13 | pandas==0.23.2 14 | numpy==1.16.0; python_version < '3.7' 15 | numpy==1.20.1; python_version >= '3.7' 16 | tensorboardX==1.0 17 | boto3==1.5.22 18 | requests==2.18.4 19 | scikit-learn==0.19.2 20 | regex==2018.2.3 21 | nltk==3.2.5 22 | yacs==0.1.6 23 | onnx==1.6.0 24 | typing==3.7.4.3 25 | cpplint; python_version >= '3.5' 26 | pylint; python_version >= '3.5' 27 | -------------------------------------------------------------------------------- /docker/sources_16.04.list: -------------------------------------------------------------------------------- 1 | # deb cdrom:[Ubuntu 16.04 LTS _Xenial Xerus_ - Release amd64 (20160420.1)]/ xenial main restricted 2 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial main restricted 3 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-updates main restricted 4 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial universe 5 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-updates universe 6 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial multiverse 7 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-updates multiverse 8 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-backports main restricted universe multiverse 9 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-security main restricted 10 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-security universe 11 | deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-security multiverse -------------------------------------------------------------------------------- /edge/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #Dockerfile(16.04/18.04/CentOS) 5 | #OSVer="16.04" 6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi 7 | # 1.Source env 8 | source ./env.sh $OSVer 9 | 10 | # 2.rm docker container 11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk 
'{print $1}'` 12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 13 | if [ $num_container ]; then sudo docker stop $num_container;fi 14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | if [ $num_container ]; then sudo docker rm $num_container;fi 16 | 17 | # 3.rmi docker image 18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 21 | -------------------------------------------------------------------------------- /edge/cross_compile/build-bsp-all.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: build_bsp.sh 5 | # Revision: 1.0.0 6 | # Date: 2022/12/06 7 | # Description: 基于官网提供的mm容器一键编译生成所有镜像文件. 8 | # Example: 9 | # Depends: magicmind_0.13.0-1_ubuntu18.04.tar.gz 10 | # env_ce3226.sh 11 | # update-os.sh 12 | # ce3226v100-sdk-1.1.0.tar.gz --> bsp.tar.gz 13 | # Notes: 14 | # ------------------------------------------------------------------------------- 15 | 16 | # 1. 环境准备 17 | ## 1.1. 进入工作目录 18 | cd /home/share/edge/cross_compile 19 | ## 1.2. 拷贝或下载sdk到[../dependent_files]目录 20 | #cp -rvf /data/ftp/ce3226/sdk/ce3226v100-sdk-1.1.0.tar.gz ../dependent_files 21 | ## 1.3. 解压SDK到本目录 22 | tar zxvf ../dependent_files/ce3226v100-sdk-1.1.0.tar.gz -C ./ 23 | ## 1.4. 解压bsp到本目录 24 | tar zxvf ./ce3226v100-sdk-1.1.0/board/package/bsp.tar.gz -C ./ 25 | ## 1.5. 进入bsp编译目录 26 | cd /home/share/edge/cross_compile/bsp/ce3226v100_build/build 27 | # 2.执行make 28 | make all 29 | # 3.编译完后,在out/目录下是生成所有的bsp镜像文件 30 | ls -la ./out 31 | # 4.设置权限,否则可能会导致tftp下载失败 32 | chmod 644 ./out/ubootenv* 33 | ls -la ./out 34 | # 5.备用操作 35 | ## 5.1.如需要则修改用户权限 36 | #sudo chown cam:cam -R ./out/* 37 | ## 5.2.拷贝到tftp目录 38 | #cp -rvf ./out/*.bin ./out/*.img ./out/*.itb /data/tftp -------------------------------------------------------------------------------- /edge/cross_compile/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # 1.sdk&bsp 5 | rm -rvf ce3226v100-sdk-* bsp 6 | # 2.cnstream 7 | rm -rvf cnstream 8 | -------------------------------------------------------------------------------- /edge/cross_compile/env-ce3226.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export ABI_MODE=1 3 | export BIN_DIR_WORK="/opt/cambricon" 4 | export BIN_DIR_GCC_Linaro="/tmp/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin" 5 | export PATH="$BIN_DIR_GCC_Linaro:$PATH" 6 | export NEUWARE_HOME=/usr/local/neuware/edge/ 7 | export LD_LIBRARY_PATH="/opt/distribute/lib:/usr/local/neuware/edge/lib64" 8 | export CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/usr/local/neuware/edge/include" 9 | -------------------------------------------------------------------------------- /edge/cross_compile/update-os.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: update-os.sh 5 | # Revision: 1.0.0 6 | # Date: 2022/12/06 7 | # Description: 基于官网提供的mm容器还需要安装以下软件及配置,否则会编译BSP会报一些错误. 
8 | # Example: 9 | # Depends: magicmind_0.13.0-1_ubuntu18.04.tar.gz 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | 13 | #apt-get install 14 | apt-get update 15 | apt-get upgrade -y 16 | apt-get install -y --no-install-recommends \ 17 | device-tree-compiler bc \ 18 | minicom tftpd-hpa nfs-kernel-server nfs-common 19 | 20 | #pip install 21 | pip install openpyxl 22 | pip install bc 23 | 24 | #编辑 tftpd-hpa && nfs 设置 25 | #sudo vi /etc/default/tftpd-hpa 26 | echo '# /etc/default/tftpd-hpa' > /etc/default/tftpd-hpa && \ 27 | echo 'TFTP_USERNAME="tftp"' >> /etc/default/tftpd-hpa && \ 28 | echo 'TFTP_DIRECTORY="/data/tftp"' >> /etc/default/tftpd-hpa && \ 29 | echo 'TFTP_ADDRESS="0.0.0.0:69"' >> /etc/default/tftpd-hpa && \ 30 | echo 'TFTP_OPTIONS="-l -c -s"' >> /etc/default/tftpd-hpa && \ 31 | echo '/data/nfs *(rw,sync,no_root_squash)' >> /etc/exports 32 | #sudo service tftpd-hpa restart 33 | -------------------------------------------------------------------------------- /edge/dependent_files/README.md: -------------------------------------------------------------------------------- 1 |
2 | Edge依赖库下载说明 3 | 4 | 5 |
6 | 7 | **下载方式** 8 | 9 | 1. 可前往[寒武纪开发者社区](https://developer.cambricon.com)注册账号按需下载到本目录; 10 | 2. 可通过官方技术对接人员提供的专属FTP账户指定路径下载; 11 | 3. 关注微信公众号 AIKnight , 发送文字消息, 包含关键字(不区分大小写): **Edge依赖库**, 公众号会自动回复Edge依赖库的下载地址; 12 | 13 | >![](../../res/note.gif) **备注信息:** 14 | >- 请把下载后的依赖库放置到当前目录下(dependent_files),方便根据脚本提示进行后续操作。 15 | 16 | **公众号** 17 | >![](../../res/aiknight_wechat_344.jpg) 18 | 19 | -------------------------------------------------------------------------------- /edge/dependent_files/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # 1.sdk&bsp 5 | rm -rvf ce3226v100-sdk-1.1.0 6 | 7 | # 2.cnstream 8 | rm -rvf ce3226v100-sdk-1.1.0 edge 9 | -------------------------------------------------------------------------------- /edge/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2022/06/06 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-dev.sh 8 | # Depends: magicmind_0.10.0-1_ubuntu18.04.tar.gz 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | # Source env 12 | source "./env.sh" 13 | #################### main #################### 14 | # 0.Check param 15 | if [[ $# -eq 0 ]];then 16 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 17 | else 18 | FULLNAME_IMAGE="${1}" 19 | fi 20 | # 0.Check File Images 21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 22 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 23 | fi 24 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 25 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 26 | fi 27 | # 0.Check Docker Images 28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 29 | echo $num 30 | echo $NAME_IMAGE 31 | 32 | # 1.Load Docker Images 33 | if [ 0 -eq $num ];then 34 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 35 | #load image 36 | sudo docker load < ${FULLNAME_IMAGE} 37 | else 38 | echo "The image($NAME_IMAGE) is already loaded!" 
39 | fi 40 | 41 | #echo "All image information:" 42 | #sudo docker images 43 | echo "The image($NAME_IMAGE) information:" 44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 45 | -------------------------------------------------------------------------------- /mm/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #Dockerfile(16.04/18.04/CentOS) 5 | #OSVer="16.04" 6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi 7 | # 1.Source env 8 | source ./env.sh $OSVer 9 | 10 | # 2.rm docker container 11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 13 | if [ $num_container ]; then sudo docker stop $num_container;fi 14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | if [ $num_container ]; then sudo docker rm $num_container;fi 16 | 17 | # 3.rmi docker image 18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 21 | -------------------------------------------------------------------------------- /mm/cnvs/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # clean 5 | rm -rvf cnvs_stats log 6 | -------------------------------------------------------------------------------- /mm/cnvs/cnmon.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # 0. 配置待测试板卡 4 | export MLU_VISIBLE_DEVICES=0,1 5 | watch -d -n -1 'cnmon' -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs.example.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | targeted_stress: 10 | test_duration: 60 11 | mlu_stress: 12 | test_duration: 60 13 | targeted_power: 14 | test_duration: 60 15 | memory_bandwidth: 16 | min_bandwidth: 10 17 | peak_performance: 18 | data_type: bfloat16 # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; float; tfloat; bfloat16 19 | pcie: 20 | test_pinned: true 21 | test_unpinned: true 22 | test_p2p_on: false 23 | test_p2p_off: false 24 | subtests: 25 | h2d_d2h_single_pinned: 26 | min_pci_generation: 1.0 27 | min_pci_width: 1 28 | min_bandwidth: 0 29 | h2d_d2h_single_unpinned: 30 | min_pci_generation: 1.0 31 | min_pci_width: 1.0 32 | min_bandwidth: 0 33 | 34 | h2d_d2h_concurrent_pinned: 35 | min_bandwidth: 0 36 | h2d_d2h_concurrent_unpinned: 37 | min_bandwidth: 0 38 | 39 | h2d_d2h_latency_pinned: 40 | max_latency: 100000.0 41 | h2d_d2h_latency_unpinned: 42 | max_latency: 100000.0 43 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_matmul_performance_float.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | matmul_performance: 10 | input_data_type: float 11 | output_data_type: float 12 | iterations: 100 13 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_memory_bandwidth_512M.yml: 
-------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | memory_bandwidth: 10 | min_bandwidth: 10 11 | data_size: 134217728 12 | iterations: 100 13 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_mlu_stress.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | mlu_stress: 10 | test_duration: 60 # 24小时:86400; 12小时:43200 11 | target_stress: 128000 # 单位:GOPS 12 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_mlulink.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | mlulink: 10 | data_size: 33554432.0 #执行单次测试使用的int数量,实际的数据量为 data_size * sizeof(int) 。 11 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_pcie_16M.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | pcie: 10 | test_pinned: true 11 | test_unpinned: true 12 | test_p2p_on: false 13 | test_p2p_off: false 14 | subtests: 15 | h2d_d2h_single_pinned: 16 | min_pci_generation: 1.0 17 | min_pci_width: 1 18 | min_bandwidth: 0 19 | data_size: 16777216 20 | 21 | h2d_d2h_single_unpinned: 22 | min_pci_generation: 1.0 23 | min_pci_width: 1.0 24 | min_bandwidth: 0 25 | 26 | h2d_d2h_concurrent_pinned: 27 | min_bandwidth: 0 28 | h2d_d2h_concurrent_unpinned: 29 | min_bandwidth: 0 30 | 31 | h2d_d2h_latency_pinned: 32 | max_latency: 100000.0 33 | h2d_d2h_latency_unpinned: 34 | max_latency: 100000.0 35 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_peak_performance_bfloat16.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | peak_performance: 10 | data_type: bfloat16 # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; bfloat16; float; tfloat 11 | kernel_width: 4 12 | kernel_height: 3 -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_peak_performance_float.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | peak_performance: 10 | data_type: float # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; bfloat16; float; tfloat 11 | kernel_width: 4 12 | kernel_height: 3 -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_peak_performance_half.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | peak_performance: 10 | data_type: half # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; bfloat16; float; tfloat 11 | kernel_width: 4 12 | kernel_height: 3 -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_peak_performance_int16.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | 
peak_performance: 10 | data_type: int16 # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; bfloat16; float; tfloat 11 | kernel_width: 4 12 | kernel_height: 3 -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_peak_performance_int8.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | peak_performance: 10 | data_type: int8 # 该插件执行卷积操作时输入, 输出数据类型: int8; int16; half; bfloat16; float; tfloat 11 | kernel_width: 4 12 | kernel_height: 3 -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_targeted_power.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | targeted_power: 10 | test_duration: 60 11 | target_power: 75 # 370-S4: 75W; 370-X4: 150W; 370-X8: 125W(250W/2); 12 | -------------------------------------------------------------------------------- /mm/cnvs/config/cnvs_targeted_stress.yml: -------------------------------------------------------------------------------- 1 | %YAML 1.2 2 | --- 3 | 4 | globals: 5 | logdir: ./ 6 | 7 | custom: 8 | - custom: 9 | targeted_stress: 10 | test_duration: 60 # 24小时:86400; 12小时:43200 11 | target_stress: 128000 # 单位:GOPS 12 | -------------------------------------------------------------------------------- /mm/cnvs/env.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: env.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/04/14 7 | # Description: Common Environment variable 8 | # Example: 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | #################### Function #################### 13 | # Init 14 | init_env() { 15 | source "../lib/time.sh" 16 | source "../lib/log.sh" 17 | init_log 18 | source "../lib/base.sh" 19 | } 20 | 21 | #################### environment variable #################### 22 | #配置测试的板卡编号:举例说明,如果配置值为【1,0】则按照1,0的顺序打开设备,cnmon显示顺序也为1,0。 23 | #export MLU_VISIBLE_DEVICES=0,1 24 | 25 | #################### main #################### 26 | main_time_start=$(date +"%s.%N") 27 | #初始化 28 | init_env 29 | -------------------------------------------------------------------------------- /mm/cnvs/init-cnvs.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: init-cnvs.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/04/14 7 | # Description: 初始化cnvs运行环境。 8 | # Example: 9 | # Depends: 官方提供的mm Docker容器 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | source "./env.sh" 13 | 14 | print_log_echo_info "==================================================" 15 | #以下操作步骤均是在官方提供的Docker容器中进行 16 | #进入测试目录 17 | cd /home/share/mm/cnvs 18 | ########################################################## 19 | # 安装依赖库 20 | apt-get update 21 | apt-get install -y libpci3 libpci3 pciutils tree 22 | apt --fix-broken install 23 | apt-get install -y libyaml-dev 24 | #安装CNVS 25 | #dpkg -i /var/cntoolkit-3.6.1/cnvs_0.12.0-1.ubuntu18.04_amd64.deb 26 | dpkg -i /var/cntoolkit-3.7.2/cnvs_0.13.1-1.ubuntu18.04_amd64.deb 27 | 
########################################################## 28 | #结束时间 29 | main_time_end=$(date +"%s.%N") 30 | #计算测试所用的时间 31 | ret=$(timediff $main_time_start $main_time_end) 32 | print_log_echo_info "Total time: $ret s" 33 | print_log_echo_info "==================================================" 34 | -------------------------------------------------------------------------------- /mm/cnvs/matmul-test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: matmul-test.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/04/14 7 | # Description: 测试加速卡执行一个特定规模的矩阵乘算子,以便用户得到MLU设备的矩阵乘算子性能。 8 | # Example: 9 | # Depends: 官方提供的mm Docker容器 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | source "./env.sh" 13 | TEST_PLUGIN="matmul_performance" 14 | TEST_TITLE="cnvs_${TEST_PLUGIN}_float" 15 | 16 | print_log_echo_info "==================================================" 17 | #以下操作步骤均是在官方提供的Docker容器中进行 18 | #进入测试目录 19 | cd /home/share/mm/cnvs 20 | # 0. 配置待测试板卡 21 | export MLU_VISIBLE_DEVICES=0,1 22 | # 1.开始测试前,打印cnmon信息 23 | #cnmon && sleep 1 24 | cnmon >> $RUNNING_LOG_FILE 25 | # 2. 执行cnvs测试 26 | ########################################################## 27 | print_log_echo_info "[# ${TEST_TITLE}: " 28 | #压入后台执行cnvs。 29 | cnvs -r ${TEST_PLUGIN} -c "./config/${TEST_TITLE}.yml" -v >> ${RUNNING_LOG_FILE} 2>&1 & 30 | #进程压入队列 31 | push_queue_processes $! && sleep 0.1 32 | ########################################################## 33 | #启动【进度条】显示执行进度。直到后台进程执行完成。 34 | print_queue_processes && echo -en "${green}[#" && check_queue_processes_bar 0 0 && echo "]" && echo -en "${none}" && sync && sync 35 | #结束时间 36 | main_time_end=$(date +"%s.%N") 37 | #计算测试所用的时间 38 | ret=$(timediff $main_time_start $main_time_end) 39 | print_log_echo_info "Total time: $ret s" 40 | print_log_echo_info "==================================================" 41 | 42 | # 3.打印测试结果 43 | echo -en "${yellow}" 44 | RUNNING_LOG_FILE_NEW="${RUNNING_LOG_FILE%/*}/${TEST_TITLE}-${RUNNING_LOG_FILE##*/}" 45 | #修改文件名: 根据测试内容,追加前缀 46 | mv $RUNNING_LOG_FILE $RUNNING_LOG_FILE_NEW 47 | cat $RUNNING_LOG_FILE_NEW 48 | ls -la $RUNNING_LOG_FILE_NEW 49 | echo -en "${none}" 50 | -------------------------------------------------------------------------------- /mm/cnvs/memory-test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: memory-test.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/04/14 7 | # Description: 测试加速卡上读写GDRAM的内存带宽。 8 | # Example: 9 | # Depends: 官方提供的mm Docker容器 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | source "./env.sh" 13 | TEST_PLUGIN="memory_bandwidth" 14 | TEST_TITLE="cnvs_${TEST_PLUGIN}_512M" 15 | 16 | print_log_echo_info "==================================================" 17 | #以下操作步骤均是在官方提供的Docker容器中进行 18 | #进入测试目录 19 | cd /home/share/mm/cnvs 20 | # 0. 配置待测试板卡 21 | export MLU_VISIBLE_DEVICES=0,1 22 | # 1.开始测试前,打印cnmon信息 23 | #cnmon && sleep 1 24 | cnmon >> $RUNNING_LOG_FILE 25 | # 2. 
执行cnvs测试 26 | ########################################################## 27 | print_log_echo_info "[# ${TEST_TITLE}: " 28 | #压入后台执行cnvs。 29 | cnvs -r ${TEST_PLUGIN} -c "./config/${TEST_TITLE}.yml" -v >> ${RUNNING_LOG_FILE} 2>&1 & 30 | #进程压入队列 31 | push_queue_processes $! && sleep 0.1 32 | ########################################################## 33 | #启动【进度条】显示执行进度。直到后台进程执行完成。 34 | print_queue_processes && echo -en "${green}[#" && check_queue_processes_bar 0 0 && echo "]" && echo -en "${none}" && sync && sync 35 | #结束时间 36 | main_time_end=$(date +"%s.%N") 37 | #计算测试所用的时间 38 | ret=$(timediff $main_time_start $main_time_end) 39 | print_log_echo_info "Total time: $ret s" 40 | print_log_echo_info "==================================================" 41 | 42 | # 3.打印测试结果 43 | echo -en "${yellow}" 44 | RUNNING_LOG_FILE_NEW="${RUNNING_LOG_FILE%/*}/${TEST_TITLE}-${RUNNING_LOG_FILE##*/}" 45 | #修改文件名: 根据测试内容,追加前缀 46 | mv $RUNNING_LOG_FILE $RUNNING_LOG_FILE_NEW 47 | cat $RUNNING_LOG_FILE_NEW 48 | ls -la $RUNNING_LOG_FILE_NEW 49 | echo -en "${none}" 50 | -------------------------------------------------------------------------------- /mm/cnvs/mlulink-test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: mlulink-test.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/10/14 7 | # Description: mlulink插件用于测试mlulink性能,报告异常数据。该插件统计单主机下,多设备之间的mlulink单向、双向拷贝带宽,以及单向拷贝延迟。 8 | # Example: 9 | # Depends: 官方提供的mm Docker容器 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | source "./env.sh" 13 | TEST_PLUGIN="mlulink" 14 | TEST_TITLE="cnvs_${TEST_PLUGIN}" 15 | 16 | print_log_echo_info "==================================================" 17 | #以下操作步骤均是在官方提供的Docker容器中进行 18 | #进入测试目录 19 | cd /home/share/mm/cnvs 20 | # 0. 配置待测试板卡 21 | export MLU_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 22 | # 1.开始测试前,打印cnmon信息 23 | #cnmon && sleep 1 24 | cnmon >> $RUNNING_LOG_FILE 25 | # 2. 执行cnvs测试 26 | ########################################################## 27 | print_log_echo_info "[# ${TEST_TITLE}: " 28 | #压入后台执行cnvs。 29 | cnvs -r ${TEST_PLUGIN} -c "./config/${TEST_TITLE}.yml" -v >> ${RUNNING_LOG_FILE} 2>&1 & 30 | #进程压入队列 31 | push_queue_processes $! 
&& sleep 0.1 32 | ########################################################## 33 | #启动【进度条】显示执行进度。直到后台进程执行完成。 34 | print_queue_processes && echo -en "${green}[#" && check_queue_processes_bar 0 0 && echo "]" && echo -en "${none}" && sync && sync 35 | #结束时间 36 | main_time_end=$(date +"%s.%N") 37 | #计算测试所用的时间 38 | ret=$(timediff $main_time_start $main_time_end) 39 | print_log_echo_info "Total time: $ret s" 40 | print_log_echo_info "==================================================" 41 | 42 | # 3.打印测试结果 43 | echo -en "${yellow}" 44 | RUNNING_LOG_FILE_NEW="${RUNNING_LOG_FILE%/*}/${TEST_TITLE}-${RUNNING_LOG_FILE##*/}" 45 | #修改文件名: 根据测试内容,追加前缀 46 | mv $RUNNING_LOG_FILE $RUNNING_LOG_FILE_NEW 47 | cat $RUNNING_LOG_FILE_NEW 48 | ls -la $RUNNING_LOG_FILE_NEW 49 | echo -en "${none}" 50 | -------------------------------------------------------------------------------- /mm/cnvs/pcie-test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: pcie-test.sh 5 | # Revision: 1.0.0 6 | # Date: 2023/04/14 7 | # Description: 测试加速卡在各类情况下的PCIe拷贝带宽及延迟。 8 | # Example: 9 | # Depends: 官方提供的mm Docker容器 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | source "./env.sh" 13 | TEST_PLUGIN="pcie" 14 | TEST_TITLE="cnvs_${TEST_PLUGIN}_16M" 15 | 16 | print_log_echo_info "==================================================" 17 | #以下操作步骤均是在官方提供的Docker容器中进行 18 | #进入测试目录 19 | cd /home/share/mm/cnvs 20 | # 0. 配置待测试板卡 21 | export MLU_VISIBLE_DEVICES=0,1 22 | # 1. 开始测试前,打印cnmon信息 23 | #cnmon && sleep 1 24 | cnmon >> $RUNNING_LOG_FILE 25 | # 2. 执行cnvs测试 26 | ########################################################## 27 | print_log_echo_info "[# ${TEST_TITLE}: " 28 | #压入后台执行cnvs。 29 | cnvs -r ${TEST_PLUGIN} -c "./config/${TEST_TITLE}.yml" -v >> ${RUNNING_LOG_FILE} 2>&1 & 30 | #进程压入队列 31 | push_queue_processes $! 
&& sleep 0.1 32 | ########################################################## 33 | #启动【进度条】显示执行进度。直到后台进程执行完成。 34 | print_queue_processes && echo -en "${green}[#" && check_queue_processes_bar 0 0 && echo "]" && echo -en "${none}" && sync && sync 35 | #结束时间 36 | main_time_end=$(date +"%s.%N") 37 | #计算测试所用的时间 38 | ret=$(timediff $main_time_start $main_time_end) 39 | print_log_echo_info "Total time: $ret s" 40 | print_log_echo_info "==================================================" 41 | 42 | # 3.打印测试结果 43 | echo -en "${yellow}" 44 | RUNNING_LOG_FILE_NEW="${RUNNING_LOG_FILE%/*}/${TEST_TITLE}-${RUNNING_LOG_FILE##*/}" 45 | #修改文件名: 根据测试内容,追加前缀 46 | mv $RUNNING_LOG_FILE $RUNNING_LOG_FILE_NEW 47 | cat $RUNNING_LOG_FILE_NEW 48 | ls -la $RUNNING_LOG_FILE_NEW 49 | echo -en "${none}" 50 | -------------------------------------------------------------------------------- /mm/cnvs/run-all-test.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #加速卡峰值算力和效率测试 5 | bash ./peak-test.sh 6 | #GEMM矩阵计算性能和效率测试 7 | bash ./matmul-test.sh 8 | #PCIe传输性能测试 9 | bash ./pcie-test.sh 10 | #显存带宽性能测试 11 | bash ./memory-test.sh 12 | #稳定性压力测试(算力)-第一种方法(压测时间可通过修改yml配置文件) 13 | bash ./mlu_stress-test.sh 14 | #稳定性压力测试(算力)-第二种方法(压测时间可通过修改yml配置文件) 15 | bash ./targeted_stress-test.sh 16 | #稳定性压力测试(功耗)-第三种方法(压测时间可通过修改yml配置文件) 17 | bash ./targeted_power-test.sh -------------------------------------------------------------------------------- /mm/dependent_files/README.md: -------------------------------------------------------------------------------- 1 |
2 | Magicmind依赖库下载说明 3 | 4 | 5 |
6 | 7 | **下载方式** 8 | 9 | 1. 可前往[寒武纪开发者社区](https://developer.cambricon.com)注册账号按需下载到本目录; 10 | 2. 可通过官方技术对接人员提供的专属FTP账户指定路径下载; 11 | 3. 关注微信公众号 AIKnight , 发送文字消息, 包含关键字(不区分大小写): **mm依赖库**, 公众号会自动回复对应下载地址; 12 | 13 | >![](../../res/note.gif) **备注信息:** 14 | >- 请把下载后的依赖库放置到当前目录下(dependent_files),方便根据脚本提示进行后续操作。 15 | 16 | **公众号** 17 | >![](../../res/aiknight_wechat_344.jpg) 18 | 19 | -------------------------------------------------------------------------------- /mm/eval/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CURRENT_PATH=$(dirname $(readlink -f "$0")) 3 | rm -rvf ${CURRENT_PATH}/core.* 4 | rm -rvf ${CURRENT_PATH}/*.model 5 | rm -rvf ${CURRENT_PATH}/tmp* 6 | rm -rvf ${CURRENT_PATH}/benchmark_preprocessed 7 | rm -rvf ${CURRENT_PATH}/mm_tmp_* 8 | rm -rvf ${CURRENT_PATH}/compile_graph 9 | rm -rvf ${CURRENT_PATH}/log 10 | rm -rvf ${CURRENT_PATH}/output 11 | -------------------------------------------------------------------------------- /mm/eval/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #set -x 3 | # MM Benchmark env 4 | export TEST_SUITES=${CURRENT_PATH}/testsuites 5 | export NEUWARE_HOME=/usr/local/neuware/ 6 | export MODEL_PATH=/data/models/test_models/ 7 | export DATASET_PATH=/data/datasets 8 | export DEVICE_NAME="mlu370_S4" 9 | export DEVICE_ID=0 10 | DEVICE_NAME_GET=$(cnmon info -c "${DEVICE_ID}" -t | grep "Product Name"| awk -F ': ' '{print $2}') 11 | if [ -n "$DEVICE_NAME_GET" ];then 12 | if [ $DEVICE_NAME_GET = "MLU370-S4" ] ; then 13 | export DEVICE_NAME="mlu370_S4" 14 | elif [ $DEVICE_NAME_GET = "MLU370-X4" ] ; then 15 | export DEVICE_NAME="mlu370_X4" 16 | elif [ $DEVICE_NAME_GET = "MLU370-X8" ] ; then 17 | export DEVICE_NAME="mlu370_X8" 18 | fi 19 | fi 20 | 21 | export MM_CPP_MIN_LOG_LEVEL=3 22 | export CNNL_MIN_LOG_LEVEL=3 23 | -------------------------------------------------------------------------------- /mm/lib/time.sh: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------- 2 | # Filename: time.sh 3 | # Revision: 1.0.0 4 | # Date: 2022/09/24 5 | # Description: time lib function 6 | # Example: 7 | # Depends: 8 | # Notes: 9 | # ------------------------------------------------------------------------------- 10 | #################### Basic function #################### 11 | # Get the difference between two times 12 | # $1: start_time 13 | # $2: end_time 14 | timediff() { 15 | # time format:date +"%s.%N", such as 1502758855.907197692 16 | start_time=$1 17 | end_time=$2 18 | 19 | start_s=${start_time%.*} 20 | start_nanos=${start_time#*.} 21 | end_s=${end_time%.*} 22 | end_nanos=${end_time#*.} 23 | 24 | # end_nanos > start_nanos? 
25 | # Another way, the time part may start with 0, which means 26 | # it will be regarded as oct format, use "10#" to ensure 27 | # calculateing with decimal 28 | if [ "$end_nanos" -lt "$start_nanos" ];then 29 | end_s=$(( 10#$end_s - 1 )) 30 | end_nanos=$(( 10#$end_nanos + 10**9 )) 31 | fi 32 | 33 | # get timediff 34 | time=$(( 10#$end_s - 10#$start_s )).$(( (10#$end_nanos - 10#$start_nanos)/10**6 )) 35 | 36 | echo $time 37 | } 38 | -------------------------------------------------------------------------------- /mm/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2022/06/06 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-dev.sh 8 | # Depends: magicmind_0.10.0-1_ubuntu18.04.tar.gz 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | # Source env 12 | source "./env.sh" 13 | #################### main #################### 14 | # 0.Check param 15 | if [[ $# -eq 0 ]];then 16 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 17 | else 18 | FULLNAME_IMAGE="${1}" 19 | fi 20 | # 0.Check File Images 21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 22 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 23 | fi 24 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 25 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 26 | fi 27 | # 0.Check Docker Images 28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 29 | echo $num 30 | echo $NAME_IMAGE 31 | 32 | # 1.Load Docker Images 33 | if [ 0 -eq $num ];then 34 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 35 | #load image 36 | sudo docker load < ${FULLNAME_IMAGE} 37 | else 38 | echo "The image($NAME_IMAGE) is already loaded!" 
39 | fi 40 | 41 | #echo "All image information:" 42 | #sudo docker images 43 | echo "The image($NAME_IMAGE) information:" 44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 45 | -------------------------------------------------------------------------------- /mm/perf/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CURRENT_PATH=$(dirname $(readlink -f "$0")) 3 | rm -rvf ${CURRENT_PATH}/core.* 4 | rm -rvf ${CURRENT_PATH}/*.model 5 | rm -rvf ${CURRENT_PATH}/tmp* 6 | rm -rvf ${CURRENT_PATH}/benchmark_preprocessed 7 | rm -rvf ${CURRENT_PATH}/mm_tmp_* 8 | rm -rvf ${CURRENT_PATH}/compile_graph 9 | rm -rvf ${CURRENT_PATH}/log 10 | rm -rvf ${CURRENT_PATH}/output 11 | -------------------------------------------------------------------------------- /mm/perf/env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #set -x 3 | # MM Benchmark env 4 | export TEST_SUITES=${CURRENT_PATH}/testsuites 5 | export NEUWARE_HOME=/usr/local/neuware/ 6 | export MODEL_PATH=/data/models/test_models/ 7 | export DATASET_PATH=/data/datasets 8 | export DEVICE_NAME="mlu370_S4" 9 | export DEVICE_ID=0 10 | DEVICE_NAME_GET=$(cnmon info -c "${DEVICE_ID}" -t | grep "Product Name"| awk -F ': ' '{print $2}') 11 | if [ -n "$DEVICE_NAME_GET" ];then 12 | if [ $DEVICE_NAME_GET = "MLU370-S4" ] ; then 13 | export DEVICE_NAME="mlu370_S4" 14 | elif [ $DEVICE_NAME_GET = "MLU370-X4" ] ; then 15 | export DEVICE_NAME="mlu370_X4" 16 | elif [ $DEVICE_NAME_GET = "MLU370-X8" ] ; then 17 | export DEVICE_NAME="mlu370_X8" 18 | fi 19 | fi 20 | 21 | export MM_CPP_MIN_LOG_LEVEL=3 22 | export CNNL_MIN_LOG_LEVEL=3 23 | 24 | -------------------------------------------------------------------------------- /models: -------------------------------------------------------------------------------- 1 | /data/models/ -------------------------------------------------------------------------------- /pytorch/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #Dockerfile(16.04/18.04/CentOS) 5 | #OSVer="16.04" 6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi 7 | # 1.Source env 8 | source ./env.sh $OSVer 9 | 10 | # 2.rm docker container 11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 13 | if [ $num_container ]; then sudo docker stop $num_container;fi 14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | if [ $num_container ]; then sudo docker rm $num_container;fi 16 | 17 | # 3.rmi docker image 18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 21 | -------------------------------------------------------------------------------- /pytorch/cnml/README.md: -------------------------------------------------------------------------------- 1 | 2 | **该教程仅仅用于学习,打通流程; 不对效果负责,不承诺商用。** 3 | 4 | # 1. 概述 5 | 本目录下脚本适配于基于CNML的一些实例测试。 6 | 7 | **测试环境搭建参考** [../README.md](../README.md) 8 | 9 | **相关依赖下载方式** 关注微信公众号 AIKnight , 发送: `cnml`, 会得到 `cnml相关依赖包`的下载地址; 10 | 11 | >![](../../res/aiknight_wechat_344.jpg) 12 | 13 | # 2. 编译 14 | 编译cnmml测试程序. 
15 | ```bash 16 | cd /usr/local/neuware/samples/cnml/script 17 | #编译 18 | ./compileSP_cnml.sh 19 | ``` 20 | 21 | # 3. 测试 22 | ## 3.1. 算力测试 23 | 使用cnml压测最大算力, 由于软硬件因素限制,可能与实际算力值有偏差. 24 | **命令** 25 | ```bash 26 | cd /usr/local/neuware/samples/cnml/script 27 | #算力测试 28 | ./runExampleTests.sh 8 1 29 | ``` 30 | **实例** 31 | 如下所示实例, 测试日志中`compute TOPS `为 测试出的MLU270算力数值. 32 | ```bash 33 | root@cam-3630:/usr/local/neuware/samples/cnml/script# ./runExampleTests.sh 8 1 34 | /usr/bin/gcc 35 | /usr/local/neuware/samples/cnml/build /usr/local/neuware/samples/cnml/script 36 | -- Running cambricon release cnml test cases. 37 | cnml test, test_code = 8 coreVersion = 1 38 | CNML: 7.10.3 85350b141 39 | CNRT: 4.10.4 41e356b 40 | computing conv op on MLU... 41 | compile cost 244.388 ms 42 | compute forward cost 1.419 ms 43 | compute MAC = 167772160000.000000 44 | compute TOPS = 118.233 T 45 | dumping mlu result to file mlu_output... 46 | /usr/local/neuware/samples/cnml/script 47 | root@cam-3630:/usr/local/neuware/samples/cnml/script# 48 | ``` 49 | -------------------------------------------------------------------------------- /pytorch/deepsort/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/deepsort/6.png -------------------------------------------------------------------------------- /pytorch/deepsort/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf *_result* 7 | rm -rf *.cambricon* 8 | rm -rf *_quantized.pth 9 | shopt -u extglob 10 | -------------------------------------------------------------------------------- /pytorch/deepsort/run_convertmodel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: run_quantize.sh 5 | # UpdateDate: 2021/10/09 6 | # Description: Deepsort convert model for cambricon . 7 | # Example: ./run_convertmodel.sh 8 | # Notes: 9 | # ------------------------------------------------------------------------------- 10 | #################### main #################### 11 | 12 | MODDEL_NAME_IMAGE=6.png #quantize image 13 | MODDEL_NAME_ORG=ckpt.t7 #Originate weights 14 | MODDEL_NAME_OUT=feature_extract_quantized.pth #quantize后的模型 15 | MODDEL_FILE_NAME_QUANTIZE=quantize_feature_extract.py #quantize file name 16 | MODDEL_FILE_NAME_ONLINE=forward_feature_extract.py #online model file name 17 | 18 | # 0.Check param 19 | if [ ! -f "$PWD/$MODDEL_NAME_ORG" ] ; then 20 | echo "Model file ($PWD/$MODDEL_NAME_ORG) not exist, pleas download it or copy !" 21 | fi 22 | 23 | if [ ! -f "$PWD/$MODDEL_NAME_IMAGE" ] ; then 24 | echo "Model file ($PWD/$MODDEL_NAME_IMAGE) not exist, pleas download it or copy !" 25 | fi 26 | 27 | if [ ! -f "$PWD/$MODDEL_FILE_NAME_QUANTIZE" ] ; then 28 | echo "Model file ($PWD/$MODDEL_FILE_NAME_QUANTIZE) not exist, pleas download it or copy !" 29 | fi 30 | 31 | if [ ! -f "$PWD/$MODDEL_FILE_NAME_ONLINE" ] ; then 32 | echo "Model file ($PWD/$MODDEL_NAME_IMAGE) not exist, pleas download it or copy !" 33 | fi 34 | 35 | #1. run quantize 36 | python "$PWD/$MODDEL_FILE_NAME_QUANTIZE" "$PWD/$MODDEL_NAME_IMAGE" "$PWD/$MODDEL_NAME_ORG" "$PWD/$MODDEL_NAME_OUT" 37 | ls -la $MODDEL_NAME_OUT 38 | 39 | #2.quantize result 40 | echo "quantize finish !" 41 | 42 | #3. 
run online 43 | python "$PWD/$MODDEL_FILE_NAME_ONLINE" ./6.png ./ckpt.t7 "$PWD/$MODDEL_NAME_OUT" 44 | 45 | #4. online result 46 | echo "online finish !" 47 | 48 | -------------------------------------------------------------------------------- /pytorch/deepsort/run_offline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: run_quantize.sh 5 | # UpdateDate: 2021/10/09 6 | # Description: Deepsort convert model for cambricon . 7 | # Example: ./run_convertmodel.sh 8 | # Notes: 9 | # ------------------------------------------------------------------------------- 10 | #################### main #################### 11 | 12 | MODDEL_FILE_NAME_OFFLINE=genoff.py #offline model file name 13 | 14 | # 0.Check param 15 | #if [ ! -d "$WEIGHTS_DIR" ] ; then 16 | # mkdir -p $WEIGHTS_DIR 17 | # echo "Create $WEIGHTS_DIR ok !" 18 | #else 19 | # echo "Directory ($WEIGHTS_DIR): Exist!" 20 | #fi 21 | 22 | if [ ! -f "$PWD/$MODDEL_FILE_NAME_OFFLINE" ] ; then 23 | echo "Model file ($PWD/$MODDEL_NAME_OFFLINE) not exist, pleas download it or copy !" 24 | fi 25 | 26 | #1. run offline 27 | python "$PWD/$MODDEL_FILE_NAME_OFFLINE" -fake_device 0 -model feature_extract -mcore MLU270 -core_number 1 -batch_size 1 -half_input 1 -input_format 1 -mname feature_extract_1b1c_half 28 | 29 | #2. offline result 30 | echo "offline finish !" 31 | -------------------------------------------------------------------------------- /pytorch/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2021/08/04 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-dev.sh 8 | # Depends: pytorch-0.15.602-ubuntu16.04.tar 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | # Source env 12 | source "./env.sh" 13 | #################### main #################### 14 | # 0.Check param 15 | if [[ $# -eq 0 ]];then 16 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 17 | else 18 | FULLNAME_IMAGE="${1}" 19 | fi 20 | # 0.Check File Images 21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 22 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 23 | fi 24 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 25 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 26 | fi 27 | # 0.Check Docker Images 28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 29 | echo $num 30 | echo $NAME_IMAGE 31 | 32 | # 1.Load Docker Images 33 | if [ 0 -eq $num ];then 34 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 35 | #load image 36 | sudo docker load < ${FULLNAME_IMAGE} 37 | else 38 | echo "The image($NAME_IMAGE) is already loaded!" 
39 | fi 40 | 41 | #echo "All image information:" 42 | #sudo docker images 43 | echo "The image($NAME_IMAGE) information:" 44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 45 | -------------------------------------------------------------------------------- /pytorch/tools/getFileList.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | usage() 5 | { 6 | echo "Usage:" 7 | echo " $0 [pathdir] [filelist]" 8 | echo "" 9 | echo " Parameter description:" 10 | echo " [pathdir]: Relative path of the directory" 11 | echo " [filelist]: File name for the generated result" 12 | echo " EG: ../tools/getFileList.sh /home/share/pytorch/yolov4-416/datasets file_list_datasets" 13 | } 14 | 15 | if [ $# -lt 1 ]; then 16 | echo "[ERROR] Unknown parameter." 17 | usage 18 | exit 1 19 | fi 20 | 21 | pathdir=$1 22 | filelist=$2 23 | 24 | function getdir(){ 25 | #echo $1 26 | for file in $1/* 27 | do 28 | if test -f $file 29 | then 30 | #echo $file 31 | arr=(${arr[*]} $file) 32 | else 33 | getdir $file 34 | fi 35 | done 36 | } 37 | 38 | # Recursively call the function: save the path of all files in the folder to the array 39 | getdir $pathdir 40 | # Print the path of all files to file.list 41 | if [ -f "$filelist" ];then 42 | rm -f $filelist 43 | fi 44 | length=${#arr[@]} 45 | for((a=0;a<$length;a++)) 46 | do 47 | echo ${arr[$a]} >> $filelist 48 | done 49 | 50 | #Display filelist 51 | cat $filelist 52 | echo "[pathdir]: $pathdir" 53 | echo "[filelist]: $filelist" 54 | -------------------------------------------------------------------------------- /pytorch/tools/getPicRandomDir2Dir.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | #!/usr/bin/env python 3 | 4 | import os, random, shutil 5 | 6 | rate=0.1 #自定义抽取图片的比例,比方说100张抽10张,那就是0.1 7 | picknumber=16 8 | 9 | def moveFile(fileDir): 10 | pathDir = os.listdir(fileDir) #取图片的原始路径 11 | filenumber=len(pathDir) 12 | #picknumber=int(filenumber*rate) #按照rate比例从文件夹中取一定数量图片 13 | sample = random.sample(pathDir, picknumber) #随机选取picknumber数量的样本图片 14 | print (sample) 15 | for name in sample: 16 | shutil.move(fileDir+name, tarDir+name) 17 | return 18 | 19 | if __name__ == '__main__': 20 | fileDir = "/data/datasets/COCO--/val2017/" #源图片文件夹路径 21 | tarDir = '/data/datasets/COCO--/val2017-5000/' #移动到新的文件夹路径 22 | moveFile(fileDir) 23 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/load-image-ubuntu18.04-pytorch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-ubuntu16.04-pytorch.sh 5 | # UpdateDate: 2021/08/04 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-ubuntu16.04-pytorch.sh 8 | # Depends: pytorch-0.15.602-ubuntu16.04.tar 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | # Source env 12 | source "./env.sh" 13 | #################### main #################### 14 | # 0.Check param 15 | if [[ $# -eq 0 ]];then 16 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 17 | else 18 | FULLNAME_IMAGE="${1}" 19 | fi 20 | # 0.Check File Images 21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 22 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 23 | fi 24 | if [[ ! 
${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 25 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 26 | fi 27 | # 0.Check Docker Images 28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 29 | echo $num 30 | echo $NAME_IMAGE 31 | 32 | # 1.Load Docker Images 33 | if [ 0 -eq $num ];then 34 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 35 | #load image 36 | sudo docker load < ${FULLNAME_IMAGE} 37 | else 38 | echo "The image($NAME_IMAGE) is already loaded!" 39 | fi 40 | 41 | #echo "All image information:" 42 | #sudo docker images 43 | echo "The image($NAME_IMAGE) information:" 44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 45 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/offline/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf *.cambricon* 7 | shopt -u extglob 8 | 9 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf model/*.weights* 7 | rm -rf model/*.cfg* 8 | rm -rf ../../pytorch_models/int8/checkpoints/*.pth* 9 | rm -rf ../../pytorch_models/int16/checkpoints/*.pth* 10 | rm -rf ../../pytorch_models/origin/checkpoints/*.pth* 11 | shopt -u extglob 12 | 13 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/data/coco.names: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | traffic light 11 | fire hydrant 12 | stop sign 13 | parking meter 14 | bench 15 | bird 16 | cat 17 | dog 18 | horse 19 | sheep 20 | cow 21 | elephant 22 | bear 23 | zebra 24 | giraffe 25 | backpack 26 | umbrella 27 | handbag 28 | tie 29 | suitcase 30 | frisbee 31 | skis 32 | snowboard 33 | sports ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard 39 | tennis racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife 45 | spoon 46 | bowl 47 | banana 48 | apple 49 | sandwich 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut 56 | cake 57 | chair 58 | sofa 59 | pottedplant 60 | bed 61 | diningtable 62 | toilet 63 | tvmonitor 64 | laptop 65 | mouse 66 | remote 67 | keyboard 68 | cell phone 69 | microwave 70 | oven 71 | toaster 72 | sink 73 | refrigerator 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear 79 | hair drier 80 | toothbrush 81 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/data/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4-tiny/online/yolov4/data/dog.jpg -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/data/meat.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4-tiny/online/yolov4/data/meat.jpg -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/data/voc.names: -------------------------------------------------------------------------------- 1 | aeroplane 2 | bicycle 3 | bird 4 | boat 5 | bottle 6 | bus 7 | car 8 | cat 9 | chair 10 | cow 11 | diningtable 12 | dog 13 | horse 14 | motorbike 15 | person 16 | pottedplant 17 | sheep 18 | sofa 19 | train 20 | tvmonitor 21 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/model/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download latest models 3 | # Usage: 4 | # $ bash download_weights.sh 5 | 6 | #下载yolov4-tiny.cfg 7 | wget https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4-tiny.cfg 8 | #回显确认 9 | ls -la 10 | 11 | #下载yolov4-tiny.weights 12 | wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights 13 | #回显确认 14 | ls -la 15 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/predictions.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4-tiny/online/yolov4/predictions.jpg -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/run_quant.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # 0. Check param 6 | if [ ! -d "$TORCH_HOME/origin/checkpoints/" ] ; then 7 | mkdir -p $TORCH_HOME/origin/checkpoints/ 8 | echo "Create $TORCH_HOME/origin/checkpoints/ ok !" 9 | else 10 | echo "Directory ($TORCH_HOME/origin/checkpoints/): Exist!" 11 | fi 12 | 13 | if [ ! -d "$TORCH_HOME/int8/checkpoints/" ] ; then 14 | mkdir -p $TORCH_HOME/int8/checkpoints/ 15 | echo "Create $TORCH_HOME/int8/checkpoints/ ok !" 16 | else 17 | echo "Directory ($TORCH_HOME/int8/checkpoints/): Exist!" 18 | fi 19 | 20 | # 1. 模型转换(darknet -> pytorch) 21 | python eval.py -cfgfile model/yolov4-tiny.cfg -weightfile model/yolov4-tiny.weights -darknet2pth true 22 | mv yolov4-tiny.pth $TORCH_HOME/origin/checkpoints/yolov4-tiny.pth 23 | #查看darknet模型转为pytorch后的模型 24 | ls -la ../../pytorch_models/origin/checkpoints/yolov4-tiny.pth 25 | 26 | # 2. 
模型量化 27 | python eval.py -quantized_mode 1 -quantization True -yolov4_version yolov4-tiny 28 | mv yolov4-tiny.pth $TORCH_HOME/int8/checkpoints/ 29 | #查看量化后的模型 30 | ls -la ../../pytorch_models/int8/checkpoints/yolov4-tiny.pth 31 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/tool/COCO_eval.py: -------------------------------------------------------------------------------- 1 | from pycocotools.coco import COCO 2 | from pycocotools.cocoeval import COCOeval 3 | import json 4 | import os 5 | 6 | def calculateAp(resFile, annFile): 7 | annType = ['segm','bbox','keypoints'] 8 | annType = annType[1] #specify type here 9 | prefix = 'person_keypoints' if annType=='keypoints' else 'instances' 10 | 11 | #initialize COCO ground truth api 12 | cocoGt=COCO(annFile) 13 | 14 | cocoDt = cocoGt.loadRes(resFile) 15 | result = json.load(open(resFile, 'r')) 16 | imgIds = list() 17 | 18 | for res in result: 19 | if res['image_id'] not in imgIds: 20 | imgIds.append(res['image_id']) 21 | print('total image:{}'.format(len(imgIds))) 22 | 23 | # running evaluation 24 | cocoEval = COCOeval(cocoGt,cocoDt,annType) 25 | cocoEval.params.imgIds = imgIds 26 | cocoEval.evaluate() 27 | cocoEval.accumulate() 28 | cocoEval.summarize() 29 | 30 | return round(cocoEval.stats[0], 3) 31 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/tool/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4-tiny/online/yolov4/tool/__init__.py -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/online/yolov4/tool/change_cat_id.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | coco_class_name = [ 4 | 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 5 | 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 6 | 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 7 | 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 8 | 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 9 | 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 10 | 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 11 | 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 12 | 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 13 | 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 14 | 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 15 | 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 16 | 'scissors', 'teddy bear', 'hair drier', 'toothbrush' 17 | ] 18 | 19 | all_id = [n for n in range(1, 92)] 20 | delete_id = [0, 12, 26, 29, 30, 45, 66, 68, 69, 71, 83, 91] 21 | map_id = list(set(all_id) - set(delete_id)) 22 | 23 | def change_id_func(data): 24 | change_list = list() 25 | for d in data: 26 | ori_id = d['category_id'] 27 | d['category_id'] = map_id[ori_id] 28 | change_list.append(d) 29 | return change_list 30 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/pytorch_models/int16/checkpoints/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s 
extglob 6 | rm -rf ./*.pth* 7 | shopt -u extglob 8 | 9 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/pytorch_models/int8/checkpoints/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf ./*.pth* 7 | shopt -u extglob 8 | 9 | -------------------------------------------------------------------------------- /pytorch/yolov4-tiny/pytorch_models/origin/checkpoints/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf ./*.pth* 7 | shopt -u extglob 8 | 9 | -------------------------------------------------------------------------------- /pytorch/yolov4/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # 清理临时生成的文件 4 | # Usage: 5 | # $ bash clean.sh all/test/models 6 | 7 | usage() 8 | { 9 | echo "Usage:" 10 | echo " $0 all/test/models" 11 | } 12 | 13 | clean_all() { 14 | #clean test 15 | cd ./test 16 | ./clean.sh 17 | cd - 18 | 19 | #clean models 20 | rm -vf ./models/mlu/*.cambricon* 21 | rm -vf ./models/*.weights 22 | rm -rvf ./models/pytorch_models 23 | } 24 | 25 | if [[ $# -eq 0 ]];then 26 | usage 27 | elif [[ $# -eq 1 ]];then 28 | if [[ "$1" == "all" ]];then 29 | #clean all 30 | clean_all 31 | elif [[ "$1" == "test" ]];then 32 | #clean test 33 | cd ./test && ./clean.sh && cd - 34 | elif [[ "$1" == "models" ]];then 35 | #clean models 36 | rm -vrf ./models/mlu/*.cambricon* && rm -vrf ./models/*.weights 37 | else 38 | usage 39 | fi 40 | else 41 | usage 42 | fi 43 | 44 | -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000001993.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000001993.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000046252.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000046252.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000065288.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000065288.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000079588.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000079588.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000147729.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000147729.jpg 
-------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000170613.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000170613.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000250758.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000250758.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000283520.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000283520.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000303908.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000303908.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000363784.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000363784.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000419408.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000419408.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000460379.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000460379.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000463802.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000463802.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000479155.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000479155.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000526751.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000526751.jpg 
-------------------------------------------------------------------------------- /pytorch/yolov4/datasets/000000565607.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/datasets/000000565607.jpg -------------------------------------------------------------------------------- /pytorch/yolov4/file_list_datasets: -------------------------------------------------------------------------------- 1 | 000000001993.jpg 2 | 000000046252.jpg 3 | 000000065288.jpg 4 | 000000079588.jpg 5 | 000000147729.jpg 6 | 000000170613.jpg 7 | 000000250758.jpg 8 | 000000283520.jpg 9 | 000000303908.jpg 10 | 000000363784.jpg 11 | 000000419408.jpg 12 | 000000460379.jpg 13 | 000000463802.jpg 14 | 000000479155.jpg 15 | 000000526751.jpg 16 | 000000565607.jpg 17 | -------------------------------------------------------------------------------- /pytorch/yolov4/models/download_weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Download latest models 3 | # Usage: 4 | # $ bash download_weights.sh 5 | 6 | #下载yolov4.cfg 7 | #wget https://raw.githubusercontent.com/AlexeyAB/darknet/master/cfg/yolov4.cfg 8 | #回显确认 9 | #ls -la 10 | 11 | #下载yolov4.weights 12 | wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights 13 | #回显确认 14 | ls -la -------------------------------------------------------------------------------- /pytorch/yolov4/models/mlu/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /pytorch/yolov4/res/yolov4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov4/res/yolov4.png -------------------------------------------------------------------------------- /pytorch/yolov4/test/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | for dir in $(ls) 6 | do 7 | if [ -d "./${dir}" ]; then #先判断是否是目录,然后再执行clean 8 | echo $dir && cd $dir && ./clean.sh && cd - 9 | fi 10 | done -------------------------------------------------------------------------------- /pytorch/yolov4/test/offline/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /pytorch/yolov4/test/online/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/aligntorch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: aligntorch.sh 5 | # UpdateDate: 2022/03/23 6 | # Description: align torch version for yolov5 . 
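#              (Added note, assumption based on tools/aligntorch.py: the "alignment"
#              re-saves the upstream yolov5s.pt checkpoint as a plain state_dict with
#              _use_new_zipfile_serialization=False -- the pre-torch-1.6 file format --
#              so that older torch builds can load the weights.)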
7 | # Example: ./aligntorch.sh 8 | # Depends: >= pytorch 1.8 9 | # YOLOv5(git clone https://github.com/ultralytics/yolov5 -b v5.0) 10 | # dev-env-ubuntu(https://github.com/CambriconKnight/dev-env-ubuntu) 11 | # Notes: 12 | # ------------------------------------------------------------------------------- 13 | #################### main #################### 14 | WEIGHTS_DIR=$PWD/weights #原始模型存放路径 15 | OUTPUT_DIR=$PWD/output #输出模型存放路径 16 | MODDEL_NAME_ORG=yolov5s.pt #需要转换模型名称,如果是使用自己训练的模型,需要将模型放到weights目录,并修改MODDEL_NAME_ORG宏定义 17 | MODDEL_NAME_OUT=yolov5s.pth #转换后的模型 18 | 19 | # 0.Check param 20 | #weights 21 | if [ ! -d "$WEIGHTS_DIR" ] ; then 22 | mkdir -p $WEIGHTS_DIR 23 | echo "Create $WEIGHTS_DIR OK !" 24 | else 25 | echo "Directory ($WEIGHTS_DIR): Exist!" 26 | fi 27 | #output 28 | if [ ! -d "$OUTPUT_DIR" ] ; then 29 | mkdir -p $OUTPUT_DIR 30 | echo "Create $OUTPUT_DIR OK !" 31 | else 32 | echo "Directory ($OUTPUT_DIR): Exist!" 33 | fi 34 | #Download yolov5 35 | ./tools/download-yolov5.sh 36 | #Download yolov5-weights 37 | if [ ! -f "$WEIGHTS_DIR/$MODDEL_NAME_ORG" ] ; then 38 | echo "Model file ($WEIGHTS_DIR/$MODDEL_NAME_ORG) not exist, pleas download it or copy !" 39 | pushd $WEIGHTS_DIR 40 | ./download-yolov5-weights.sh 41 | popd 42 | fi 43 | 44 | #1. Run 45 | cp -rvf ./tools/aligntorch.py ./yolov5/ 46 | python ./yolov5/aligntorch.py --weights "$WEIGHTS_DIR/$MODDEL_NAME_ORG" --output "$OUTPUT_DIR/$MODDEL_NAME_OUT" 47 | 48 | #2. Result 49 | ls -lh $OUTPUT_DIR/$MODDEL_NAME_OUT 50 | rm -rf ./yolov5/aligntorch.py 51 | echo "Align Torch finish !" 52 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/build-image-yolov5-align.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: build-image-yolov5-align.sh 5 | # UpdateDate: 2022/02/08 6 | # Description: Build docker images for yolov5-align. 7 | # Example: ./build-image-yolov5-align.sh 8 | # Depends: 9 | # YOLOv5(git clone https://github.com/ultralytics/yolov5 -b v5.0) 10 | # https://github.com/ultralytics/yolov5/archive/refs/tags/v5.0.tar.gz 11 | # Notes: 12 | # ------------------------------------------------------------------------------- 13 | #Dockerfile(16.04/18.04/CentOS) 14 | OSVer="18.04" 15 | if [[ $# -ne 0 ]];then OSVer="${1}";fi 16 | # 0. Source env 17 | source ./env.sh $OSVer 18 | #################### main #################### 19 | # 1. check 20 | #if [ ! -d "$PATH_WORK" ];then 21 | # mkdir -p $PATH_WORK 22 | #else 23 | # echo "Directory($PATH_WORK): Exists!" 24 | #fi 25 | 26 | # 2. Download yolov5 27 | ./tools/download-yolov5.sh 28 | 29 | # 3. Build image 30 | echo "====================== build image ======================" 31 | sudo docker build -f ./docker/$FILENAME_DOCKERFILE \ 32 | -t $NAME_IMAGE . 33 | 34 | # 4. 
Save image 35 | echo "====================== save image ======================" 36 | sudo docker save -o $FILENAME_IMAGE $NAME_IMAGE 37 | sudo chmod 664 $FILENAME_IMAGE 38 | mv $FILENAME_IMAGE ./docker/ 39 | ls -lh ./docker/$FILENAME_IMAGE 40 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Source env 5 | source "./env.sh" 6 | 7 | # 1.PATH_WORK 8 | sudo rm -rvf ${PATH_WORK} 9 | 10 | # 2.FILENAME_IMAGE 11 | sudo rm -vf ${FULLNAME_IMAGE} 12 | 13 | # 3.rm docker container 14 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 16 | if [ $num_container ]; then sudo docker stop $num_container;fi 17 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 18 | if [ $num_container ]; then sudo docker rm $num_container;fi 19 | 20 | # 4.rmi docker image 21 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 22 | num_images=`sudo docker images | grep ${MY_IMAGE} | awk '{print $3}'` 23 | if [ $num_images ]; then sudo docker rmi $num_images;fi 24 | 25 | # 5.output&weights 26 | sudo rm -rvf output ./weights/*.pt -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/dependent_files/README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | **YOLOv5源码及模型下载说明** 4 | 5 |
6 | 7 | **下载方式** 8 | 9 | 1. 可前往[寒武纪开发者社区](https://developer.cambricon.com)注册账号按需下载到本目录; 10 | 2. 可通过官方技术对接人员提供的专属FTP账户指定路径下载; 11 | 3. 关注微信公众号 AIKnight , 发送文字消息, 包含关键字(不区分大小写): **YOLOv5-v5.0**, 公众号会自动回复YOLOv5源码及模型(版本为v5.0)的下载地址; 12 | 13 | >![](../../../../res/note.gif) **备注信息:** 14 | >- 1. YOLOv5源码及模型(版本为v5.0)下载地址详见本仓库根目录[README.md](../README.md) 1.2章节-软件环境准备。 15 | >- 2. 请把下载后的源码及模型放置到当前目录下(dependent_files),方便根据脚本提示进行后续操作。 16 | 17 | **公众号** 18 | >![](../../../../res/aiknight_wechat_344.jpg) 19 | 20 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/docker/Dockerfile.18.04: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------- 2 | # Filename: Dockerfile 3 | # UpdateDate: 2022/03/18 4 | # Description: Build docker images for YOLOv5. 5 | # Example: 6 | # Depends: Based on Ubuntu 16.04 7 | # Notes: 8 | # ------------------------------------------------------------------------------- 9 | FROM ubuntu:18.04 10 | MAINTAINER CambriconKnight 11 | 12 | # 1.Sync files 13 | RUN echo -e 'nameserver 114.114.114.114' > /etc/resolv.conf 14 | COPY ./docker/* /temp/ 15 | WORKDIR /temp/ 16 | 17 | # 2.Pre-installed software 18 | ENV DEBIAN_FRONTEND=noninteractive 19 | RUN bash ./pre_packages.sh 20 | 21 | # 3.Set ENV && Clean 22 | ENV LANG C.UTF-8 23 | ENV TIME_ZONE Asia/Shanghai 24 | RUN echo "${TIME_ZONE}" > /etc/timezone && \ 25 | ln -sf /usr/share/zoneinfo/${TIME_ZONE} /etc/localtime && \ 26 | rm -rf /temp/ && rm -rf /var/lib/apt/lists/* && \ 27 | apt-get clean 28 | 29 | # 4.Set WorkDir 30 | WORKDIR /home/share -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/docker/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # 1.del tar.gz.eg: image-ubuntu16.04-caffe-v1.6.0.tar.gz 5 | sudo rm -vf image-*.tar.gz 6 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/docker/pip.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | index-url = https://pypi.doubanio.com/simple 3 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/docker/pre_packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #echo -e 'nameserver 114.114.114.114' > /etc/resolv.conf 4 | mkdir -p /root/.pip/ 5 | cp pip.conf /root/.pip/ 6 | cp sources_18.04.list /etc/apt/sources.list 7 | #add-apt-repository ppa:jonathonf/python-3.7 8 | DEBIAN_FRONTEND=noninteractive 9 | rm -rf /var/lib/apt/lists/* \ 10 | && mkdir /var/lib/apt/lists/partial \ 11 | && apt-get clean \ 12 | && apt-get update --fix-missing \ 13 | && apt-get upgrade -y \ 14 | && apt-get install -y --no-install-recommends \ 15 | curl git wget vim build-essential cmake make ca-certificates nasm yasm \ 16 | openssh-server libgoogle-glog-dev libgflags-dev libcurl4-openssl-dev \ 17 | libsdl2-dev libfreetype6-dev \ 18 | lcov apt-utils \ 19 | libopencv-dev python3.6 python3-dev \ 20 | python3-tk \ 21 | python3-pip \ 22 | python-pip \ 23 | net-tools \ 24 | software-properties-common \ 25 | libgtk2.0-dev pkg-config \ 26 | && apt-get clean \ 27 | && rm -rf /var/lib/apt/lists/* \ 28 | && echo -e "\033[0;32m[apt install... 
Done] \033[0m" 29 | 30 | #设置python 优先级 31 | update-alternatives --install /usr/bin/python python /usr/bin/python3 99 \ 32 | && sed -i "s/\/usr\/bin\/python.*/\/usr\/bin\/python3/g" /usr/bin/pip3 \ 33 | && ln -s /usr/bin/pip3 /usr/bin/pip -f \ 34 | && ln -s /usr/bin/python3 /usr/bin/python -f 35 | 36 | #pip2&pip3安装。 第三方依赖包列表可在 PyTorch 源码主目录下的 requirements.txt 中查询。 37 | python -m pip install --upgrade pip \ 38 | && pip3 install -r requirements.txt \ 39 | && apt-get clean \ 40 | && echo -e "\033[0;32m[pip install -r requirements.txt... Done] \033[0m" 41 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/docker/requirements.txt: -------------------------------------------------------------------------------- 1 | # pip install -r requirements.txt 2 | 3 | # Base ---------------------------------------- 4 | matplotlib>=3.2.2 5 | numpy>=1.18.5 6 | opencv-python>=4.1.2 7 | Pillow>=7.1.2 8 | PyYAML>=5.3.1 9 | scipy>=1.4.1 10 | torch==1.7.0 11 | torchvision>=0.8.1 12 | tqdm>=4.41.0 13 | 14 | # Logging ------------------------------------- 15 | tensorboard>=2.4.1 16 | # wandb 17 | 18 | # Plotting ------------------------------------ 19 | seaborn>=0.11.0 20 | pandas 21 | 22 | # Export -------------------------------------- 23 | # coremltools>=4.1 # CoreML export 24 | # onnx>=1.9.0 # ONNX export 25 | # onnx-simplifier>=0.3.6 # ONNX simplifier 26 | # scikit-learn==0.19.2 # CoreML quantization 27 | # tensorflow>=2.4.1 # TFLite export 28 | # tensorflowjs>=3.9.0 # TF.js export 29 | 30 | # Extras -------------------------------------- 31 | # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 32 | # pycocotools>=2.0 # COCO mAP 33 | # albumentations>=1.0.3 34 | thop # FLOPs computation 35 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/env.sh: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------- 2 | # Filename: env.sh 3 | # Revision: 1.0.0 4 | # Date: 2022/03/23 5 | # Description: Common Environment variable 6 | # Example: 7 | # Depends: 8 | # Notes: 9 | # ------------------------------------------------------------------------------- 10 | #################### version #################### 11 | ## 以下信息,根据各个版本中文件实际名词填写. 
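## Example (derived from the defaults below): with VER=1.0.0, OSVer=18.04, ORG=kang
## and PATH_WORK=yolov5, the generated names expand to:
##   NAME_IMAGE     = kang/ubuntu18.04-yolov5:v1.0.0
##   FILENAME_IMAGE = image-ubuntu18.04-yolov5-v1.0.0.tar.gz
##   MY_CONTAINER   = container-ubuntu18.04-yolov5-v1.0.0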
12 | #Version 13 | VER="1.0.0" 14 | FILENAME_GITHUB_YOLOv5="yolov5-5.0.tar.gz" 15 | #################### docker #################### 16 | #Work 17 | PATH_WORK="yolov5" 18 | #Dockerfile(16.04/18.04/CentOS) 19 | OSVer="18.04" 20 | if [[ $# -ne 0 ]];then OSVer="${1}";fi 21 | #(Dockerfile.16.04/Dockerfile.18.04/Dockerfile.CentOS) 22 | FILENAME_DOCKERFILE="Dockerfile.$OSVer" 23 | DIR_DOCKER="docker" 24 | #Version 25 | VERSION="v${VER}" 26 | #Organization 27 | ORG="kang" 28 | #Operating system 29 | OS="ubuntu$OSVer" 30 | #Docker image 31 | MY_IMAGE="$ORG/$OS-$PATH_WORK" 32 | #Docker image name(cam/ubuntu16.04-ffmpeg-mlu:v1.6.0) 33 | NAME_IMAGE="$MY_IMAGE:$VERSION" 34 | #FileName DockerImage(image-ubuntu16.04-ffmpeg-mlu-v1.6.0.tar.gz) 35 | FILENAME_IMAGE="image-$OS-$PATH_WORK-$VERSION.tar.gz" 36 | FULLNAME_IMAGE="./docker/${FILENAME_IMAGE}" 37 | #Docker container name 38 | MY_CONTAINER="container-$OS-$PATH_WORK-$VERSION" 39 | 40 | #Font color 41 | none="\033[0m" 42 | black="\033[0;30m" 43 | dark_gray="\033[1;30m" 44 | blue="\033[0;34m" 45 | light_blue="\033[1;34m" 46 | green="\033[0;32m" 47 | light_green="\033[1;32m" 48 | cyan="\033[0;36m" 49 | light_cyan="\033[1;36m" 50 | red="\033[0;31m" 51 | light_red="\033[1;31m" 52 | purple="\033[0;35m" 53 | light_purple="\033[1;35m" 54 | brown="\033[0;33m" 55 | yellow="\033[1;33m" 56 | light_gray="\033[0;37m" 57 | white="\033[1;37m" 58 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/load-image-yolov5-align.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-yolov5-align.sh 5 | # UpdateDate: 2022/03/23 6 | # Description: Loading docker image for IDE. 7 | # Example: 8 | # ./load-image-yolov5-align.sh 9 | # Depends: image-$OS-$PATH_WORK-$VERSION.tar.gz 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | #Dockerfile(16.04/18.04/CentOS) 13 | OSVer="18.04" 14 | if [[ $# -ne 0 ]];then OSVer="${1}";fi 15 | # Source env 16 | source ./env.sh $OSVer 17 | #################### main #################### 18 | # 0.Check param 19 | #if [[ $# -eq 0 ]];then 20 | # echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 21 | #else 22 | # FULLNAME_IMAGE="${1}" 23 | #fi 24 | # 0.Check File Images 25 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 26 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 27 | fi 28 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 29 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 30 | fi 31 | # 0.Check Docker Images 32 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 33 | echo $num 34 | echo $NAME_IMAGE 35 | 36 | # 1.Load Docker Images 37 | if [ 0 -eq $num ];then 38 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 39 | #load image 40 | sudo docker load < ${FULLNAME_IMAGE} 41 | else 42 | echo "The image($NAME_IMAGE) is already loaded!" 
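#Note: unlike pytorch/load-image-dev.sh above, the optional positional argument of
#this script selects the Ubuntu base version (16.04/18.04) handed to env.sh, not the
#image tarball path; the tarball is always taken from ./docker/${FILENAME_IMAGE}.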
43 | fi 44 | 45 | #echo "All image information:" 46 | #sudo docker images 47 | echo "The image($NAME_IMAGE) information:" 48 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | VERSION_YOLOV5="5.0" 5 | PATH_WORK_TMP="yolov5" 6 | NAME_YOLOV5_ALIGN="$PATH_WORK_TMP-$VERSION_YOLOV5" 7 | FILENAME_YOLOV5_ALIGN="${NAME_YOLOV5_ALIGN}.tar.gz" 8 | #################### main #################### 9 | cp -rvf ./dependent_files/$FILENAME_YOLOV5_ALIGN ./ 10 | cp -rvf ./dependent_files/yolov5s.pt ./weights 11 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/tools/aligntorch.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import pandas as pd 4 | import requests 5 | import cv2 6 | import yaml 7 | 8 | from models.yolo import Model 9 | from models.experimental import attempt_load 10 | 11 | 12 | 13 | def pytorch_model_file_align(weights , output): 14 | model=attempt_load(weights) 15 | #print(model) 16 | torch.save(model.state_dict(), output,_use_new_zipfile_serialization=False) 17 | 18 | 19 | if __name__ == '__main__': 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--weights', type=str, default='./weights/yolov5s.pt', help='src model ') 22 | parser.add_argument('--output', type=str, default='./output/yolov5s.pth', help='dst model ') 23 | 24 | opt = parser.parse_args() 25 | print(opt) 26 | weights = opt.weights 27 | output = opt.output 28 | pytorch_model_file_align(weights, output) 29 | 30 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/tools/download-yolov5.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: build-ffmpeg-mlu.sh 5 | # UpdateDate: 2022/02/08 6 | # Description: Build ffmpeg-mlu. 7 | # Example: ./build-ffmpeg-mlu.sh 8 | # Depends: 9 | # YOLOv5(git clone https://github.com/ultralytics/yolov5 -b v5.0) 10 | # https://github.com/ultralytics/yolov5/archive/refs/tags/v5.0.tar.gz 11 | # Notes: 12 | # ------------------------------------------------------------------------------- 13 | #Font color 14 | none="\033[0m" 15 | green="\033[0;32m" 16 | red="\033[0;31m" 17 | yellow="\033[1;33m" 18 | white="\033[1;37m" 19 | #ENV 20 | WORK_DIR="/home/share" 21 | VERSION_YOLOV5="5.0" 22 | PATH_WORK_TMP="yolov5" 23 | NAME_YOLOV5_ALIGN="$PATH_WORK_TMP-$VERSION_YOLOV5" 24 | FILENAME_YOLOV5_ALIGN="${NAME_YOLOV5_ALIGN}.tar.gz" 25 | ############################################################# 26 | # 1. Download yolov5 27 | if [ -f "${FILENAME_YOLOV5_ALIGN}" ];then 28 | echo -e "${green}File(${FILENAME_YOLOV5_ALIGN}): Exists!${none}" 29 | # $FILENAME_YOLOV5_ALIGN 压缩包中已经包含了ffmpeg-mlu补丁 + ffmpeg4.2 30 | tar zxvf $FILENAME_YOLOV5_ALIGN 31 | mv $NAME_YOLOV5_ALIGN $PATH_WORK_TMP 32 | else 33 | echo -e "${green}File(${FILENAME_YOLOV5_ALIGN}): non-existent!${none}" 34 | if [ ! -d "${PATH_WORK_TMP}" ];then 35 | echo -e "${green}git clone yolov5......${none}" 36 | git clone https://github.com/ultralytics/yolov5 -b v$VERSION_YOLOV5 37 | else 38 | echo "Directory($PATH_WORK_TMP): Exists!" 
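#Note: the ".git" cleanup below runs `find ../` from inside $PATH_WORK_TMP, so it
#scans (and strips .git directories from) the calling working directory, not just
#the freshly downloaded yolov5 tree; keep this in mind if running the script from
#a directory that contains other git checkouts.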
39 | fi 40 | fi 41 | 42 | # del .git 43 | pushd $PATH_WORK_TMP 44 | find ../ -name ".git" | xargs rm -Rf 45 | popd 46 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/weights/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -vf !(*.sh) 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /pytorch/yolov5/aligntorch/weights/download-yolov5-weights.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: download-yolov5-weights.sh 5 | # UpdateDate: 2022/03/23 6 | # Description: Download YOLOv5 Weights. 7 | # Example: ./download-yolov5-weights.sh 8 | # Depends: 9 | # Weights(https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt) 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | ############################################################# 13 | # 1. Download YOLOv5 Weights. 14 | wget https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt -------------------------------------------------------------------------------- /pytorch/yolov5/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | for dir in $(ls) 6 | do 7 | if [ -d "./${dir}" ]; then #先判断是否是目录,然后再执行clean 8 | echo $dir && cd $dir && ./clean.sh && cd - 9 | fi 10 | done -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/README.md: -------------------------------------------------------------------------------- 1 | ## 适配链接:https://github.com/ultralytics/yolov5 2 | 3 | ## 1.cpu推理原始模型 4 | ``` 5 | python3 convertmodel.py --arg cpu 6 | ``` 7 | 8 | ## 2.模型量化 9 | ``` 10 | python3 convertmodel.py --arg quant 11 | ``` 12 | 13 | ## 3.mlu融合推理及生成离线模型 14 | ``` 15 | python3 convertmodel.py --arg mfus --genoff true 16 | ``` 17 | 执行后会在当前路径下生成.cambricon离线模型文件,可在yolov5-off_test-20210727中使用 18 | 19 | ## 4.FAQ 20 | 1) 使用自己训练的模型,需要注意替换使用的yaml及pth文件 21 | --convertmodel.py 22 | ``` 23 | class DetectMark(): 24 | def __init__(self, opt): 25 | # initialize models 26 | ModelPath = None 27 | self.running_mode = opt.arg 28 | self.gen_off = opt.genoff 29 | if ModelPath is not None: 30 | self.model_path = ModelPath 31 | else: 32 | if self.running_mode == 'cpu' or self.running_mode == 'quant': 33 | #self.model_path = './weights/nozip.pt' 34 | self.model_path = './yolov5m-state-31.pth' 35 | else: 36 | self.model_path = './yolov5_quant.pth' 37 | self.model = Model('./models/yolov5m.yaml') 38 | ``` 39 | 40 | 2) 依照自己的代码,修改/models下yolo.py及common.py的参数 -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf *_result* 7 | rm -rf *.cambricon* 8 | rm -rf *_quant.pth 9 | shopt -u extglob 10 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/data/bus.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/data/bus.jpg -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__init__.py -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/common.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/experimental.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/models/__pycache__/yolo.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/hub/yolov3-spp.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3-SPP head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, SPP, [512, [5, 9, 13]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 
1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/hub/yolov3-tiny.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,14, 23,27, 37,58] # P4/16 9 | - [81,82, 135,169, 344,319] # P5/32 10 | 11 | # YOLOv3-tiny backbone 12 | backbone: 13 | # [from, number, module, args] 14 | [[-1, 1, Conv, [16, 3, 1]], # 0 15 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 16 | [-1, 1, Conv, [32, 3, 1]], 17 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 18 | [-1, 1, Conv, [64, 3, 1]], 19 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 20 | [-1, 1, Conv, [128, 3, 1]], 21 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 22 | [-1, 1, Conv, [256, 3, 1]], 23 | [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 24 | [-1, 1, Conv, [512, 3, 1]], 25 | [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 26 | [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 27 | ] 28 | 29 | # YOLOv3-tiny head 30 | head: 31 | [[-1, 1, Conv, [1024, 3, 1]], 32 | [-1, 1, Conv, [256, 1, 1]], 33 | [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) 34 | 35 | [-2, 1, Conv, [128, 1, 1]], 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 38 | [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) 39 | 40 | [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) 41 | ] 42 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/hub/yolov3.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # darknet53 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Conv, [32, 3, 1]], # 0 16 | [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 17 | [-1, 1, Bottleneck, [64]], 18 | [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 19 | [-1, 2, Bottleneck, [128]], 20 | [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 21 | [-1, 8, Bottleneck, [256]], 22 | [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 23 | [-1, 8, Bottleneck, [512]], 24 | [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 25 | [-1, 4, Bottleneck, [1024]], # 10 26 | ] 27 | 28 | # YOLOv3 head 29 | head: 30 | [[-1, 1, Bottleneck, [1024, False]], 31 | [-1, 1, Conv, [512, [1, 1]]], 32 | [-1, 1, Conv, [1024, 3, 1]], 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) 35 | 36 | [-2, 1, Conv, [256, 1, 1]], 37 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 38 | [[-1, 8], 1, Concat, [1]], # cat backbone P4 39 | [-1, 1, Bottleneck, [512, False]], 40 | [-1, 1, Bottleneck, [512, False]], 41 | [-1, 1, Conv, [256, 1, 1]], 42 | [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) 43 | 44 | [-2, 1, Conv, [128, 1, 1]], 45 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 46 | [[-1, 6], 1, Concat, [1]], # cat backbone P3 47 | [-1, 1, Bottleneck, [256, False]], 48 | [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) 49 | 50 | [[27, 22, 15], 1, 
Detect, [nc, anchors]], # Detect(P3, P4, P5) 51 | ] 52 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/hub/yolov5-fpn.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, Bottleneck, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 6, BottleneckCSP, [1024]], # 9 25 | ] 26 | 27 | # YOLOv5 FPN head 28 | head: 29 | [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) 30 | 31 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 32 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 33 | [-1, 1, Conv, [512, 1, 1]], 34 | [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) 35 | 36 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 37 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 38 | [-1, 1, Conv, [256, 1, 1]], 39 | [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) 40 | 41 | [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 42 | ] 43 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/hub/yolov5-panet.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, BottleneckCSP, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, BottleneckCSP, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 PANet head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, BottleneckCSP, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- 
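(Note on the model configs that follow: the yolov5l/yolov5m/yolov5s yaml files in this directory keep the same backbone/head graph and differ only in the two scaling factors -- depth_multiple/width_multiple are 1.0/1.0 for yolov5l, 0.67/0.75 for yolov5m and 0.33/0.50 for yolov5s.)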
/pytorch/yolov5/convertmodel/models/yolov5l.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 1.0 # model depth multiple 4 | width_multiple: 1.0 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/yolov5m.yaml: -------------------------------------------------------------------------------- 1 | # parameters 2 | nc: 80 # number of classes 3 | depth_multiple: 0.67 # model depth multiple 4 | width_multiple: 0.75 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, BottleneckCSP, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, BottleneckCSP, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, BottleneckCSP, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, BottleneckCSP, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, BottleneckCSP, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/yolov5s.yaml: 
-------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/yolov5s.yaml-bak: -------------------------------------------------------------------------------- 1 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license 2 | 3 | # Parameters 4 | nc: 80 # number of classes 5 | depth_multiple: 0.33 # model depth multiple 6 | width_multiple: 0.50 # layer channel multiple 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/models/yolov5x.yaml: -------------------------------------------------------------------------------- 
1 | # parameters 2 | nc: 1 # number of classes 3 | depth_multiple: 1.33 # model depth multiple 4 | width_multiple: 1.25 # layer channel multiple 5 | 6 | # anchors 7 | anchors: 8 | - [10,13, 16,30, 33,23] # P3/8 9 | - [30,61, 62,45, 59,119] # P4/16 10 | - [116,90, 156,198, 373,326] # P5/32 11 | 12 | # YOLOv5 backbone 13 | backbone: 14 | # [from, number, module, args] 15 | [[-1, 1, Focus, [64, 3]], # 0-P1/2 16 | [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 17 | [-1, 3, C3, [128]], 18 | [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 19 | [-1, 9, C3, [256]], 20 | [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 21 | [-1, 9, C3, [512]], 22 | [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 23 | [-1, 1, SPP, [1024, [5, 9, 13]]], 24 | [-1, 3, C3, [1024, False]], # 9 25 | ] 26 | 27 | # YOLOv5 head 28 | head: 29 | [[-1, 1, Conv, [512, 1, 1]], 30 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 31 | [[-1, 6], 1, Concat, [1]], # cat backbone P4 32 | [-1, 3, C3, [512, False]], # 13 33 | 34 | [-1, 1, Conv, [256, 1, 1]], 35 | [-1, 1, nn.Upsample, [None, 2, 'nearest']], 36 | [[-1, 4], 1, Concat, [1]], # cat backbone P3 37 | [-1, 3, C3, [256, False]], # 17 (P3/8-small) 38 | 39 | [-1, 1, Conv, [256, 3, 2]], 40 | [[-1, 14], 1, Concat, [1]], # cat head P4 41 | [-1, 3, C3, [512, False]], # 20 (P4/16-medium) 42 | 43 | [-1, 1, Conv, [512, 3, 2]], 44 | [[-1, 10], 1, Concat, [1]], # cat head P5 45 | [-1, 3, C3, [1024, False]], # 23 (P5/32-large) 46 | 47 | [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) 48 | ] 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/quant.py: -------------------------------------------------------------------------------- 1 | import torch_mlu 2 | import torch_mlu.core.mlu_model as ct 3 | import torch_mlu.core.mlu_quantize as mlu_quantize 4 | import torch 5 | import torch.nn as nn 6 | import torch.optim as optim 7 | import torchvision 8 | import numpy as np 9 | import torchvision.transforms as transforms 10 | import argparse 11 | from collections import OrderedDict 12 | from PIL import Image 13 | import cv2 14 | import os 15 | import yolo 16 | 17 | if __name__ == "__main__": 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('--cfg', type=str, default='yolov5s.yaml',help='model.yaml') 20 | parser.add_argument('--device', default='cpu',help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') 21 | opt = parser.parse_args() 22 | # Get the yolov5 network from the config file 23 | net = yolo.get_model(opt) 24 | # Configure the quantization parameters 25 | qconfig={'iteration': 1, 'use_avg':False, 'data_scale':1.0, 'firstconv':False, 'per_channel': False} 26 | # Call the quantization interface 27 | quantized_net = mlu_quantize.quantize_dynamic_mlu(net.float(),qconfig_spec=qconfig, dtype='int8', gen_quant=True) 28 | # Switch to inference (eval) mode 29 | quantized_net = quantized_net.eval().float() 30 | # Preprocess the input image 31 | img_mat = Image.open("./images/image.jpg") 32 | if img_mat.mode != 'RGB': 33 | img_mat = img_mat.convert('RGB') 34 | crop = 960 35 | resize = 960 36 | transform = transforms.Compose([ 37 | transforms.Resize(resize), 38 | transforms.CenterCrop(crop), 39 | transforms.ToTensor(), 40 | ]) 41 | img = transform(img_mat) 42 | im_tensor = torch.unsqueeze(img, 0) 43 | im_tensor = im_tensor.float() 44 | print(im_tensor.shape) 45 | # Run a forward pass to generate the quantization statistics 46 | quantized_net(im_tensor) 47 | # Save the quantized model 48 | torch.save(quantized_net.state_dict(), './yolov5s_int8.pt') 49 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/run_convertmodel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: run_convertmodel.sh 5 | # UpdateDate: 2021/09/24 6 | # Description: Convert the model to a Cambricon offline model. 7 | # Example: ./run_convertmodel.sh 8 | # Depends: pytorch 1.3 docker 18.04 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | #################### main #################### 12 | # 0.Check param 13 | #1. CPU inference 14 | python3 convertmodel.py --arg cpu 15 | 16 | #2. Model quantization 17 | python3 convertmodel.py --arg quant 18 | 19 | #3. MLU fusion inference and offline model generation 20 | python3 convertmodel.py --arg mfus --genoff true 21 | 22 | #4. Result 23 | echo "======== convert model finish ! 
======== " 24 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__init__.py -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/activations.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/autoanchor.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/datasets.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/general.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-35.pyc -------------------------------------------------------------------------------- 
/pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/google_utils.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/loss.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/loss.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/metrics.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/plots.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-35.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-36.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/convertmodel/utils/__pycache__/torch_utils.cpython-37.pyc -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/google_app_engine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/google-appengine/python 2 | 3 | # Create a virtualenv for dependencies. This isolates these packages from 4 | # system-level packages. 5 | # Use -p python3 or -p python3.7 to select python version. Default is version 2. 6 | RUN virtualenv /env -p python3 7 | 8 | # Setting these environment variables are the same as running 9 | # source /env/bin/activate. 10 | ENV VIRTUAL_ENV /env 11 | ENV PATH /env/bin:$PATH 12 | 13 | RUN apt-get update && apt-get install -y python-opencv 14 | 15 | # Copy the application's requirements.txt and run pip to install all 16 | # dependencies into the virtualenv. 17 | ADD requirements.txt /app/requirements.txt 18 | RUN pip install -r /app/requirements.txt 19 | 20 | # Add the application source code. 21 | ADD . /app 22 | 23 | # Run a WSGI server to serve the application. gunicorn must be declared as 24 | # a dependency in requirements.txt. 
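# Note: $PORT is provided by the App Engine flexible environment at runtime; gunicorn
# binds to it and serves the WSGI object `app` exposed by main.py.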
25 | CMD gunicorn -b :$PORT main:app 26 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/google_app_engine/additional_requirements.txt: -------------------------------------------------------------------------------- 1 | # add these requirements in your app on top of the existing ones 2 | pip==18.1 3 | Flask==1.0.2 4 | gunicorn==19.9.0 5 | -------------------------------------------------------------------------------- /pytorch/yolov5/convertmodel/utils/google_app_engine/app.yaml: -------------------------------------------------------------------------------- 1 | runtime: custom 2 | env: flex 3 | 4 | service: yolov5app 5 | 6 | liveness_check: 7 | initial_delay_sec: 600 8 | 9 | manual_scaling: 10 | instances: 1 11 | resources: 12 | cpu: 1 13 | memory_gb: 4 14 | disk_size_gb: 20 -------------------------------------------------------------------------------- /pytorch/yolov5/offline/Makefile: -------------------------------------------------------------------------------- 1 | 2 | CFLAGS=-I /usr/local/neuware/include/ -g -O0 -std=c++11 3 | CXXFLAGS=$(CFLAGS) 4 | 5 | LDFLAGS=-L /usr/local/neuware/lib64 `pkg-config --libs opencv` 6 | 7 | CXX=g++ 8 | 9 | SRCS=$(wildcard *.cpp) 10 | OBJS=$(SRCS:%.cpp=%.o) 11 | 12 | TARGET=yolov5_offline 13 | 14 | all:$(TARGET) 15 | 16 | $(TARGET):$(OBJS) 17 | $(CXX) -o $@ $(OBJS) $(LDFLAGS) -lcnrt -lcndrv -lpthread 18 | 19 | clean: 20 | rm -f $(TARGET) $(OBJS) 21 | 22 | -------------------------------------------------------------------------------- /pytorch/yolov5/offline/README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | make clean 3 | make 4 | ./yolov5_offline ./yolov5s_int8_1b_4c.cambricon ./image.jpg ./output/1.jpg 5 | ``` -------------------------------------------------------------------------------- /pytorch/yolov5/offline/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | rm -rf output *.o yolov5_offline 7 | rm -rf *.cambricon* 8 | shopt -u extglob 9 | -------------------------------------------------------------------------------- /pytorch/yolov5/offline/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/offline/image.jpg -------------------------------------------------------------------------------- /pytorch/yolov5/offline/run_offline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: run_offline.sh 5 | # UpdateDate: 2021/09/29 6 | # Description: Offline inference. 7 | # Example: ./run_offline.sh 8 | # Depends: >= pytorch 1.8 9 | # Notes: 10 | # ------------------------------------------------------------------------------- 11 | #################### main #################### 12 | 13 | OUTPUT_DIR=$PWD/output # Directory for storing inference results 14 | MODDEL_NAME=yolov5s_int8_4b_4c.cambricon # Offline model file name 15 | 16 | # 0.Check param 17 | if [ ! -d "$OUTPUT_DIR" ] ; then 18 | mkdir -p $OUTPUT_DIR 19 | echo "Create $OUTPUT_DIR ok !" 20 | else 21 | echo "Directory ($OUTPUT_DIR): Exist!" 22 | fi 23 | 24 | if [ ! -f "$MODDEL_NAME" ] ; then 25 | echo "Model file ($MODDEL_NAME) does not exist, please download it or copy it!"
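# If the offline model is missing, fall back to copying it from the conversion output
# directory (assumed here to be ../convertmodel/, i.e. the output of run_convertmodel.sh).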
26 | cp ../convertmodel/${MODDEL_NAME} ./ 27 | fi 28 | 29 | #1.make 30 | make clean 31 | make 32 | 33 | #2. run 34 | ./yolov5_offline ${MODDEL_NAME} ./image.jpg ./${OUTPUT_DIR}/offline_result.jpg 35 | ls -la $OUTPUT_DIR/ 36 | 37 | #3.results 38 | echo "offline test finish !" 39 | -------------------------------------------------------------------------------- /pytorch/yolov5/res/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | #clean test 5 | shopt -s extglob 6 | # 7 | shopt -u extglob 8 | -------------------------------------------------------------------------------- /pytorch/yolov5/res/yolov5-performance.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/res/yolov5-performance.jpg -------------------------------------------------------------------------------- /pytorch/yolov5/res/yolov5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch/yolov5/res/yolov5.jpg -------------------------------------------------------------------------------- /pytorch1.13/chatglm3/tools/demo.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoTokenizer, AutoModel 2 | import torch 3 | import torch_mlu 4 | 5 | tokenizer = AutoTokenizer.from_pretrained("/workspace/chatglm3/models/chatglm3-6b", trust_remote_code=True) 6 | model = AutoModel.from_pretrained("/workspace/chatglm3/models/chatglm3-6b", trust_remote_code=True).mlu() 7 | model = model.eval() 8 | 9 | response, history = model.chat(tokenizer, "你好", history=[]) 10 | print(response) 11 | 12 | #response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) 13 | #33print(response) -------------------------------------------------------------------------------- /pytorch1.13/chatglm3/tools/finetune_pt_multiturn.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -ex 4 | 5 | PRE_SEQ_LEN=128 6 | LR=2e-2 7 | #NUM_GPUS=1 8 | NUM_GPUS=2 9 | MAX_SEQ_LEN=4096 10 | DEV_BATCH_SIZE=1 11 | GRAD_ACCUMULARION_STEPS=16 12 | MAX_STEP=500 13 | SAVE_INTERVAL=250 14 | 15 | DATESTR=`date +%Y%m%d-%H%M%S` 16 | RUN_NAME=tool_alpaca_pt 17 | 18 | #BASE_MODEL_PATH=THUDM/chatglm3-6b 19 | BASE_MODEL_PATH=/workspace/chatglm3/models/chatglm3-6b 20 | DATASET_PATH=formatted_data/tool_alpaca.jsonl 21 | OUTPUT_DIR=output/${RUN_NAME}-${DATESTR}-${PRE_SEQ_LEN}-${LR} 22 | 23 | mkdir -p $OUTPUT_DIR 24 | MASTER_PORT=$(shuf -n 1 -i 10000-65535) 25 | #MASTER_PORT=$(shuf -n 1 -i 5000-65535) 26 | #torchrun --standalone --nnodes=1 --nproc_per_node=$NUM_GPUS finetune.py \ 27 | python -m torch.distributed.launch --nnodes=1 --nproc_per_node=$NUM_GPUS --master_port $MASTER_PORT finetune.py \ 28 | --train_format multi-turn \ 29 | --train_file $DATASET_PATH \ 30 | --max_seq_length $MAX_SEQ_LEN \ 31 | --preprocessing_num_workers 1 \ 32 | --model_name_or_path $BASE_MODEL_PATH \ 33 | --output_dir $OUTPUT_DIR \ 34 | --per_device_train_batch_size $DEV_BATCH_SIZE \ 35 | --gradient_accumulation_steps $GRAD_ACCUMULARION_STEPS \ 36 | --max_steps $MAX_STEP \ 37 | --logging_steps 1 \ 38 | --save_steps $SAVE_INTERVAL \ 39 | --learning_rate $LR \ 40 | --pre_seq_len $PRE_SEQ_LEN 2>&1 | tee ${OUTPUT_DIR}/train.log 41 | -------------------------------------------------------------------------------- /pytorch1.13/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # 1.Source env 5 | source ./env.sh $OSVer 6 | 7 | # 2.rm docker container 8 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 9 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 10 | if [ $num_container ]; then sudo docker stop $num_container;fi 11 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 12 | if [ $num_container ]; then sudo docker rm $num_container;fi 13 | 14 | # 3.rmi docker image 15 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 16 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 17 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 18 | -------------------------------------------------------------------------------- /pytorch1.13/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2023/03/14 6 | # Description: Loading docker image for IDE. 7 | # Example: ./load-image-dev.sh 8 | # ./load-image-dev.sh ${FULLNAME_IMAGES} 9 | # Depends: pytorch-v1.10.0-torch1.9-ubuntu18.04-py37.tar.gz 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | # Source env 13 | source "./env.sh" 14 | #################### main #################### 15 | # 0.Check param 16 | if [[ $# -eq 0 ]];then 17 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}" 18 | else 19 | FULLNAME_IMAGE="${1}" 20 | fi 21 | # 0.Check File Images 22 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then 23 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1 24 | fi 25 | if [[ ! 
${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then 26 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1 27 | fi 28 | # 0.Check Docker Images 29 | num=`docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l` 30 | echo $num 31 | echo $NAME_IMAGE 32 | 33 | # 1.Load Docker Images 34 | if [ 0 -eq $num ];then 35 | echo "The image($NAME_IMAGE) is not loaded and is loading......" 36 | #load image 37 | docker load < ${FULLNAME_IMAGE} 38 | else 39 | echo "The image($NAME_IMAGE) is already loaded!" 40 | fi 41 | 42 | #echo "All image information:" 43 | #docker images 44 | echo "The image($NAME_IMAGE) information:" 45 | docker images | grep -e "REPOSITORY" -e $MY_IMAGE 46 | -------------------------------------------------------------------------------- /pytorch1.9/baichuan/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # clean 适配后的代码 5 | rm -rvf baichuan-7B_mlu transformers_mlu 6 | # 删除适配前的源码 7 | rm -rvf baichuan-7B transformers 8 | -------------------------------------------------------------------------------- /pytorch1.9/baichuan/res/baichuan-7B.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/baichuan/res/baichuan-7B.jpg -------------------------------------------------------------------------------- /pytorch1.9/baichuan/tools/inference.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoModelForCausalLM, AutoTokenizer 2 | import torch 3 | import torch_mlu 4 | torch.set_grad_enabled(False) 5 | tokenizer = AutoTokenizer.from_pretrained("./baichuan-7B", trust_remote_code=True) 6 | model = AutoModelForCausalLM.from_pretrained("./baichuan-7B", torch_dtype=torch.float16, trust_remote_code=True).to('mlu') 7 | 8 | text = input('User:') 9 | while(True): 10 | inputs = tokenizer(text, return_tensors='pt') 11 | #inputs = tokenizer('登鹳雀楼->王之涣\n夜雨寄北->', return_tensors='pt') 12 | inputs = inputs.to('mlu') 13 | pred = model.generate(**inputs, max_new_tokens=256,repetition_penalty=1.1) 14 | print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True)) 15 | text = input('User:') -------------------------------------------------------------------------------- /pytorch1.9/baichuan/torch_gpu2mlu.py: -------------------------------------------------------------------------------- 1 | /torch/src/catch/tools/torch_gpu2mlu.py -------------------------------------------------------------------------------- /pytorch1.9/chatglm/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # clean 适配后的代码 5 | rm -rvf ChatGLM-6B_mlu transformers_mlu 6 | # 删除适配前的源码 7 | rm -rvf ChatGLM-6B transformers 8 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm/res/aiknight_mlu_chatglm.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm/res/aiknight_mlu_chatglm.gif -------------------------------------------------------------------------------- /pytorch1.9/chatglm/res/aiknight_mlu_chatglm_train_cnmon.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm/res/aiknight_mlu_chatglm_train_cnmon.gif -------------------------------------------------------------------------------- /pytorch1.9/chatglm/res/chatglm-6b.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm/res/chatglm-6b.jpg -------------------------------------------------------------------------------- /pytorch1.9/chatglm/tools/ds_train_finetune_mlu.sh: -------------------------------------------------------------------------------- 1 | 2 | LR=1e-4 3 | 4 | MASTER_PORT=$(shuf -n 1 -i 10000-65535) 5 | 6 | deepspeed --num_gpus=4 --master_port $MASTER_PORT main.py \ 7 | --deepspeed deepspeed.json \ 8 | --do_train \ 9 | --train_file ../AdvertiseGen/train.json \ 10 | --test_file ../AdvertiseGen/dev.json \ 11 | --prompt_column content \ 12 | --response_column summary \ 13 | --overwrite_cache \ 14 | --model_name_or_path ../../chatglm-6b \ 15 | --output_dir ./output/adgen-chatglm-6b-ft-$LR \ 16 | --overwrite_output_dir \ 17 | --max_source_length 64 \ 18 | --max_target_length 64 \ 19 | --per_device_train_batch_size 4 \ 20 | --per_device_eval_batch_size 1 \ 21 | --gradient_accumulation_steps 1 \ 22 | --predict_with_generate \ 23 | --max_steps 5000 \ 24 | --logging_steps 10 \ 25 | --save_steps 1000 \ 26 | --learning_rate $LR \ 27 | --fp16 28 | 29 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm/tools/inference.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import signal 4 | from transformers import AutoTokenizer, AutoModel 5 | import readline 6 | import time 7 | 8 | tokenizer = AutoTokenizer.from_pretrained("../chatglm-6b", trust_remote_code=True) 9 | model = AutoModel.from_pretrained("../chatglm-6b", trust_remote_code=True).half().mlu() 10 | model = model.eval() 11 | 12 | response, history = model.chat(tokenizer, "Hi...", history=[]) 13 | print(response) 14 | 15 | for question in [ 16 | "ChatGLM-6B 是啥?", 17 | "ChatGPT 是啥?", 18 | "ChatGLM-6B 与 ChatGPT 有什么区别?" 
19 | ]: 20 | 21 | time_start = time.time() 22 | response, history = model.chat(tokenizer, question, history=history) 23 | time_end = time.time() 24 | print("==================================================") 25 | print("question: ", question) 26 | print("response: ", response) 27 | print("len(response): ", len(response)) 28 | print("time_end-time_start: ", (time_end-time_start)) 29 | print("token: ", len(response) / (time_end-time_start)) 30 | print("==================================================") 31 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm/tools/train_mlu.sh: -------------------------------------------------------------------------------- 1 | PRE_SEQ_LEN=128 2 | LR=2e-2 3 | 4 | MLU_VISIBLE_DEVICES=0 python3 main.py \ 5 | --do_train \ 6 | --train_file ../AdvertiseGen/train.json \ 7 | --validation_file ../AdvertiseGen/dev.json \ 8 | --prompt_column content \ 9 | --response_column summary \ 10 | --overwrite_cache \ 11 | --model_name_or_path ../../chatglm-6b \ 12 | --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR \ 13 | --overwrite_output_dir \ 14 | --max_source_length 64 \ 15 | --max_target_length 64 \ 16 | --per_device_train_batch_size 1 \ 17 | --per_device_eval_batch_size 1 \ 18 | --gradient_accumulation_steps 16 \ 19 | --predict_with_generate \ 20 | --max_steps 3000 \ 21 | --logging_steps 10 \ 22 | --save_steps 1000 \ 23 | --learning_rate $LR \ 24 | --pre_seq_len $PRE_SEQ_LEN #\ 25 | #--quantization_bit 4 26 | 27 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm/tools/train_parallel_mlu.sh: -------------------------------------------------------------------------------- 1 | PRE_SEQ_LEN=8 2 | LR=1e-2 3 | 4 | python3 main_parallel.py \ 5 | --do_train \ 6 | --train_file ../AdvertiseGen/train.json \ 7 | --validation_file ../AdvertiseGen/dev.json \ 8 | --prompt_column content \ 9 | --response_column summary \ 10 | --overwrite_cache \ 11 | --model_name_or_path ../../chatglm-6b \ 12 | --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR \ 13 | --overwrite_output_dir \ 14 | --max_source_length 64 \ 15 | --max_target_length 64 \ 16 | --per_device_train_batch_size 4 \ 17 | --per_device_eval_batch_size 1 \ 18 | --gradient_accumulation_steps 1 \ 19 | --predict_with_generate \ 20 | --max_steps 3000 \ 21 | --logging_steps 10 \ 22 | --save_steps 1000 \ 23 | --learning_rate $LR \ 24 | --pre_seq_len $PRE_SEQ_LEN #\ 25 | #--quantization_bit 8 26 | 27 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm/tools/version.py: -------------------------------------------------------------------------------- 1 | # -*- encoding:utf-8 -*- 2 | import os 3 | import torch 4 | import subprocess 5 | import numpy as np 6 | import transformers 7 | 8 | # 设置环境变量 9 | #os.environ['MLU_VISIBLE_DEVICES']="8,9" 10 | # 获取环境变量方法1 11 | #os.environ.get('WORKON_HOME') 12 | #获取环境变量方法2(推荐使用这个方法) 13 | #os.getenv('path') 14 | # 删除环境变量 15 | del os.environ['MLU_VISIBLE_DEVICES'] 16 | 17 | print(f"torch version: {torch.__version__}") 18 | print(f"transformers version: {transformers.__version__}") 19 | os.system("cnmon version") 20 | version_cndsp = subprocess.check_output("pip show cndsp | grep -E Version | awk -F ': ' '{print $2}'", shell=True) 21 | #os.system("pip show cndsp | grep -E Version | awk -F ': ' '{print $2}'") 22 | print(f"cndsp version: {version_cndsp}") 23 | 24 | -------------------------------------------------------------------------------- 
/pytorch1.9/chatglm/torch_gpu2mlu.py: -------------------------------------------------------------------------------- 1 | /torch/src/catch/tools/torch_gpu2mlu.py -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # clean 适配后的代码 5 | rm -rvf ChatGLM2-6B_mlu transformers_mlu 6 | # 删除适配前的源码 7 | rm -rvf ChatGLM2-6B transformers 8 | -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/res/aiknight_mlu_chatglm2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm2/res/aiknight_mlu_chatglm2.gif -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/res/aiknight_mlu_chatglm2_train_cnmon.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm2/res/aiknight_mlu_chatglm2_train_cnmon.gif -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/res/chatglm2-6b.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chatglm2/res/chatglm2-6b.jpg -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/tools/demo.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoTokenizer, AutoModel 2 | import torch 3 | import torch_mlu 4 | 5 | tokenizer = AutoTokenizer.from_pretrained("../chatglm2-6b", trust_remote_code=True) 6 | model = AutoModel.from_pretrained("../chatglm2-6b", trust_remote_code=True).half().mlu() 7 | model = model.eval() 8 | response, history = model.chat(tokenizer, "你好", history=[]) 9 | print(response) 10 | 11 | response, history = model.chat(tokenizer, "晚上睡不着应该怎么办?", history=history) 12 | print(response) 13 | 14 | response, history = model.chat(tokenizer, "中国的首都是哪里?", history=history) 15 | print(response) -------------------------------------------------------------------------------- /pytorch1.9/chatglm2/tools/profiler_demo.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoTokenizer, AutoModel 2 | import torch 3 | import torch_mlu 4 | 5 | tokenizer = AutoTokenizer.from_pretrained("/workspace/chatglm2/models/chatglm2-6b", trust_remote_code=True) 6 | model = AutoModel.from_pretrained("/workspace/chatglm2/models/chatglm2-6b", trust_remote_code=True).half().mlu() 7 | model = model.eval() 8 | with torch.profiler.profile( 9 | activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.MLU], 10 | schedule=torch.profiler.schedule(wait=0, warmup=0, active=1), 11 | record_shapes=True, 12 | with_stack=False, 13 | with_flops=True, 14 | on_trace_ready=torch.profiler.tensorboard_trace_handler("mlu_log") 15 | ) as prof: 16 | response, history = model.chat(tokenizer, "你好", history=[]) 17 | prof.step() 18 | print(response) 19 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/clean.sh: 
-------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | # clean 适配后的代码 5 | rm -rvf Chinese-LLaMA-Alpaca-2_mlu open-codes-mlu 6 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/res/README.md: -------------------------------------------------------------------------------- 1 | **[资源独立存放路径](https://gitee.com/cambriconknight/dev-open-res/tree/main/dev-env-ubuntu/pytorch1.9/chinese-llama-alpaca-2/res)** -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/res/chinese-llama-alpaca-2-sys-s.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chinese-llama-alpaca-2/res/chinese-llama-alpaca-2-sys-s.gif -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/res/chinese-llama-alpaca-2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/chinese-llama-alpaca-2/res/chinese-llama-alpaca-2.gif -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/merge_pretrain_model_13b.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: merge_pretrain_model_13b.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 合并lora权重 得到最终权重 用于推理。final_model == pretrain_model + train_output_pretraining_13b(sample_lora_pt_13b/adapter_model.bin) 8 | # Example: ./merge_pretrain_model_13b.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | #export Model_DIR=/workspace/chinese-llama-alpaca-2/models 13 | #export chinese_alpaca_2_model_pretrain_done_13b=${Model_DIR}/chinese_alpaca_2_model_pretrain_done_13b 14 | pretrain_model=${chinese_alpaca_2_model_to_train_13b} 15 | final_model=${chinese_alpaca_2_model_pretrain_done_13b} 16 | 17 | # 合并lora权重 得到最终权重 用于推理 18 | echo "Cambricon Running STEP3_MERGE_TRAIN_MODEL......" 19 | checkpoints=`ls -l ${train_output_pretraining_13b} | awk '{print $9}' | grep "checkpoint"` 20 | #mv ${train_output_pretraining_13b}/${checkpoints}/pytorch_model.bin sample_lora_pt_13b/adapter_model.bin 21 | mv ${train_output_pretraining_13b}/${checkpoints}/pytorch_model.bin sample_lora_13b/adapter_model.bin 22 | 23 | echo "Debug INFO:mv done!" 
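# The following merges the base model (${pretrain_model}) with the LoRA adapter just moved
# into cambricon/sample_lora_13b, and saves the merged weights in Hugging Face format to ${final_model}.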
24 | pushd ../ 25 | python scripts/merge_llama2_with_chinese_lora_low_mem_2.py \ 26 | --base_model ${pretrain_model} \ 27 | --lora_model cambricon/sample_lora_13b \ 28 | --output_type huggingface \ 29 | --output_dir ${final_model} 30 | popd 31 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/merge_trainmodel_13b.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: merge_trainmodel_13b.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 合并lora权重 得到最终权重 用于推理。final_model == pretrain_model + train_output_finetuning_13b(sample_lora_13b/adapter_model.bin) 8 | # Example: ./merge_trainmodel_13b.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | #export Model_DIR=/workspace/chinese-llama-alpaca-2/models 13 | #export chinese_alpaca_2_model_train_done_13b=${Model_DIR}/chinese_alpaca_2_model_train_done_13b 14 | pretrain_model=${chinese_alpaca_2_model_to_train_13b} 15 | final_model=${chinese_alpaca_2_model_train_done_13b} 16 | 17 | # 合并lora权重 得到最终权重 用于推理 18 | echo "Cambricon Running STEP3_MERGE_TRAIN_MODEL......" 19 | checkpoints=`ls -l ${train_output_finetuning_13b} | awk '{print $9}' | grep "checkpoint"` 20 | mv ${train_output_finetuning_13b}/${checkpoints}/pytorch_model.bin sample_lora_13b/adapter_model.bin 21 | echo "Debug INFO:mv done!" 22 | pushd ../ 23 | python scripts/merge_llama2_with_chinese_lora_low_mem_2.py \ 24 | --base_model ${pretrain_model} \ 25 | --lora_model cambricon/sample_lora_13b \ 26 | --output_type huggingface \ 27 | --output_dir ${final_model} 28 | popd 29 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_inference_13b.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_inference_13b.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 推理脚本 8 | # Example: ./run_inference_13b.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | # fp16 13 | export MLU_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 14 | 15 | # 13b模型单芯即可运行 16 | #model_path=/data/models/chinese-alpaca-2-13b 17 | #cp -rvf /data/models/chinese-alpaca-2-13b /workspace/chinese-llama-alpaca-2/models/chinese_alpaca_2_model_to_train_13b 18 | #官网预训练模型chinese-alpaca-2-13b 19 | model_path=${chinese_alpaca_2_model_to_train_13b} 20 | 21 | cd ../scripts/inference/ 22 | python gradio_demo.py --base_model ${model_path} --gpus 0,1 23 | # --gpu自动分配,根据:scripts/inference/mlu_load_model.py 中配置的单卡最大内存容量 mlu_memory = "18GiB" 24 | 25 | cd - 26 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_inference_13b_2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_inference_13b_2.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 推理脚本 8 | # Example: ./run_inference_13b_2.sh 9 | # Depends: 10 | # Notes: 11 | # 
------------------------------------------------------------------------------- 12 | # fp16 13 | export MLU_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 14 | 15 | #微调+Merge后的模型 16 | model_path=${chinese_alpaca_2_model_train_done_13b} 17 | #model_path=/workspace/chinese-llama-alpaca-2/models/chinese_alpaca_2_model_train_done_13b 18 | 19 | cd ../scripts/inference/ 20 | python gradio_demo.py --base_model ${model_path} --gpus 0,1 21 | # --gpu自动分配,根据:scripts/inference/mlu_load_model.py 中配置的单卡最大内存容量 mlu_memory = "18GiB" 22 | 23 | cd - 24 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_inference_13b_2_pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_inference_13b_2_pretrain.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 针对pretrain之后模型的推理脚本 8 | # Example: ./run_inference_13b_2_pretrain.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | # fp16 13 | export MLU_VISIBLE_DEVICES=0,1 14 | 15 | # 预训练+Merge后的模型 16 | model_path=${chinese_alpaca_2_model_pretrain_done_13b} 17 | #model_path=/workspace/chinese-llama-alpaca-2/models/chinese_alpaca_2_model_pretrain_done_13b 18 | 19 | cd ../scripts/inference/ 20 | python gradio_demo.py --base_model ${model_path} --gpus 0,1 21 | # --gpu自动分配,根据:scripts/inference/mlu_load_model.py 中配置的单卡最大内存容量 mlu_memory = "18GiB" 22 | 23 | cd - 24 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_mlu_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_mlu_eval.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 精度评测脚本 8 | # Example: ./run_mlu_eval.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | set -ex 13 | # 请注意 7b模型指定1芯即可 7b以上模型需要指定2芯.,13b测试需要2卡,目前测试还有问题。 14 | export MLU_VISIBLE_DEVICES=0,1 15 | pushd ../scripts/ceval 16 | 17 | # 官方模型 18 | model_path=${chinese_alpaca_2_model_to_train_13b} 19 | # 训练后模型 20 | #model_path=${chinese_alpaca_2_model_train_done_13b} 21 | 22 | python eval.py \ 23 | --model_path ${model_path} \ 24 | --cot False \ 25 | --few_shot False \ 26 | --with_prompt True \ 27 | --constrained_decoding True \ 28 | --temperature 0.2 \ 29 | --n_times 1 \ 30 | --ntrain 5 \ 31 | --do_save_csv False \ 32 | --do_test False \ 33 | --output_dir ${eval_output} 34 | popd 35 | 36 | 37 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_mlu_eval_2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_mlu_eval_2.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 微调+merge完后,精度评测脚本 8 | # Example: ./run_mlu_eval_2.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | # 请注意 7b模型指定1芯即可 7b以上模型需要指定2芯.,13b测试需要2卡,目前测试还有问题。 13 | export MLU_VISIBLE_DEVICES=0,1 14 | pushd ../scripts/ceval 15 | 16 | # 官方模型 17 | 
#model_path=${chinese_alpaca_2_model_to_train_13b} 18 | # 训练后模型 19 | model_path=${chinese_alpaca_2_model_train_done_13b} 20 | 21 | python eval.py \ 22 | --model_path ${model_path} \ 23 | --cot False \ 24 | --few_shot False \ 25 | --with_prompt True \ 26 | --constrained_decoding True \ 27 | --temperature 0.2 \ 28 | --n_times 1 \ 29 | --ntrain 5 \ 30 | --do_save_csv False \ 31 | --do_test False \ 32 | --output_dir /workspace/chinese-llama-alpaca-2/Chinese-LLaMA-Alpaca-2_mlu/eval_output 33 | popd 34 | 35 | 36 | -------------------------------------------------------------------------------- /pytorch1.9/chinese-llama-alpaca-2/tools/run_mlu_eval_2_pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | #set -e 4 | # ------------------------------------------------------------------------------- 5 | # Filename: run_mlu_eval_2_pretrain.sh 6 | # UpdateDate: 2023/10/14 7 | # Description: 预训练+merge完后,精度评测脚本 8 | # Example: ./run_mlu_eval_2_pretrain.sh 9 | # Depends: 10 | # Notes: 11 | # ------------------------------------------------------------------------------- 12 | # 请注意 7b模型指定1芯即可 7b以上模型需要指定2芯.,13b测试需要2卡,目前测试还有问题。 13 | export MLU_VISIBLE_DEVICES=0,1 14 | pushd ../scripts/ceval 15 | 16 | # 训练后模型 17 | model_path=${chinese_alpaca_2_model_pretrain_done_13b} 18 | 19 | python eval.py \ 20 | --model_path ${model_path} \ 21 | --cot False \ 22 | --few_shot False \ 23 | --with_prompt True \ 24 | --constrained_decoding True \ 25 | --temperature 0.2 \ 26 | --n_times 1 \ 27 | --ntrain 5 \ 28 | --do_save_csv False \ 29 | --do_test False \ 30 | --output_dir /workspace/chinese-llama-alpaca-2/Chinese-LLaMA-Alpaca-2_mlu/eval_output_pretrain 31 | popd 32 | 33 | 34 | -------------------------------------------------------------------------------- /pytorch1.9/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | #Dockerfile(16.04/18.04/CentOS) 5 | #OSVer="16.04" 6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi 7 | # 1.Source env 8 | source ./env.sh $OSVer 9 | 10 | # 2.rm docker container 11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'` 13 | if [ $num_container ]; then sudo docker stop $num_container;fi 14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'` 15 | if [ $num_container ]; then sudo docker rm $num_container;fi 16 | 17 | # 3.rmi docker image 18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'` 19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'` 20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi 21 | -------------------------------------------------------------------------------- /pytorch1.9/docker/clean.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | set -e 3 | 4 | rm -rvf *.tar* 5 | -------------------------------------------------------------------------------- /pytorch1.9/load-image-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # ------------------------------------------------------------------------------- 4 | # Filename: load-image-dev.sh 5 | # UpdateDate: 2023/03/14 6 | # Description: Loading docker image for IDE. 
7 | # Example: ./load-image-dev.sh
8 | #          ./load-image-dev.sh ${FULLNAME_IMAGES}
9 | # Depends: pytorch-v1.10.0-torch1.9-ubuntu18.04-py37.tar.gz
10 | # Notes:
11 | # -------------------------------------------------------------------------------
12 | # Source env
13 | source "./env.sh"
14 | #################### main ####################
15 | # 0.Check param
16 | if [[ $# -eq 0 ]];then
17 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}"
18 | else
19 | FULLNAME_IMAGE="${1}"
20 | fi
21 | # 0.Check File Images
22 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then
23 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1
24 | fi
25 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then
26 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1
27 | fi
28 | # 0.Check Docker Images
29 | num=`docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l`
30 | echo $num
31 | echo $NAME_IMAGE
32 | 
33 | # 1.Load Docker Images
34 | if [ 0 -eq $num ];then
35 | echo "The image($NAME_IMAGE) is not loaded and is loading......"
36 | #load image
37 | docker load < ${FULLNAME_IMAGE}
38 | else
39 | echo "The image($NAME_IMAGE) is already loaded!"
40 | fi
41 | 
42 | #echo "All image information:"
43 | #docker images
44 | echo "The image($NAME_IMAGE) information:"
45 | docker images | grep -e "REPOSITORY" -e $MY_IMAGE
--------------------------------------------------------------------------------
/pytorch1.9/openbiomed/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | # Clean the adapted code
5 | rm -rvf /workspace/openbiomed
6 | 
--------------------------------------------------------------------------------
/pytorch1.9/openbiomed/res/aiknight_openbiomed_cnmon.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/openbiomed/res/aiknight_openbiomed_cnmon.gif
--------------------------------------------------------------------------------
/pytorch1.9/openbiomed/res/openbiomed.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/pytorch1.9/openbiomed/res/openbiomed.gif
--------------------------------------------------------------------------------
/pytorch1.9/stdc-seg/README_MLU.md:
--------------------------------------------------------------------------------
1 | 
2 | **This tutorial is for learning and for getting the workflow running only; it does not guarantee accuracy and is not intended for commercial use.**
3 | 
4 | # 1. Environment
5 | See [../README.md](../README.md)
6 | 
7 | # 2. Model Inference
8 | 
9 | ## 2.1. Download the Code
10 | ```bash
11 | # Inside the container, switch to the working directory
12 | cd /home/share/pytorch1.9/stdc-seg
13 | # Clone
14 | git clone https://github.com/MichaelFan01/stdc-seg.git -b master
15 | cd stdc-seg && git checkout 59ff37fbd693b99972c76fcefe97caa14aeb619f
16 | # Install dependencies; errors here can be ignored
17 | pip install --upgrade pip
18 | pip install -r requirements.txt
19 | ```
20 | 
21 | ## 2.2. Migrate the Code
22 | Use the `torch_gpu2mlu.py` tool to migrate the GPU model scripts so that they run on MLU devices; the converted scripts run on MLU devices only. The tool converts the model scripts and, since many locations are modified, it also reports statistics on the modified locations so that developers can migrate quickly.
23 | - Run the following commands inside the container
24 | ```bash
25 | cd /home/share/pytorch1.9/stdc-seg
26 | # Create a symlink
27 | ln -s /torch/src/catch/tools/torch_gpu2mlu.py ./
28 | # Run the model script conversion
29 | python torch_gpu2mlu.py -i stdc-seg
30 | # List the directories; the stdc-seg_mlu folder contains the converted code.
31 | ls -la stdc-seg stdc-seg_mlu
32 | ```
33 | 
34 | - Conversion output
35 | ```bash
36 | # Cambricon PyTorch Model Migration Report
37 | Official PyTorch model scripts: /home/share/pytorch1.9/stdc-seg/stdc-seg
38 | Cambricon PyTorch model scripts: /home/share/pytorch1.9/stdc-seg/stdc-seg_mlu
39 | Migration Report: /home/share/pytorch1.9/stdc-seg/stdc-seg_mlu/report.md
40 | ```
41 | 
42 | ## 2.3. Download the Model
43 | 
44 | 
45 | ## 2.4. Model Inference
46 | 
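The migration described above is mechanical: CUDA-specific calls and device strings are rewritten to their MLU counterparts and a torch_mlu import is added, with the touched locations summarized in the generated report.md. A minimal, hypothetical before/after sketch of that kind of rewrite (not taken from the STDC-Seg sources; the exact changes depend on the tool version):

```python
# Hypothetical illustration of a GPU-to-MLU rewrite; the torch_mlu import and
# the "mlu" device string are assumptions based on the Cambricon PyTorch container.
import torch
import torch_mlu  # added by the migration; registers the "mlu" device type

# Before migration (GPU):
#   net = torch.nn.Linear(4, 2).cuda()
#   x = torch.randn(1, 4).cuda()

# After migration (MLU):
net = torch.nn.Linear(4, 2).to("mlu")
x = torch.randn(1, 4).to("mlu")
y = net(x)
print(y.cpu())
```

Reviewing report.md after the conversion is a quick way to find the locations that may still need manual attention.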
--------------------------------------------------------------------------------
/pytorch1.9/stdc-seg/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | # clean
5 | rm -rvf stdc-seg stdc-seg_mlu
6 | 
--------------------------------------------------------------------------------
/pytorch1.9/stdc-seg/torch_gpu2mlu.py:
--------------------------------------------------------------------------------
1 | /torch/src/catch/tools/torch_gpu2mlu.py
--------------------------------------------------------------------------------
/pytorch1.9/yolact/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | # rm yolact yolact_mlu
5 | rm -rvf yolact yolact_mlu
6 | 
--------------------------------------------------------------------------------
/pytorch1.9/yolact/torch_gpu2mlu.py:
--------------------------------------------------------------------------------
1 | /torch/src/catch/tools/torch_gpu2mlu.py
--------------------------------------------------------------------------------
/res/aiknight_cars_6_20.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/aiknight_cars_6_20.gif
--------------------------------------------------------------------------------
/res/aiknight_cnmon_3226_20.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/aiknight_cnmon_3226_20.gif
--------------------------------------------------------------------------------
/res/aiknight_wechat_344.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/aiknight_wechat_344.jpg
--------------------------------------------------------------------------------
/res/cambricon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/cambricon.jpg
--------------------------------------------------------------------------------
/res/dev-env-ubuntu-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/dev-env-ubuntu-1.png
--------------------------------------------------------------------------------
/res/note.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/res/note.gif
--------------------------------------------------------------------------------
/tensorflow/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | #Dockerfile(16.04/18.04/CentOS)
5 | #OSVer="16.04"
6 | #if [[ $# -ne 0 ]];then OSVer="${1}";fi
7 | # 1.Source env
8 | source ./env.sh $OSVer
9 | 
10 | # 2.rm docker container
11 | #sudo docker stop `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'`
12 | num_container=`sudo docker ps -a | grep ${MY_CONTAINER} | awk '{print $1}'`
13 | if [ $num_container ]; then sudo docker stop $num_container;fi
14 | #sudo docker rm `sudo docker ps -a | grep container-ubuntu16.04-caffe-v1.6.0 | awk '{print $1}'`
15 | if [ $num_container ]; then sudo docker rm $num_container;fi
16 | 
17 | # 3.rmi docker image
18 | #sudo docker rmi `sudo docker images | grep cam/ubuntu16.04-caffe | awk '{print $3}'`
19 | #num_images=`sudo docker images | grep ${MY_IMAGE} | grep ${VERSION} | awk '{print $3}'`
20 | #if [ $num_images ]; then sudo docker rmi $num_images;fi
21 | 
--------------------------------------------------------------------------------
/tensorflow/load-image-dev.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | # -------------------------------------------------------------------------------
4 | # Filename: load-image-dev.sh
5 | # UpdateDate: 2021/08/04
6 | # Description: Loading docker image for IDE.
7 | # Example: ./load-image-dev.sh
8 | # Depends: tensorflow-1.4.0-2-tf1-ubuntu18.04-py3.tar
9 | # Notes:
10 | # -------------------------------------------------------------------------------
11 | # Source env
12 | source "./env.sh"
13 | #################### main ####################
14 | # 0.Check param
15 | if [[ $# -eq 0 ]];then
16 | echo -e "${yellow}WARNING: Load images(${FULLNAME_IMAGE}) by default. ${none}"
17 | else
18 | FULLNAME_IMAGE="${1}"
19 | fi
20 | # 0.Check File Images
21 | if [[ ! -f ${FULLNAME_IMAGE} ]]; then
22 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}) does not exist! ${none}" && exit -1
23 | fi
24 | if [[ ! ${FULLNAME_IMAGE} =~ ${FILENAME_IMAGE} ]]; then
25 | echo -e "${red}ERROR: Images(${FULLNAME_IMAGE}), please use images(fileName:${FILENAME_IMAGE})! ${none}" && exit -1
26 | fi
27 | # 0.Check Docker Images
28 | num=`sudo docker images | grep -w "$MY_IMAGE" | grep -w "$VERSION" | wc -l`
29 | echo $num
30 | echo $NAME_IMAGE
31 | 
32 | # 1.Load Docker Images
33 | if [ 0 -eq $num ];then
34 | echo "The image($NAME_IMAGE) is not loaded and is loading......"
35 | #load image
36 | sudo docker load < ${FULLNAME_IMAGE}
37 | else
38 | echo "The image($NAME_IMAGE) is already loaded!"
39 | fi
40 | 
41 | #echo "All image information:"
42 | #sudo docker images
43 | echo "The image($NAME_IMAGE) information:"
44 | sudo docker images | grep -e "REPOSITORY" -e $MY_IMAGE
45 | 
--------------------------------------------------------------------------------
/test/cnrtexec/Makefile:
--------------------------------------------------------------------------------
1 | 
2 | CFLAGS=-I /usr/local/neuware/include/ -g -std=c++11
3 | CXXFLAGS=$(CFLAGS)
4 | 
5 | LDFLAGS=-L /usr/local/neuware/lib64 -Wl,-rpath=/usr/local/neuware/lib64
6 | 
7 | CXX=g++
8 | 
9 | CNRT_DEMO_SRC=$(wildcard *.cpp)
10 | CNRT_DEMO_OBJ=$(CNRT_DEMO_SRC:%.cpp=%.o)
11 | 
12 | CNRT_DEMO=cnrtexec
13 | 
14 | all:$(CNRT_DEMO)
15 | 
16 | $(CNRT_DEMO):$(CNRT_DEMO_OBJ)
17 | 	$(CXX) -o $@ $(CNRT_DEMO_OBJ) $(LDFLAGS) -lcnrt -lpthread
18 | 
19 | clean:
20 | 	rm -f $(CNRT_DEMO) $(CNRT_DEMO_OBJ)
21 | 
22 | 
--------------------------------------------------------------------------------
/test/cnrtexec/clean.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | make clean
4 | 
5 | 
--------------------------------------------------------------------------------
/test/cnrtexec/cnrtexec:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/test/cnrtexec/cnrtexec
--------------------------------------------------------------------------------
/test/cnrtexec/cnrtexec-readme.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/test/cnrtexec/cnrtexec-readme.pdf
--------------------------------------------------------------------------------
/test/cnrtexec/cnrtexec.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/test/cnrtexec/cnrtexec.o
--------------------------------------------------------------------------------
/test/cnrtexec/main.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CambriconKnight/dev-env-ubuntu/03521fd7768bd60c5703f42e3d52d853a1085b43/test/cnrtexec/main.o
--------------------------------------------------------------------------------
/tools/add-branch4github.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | if [[ $# -eq 0 ]];then
4 | echo -e "\033[1;33m Usage: $0 [version] \033[0m"
5 | exit -1
6 | fi
7 | git branch
8 | #NameBranch="v1.7.602"
9 | NameBranch=$1
10 | #eg: git checkout -b v1.7.602
11 | git checkout -b $NameBranch
12 | #eg: git push origin v1.7.602
13 | git push origin $NameBranch
14 | git branch -a
15 | git checkout master
16 | git branch -a
17 | # Delete the local branch, eg: git branch -d v1.7.610
18 | #git branch -d $NameBranch
19 | # Delete the remote branch, eg: git push origin --delete v1.7.610
20 | #git push origin --delete $NameBranch
--------------------------------------------------------------------------------
/tools/cleanDocker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | 
5 | # Remove all stopped containers:
6 | #sudo docker container prune
7 | # or
8 | #sudo docker rm $(sudo docker ps -aq)
9 | # Remove exited docker containers
10 | sudo docker rm $(sudo docker ps -a -q --filter status=exited)
11 | 
12 | # Remove all dangling images:
13 | sudo docker image prune
14 | # or
15 | #sudo docker rmi $(sudo docker images -qf "dangling=true")
16 | 
17 | # Remove all unused volumes:
18 | #sudo docker volume prune
19 | 
20 | ## prune is a dangerous bulk-delete operation, so it asks for confirmation once. Add -f if you do not want to type y to confirm. Use with caution!
21 | 
22 | # Remove exited docker containers
23 | #sudo docker rm $(sudo docker ps -a -q --filter status=exited)
24 | 
25 | # Count docker containers
26 | #sudo docker ps -a | wc -l
27 | 
28 | # Remove all stopped containers
29 | #sudo docker container prune
30 | 
31 | # Count docker images
32 | #sudo docker images | wc -l
33 | 
34 | # Remove all images not used by any container:
35 | # docker image prune -a
--------------------------------------------------------------------------------
/tools/getFileList.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | usage()
5 | {
6 | echo "Usage:"
7 | echo " $0 [pathdir] [filelist]"
8 | echo ""
9 | echo " Parameter description:"
10 | echo " [pathdir]: Relative path of the directory"
11 | echo " [filelist]: File name for the generated result"
12 | echo " EG: ./getFileList.sh ../data/00015 file.list"
13 | }
14 | 
15 | if [ $# -lt 1 ]; then
16 | echo "[ERROR] Unknown parameter."
17 | usage
18 | exit 1
19 | fi
20 | 
21 | pathdir=$1
22 | filelist=$2
23 | 
24 | function getdir(){
25 | #echo $1
26 | for file in $1/*
27 | do
28 | if test -f $file
29 | then
30 | #echo $file
31 | arr=(${arr[*]} $file)
32 | else
33 | getdir $file
34 | fi
35 | done
36 | }
37 | 
38 | # Recursively call the function: save the path of all files in the folder to the array
39 | getdir $pathdir
40 | # Print the path of all files to file.list
41 | if [ -f "$filelist" ];then
42 | rm -f $filelist
43 | fi
44 | length=${#arr[@]}
45 | for((a=0;a<$length;a++))
46 | do
47 | echo ${arr[$a]} >> $filelist
48 | done
49 | 
50 | #Display filelist
51 | cat $filelist
52 | echo "[pathdir]: $pathdir"
53 | echo "[filelist]: $filelist"
54 | 
--------------------------------------------------------------------------------
/tools/getPicRandomDir2Dir.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | #!/usr/bin/env python
3 | 
4 | import os, random, shutil
5 | 
6 | rate=0.1  # Sampling ratio; e.g. 0.1 means picking 10 images out of every 100
7 | picknumber=16
8 | 
9 | def moveFile(fileDir):
10 |     pathDir = os.listdir(fileDir)  # List the source image directory
11 |     filenumber=len(pathDir)
12 |     #picknumber=int(filenumber*rate)  # Pick a number of images from the folder according to rate
13 |     sample = random.sample(pathDir, picknumber)  # Randomly select picknumber sample images
14 |     print (sample)
15 |     for name in sample:
16 |         shutil.move(fileDir+name, tarDir+name)
17 |     return
18 | 
19 | if __name__ == '__main__':
20 |     fileDir = "/data/datasets/COCO/val2017/"  # Source image directory
21 |     tarDir = '/data/test/easy-deploy-mlu/caffe/datasets/COCO/val2017/'  # Destination directory
22 |     moveFile(fileDir)
23 | 
--------------------------------------------------------------------------------
/tools/killMLUAllPID.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mlu_cnt=0
3 | mdev_dir="/proc/driver/cambricon/mlus"
4 | if [ ! -d "$mdev_dir" ]; then
5 |     exit 0
6 | fi
7 | 
8 | for mdid in $(ls -a $mdev_dir); do
9 |     if [ x"$mdid" != x"." -a x"$mdid" != x".." ]; then
10 |         echo $mlu_cnt
11 |         cnmon info -c $mlu_cnt | grep PID | awk -F '[:]' '{print $NF}' | xargs kill -9
12 |         mlu_cnt=$((mlu_cnt + 1))
13 |     fi
14 | done
--------------------------------------------------------------------------------
/tools/rmFileGit.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | # del .git
5 | find ../ -name ".git" | xargs rm -Rf
--------------------------------------------------------------------------------