├── README.md ├── SingleCameraTracking ├── CMakeLists.txt ├── README.md ├── src │ ├── VideoTracker.cpp │ ├── VideoTracker.h │ ├── errmsg.cpp │ ├── errmsg.h │ ├── feature │ │ ├── FeatureExtractor.cpp │ │ ├── FeatureExtractor.h │ │ ├── dataType.h │ │ ├── model.cpp │ │ └── model.h │ ├── main.cpp │ ├── matching │ │ ├── kalmanfilter.cpp │ │ ├── kalmanfilter.h │ │ ├── linear_assignment.cpp │ │ ├── linear_assignment.h │ │ ├── nn_matching.cpp │ │ ├── nn_matching.h │ │ ├── track.cpp │ │ ├── track.h │ │ ├── tracker.cpp │ │ └── tracker.h │ └── thirdPart │ │ ├── hungarianoper.cpp │ │ ├── hungarianoper.h │ │ └── munkres │ │ ├── adapters │ │ ├── adapter.cpp │ │ ├── adapter.h │ │ ├── boostmatrixadapter.cpp │ │ └── boostmatrixadapter.h │ │ ├── matrix.h │ │ ├── munkres.cpp │ │ └── munkres.h └── tools │ └── convert_coco_detection_to_mot.py ├── Track2(ReID) ├── part1_model │ ├── README.md │ ├── examples │ │ ├── cross_trihard_with_crop.py │ │ ├── multi_attribute.py │ │ ├── test_extract_attribute.py │ │ └── train_direction.py │ ├── reid │ │ ├── __init__.py │ │ ├── attribute_trainers.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── aicity_attribute.py │ │ │ ├── aicity_car196.py │ │ │ ├── complete_aicity_car196.py │ │ │ ├── small_vehicle.py │ │ │ └── vehicle_downsample.py │ │ ├── direct_evaluators.py │ │ ├── direct_trainers.py │ │ ├── dist_metric.py │ │ ├── evaluation_metrics │ │ │ ├── __init__.py │ │ │ ├── classification.py │ │ │ └── ranking.py │ │ ├── evaluators.py │ │ ├── extract_attribute.py │ │ ├── extract_direction_from_dir.py │ │ ├── extract_fea_from_dir.py │ │ ├── feature_extraction │ │ │ ├── __init__.py │ │ │ ├── cnn.py │ │ │ ├── database.py │ │ │ └── rerank.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── crossentropylabelsmooth.py │ │ │ ├── dualmatch.py │ │ │ ├── multi_attribute_loss.py │ │ │ ├── npair.py │ │ │ ├── oim.py │ │ │ └── triplet.py │ │ ├── metric_learning │ │ │ ├── __init__.py │ │ │ ├── euclidean.py │ │ │ └── kissme.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── cross_entropy_trihard.py │ │ │ ├── cross_trihard_se_resnet.py │ │ │ ├── cross_trihard_senet.py │ │ │ ├── direction.py │ │ │ ├── multi_attribute_3.py │ │ │ ├── resnet.py │ │ │ └── senet.py │ │ ├── trainers.py │ │ ├── txt_file │ │ │ ├── query.txt │ │ │ ├── test.txt │ │ │ └── validate_list.txt │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── attribute_dataset.py │ │ │ ├── dataset.py │ │ │ ├── preprocessor.py │ │ │ ├── sampler.py │ │ │ └── transforms.py │ │ │ ├── logging.py │ │ │ ├── meters.py │ │ │ ├── osutils.py │ │ │ └── serialization.py │ ├── run_test_attribute.sh │ ├── train_direction.sh │ ├── train_multi_attribute.sh │ └── train_reid_model.sh ├── part2_model │ ├── examples │ │ └── train.py │ ├── readme.md │ ├── reid │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ ├── aicity_attribute.py │ │ │ ├── aicity_car196.py │ │ │ ├── complete_aicity_car196.py │ │ │ ├── cuhk01.py │ │ │ ├── cuhk03.py │ │ │ ├── dukemtmc.py │ │ │ ├── gao_crop_train.py │ │ │ ├── market1501.py │ │ │ ├── new_complete_aicity_car196.py │ │ │ ├── new_train.py │ │ │ ├── small_vehicle.py │ │ │ └── viper.py │ │ ├── dist_metric.py │ │ ├── evaluation_metrics │ │ │ ├── __init__.py │ │ │ ├── classification.py │ │ │ └── ranking.py │ │ ├── evaluators.py │ │ ├── feature_extraction │ │ │ ├── __init__.py │ │ │ ├── cnn.py │ │ │ └── database.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ ├── mgn_loss.py │ │ │ └── xentropy_sac.py │ │ ├── lr_scheduler.py │ │ ├── metric_learning │ │ │ ├── __init__.py │ │ │ ├── euclidean.py │ │ │ └── 
kissme.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── hrnet.py │ │ │ ├── resnet_mgn.py │ │ │ └── resnet_reid.py │ │ ├── trainers.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── data │ │ │ ├── __init__.py │ │ │ ├── attribute_dataset.py │ │ │ ├── dataset.py │ │ │ ├── dataset.py.bak │ │ │ ├── preprocessor.py │ │ │ ├── sampler.py │ │ │ └── transforms.py │ │ │ ├── logging.py │ │ │ ├── meters.py │ │ │ ├── osutils.py │ │ │ └── serialization.py │ └── run_train.sh ├── part3_model │ ├── README.md │ ├── vehicle-keypoint │ │ ├── .gitignore │ │ ├── experiments │ │ │ └── veri │ │ │ │ └── resnet50 │ │ │ │ └── 256x256_d256x3_adam_lr1e-3.yaml │ │ ├── infer.sh │ │ ├── lib │ │ │ ├── Makefile │ │ │ ├── core │ │ │ │ ├── __init__.py │ │ │ │ ├── config.py │ │ │ │ ├── evaluate.py │ │ │ │ ├── function.py │ │ │ │ ├── inference.py │ │ │ │ └── loss.py │ │ │ ├── dataset │ │ │ │ ├── JointsDataset.py │ │ │ │ ├── __init__.py │ │ │ │ ├── coco.py │ │ │ │ ├── mpii.py │ │ │ │ └── veri.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ └── pose_resnet.py │ │ │ ├── nms │ │ │ │ ├── __init__.py │ │ │ │ ├── cpu_nms.c │ │ │ │ ├── cpu_nms.pyx │ │ │ │ ├── gpu_nms.cpp │ │ │ │ ├── gpu_nms.hpp │ │ │ │ ├── gpu_nms.pyx │ │ │ │ ├── nms.py │ │ │ │ ├── nms_kernel.cu │ │ │ │ └── setup.py │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── transforms.py │ │ │ │ ├── utils.py │ │ │ │ ├── vis.py │ │ │ │ └── zipreader.py │ │ ├── pose_estimation │ │ │ ├── _init_paths.py │ │ │ ├── infer.py │ │ │ ├── train.py │ │ │ └── valid.py │ │ └── train.sh │ └── vehicle-reid-keypoint │ │ ├── examples │ │ ├── data │ │ │ └── small_vehicle │ │ │ │ ├── images │ │ │ │ └── dummy.jpg │ │ │ │ ├── masks │ │ │ │ └── dummy.pkl │ │ │ │ ├── meta.json │ │ │ │ └── splits.json │ │ ├── infer.py │ │ └── train.py │ │ ├── infer.sh │ │ ├── pretrain_models │ │ └── seresnext101_base.pth.tar │ │ ├── reid │ │ ├── .trainers.py.un~ │ │ ├── __init__.py │ │ ├── datasets │ │ │ ├── __init__.py │ │ │ └── aicity.py │ │ ├── dist_metric.py │ │ ├── evaluation_metrics │ │ │ ├── __init__.py │ │ │ ├── classification.py │ │ │ └── ranking.py │ │ ├── evaluators.py │ │ ├── feature_extraction │ │ │ ├── __init__.py │ │ │ ├── cnn.py │ │ │ └── database.py │ │ ├── loss │ │ │ ├── __init__.py │ │ │ └── triplet.py │ │ ├── metric_learning │ │ │ ├── __init__.py │ │ │ ├── euclidean.py │ │ │ └── kissme.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── aicity_masks_senet.py │ │ │ └── senet.py │ │ ├── trainers.py │ │ └── utils │ │ │ ├── .serialization.py.un~ │ │ │ ├── __init__.py │ │ │ ├── data │ │ │ ├── .preprocessor.py.un~ │ │ │ ├── __init__.py │ │ │ ├── dataset.py │ │ │ ├── preprocessor.py │ │ │ ├── sampler.py │ │ │ └── transforms.py │ │ │ ├── logging.py │ │ │ ├── meters.py │ │ │ ├── osutils.py │ │ │ └── serialization.py │ │ └── train.sh └── utility_and_constraint_related_codes │ ├── concat_feature.py │ ├── direction_similarity_generation.py │ ├── gallery_track_info_combine_official_track.py │ ├── generate_result.py │ ├── get_gallery_cam_trackid_from_ori_track_info.py │ ├── group_dis_after_rerank.py │ ├── merge_feature.py │ ├── readme.md │ ├── run_add_dist.py │ ├── run_rerank.py │ ├── test_track.txt │ ├── twf_v2_after_rerank.py │ ├── twf_v2_before_rerank.py │ ├── type_punish.py │ └── type_similarity_generation.py ├── quali_1.jpg └── quali_2.jpg /README.md: -------------------------------------------------------------------------------- 1 | # Implementation of Multi-camera vehicle tracking and re-identification based on visual and spatial-temporal features for 2019 AICity Challenge 2 | This repository contains 
the source code of detection, single camera tracking and vehicle reid in our implementation for the 2019 AICity Challenge; we achieved first place in the vehicle reid task. 3 | 4 | ## Dependencies 5 | python 2.7 6 | pytorch 1.0 7 | Refer to our source code for other dependencies. 8 | 9 | ## Datasets 10 | Datasets used in our implementation are available at [datasets](https://pan.baidu.com/s/1jmvV8GiHfRkpiNlXEvbHdw) (extract code: k1m4) or [google drive](https://drive.google.com/drive/folders/1lckEFvsRVjJXsNoS3fV4m5z8BqLzOFUN?usp=sharing), which include aicity_attribute.tar.gz (used for vehicle type classification), aicity_train_direction.tar.gz (used for vehicle orientation classification), complete_aicity_car196.tar.gz (used for training vehicle reid models), gao_crop_train.tar.gz (images cropped by applying our detector to our training set), and gao_crop_query.tar.gz and gao_crop_gallery.tar.gz (images cropped by applying our detector to the aicity_2019 testing set). Please refer to our paper for more details about the datasets. The images used for training the base vehicle reid models include commercially licensed data, so they will not be open-sourced. 11 | 12 | ## Some Important Pickle Files ## 13 | Several important pickle files are also available at [pickle_file](https://pan.baidu.com/s/1u6d6dX0uPvyrqgOB0O4Qyg) (extract code: p3fg). These files are introduced in the corresponding sub-folders. 14 | 15 | ## Code Structure ## 16 | 17 | **Each part has its own README file.** 18 | 19 | * SingleCameraTracking contains the source code of single camera tracking. 20 | 21 | * Track2(ReID)/part1_model contains the source code for training the vehicle type classification model, the vehicle orientation classification model and three vehicle reid models: se_resnext101, resnet101 and se_resnet152. 22 | 23 | * Track2(ReID)/part2_model contains the source code for training three vehicle reid models: resnet50_sac, hrnet and MGN. 24 | 25 | * Track2(ReID)/part3_model contains the source code for training the key-point based reid model. 26 | 27 | * Track2(ReID)/utility_and_constraint_related_codes contains utility code and constraint-related code. 28 | 29 | ## Qualitative Results ## 30 | Some qualitative results of our method are listed below. The first column shows query images; the remaining columns show the corresponding top-7 ranking results. Images in red boxes are false positives and images in green boxes are true positives. 31 | 32 | * Results generated by the baseline method (ResNet50 trained with softmax cross-entropy loss and triplet loss). 33 | ![image](https://github.com/wzgwzg/AICity/blob/master/quali_1.jpg) 34 | 35 | * Results generated by our method.
36 | ![image](https://github.com/wzgwzg/AICity/blob/master/quali_2.jpg) 37 | -------------------------------------------------------------------------------- /SingleCameraTracking/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -O3") 4 | include_directories(${PATH_TO_GCC4.8}/include) 5 | find_package(OpenCV REQUIRED) 6 | 7 | include_directories(${OpenCV_INCLUDE_DIRS}) 8 | include_directories(${PATH_TO_DEPENDENCIES}/eigen) 9 | include_directories(${PATH_TO_DEPENDENCIES}/libboost) 10 | include_directories(${PATH_TO_DEPENDENCIES}/gflags/build/include) 11 | include_directories(${PATH_TO_CUDA}/cuda-7.5/include/) 12 | include_directories(${PATH_TO_DEPENDENCIES}/openmpi-1.8/include) 13 | include_directories(${PATH_TO_DEPENDENCIES}/caffe/include) 14 | include_directories(./src 15 | ./src/feature 16 | ./src/matching 17 | ./src/thirdPart 18 | ./src/thirdPart/munkres 19 | ./src/thirdPart/munkres/adapters) 20 | set(CAFFE_LIBS 21 | ${PATH_TO_DEPENDENCIES}/caffe/build/lib/libcaffe.so) 22 | set(GLOG_LIBS ${PATH_TO_DEPENDENCIES}/glog/lib/libglog.so) 23 | set(CUDA_LIBS ${PATH_TO_CUDA}/cuda-7.5/lib64/libcudart.so.7.5 ${PATH_TO_CUDA}/cuda-7.5/lib64/libcublas.so.7.5 ${PATH_TO_CUDA}/cuda-7.5/lib64/libcurand.so.7.5) 24 | set(CAFFE_DEPEND_LIBS ${PATH_TO_DEPENDENCIES}/caffe/lib) 25 | set(LIBS ${OpenCV_LIBS} ${CAFFE_LIBS} ${GLOG_LIBS} ${CUDA_LIBS} 26 | ${CAFFE_DEPEND_LIBS}/boost/lib/libboost_system.so) 27 | aux_source_directory(. SRCS) 28 | 29 | file(GLOB_RECURSE SRC_LIST src/*.cpp) 30 | add_executable(run_tracker ${SRC_LIST}) 31 | target_link_libraries(run_tracker ${LIBS}) 32 | -------------------------------------------------------------------------------- /SingleCameraTracking/README.md: -------------------------------------------------------------------------------- 1 | # Single Camera Tracking 2 | This part is based on bitzy's implementation of DeepSORT: https://github.com/bitzy/DeepSort 3 | 4 | ## Dependencies 5 | - opencv 2.4 6 | - caffe 7 | - cuda 7.5 8 | - cudnn 5.0 9 | Refer to our source code for other dependencies. 10 | 11 | ## Preparing data and models 12 | Before running the tracker, you need to: 13 | 1. Download the aic19-track1-mtmc datasets from [TRACK1-DOWNLOAD](http://www.aicitychallenge.org/track1-download/), and extract them into the **aic19** directory. 14 | 2. Download the detection results from [detections](https://pan.baidu.com/s/1dE2n1f0qKICRbLFm7EWxHA) (extract code: dlqy) and put them into **detection_res**. These pickle files contain the detection results of our detector. 15 | 3. Download the vehicle reid model from [reid-model](https://pan.baidu.com/s/1smsCRuQaQ4O3jMmOZfXNLg) (extract code: fc3h) and put it into **RUNNINGDATA/caffe_networks**. This model was implemented in PyTorch and converted to a Caffe model. 16 | 17 | ## Running 18 | ``` 19 | cd tools 20 | python convert_coco_detection_to_mot.py 21 | cd .. 22 | mkdir build && cd build 23 | cmake .. 24 | make -j 25 | ./run_tracker train/S01 c001 0 0 26 | ``` 27 | 28 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/VideoTracker.h: -------------------------------------------------------------------------------- 1 | #ifndef VIDEOTRACKER_H_ 2 | #define VIDEOTRACKER_H_ 3 | #include <string> 4 | 5 | /* 6 | * VideoTracker 7 | * 8 | * run: 9 | * -vpath: Path to the video. 10 | * -show_visualization: Decide whether to show the figure.
11 | * 12 | * RunTrackerWithDat: 13 | * -motDir: Path to the tracking sequences. 14 | * -show_visualization: Decide whether to show the figure. 15 | * 16 | * RunTrackerWithDets: 17 | * -motDir: Path to the tracking sequences. 18 | * -show_visualization: Decide whether to show the figure. 19 | */ 20 | class VideoTracker { 21 | public: 22 | bool run(const char* vpath, bool show_visualization); 23 | bool RunTrackerWithDat(const char* motDir, const char* output_path, 24 | bool write_feature, bool show_visualization); 25 | bool RunTrackerWithDets(const char* motDir, const char* output_path, 26 | bool show_visualization, bool write_to_image); 27 | std::string showErrMsg(); 28 | 29 | private: 30 | bool vShowFlag; 31 | std::string errorMsg; 32 | 33 | void GetSeqInfo(std::string sequence_dir, int &min_frame_idx, int &max_frame_idx); 34 | std::string loadFromBUFFERFILE(); 35 | }; 36 | 37 | 38 | #endif /* VIDEOTRACKER_H_ */ 39 | 40 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/errmsg.cpp: -------------------------------------------------------------------------------- 1 | #include "errmsg.h" 2 | #include <iostream> 3 | 4 | errMsg* errMsg::instance = NULL; 5 | errMsg *errMsg::getInstance() 6 | { 7 | if(instance == NULL) instance = new errMsg(); 8 | return instance; 9 | } 10 | 11 | errMsg::errMsg() 12 | { 13 | } 14 | 15 | void errMsg::out( 16 | std::string file, 17 | std::string func, 18 | std::string msg, bool pause) 19 | { 20 | std::cout << "IN file<" << file << "> " 21 | << func << " : " << msg << std::endl; 22 | if(pause) exit(0); 23 | } 24 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/errmsg.h: -------------------------------------------------------------------------------- 1 | #ifndef ERRMSG_H 2 | #define ERRMSG_H 3 | #include <iostream> 4 | #include <string> 5 | 6 | class errMsg 7 | { 8 | public: 9 | static errMsg* getInstance(); 10 | void out(std::string file, 11 | std::string func, 12 | std::string msg, 13 | bool pause = true); 14 | private: 15 | errMsg(); 16 | errMsg(const errMsg&); 17 | errMsg& operator=(const errMsg&); 18 | 19 | static errMsg* instance; 20 | }; 21 | 22 | #endif // ERRMSG_H 23 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/feature/FeatureExtractor.h: -------------------------------------------------------------------------------- 1 | #include <caffe/caffe.hpp> 2 | #include "opencv2/opencv.hpp" 3 | #include "opencv2/core/core.hpp" 4 | #include "opencv2/highgui/highgui.hpp" 5 | #include <algorithm> 6 | #include <iosfwd> 7 | #include <memory> 8 | #include <string> 9 | #include <utility> 10 | #include <vector> 11 | #include <iostream> 12 | 13 | #include "model.h" 14 | 15 | using namespace caffe; 16 | using std::string; 17 | 18 | class FeatureExtractor 19 | { 20 | public: 21 | static FeatureExtractor* getInstance(); 22 | bool getRectsFeature(const cv::Mat& img, DETECTIONS& d); 23 | bool getRectsFeature(const cv::Mat& img, cv::Rect rect, std::vector<float>& feature); 24 | 25 | private: 26 | static FeatureExtractor* instance; 27 | FeatureExtractor(); 28 | FeatureExtractor(const FeatureExtractor&); 29 | FeatureExtractor& operator = (const FeatureExtractor&); 30 | bool init(); 31 | ~FeatureExtractor(); 32 | 33 | void WrapInputLayer(std::vector<cv::Mat>* input_channels, 34 | float* &input_data, int width, int height, 35 | int channels); 36 | void Preprocess(const cv::Mat& img, std::vector<cv::Mat>* input_channels); 37 | void SetMean(const string& mean_file); 38 | 39 | private: 40 | int feature_dim; 41 | std::shared_ptr<Net<float> > net_; 42 | cv::Size
input_geometry_; 43 | int num_channels_; 44 | cv::Mat mean_; 45 | }; 46 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/feature/dataType.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifndef DATATYPE_H 3 | #define DATATYPE_H 4 | 5 | #include <cstddef> 6 | #include <vector> 7 | #include <Eigen/Dense> 8 | #define FEATURE_DIM 512 9 | #define MAX_IOU_DISTANCE 0.7 10 | #define MAX_AGE 30 11 | #define NUM_INIT 2 12 | 13 | typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> DETECTBOX; 14 | typedef Eigen::Matrix<float, -1, 4, Eigen::RowMajor> DETECTBOXSS; 15 | typedef Eigen::Matrix<float, 1, FEATURE_DIM, Eigen::RowMajor> FEATURE; 16 | typedef Eigen::Matrix<float, Eigen::Dynamic, FEATURE_DIM, Eigen::RowMajor> FEATURESS; 17 | //typedef std::vector<FEATURE> FEATURESS; 18 | 19 | //Kalmanfilter 20 | //typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_FILTER; 21 | typedef Eigen::Matrix<float, 1, 8, Eigen::RowMajor> KAL_MEAN; 22 | typedef Eigen::Matrix<float, 8, 8, Eigen::RowMajor> KAL_COVA; 23 | typedef Eigen::Matrix<float, 1, 4, Eigen::RowMajor> KAL_HMEAN; 24 | typedef Eigen::Matrix<float, 4, 4, Eigen::RowMajor> KAL_HCOVA; 25 | using KAL_DATA = std::pair<KAL_MEAN, KAL_COVA>; 26 | using KAL_HDATA = std::pair<KAL_HMEAN, KAL_HCOVA>; 27 | 28 | //main 29 | using RESULT_DATA = std::pair<int, DETECTBOX>; 30 | 31 | //tracker: 32 | using TRACKER_DATA = std::pair<int, FEATURESS>; 33 | using MATCH_DATA = std::pair<int, int>; 34 | typedef struct t{ 35 | std::vector<MATCH_DATA> matches; 36 | std::vector<int> unmatched_tracks; 37 | std::vector<int> unmatched_detections; 38 | }TRACHER_MATCHD; 39 | 40 | //linear_assignment: 41 | typedef Eigen::Matrix<float, -1, -1, Eigen::RowMajor> DYNAMICM; 42 | 43 | 44 | #endif // DATATYPE_H 45 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/feature/model.h: -------------------------------------------------------------------------------- 1 | #ifndef MODEL_H 2 | #define MODEL_H 3 | #include "dataType.h" 4 | #include <map> 5 | #include "opencv2/opencv.hpp" 6 | 7 | /** 8 | * Each rect's data structure. 9 | * tlwh: topleft point & (w,h) 10 | * confidence: detection confidence. 11 | * feature: the rect's reid feature. 12 | */ 13 | class DETECTION_ROW { 14 | public: 15 | DETECTBOX tlwh; 16 | float confidence; 17 | FEATURE feature; 18 | DETECTBOX to_xyah() const; 19 | DETECTBOX to_tlbr() const; 20 | int line_idx; 21 | }; 22 | 23 | typedef std::vector<DETECTION_ROW> DETECTIONS; 24 | 25 | /** 26 | * Get each image's rects & corresponding features. 27 | * Provides confidence filtering (dataMoreConf). 28 | * Provides preprocessing / overlap suppression (dataPreprocessing).
29 | */ 30 | class ModelDetection 31 | { 32 | 33 | public: 34 | static ModelDetection* getInstance(); 35 | bool loadDataFromFile(const char* motDir, bool withFeature); 36 | bool getFrameDetections(int frame_idx, DETECTIONS& res); 37 | void dataMoreConf(float min_confidence, DETECTIONS& d); 38 | void dataPreprocessing(float max_bbox_overlap, DETECTIONS& d); 39 | 40 | private: 41 | ModelDetection(); 42 | ModelDetection(const ModelDetection&); 43 | ModelDetection& operator =(const ModelDetection&); 44 | static ModelDetection* instance; 45 | 46 | using AREAPAIR = std::pair<int, double>; 47 | struct cmp { 48 | bool operator()(const AREAPAIR a, const AREAPAIR b) { 49 | return a.second < b.second; 50 | } 51 | }; 52 | std::map<int, DETECTIONS> data; 53 | void _Qsort(DETECTIONS d, std::vector<int>& a, int low, int high); 54 | bool loadFromFile; 55 | }; 56 | 57 | #endif // MODEL_H 58 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include <iostream> 2 | #include <sstream> 3 | #include <glog/logging.h> 4 | #include "VideoTracker.h" 5 | using namespace std; 6 | 7 | #define MOTDIR "../aic19/aic19-track1-mtmc/" 8 | #define RUNMOTCAFFE 9 | 10 | int main(int argc, char** argv) 11 | { 12 | if (argc != 5) { 13 | printf("usage: %s dataset sequence_idx is_featurewrite is_imagewrite\n", argv[0]); 14 | return -1; 15 | } 16 | ::google::InitGoogleLogging(argv[0]); 17 | 18 | string dataset = argv[1]; 19 | string seq_idx = argv[2]; 20 | string is_feature_write = argv[3]; 21 | string is_imagewrite = argv[4]; 22 | bool write_feature; 23 | bool write_to_image; 24 | 25 | if(is_feature_write == "1") { 26 | write_feature = true; 27 | } 28 | else { 29 | write_feature = false; 30 | } 31 | if(is_imagewrite == "1") { 32 | write_to_image = true; 33 | } 34 | else { 35 | write_to_image = false; 36 | } 37 | 38 | stringstream ss_tmp; 39 | ss_tmp << dataset; 40 | vector<string> str_vec; 41 | string str_tmp; 42 | while (getline(ss_tmp, str_tmp, '/')) { 43 | str_vec.push_back(str_tmp); 44 | } 45 | 46 | VideoTracker* t = new VideoTracker; 47 | string seq_path = MOTDIR + dataset + "/" + seq_idx + "/"; 48 | std::cout << seq_path << std::endl; 49 | string output_path = "../result/" + seq_idx + "_" + str_vec[0] + ".txt"; 50 | 51 | #ifdef RUNMOTFEAT 52 | if(t->RunTrackerWithDat(seq_path.data(), output_path.data(), write_feature, write_to_image) == false) { 53 | cout << t->showErrMsg() << endl; 54 | } 55 | #endif 56 | 57 | #ifdef RUNMOTCAFFE 58 | if(t->RunTrackerWithDets(seq_path.data(), output_path.data(), write_feature, write_to_image) == false) { 59 | cout << t->showErrMsg() << endl; 60 | } 61 | #endif 62 | 63 | cout << "Finished."
<< endl; 64 | return 0; 65 | } 66 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/kalmanfilter.h: -------------------------------------------------------------------------------- 1 | #ifndef KALMANFILTER_H 2 | #define KALMANFILTER_H 3 | 4 | #include "../feature/dataType.h" 5 | 6 | class KalmanFilter 7 | { 8 | public: 9 | static const double chi2inv95[10]; 10 | KalmanFilter(); 11 | KAL_DATA initiate(const DETECTBOX& measurement); 12 | void predict(KAL_MEAN& mean, KAL_COVA& covariance); 13 | KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance); 14 | KAL_DATA update(const KAL_MEAN& mean, 15 | const KAL_COVA& covariance, 16 | const DETECTBOX& measurement); 17 | 18 | Eigen::Matrix gating_distance( 19 | const KAL_MEAN& mean, 20 | const KAL_COVA& covariance, 21 | const std::vector& measurements, 22 | bool only_position = false); 23 | 24 | private: 25 | Eigen::Matrix _motion_mat; 26 | Eigen::Matrix _update_mat; 27 | float _std_weight_position; 28 | float _std_weight_velocity; 29 | }; 30 | 31 | #endif // KALMANFILTER_H 32 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/linear_assignment.h: -------------------------------------------------------------------------------- 1 | #ifndef LINEAR_ASSIGNMENT_H 2 | #define LINEAR_ASSIGNMENT_H 3 | #include "../feature/dataType.h" 4 | #include "tracker.h" 5 | 6 | #define INFTY_COST 1e5 7 | class tracker; 8 | //for matching; 9 | class linear_assignment 10 | { 11 | linear_assignment(); 12 | linear_assignment(const linear_assignment& ); 13 | linear_assignment& operator=(const linear_assignment&); 14 | static linear_assignment* instance; 15 | 16 | public: 17 | static linear_assignment* getInstance(); 18 | TRACHER_MATCHD matching_cascade(tracker* distance_metric, 19 | tracker::GATED_METRIC_FUNC distance_metric_func, 20 | float max_distance, 21 | int cascade_depth, 22 | std::vector& tracks, 23 | const DETECTIONS& detections, 24 | std::vector &track_indices, 25 | std::vector detection_indices = std::vector()); 26 | TRACHER_MATCHD min_cost_matching( 27 | tracker* distance_metric, 28 | tracker::GATED_METRIC_FUNC distance_metric_func, 29 | float max_distance, 30 | std::vector& tracks, 31 | const DETECTIONS& detections, 32 | std::vector& track_indices, 33 | std::vector& detection_indices); 34 | DYNAMICM gate_cost_matrix( 35 | KalmanFilter* kf, 36 | DYNAMICM& cost_matrix, 37 | std::vector& tracks, 38 | const DETECTIONS& detections, 39 | const std::vector& track_indices, 40 | const std::vector& detection_indices, 41 | float gated_cost = INFTY_COST, 42 | bool only_position = false); 43 | }; 44 | 45 | #endif // LINEAR_ASSIGNMENT_H 46 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/nn_matching.h: -------------------------------------------------------------------------------- 1 | #ifndef NN_MATCHING_H 2 | #define NN_MATCHING_H 3 | 4 | #include "../feature/dataType.h" 5 | 6 | #include 7 | 8 | //A tool to calculate distance; 9 | class NearNeighborDisMetric{ 10 | public: 11 | enum METRIC_TYPE{euclidean=1, cosine}; 12 | NearNeighborDisMetric(METRIC_TYPE metric, 13 | float matching_threshold, 14 | int budget); 15 | DYNAMICM distance(const FEATURESS& features, const std::vector &targets); 16 | void partial_fit(std::vector& tid_feats, std::vector& active_targets); 17 | float mating_threshold; 18 | 19 | private: 20 | typedef Eigen::VectorXf 
(NearNeighborDisMetric::*PTRFUN)(const FEATURESS&, const FEATURESS&); 21 | Eigen::VectorXf _nncosine_distance(const FEATURESS& x, const FEATURESS& y); 22 | Eigen::VectorXf _nneuclidean_distance(const FEATURESS& x, const FEATURESS& y); 23 | 24 | Eigen::MatrixXf _pdist(const FEATURESS& x, const FEATURESS& y); 25 | Eigen::MatrixXf _cosine_distance(const FEATURESS & a, const FEATURESS& b, bool data_is_normalized = false); 26 | private: 27 | PTRFUN _metric; 28 | int budget; 29 | std::map samples; 30 | }; 31 | 32 | #endif // NN_MATCHING_H 33 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/track.cpp: -------------------------------------------------------------------------------- 1 | #include "track.h" 2 | 3 | Track::Track(KAL_MEAN& mean, KAL_COVA& covariance, int track_id, int n_init, int max_age, const FEATURE& feature) 4 | { 5 | this->mean = mean; 6 | this->covariance = covariance; 7 | this->track_id = track_id; 8 | this->hits = 1; 9 | this->age = 1; 10 | this->time_since_update = 0; 11 | this->state = TrackState::Tentative; 12 | features = FEATURESS(1, FEATURE_DIM); 13 | features.row(0) = feature;//features.rows() must = 0; 14 | 15 | this->_n_init = n_init; 16 | this->_max_age = max_age; 17 | } 18 | 19 | void Track::predit(KalmanFilter *kf) 20 | { 21 | /*Propagate the state distribution to the current time step using a 22 | Kalman filter prediction step. 23 | 24 | Parameters 25 | ---------- 26 | kf : kalman_filter.KalmanFilter 27 | The Kalman filter. 28 | */ 29 | 30 | kf->predict(this->mean, this->covariance); 31 | this->age += 1; 32 | this->time_since_update += 1; 33 | } 34 | 35 | void Track::update(KalmanFilter * const kf, const DETECTION_ROW& detection) 36 | { 37 | KAL_DATA pa = kf->update(this->mean, this->covariance, detection.to_xyah()); 38 | this->mean = pa.first; 39 | this->covariance = pa.second; 40 | 41 | featuresAppendOne(detection.feature); 42 | this->hits += 1; 43 | this->time_since_update = 0; 44 | if(this->state == TrackState::Tentative && this->hits >= this->_n_init) { 45 | this->state = TrackState::Confirmed; 46 | } 47 | } 48 | 49 | void Track::mark_missed() 50 | { 51 | if(this->state == TrackState::Tentative) { 52 | this->state = TrackState::Deleted; 53 | } else if(this->time_since_update > this->_max_age) { 54 | this->state = TrackState::Deleted; 55 | } 56 | } 57 | 58 | bool Track::is_confirmed() 59 | { 60 | return this->state == TrackState::Confirmed; 61 | } 62 | 63 | bool Track::is_deleted() 64 | { 65 | return this->state == TrackState::Deleted; 66 | } 67 | 68 | bool Track::is_tentative() 69 | { 70 | return this->state == TrackState::Tentative; 71 | } 72 | 73 | DETECTBOX Track::to_tlwh() 74 | { 75 | DETECTBOX ret = mean.leftCols(4); 76 | ret(2) *= ret(3); 77 | ret.leftCols(2) -= (ret.rightCols(2)/2); 78 | return ret; 79 | } 80 | 81 | void Track::featuresAppendOne(const FEATURE &f) 82 | { 83 | int size = this->features.rows(); 84 | FEATURESS newfeatures = FEATURESS(size+1, FEATURE_DIM); 85 | newfeatures.block(0, 0, size, FEATURE_DIM) = this->features; 86 | newfeatures.row(size) = f; 87 | features = newfeatures; 88 | } 89 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/track.h: -------------------------------------------------------------------------------- 1 | #ifndef TRACK_H 2 | #define TRACK_H 3 | 4 | #include "../feature/dataType.h" 5 | 6 | #include "kalmanfilter.h" 7 | #include "../feature/model.h" 8 | 9 | class Track 10 | { 11 | 
/*""" 12 | A single target track with state space `(x, y, a, h)` and associated 13 | velocities, where `(x, y)` is the center of the bounding box, `a` is the 14 | aspect ratio and `h` is the height. 15 | 16 | Parameters 17 | ---------- 18 | mean : ndarray 19 | Mean vector of the initial state distribution. 20 | covariance : ndarray 21 | Covariance matrix of the initial state distribution. 22 | track_id : int 23 | A unique track identifier. 24 | n_init : int 25 | Number of consecutive detections before the track is confirmed. The 26 | track state is set to `Deleted` if a miss occurs within the first 27 | `n_init` frames. 28 | max_age : int 29 | The maximum number of consecutive misses before the track state is 30 | set to `Deleted`. 31 | feature : Optional[ndarray] 32 | Feature vector of the detection this track originates from. If not None, 33 | this feature is added to the `features` cache. 34 | 35 | Attributes 36 | ---------- 37 | mean : ndarray 38 | Mean vector of the initial state distribution. 39 | covariance : ndarray 40 | Covariance matrix of the initial state distribution. 41 | track_id : int 42 | A unique track identifier. 43 | hits : int 44 | Total number of measurement updates. 45 | age : int 46 | Total number of frames since first occurance. 47 | time_since_update : int 48 | Total number of frames since last measurement update. 49 | state : TrackState 50 | The current track state. 51 | features : List[ndarray] 52 | A cache of features. On each measurement update, the associated feature 53 | vector is added to this list. 54 | 55 | """*/ 56 | enum TrackState {Tentative = 1, Confirmed, Deleted}; 57 | 58 | public: 59 | Track(KAL_MEAN& mean, KAL_COVA& covariance, int track_id, 60 | int n_init, int max_age, const FEATURE& feature); 61 | void predit(KalmanFilter *kf); 62 | void update(KalmanFilter * const kf, const DETECTION_ROW &detection); 63 | void mark_missed(); 64 | bool is_confirmed(); 65 | bool is_deleted(); 66 | bool is_tentative(); 67 | DETECTBOX to_tlwh(); 68 | int time_since_update; 69 | int track_id; 70 | FEATURESS features; 71 | KAL_MEAN mean; 72 | KAL_COVA covariance; 73 | 74 | int hits; 75 | int age; 76 | int _n_init; 77 | int _max_age; 78 | TrackState state; 79 | private: 80 | void featuresAppendOne(const FEATURE& f); 81 | }; 82 | 83 | #endif // TRACK_H 84 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/matching/tracker.h: -------------------------------------------------------------------------------- 1 | #ifndef TRACKER_H 2 | #define TRACKER_H 3 | #include 4 | 5 | 6 | #include "kalmanfilter.h" 7 | #include "track.h" 8 | #include "../feature/model.h" 9 | 10 | class NearNeighborDisMetric; 11 | 12 | class tracker 13 | { 14 | public: 15 | NearNeighborDisMetric* metric; 16 | float max_iou_distance; 17 | int max_age; 18 | int n_init; 19 | 20 | KalmanFilter* kf; 21 | 22 | int _next_idx; 23 | public: 24 | std::vector tracks; 25 | tracker(/*NearNeighborDisMetric* metric,*/ 26 | float max_cosine_distance, int nn_budget, 27 | float max_iou_distance = MAX_IOU_DISTANCE, 28 | int max_age = MAX_AGE, int n_init = NUM_INIT); 29 | void predict(); 30 | void update(const DETECTIONS& detections); 31 | typedef DYNAMICM (tracker::* GATED_METRIC_FUNC)( 32 | std::vector& tracks, 33 | const DETECTIONS& dets, 34 | const std::vector& track_indices, 35 | const std::vector& detection_indices); 36 | private: 37 | void _match(const DETECTIONS& detections, TRACHER_MATCHD& res); 38 | void _initiate_track(const DETECTION_ROW& detection); 39 
| public: 40 | DYNAMICM gated_matric( 41 | std::vector& tracks, 42 | const DETECTIONS& dets, 43 | const std::vector& track_indices, 44 | const std::vector& detection_indices); 45 | DYNAMICM iou_cost( 46 | std::vector& tracks, 47 | const DETECTIONS& dets, 48 | const std::vector& track_indices, 49 | const std::vector& detection_indices); 50 | Eigen::VectorXf iou(DETECTBOX& bbox, 51 | DETECTBOXSS &candidates); 52 | }; 53 | 54 | #endif // TRACKER_H 55 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/hungarianoper.cpp: -------------------------------------------------------------------------------- 1 | #include "hungarianoper.h" 2 | 3 | Eigen::Matrix HungarianOper::Solve(const DYNAMICM &cost_matrix) 4 | { 5 | int rows = cost_matrix.rows(); 6 | int cols = cost_matrix.cols(); 7 | Matrix matrix(rows, cols); 8 | for (int row = 0; row < rows; row++) { 9 | for (int col = 0; col < cols; col++) { 10 | matrix(row, col) = cost_matrix(row, col); 11 | } 12 | } 13 | //Munkres get matrix; 14 | Munkres m; 15 | m.solve(matrix); 16 | 17 | // 18 | std::vector> pairs; 19 | for (int row = 0; row < rows; row++) { 20 | for (int col = 0; col < cols; col++) { 21 | int tmp = (int)matrix(row, col); 22 | if (tmp == 0) pairs.push_back(std::make_pair(row, col)); 23 | } 24 | } 25 | // 26 | int count = pairs.size(); 27 | Eigen::Matrix re(count, 2); 28 | for (int i = 0; i < count; i++) { 29 | re(i, 0) = pairs[i].first; 30 | re(i, 1) = pairs[i].second; 31 | } 32 | return re; 33 | }//end Solve; 34 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/hungarianoper.h: -------------------------------------------------------------------------------- 1 | #ifndef HUNGARIANOPER_H 2 | #define HUNGARIANOPER_H 3 | #include "munkres/munkres.h" 4 | #include "munkres/adapters/boostmatrixadapter.h" 5 | #include "../feature/dataType.h" 6 | 7 | class HungarianOper { 8 | public: 9 | static Eigen::Matrix Solve(const DYNAMICM &cost_matrix); 10 | }; 11 | 12 | #endif // HUNGARIANOPER_H 13 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/munkres/adapters/adapter.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2015 Miroslav Krajicek 3 | * 4 | * This program is free software; you can redistribute it and/or modify 5 | * it under the terms of the GNU General Public License as published by 6 | * the Free Software Foundation; either version 2 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 
13 | * 14 | * You should have received a copy of the GNU General Public License 15 | * along with this program; if not, write to the Free Software 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 | */ 18 | 19 | #include "adapter.h" 20 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/munkres/adapters/adapter.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2015 Miroslav Krajicek 3 | * 4 | * This program is free software; you can redistribute it and/or modify 5 | * it under the terms of the GNU General Public License as published by 6 | * the Free Software Foundation; either version 2 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 13 | * 14 | * You should have received a copy of the GNU General Public License 15 | * along with this program; if not, write to the Free Software 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 | */ 18 | 19 | #ifndef _ADAPTER_H_ 20 | #define _ADAPTER_H_ 21 | 22 | #include "../matrix.h" 23 | #include "../munkres.h" 24 | 25 | template class Adapter 26 | { 27 | public: 28 | virtual Matrix convertToMatrix(const Container &con) const = 0; 29 | virtual void convertFromMatrix(Container &con, const Matrix &matrix) const = 0; 30 | virtual void solve(Container &con) 31 | { 32 | auto matrix = convertToMatrix(con); 33 | m_munkres.solve(matrix); 34 | convertFromMatrix(con, matrix); 35 | } 36 | protected: 37 | Munkres m_munkres; 38 | }; 39 | 40 | #endif /* _ADAPTER_H_ */ 41 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/munkres/adapters/boostmatrixadapter.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2015 Miroslav Krajicek 3 | * 4 | * This program is free software; you can redistribute it and/or modify 5 | * it under the terms of the GNU General Public License as published by 6 | * the Free Software Foundation; either version 2 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 
13 | * 14 | * You should have received a copy of the GNU General Public License 15 | * along with this program; if not, write to the Free Software 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 | */ 18 | 19 | #include "boostmatrixadapter.h" 20 | 21 | //template class BoostMatrixAdapter; 22 | //template class BoostMatrixAdapter; 23 | //template class BoostMatrixAdapter; 24 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/munkres/adapters/boostmatrixadapter.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2015 Miroslav Krajicek 3 | * 4 | * This program is free software; you can redistribute it and/or modify 5 | * it under the terms of the GNU General Public License as published by 6 | * the Free Software Foundation; either version 2 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU General Public License for more details. 13 | * 14 | * You should have received a copy of the GNU General Public License 15 | * along with this program; if not, write to the Free Software 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 | */ 18 | 19 | #ifndef _BOOSTMATRIXADAPTER_H_ 20 | #define _BOOSTMATRIXADAPTER_H_ 21 | 22 | #include "adapter.h" 23 | #ifndef WIN32 24 | #include 25 | #endif 26 | #include 27 | 28 | template class BoostMatrixAdapter : public Adapter > 29 | { 30 | public: 31 | virtual Matrix convertToMatrix(const boost::numeric::ublas::matrix &boost_matrix) const override 32 | { 33 | const auto rows = boost_matrix.size1 (); 34 | const auto columns = boost_matrix.size2 (); 35 | Matrix matrix (rows, columns); 36 | for (int i = 0; i < rows; ++i) { 37 | for (int j = 0; j < columns; ++j) { 38 | matrix (i, j) = boost_matrix (i, j); 39 | } 40 | } 41 | return matrix; 42 | } 43 | 44 | virtual void convertFromMatrix(boost::numeric::ublas::matrix &boost_matrix,const Matrix &matrix) const override 45 | { 46 | const auto rows = matrix.rows(); 47 | const auto columns = matrix.columns(); 48 | for (int i = 0; i < rows; ++i) { 49 | for (int j = 0; j < columns; ++j) { 50 | boost_matrix (i, j) = matrix (i, j); 51 | } 52 | } 53 | } 54 | }; 55 | 56 | #endif /* _BOOSTMATRIXADAPTER_H_ */ 57 | -------------------------------------------------------------------------------- /SingleCameraTracking/src/thirdPart/munkres/munkres.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2007 John Weaver 3 | * Copyright (c) 2015 Miroslav Krajicek 4 | * 5 | * This program is free software; you can redistribute it and/or modify 6 | * it under the terms of the GNU General Public License as published by 7 | * the Free Software Foundation; either version 2 of the License, or 8 | * (at your option) any later version. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * GNU General Public License for more details. 
14 | * 15 | You should have received a copy of the GNU General Public License 16 | * along with this program; if not, write to the Free Software 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 | */ 19 | 20 | #include "munkres.h" 21 | 22 | template class Munkres<float>; 23 | template class Munkres<double>; 24 | template class Munkres<int>; 25 | 26 | -------------------------------------------------------------------------------- /SingleCameraTracking/tools/convert_coco_detection_to_mot.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | dt_file = '../detection_res/aicity2019_track_train.pkl.json' 5 | #dt_file = '../detection_res/aicity2019_track_test.pkl.json' 6 | if 'train' in dt_file: 7 | dataset = 'train' 8 | elif 'test' in dt_file: 9 | dataset = 'test' 10 | else: 11 | print('error') 12 | exit() 13 | mapping_file = '../detection_res/'+dataset+'.json' 14 | 15 | fp = open(dt_file, 'r') 16 | dets = json.load(fp) 17 | fp.close() 18 | fp = open(mapping_file) 19 | info = json.load(fp) 20 | fp.close() 21 | 22 | id_filename = {} 23 | det_all = {} 24 | for iminfo in info["images"]: 25 | id_filename[iminfo["id"]] = iminfo["file_name"] 26 | for det in dets: 27 | id = det["image_id"] 28 | filename = id_filename[id] 29 | score = det["score"] 30 | bbox = det["bbox"] 31 | label = det["category_id"] 32 | 33 | filename = filename.split('/') 34 | scene = filename[2] 35 | cam = filename[3] 36 | frame = int(filename[5].split('.')[0]) 37 | 38 | if scene not in det_all: 39 | det_all[scene] = {} 40 | if cam not in det_all[scene]: 41 | det_all[scene][cam] = {} 42 | if frame not in det_all[scene][cam]: 43 | det_all[scene][cam][frame] = [] 44 | det_all[scene][cam][frame].append(det) 45 | 46 | for scene in det_all: 47 | for cam in det_all[scene]: 48 | frame_list = det_all[scene][cam] 49 | fpath = os.path.join('../aic19/aic19-track1-mtmc',dataset, scene, cam, 'det/det_se154_csc_ms.txt') 50 | fp = open(fpath, 'w') 51 | for frame in frame_list: 52 | for det_out in frame_list[frame]: 53 | bbox = det_out["bbox"] 54 | x = bbox[0] 55 | y = bbox[1] 56 | w = bbox[2] 57 | h = bbox[3] 58 | score = det_out["score"] 59 | label = det_out["category_id"] 60 | fp.write('%d,-1,%.3f,%.3f,%.3f,%.3f,%.5f,-1,-1,-1\n' % (frame,x,y,w,h,score)) 61 | fp.close() 62 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/README.md: -------------------------------------------------------------------------------- 1 | This folder contains the source code for training the vehicle type classification model, the vehicle orientation classification model and three vehicle reid models: se_resnext101, resnet101 and se_resnet152. These codes are based on [open-reid](https://github.com/Cysu/open-reid). 2 | 3 | 4 | **Before running these codes, the environment should be prepared and the related paths should be modified.** 5 | **Specifically, all dependencies should be installed, the datasets should be put into './examples/data' and pretrained models should be put into './pretrain_models/'.** 6 | 7 | ## Code Structure 8 | 9 | Run ```sh train_reid_model.sh``` to train vehicle reid models. To train the different models mentioned above, modify the network architecture name. Available network names can be found in 'reid/models/__init__.py'. 10 | 11 | Run ```sh train_direction.sh``` to train the vehicle orientation classification model. 12 | 13 | Run ```sh train_multi_attribute.sh``` to train the vehicle type (multi-attribute) classification model. 14 | 15 | Run ```sh run_test_attribute.sh``` to get vehicle type prediction results. Specifically, two pickle files should be obtained containing the softmax vehicle type vectors of each test image. The trained model should be put into the related path indicated by 'run_test_attribute.sh'. 16 | 17 | Run ```python ./reid/extract_fea_from_dir.py``` to extract reid features of each test image; a sketch of how such feature pickles can be consumed follows at the end of this section. In our implementation, the average feature of one image and its corresponding flipped image is used. Please note again, some paths or the network architecture name should be modified before running the code. Finally, pickle files containing the reid features of each test image will be obtained. 18 | 19 | Run ```python ./reid/extract_direction_from_dir.py``` to extract softmax vehicle orientation vectors of each test image. Similarly, model weights, paths, etc. should be set up. 20 | 
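Below is a minimal, non-authoritative sketch of consuming the extracted feature pickles to build a query-to-gallery Euclidean distance matrix like the `distmat_*.pickle` files described in the next section. The pickle file names and the dict layout (image filename mapped to a feature vector) are illustrative assumptions, not the documented format.

```python
# Sketch: load per-image reid feature pickles and rank the gallery for each query.
# 'query_fea.pkl' / 'gallery_fea.pkl' and the {filename: feature} layout are
# hypothetical; adapt them to the pickles actually produced by extract_fea_from_dir.py.
import pickle
import numpy as np

with open('query_fea.pkl', 'rb') as f:
    query = pickle.load(f)
with open('gallery_fea.pkl', 'rb') as f:
    gallery = pickle.load(f)

q = np.stack([np.asarray(query[k]).ravel() for k in sorted(query)])      # (n_q, dim)
g = np.stack([np.asarray(gallery[k]).ravel() for k in sorted(gallery)])  # (n_g, dim)

# Squared Euclidean distances via the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
distmat = (q ** 2).sum(1)[:, None] + (g ** 2).sum(1)[None, :] - 2.0 * q.dot(g.T)
ranks = np.argsort(distmat, axis=1)  # per-query gallery indices, best match first
```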
21 | ## Some Important Pickle Files Related to This Part ## 22 | 23 | Several important pickle files are available at [pickle_file](https://pan.baidu.com/s/1u6d6dX0uPvyrqgOB0O4Qyg) (extract code: p3fg). The files related to this part are listed below; the remaining files are introduced in the other parts. 24 | 25 | ```query_newbig7_cat_complete.pkl and test_newbig7_cat_complete.pkl```: The final reid features, i.e. the concatenation of the results of all 7 models mentioned in our paper. Most models provide the average feature of the normal image and the corresponding cropped image generated by our detector. 26 | 27 | ```distmat_newbig7_no_strategy_no_multi_no_rerank.pickle```: The Euclidean distance matrix between each pair of test images. 28 | 29 | ```query_direction.pkl and gallery_direction.pkl```: The predicted vehicle orientation results of the query and gallery images, respectively. 30 | 31 | ```q_q_direct_sim.pkl, q_g_direct_sim.pkl and g_g_direct_sim.pkl```: The vehicle orientation similarity between each pair of images in the query_query, query_gallery and gallery_gallery sets, respectively. 32 | 33 | ```query_new_attribute.pkl and gallery_new_attribute.pkl```: The predicted vehicle type results of the query and gallery images, respectively. 34 | 35 | ```type_sim.pkl```: The vehicle type similarity between each pair of images. 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import datasets 4 | from . import evaluation_metrics 5 | from . import feature_extraction 6 | from . import loss 7 | from . import metric_learning 8 | from . import models 9 | from . import utils 10 | from . import dist_metric 11 | from . import evaluators, direct_evaluators 12 | from . import trainers, direct_trainers, attribute_trainers 13 | 14 | __version__ = '0.2.0' 15 | 
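The `reid` package above re-exports its sub-modules in the open-reid style. As a quick, hedged sketch of how the dataset and model factories are typically combined (the `models.create` keyword arguments are assumed from upstream open-reid and may differ in this fork):

```python
# Hypothetical wiring of the factories; the data path and keyword arguments
# below are assumptions for illustration, not this repo's documented usage.
from reid import datasets, models

dataset = datasets.create('aicity_car196', 'examples/data/aicity_car196')
model = models.create('resnet101', num_features=1024, dropout=0.5,
                      num_classes=dataset.num_trainval_ids)
```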
-------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import warnings 3 | 4 | from .small_vehicle import Small_Vehicle 5 | from .vehicle_downsample import Downsample_Vehicle 6 | from .aicity_car196 import Aicity_Car196 7 | from .aicity_attribute import Aicity_Attribute 8 | from .complete_aicity_car196 import Complete_Aicity_Car196 9 | 10 | 11 | __factory = { 12 | 'small_vehicle': Small_Vehicle, 13 | 'downsample_vehicle': Downsample_Vehicle, 14 | 'aicity_car196': Aicity_Car196, 15 | 'aicity_attribute': Aicity_Attribute, 16 | 'complete_aicity_car196': Complete_Aicity_Car196, 17 | } 18 | 19 | 20 | def names(): 21 | return sorted(__factory.keys()) 22 | 23 | 24 | def create(name, root, *args, **kwargs): 25 | """ 26 | Create a dataset instance. 27 | 28 | Parameters 29 | ---------- 30 | name : str 31 | The dataset name. Can be one of 'small_vehicle', 'downsample_vehicle', 32 | 'aicity_car196', 'aicity_attribute', and 'complete_aicity_car196'. 33 | root : str 34 | The path to the dataset directory. 35 | split_id : int, optional 36 | The index of data split. Default: 0 37 | num_val : int or float, optional 38 | When int, it means the number of validation identities. When float, 39 | it means the proportion of validation to all the trainval. Default: 100 40 | download : bool, optional 41 | If True, will download the dataset. Default: False 42 | """ 43 | if name not in __factory: 44 | raise KeyError("Unknown dataset:", name) 45 | return __factory[name](root, *args, **kwargs) 46 | 47 | 48 | def get_dataset(name, root, *args, **kwargs): 49 | warnings.warn("get_dataset is deprecated. Use create instead.") 50 | return create(name, root, *args, **kwargs) 51 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/datasets/aicity_car196.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | from ..utils.osutils import mkdir_if_missing 6 | from ..utils.serialization import write_json 7 | 8 | 9 | class Aicity_Car196(Dataset): 10 | url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view' 11 | md5 = '65005ab7d12ec1c44de4eeafe813e68a' 12 | 13 | def __init__(self, root, split_id=0, num_val=100, download=True): 14 | super(Aicity_Car196, self).__init__(root, split_id=split_id) 15 | 16 | if download: 17 | self.download() 18 | 19 | if not self._check_integrity(): 20 | raise RuntimeError("Dataset not found or corrupted. 
" + 21 | "You can use download=True to download it.") 22 | 23 | self.load(num_val) 24 | 25 | def download(self): 26 | if self._check_integrity(): 27 | print("Files already downloaded and verified") 28 | return 29 | 30 | import re 31 | import hashlib 32 | import shutil 33 | from glob import glob 34 | from zipfile import ZipFile 35 | 36 | raw_dir = osp.join(self.root, 'raw') 37 | mkdir_if_missing(raw_dir) 38 | 39 | # Download the raw zip file 40 | fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip') 41 | if osp.isfile(fpath) and \ 42 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 43 | print("Using downloaded file: " + fpath) 44 | else: 45 | raise RuntimeError("Please download the dataset manually from {} " 46 | "to {}".format(self.url, fpath)) 47 | 48 | # Extract the file 49 | exdir = osp.join(raw_dir, 'Market-1501-v15.09.15') 50 | if not osp.isdir(exdir): 51 | print("Extracting zip file") 52 | with ZipFile(fpath) as z: 53 | z.extractall(path=raw_dir) 54 | 55 | # Format 56 | images_dir = osp.join(self.root, 'images') 57 | mkdir_if_missing(images_dir) 58 | 59 | # 1501 identities (+1 for background) with 6 camera views each 60 | identities = [[[] for _ in range(6)] for _ in range(1502)] 61 | 62 | def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')): 63 | fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg'))) 64 | pids = set() 65 | for fpath in fpaths: 66 | fname = osp.basename(fpath) 67 | pid, cam = map(int, pattern.search(fname).groups()) 68 | if pid == -1: continue # junk images are just ignored 69 | assert 0 <= pid <= 1501 # pid == 0 means background 70 | assert 1 <= cam <= 6 71 | cam -= 1 72 | pids.add(pid) 73 | fname = ('{:08d}_{:02d}_{:04d}.jpg' 74 | .format(pid, cam, len(identities[pid][cam]))) 75 | identities[pid][cam].append(fname) 76 | shutil.copy(fpath, osp.join(images_dir, fname)) 77 | return pids 78 | 79 | trainval_pids = register('bounding_box_train') 80 | gallery_pids = register('bounding_box_test') 81 | query_pids = register('query') 82 | assert query_pids <= gallery_pids 83 | assert trainval_pids.isdisjoint(gallery_pids) 84 | 85 | # Save meta information into a json file 86 | meta = {'name': 'Aicity_Car196', 'shot': 'multiple', 'num_cameras': 41, 87 | 'identities': identities} 88 | write_json(meta, osp.join(self.root, 'meta.json')) 89 | 90 | # Save the only training / test split 91 | splits = [{ 92 | 'trainval': sorted(list(trainval_pids)), 93 | 'query': sorted(list(query_pids)), 94 | 'gallery': sorted(list(gallery_pids))}] 95 | write_json(splits, osp.join(self.root, 'splits.json')) 96 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/datasets/small_vehicle.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | from ..utils.osutils import mkdir_if_missing 6 | from ..utils.serialization import write_json 7 | 8 | 9 | class Small_Vehicle(Dataset): 10 | url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view' 11 | md5 = '65005ab7d12ec1c44de4eeafe813e68a' 12 | 13 | def __init__(self, root, split_id=0, num_val=100, download=True): 14 | super(Small_Vehicle, self).__init__(root, split_id=split_id) 15 | 16 | if download: 17 | self.download() 18 | 19 | if not self._check_integrity(): 20 | raise RuntimeError("Dataset not found or corrupted. 
" + 21 | "You can use download=True to download it.") 22 | 23 | self.load(num_val) 24 | 25 | def download(self): 26 | if self._check_integrity(): 27 | print("Files already downloaded and verified") 28 | return 29 | 30 | import re 31 | import hashlib 32 | import shutil 33 | from glob import glob 34 | from zipfile import ZipFile 35 | 36 | raw_dir = osp.join(self.root, 'raw') 37 | mkdir_if_missing(raw_dir) 38 | 39 | # Download the raw zip file 40 | fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip') 41 | if osp.isfile(fpath) and \ 42 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 43 | print("Using downloaded file: " + fpath) 44 | else: 45 | raise RuntimeError("Please download the dataset manually from {} " 46 | "to {}".format(self.url, fpath)) 47 | 48 | # Extract the file 49 | exdir = osp.join(raw_dir, 'Market-1501-v15.09.15') 50 | if not osp.isdir(exdir): 51 | print("Extracting zip file") 52 | with ZipFile(fpath) as z: 53 | z.extractall(path=raw_dir) 54 | 55 | # Format 56 | images_dir = osp.join(self.root, 'images') 57 | mkdir_if_missing(images_dir) 58 | 59 | # 1501 identities (+1 for background) with 6 camera views each 60 | identities = [[[] for _ in range(6)] for _ in range(1502)] 61 | 62 | def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')): 63 | fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg'))) 64 | pids = set() 65 | for fpath in fpaths: 66 | fname = osp.basename(fpath) 67 | pid, cam = map(int, pattern.search(fname).groups()) 68 | if pid == -1: continue # junk images are just ignored 69 | assert 0 <= pid <= 1501 # pid == 0 means background 70 | assert 1 <= cam <= 6 71 | cam -= 1 72 | pids.add(pid) 73 | fname = ('{:08d}_{:02d}_{:04d}.jpg' 74 | .format(pid, cam, len(identities[pid][cam]))) 75 | identities[pid][cam].append(fname) 76 | shutil.copy(fpath, osp.join(images_dir, fname)) 77 | return pids 78 | 79 | trainval_pids = register('bounding_box_train') 80 | gallery_pids = register('bounding_box_test') 81 | query_pids = register('query') 82 | assert query_pids <= gallery_pids 83 | assert trainval_pids.isdisjoint(gallery_pids) 84 | 85 | # Save meta information into a json file 86 | meta = {'name': 'Small_Vehicle', 'shot': 'multiple', 'num_cameras': 41, 87 | 'identities': identities} 88 | write_json(meta, osp.join(self.root, 'meta.json')) 89 | 90 | # Save the only training / test split 91 | splits = [{ 92 | 'trainval': sorted(list(trainval_pids)), 93 | 'query': sorted(list(query_pids)), 94 | 'gallery': sorted(list(gallery_pids))}] 95 | write_json(splits, osp.join(self.root, 'splits.json')) 96 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/direct_evaluators.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import time 3 | 4 | import torch 5 | from torch.autograd import Variable 6 | from .evaluation_metrics import accuracy 7 | from .utils.meters import AverageMeter 8 | import numpy as np 9 | import os 10 | 11 | 12 | 13 | class Evaluator(object): 14 | def __init__(self, model): 15 | super(Evaluator, self).__init__() 16 | self.model = model 17 | 18 | def evaluate(self, data_loader, print_freq=1, metric=None): 19 | self.model.eval() 20 | batch_time = AverageMeter() 21 | data_time = AverageMeter() 22 | precisions = AverageMeter() 23 | 24 | end = time.time() 25 | for i, (imgs, fnames, directs) in enumerate(data_loader): 26 | data_time.update(time.time() - end) 27 | inputs = [Variable(imgs, 
requires_grad=False)] 28 | targets = Variable(directs.cuda()) 29 | outputs = self.model(*inputs) 30 | prec, = accuracy(outputs.data, targets.data) 31 | prec = prec[0] 32 | precisions.update(prec, targets.size(0)) 33 | batch_time.update(time.time() - end) 34 | end = time.time() 35 | 36 | if (i + 1) % print_freq == 0: 37 | print('Test: [{}/{}]\t' 38 | 'Time {:.3f} ({:.3f})\t' 39 | 'Data {:.3f} ({:.3f})\t' 40 | 'Prec {:.2%} ({:.2%})\t' 41 | .format(i + 1, len(data_loader), 42 | batch_time.val, batch_time.avg, 43 | data_time.val, data_time.avg, 44 | precisions.val, precisions.avg)) 45 | 46 | return precisions.avg 47 | 48 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/dist_metric.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | from .evaluators import extract_features 6 | from .metric_learning import get_metric 7 | 8 | 9 | class DistanceMetric(object): 10 | def __init__(self, algorithm='euclidean', *args, **kwargs): 11 | super(DistanceMetric, self).__init__() 12 | self.algorithm = algorithm 13 | self.metric = get_metric(algorithm, *args, **kwargs) 14 | 15 | def train(self, model, data_loader): 16 | if self.algorithm == 'euclidean': return 17 | features, labels = extract_features(model, data_loader) 18 | features = torch.stack(features.values()).numpy() 19 | labels = torch.Tensor(list(labels.values())).numpy() 20 | self.metric.fit(features, labels) 21 | 22 | def transform(self, X): 23 | if torch.is_tensor(X): 24 | X = X.numpy() 25 | X = self.metric.transform(X) 26 | X = torch.from_numpy(X) 27 | else: 28 | X = self.metric.transform(X) 29 | return X 30 | 31 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/evaluation_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .classification import accuracy 4 | from .ranking import cmc, mean_ap 5 | 6 | __all__ = [ 7 | 'accuracy', 8 | 'cmc', 9 | 'mean_ap', 10 | ] 11 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/evaluation_metrics/classification.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from ..utils import to_torch 4 | 5 | 6 | def accuracy(output, target, topk=(1,)): 7 | output, target = to_torch(output), to_torch(target) 8 | maxk = max(topk) 9 | batch_size = target.size(0) 10 | 11 | _, pred = output.topk(maxk, 1, True, True) 12 | pred = pred.t() 13 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 14 | 15 | ret = [] 16 | for k in topk: 17 | correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True) 18 | ret.append(correct_k.mul_(1. 
/ batch_size)) 19 | return ret 20 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/feature_extraction/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .cnn import extract_cnn_feature, extract_extra_attrib_feature 4 | from .rerank import re_ranking 5 | from .database import FeatureDatabase 6 | 7 | __all__ = [ 8 | 'extract_cnn_feature', 9 | 'extract_extra_attrib_feature', 10 | 're_ranking', 11 | 'FeatureDatabase', 12 | ] 13 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/feature_extraction/cnn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from collections import OrderedDict 3 | 4 | from torch.autograd import Variable 5 | import torch 6 | from ..utils import to_torch 7 | 8 | 9 | def extract_cnn_feature(model, inputs, modules=None): 10 | model.eval() 11 | inputs = to_torch(inputs) 12 | inputs = Variable(inputs, requires_grad=False) 13 | with torch.no_grad(): 14 | if modules is None: 15 | outputs = model(inputs) 16 | outputs = outputs.data.cpu() 17 | return outputs 18 | # Register forward hook for each module 19 | outputs = OrderedDict() 20 | handles = [] 21 | for m in modules: 22 | outputs[id(m)] = None 23 | def func(m, i, o): outputs[id(m)] = o.data.cpu() 24 | handles.append(m.register_forward_hook(func)) 25 | model(inputs) 26 | for h in handles: 27 | h.remove() 28 | return list(outputs.values()) 29 | 30 | 31 | def extract_extra_attrib_feature(model, inputs, modules=None): 32 | model.eval() 33 | imgs, logo_feas, attrib_feas = inputs 34 | imgs, logo_feas, attrib_feas = to_torch(imgs), to_torch(logo_feas), to_torch(attrib_feas) 35 | imgs = Variable(imgs, requires_grad=False) 36 | logo_feas = Variable(logo_feas, requires_grad=False) 37 | attrib_feas = Variable(attrib_feas, requires_grad=False) 38 | if modules is None: 39 | outputs = model(imgs, logo_feas, attrib_feas) 40 | outputs = outputs.data.cpu() 41 | return outputs 42 | # Register forward hook for each module 43 | outputs = OrderedDict() 44 | handles = [] 45 | for m in modules: 46 | outputs[id(m)] = None 47 | def func(m, i, o): outputs[id(m)] = o.data.cpu() 48 | handles.append(m.register_forward_hook(func)) 49 | model(imgs, logo_feas, attrib_feas) 50 | for h in handles: 51 | h.remove() 52 | return list(outputs.values()) 53 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/feature_extraction/database.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import h5py 4 | import numpy as np 5 | from torch.utils.data import Dataset 6 | 7 | 8 | class FeatureDatabase(Dataset): 9 | def __init__(self, *args, **kwargs): 10 | super(FeatureDatabase, self).__init__() 11 | self.fid = h5py.File(*args, **kwargs) 12 | 13 | def __enter__(self): 14 | return self 15 | 16 | def __exit__(self, exc_type, exc_val, exc_tb): 17 | self.close() 18 | 19 | def __getitem__(self, keys): 20 | if isinstance(keys, (tuple, list)): 21 | return [self._get_single_item(k) for k in keys] 22 | return self._get_single_item(keys) 23 | 24 | def _get_single_item(self, key): 25 | return np.asarray(self.fid[key]) 26 | 27 | def __setitem__(self, key, value): 28 | if key in self.fid: 29 | if self.fid[key].shape == value.shape and \ 30 | 
self.fid[key].dtype == value.dtype: 31 | self.fid[key][...] = value 32 | else: 33 | del self.fid[key] 34 | self.fid.create_dataset(key, data=value) 35 | else: 36 | self.fid.create_dataset(key, data=value) 37 | 38 | def __delitem__(self, key): 39 | del self.fid[key] 40 | 41 | def __len__(self): 42 | return len(self.fid) 43 | 44 | def __iter__(self): 45 | return iter(self.fid) 46 | 47 | def flush(self): 48 | self.fid.flush() 49 | 50 | def close(self): 51 | self.fid.close() 52 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/feature_extraction/rerank.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy.spatial.distance import cdist 3 | 4 | """ 5 | API 6 | query_num: number of query images; rows [0, query_num) of original_dist belong to the query set 7 | original_dist: pairwise distance matrix over the query + gallery images, shape = (all_num, all_num) 8 | k1,k2,lambda: parameters, the original paper is (k1=20, k2=6, lambda=0.3) 9 | """ 10 | 11 | #def re_ranking(query_num, original_dist, k1=20, k2=6, lambda_value=0.3): 12 | def re_ranking(query_num, original_dist, k1=20, k2=6, lambda_value=0.5): 13 | all_num = original_dist.shape[0] 14 | gallery_num = original_dist.shape[0] 15 | print ('--- query num: %d, all num: %d'%(query_num, all_num)) 16 | 17 | original_dist = np.transpose(original_dist/np.max(original_dist,axis=0)) 18 | V = np.zeros_like(original_dist).astype(np.float16) 19 | initial_rank = np.argsort(original_dist).astype(np.int32) 20 | 21 | print('starting re_ranking') 22 | for i in range(all_num): 23 | # k-reciprocal neighbors 24 | forward_k_neigh_index = initial_rank[i,:k1+1] 25 | backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1] 26 | fi = np.where(backward_k_neigh_index==i)[0] 27 | k_reciprocal_index = forward_k_neigh_index[fi] 28 | k_reciprocal_expansion_index = k_reciprocal_index 29 | for j in range(len(k_reciprocal_index)): 30 | candidate = k_reciprocal_index[j] 31 | candidate_forward_k_neigh_index = initial_rank[candidate,:int(np.around(k1/2))+1] 32 | candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,:int(np.around(k1/2))+1] 33 | fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0] 34 | candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate] 35 | if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2/3*len(candidate_k_reciprocal_index): 36 | k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index) 37 | 38 | k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index) 39 | weight = np.exp(-original_dist[i,k_reciprocal_expansion_index]) 40 | V[i,k_reciprocal_expansion_index] = weight/np.sum(weight) 41 | original_dist = original_dist[:query_num,] 42 | if k2 != 1: 43 | V_qe = np.zeros_like(V,dtype=np.float16) 44 | for i in range(all_num): 45 | V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0) 46 | V = V_qe 47 | del V_qe 48 | del initial_rank 49 | invIndex = [] 50 | for i in range(gallery_num): 51 | invIndex.append(np.where(V[:,i] != 0)[0]) 52 | 53 | jaccard_dist = np.zeros_like(original_dist,dtype = np.float16) 54 | 55 | for i in range(query_num): 56 | temp_min = np.zeros(shape=[1,gallery_num],dtype=np.float16) 57 | indNonZero = np.where(V[i,:] != 0)[0] 58 | indImages = [] 59 | indImages = [invIndex[ind] for ind in indNonZero] 60 | for j in range(len(indNonZero)): 61 | temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ 
np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]]) 62 | jaccard_dist[i] = 1-temp_min/(2-temp_min) 63 | 64 | final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value 65 | del original_dist 66 | del V 67 | del jaccard_dist 68 | final_dist = final_dist[:query_num,query_num:] 69 | print ('finish re_ranking') 70 | 71 | return final_dist 72 | 73 | if __name__ == '__main__': 74 | qfeat = np.random.rand(1300, 1300) 75 | dist = re_ranking(300, qfeat) 76 | print(dist, dist.shape) 77 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .oim import oim, OIM, OIMLoss 4 | from .triplet import TripletLoss 5 | from .npair import NPairLoss, NPairAngularLoss, BatchHardLoss 6 | from .dualmatch import DualMatch, DualMatchTest, MultiPartNPairLoss 7 | from .crossentropylabelsmooth import CrossEntropyLabelSmooth 8 | from .multi_attribute_loss import MultiAttributeLoss, TypeAttributeLoss 9 | 10 | __all__ = [ 11 | 'oim', 12 | 'OIM', 13 | 'OIMLoss', 14 | 'TripletLoss', 15 | 'NPairLoss', 16 | 'NPairAngularLoss', 17 | 'BatchHardLoss', 18 | 'DualMatch', 19 | 'DualMatchTest', 20 | 'MultiPartNPairLoss', 21 | 'CrossEntropyLabelSmooth', 22 | 'MultiAttributeLoss', 23 | 'TypeAttributeLoss', 24 | ] 25 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/loss/crossentropylabelsmooth.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import math 4 | import numpy as np 5 | import torch 6 | from torch import nn 7 | from torch.autograd import Variable 8 | 9 | class CrossEntropyLabelSmooth(nn.Module): 10 | """Cross entropy loss with label smoothing regularizer. 11 | Reference: 12 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 13 | Equation: y = (1 - epsilon) * y + epsilon / K. 14 | Args: 15 | num_classes (int): number of classes. 16 | epsilon (float): weight. 
17 | """ 18 | def __init__(self, num_classes, epsilon=0.1, use_gpu=True): 19 | super(CrossEntropyLabelSmooth, self).__init__() 20 | self.num_classes = num_classes 21 | self.epsilon = epsilon 22 | self.use_gpu = use_gpu 23 | self.logsoftmax = nn.LogSoftmax() 24 | 25 | def forward(self, inputs, targets): 26 | """ 27 | Args: 28 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 29 | targets: ground truth labels with shape (num_classes) 30 | """ 31 | log_probs = self.logsoftmax(inputs) 32 | targets = Variable(torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)) 33 | if self.use_gpu: targets = targets.cuda() 34 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 35 | loss = (- targets * log_probs).mean(0).sum() 36 | return loss 37 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/loss/dualmatch.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import math 4 | import numpy as np 5 | import torch 6 | from torch import nn 7 | from torch.nn import functional as F 8 | from torch.autograd import Variable 9 | 10 | 11 | class DualMatch(nn.Module): 12 | def __init__(self, gamma=0.1): 13 | super(DualMatch, self).__init__() 14 | self.gamma = gamma 15 | 16 | def _forward(self, x): 17 | n, c, l = x.size() 18 | x_flatten = x.permute(2, 0, 1).contiguous() 19 | x_flatten_trans = x_flatten.transpose(1, 2).contiguous() 20 | sim_mat = x_flatten.bmm(x_flatten_trans) 21 | sim_mat = sim_mat.permute(1, 2, 0).contiguous() 22 | sim_mat = sim_mat.mul(self.gamma) 23 | 24 | return sim_mat 25 | 26 | def forward(self, x): 27 | sim_mat = self._forward(x) 28 | return sim_mat 29 | 30 | 31 | class DualMatchTest(nn.Module): 32 | def __init__(self): 33 | super(DualMatchTest, self).__init__() 34 | 35 | def _dualmatch(self, q, g): 36 | qg_sim = q.sub(g).pow(2).sum(1, keepdim=True) 37 | qg_sim = qg_sim.view(qg_sim.size(0), -1) 38 | sim_output = qg_sim.mean(1, keepdim=True) 39 | return sim_output 40 | 41 | def forward(self, q, g): 42 | sim_output = self._dualmatch(q, g) 43 | return sim_output 44 | 45 | 46 | #class DualMatchTest(nn.Module): 47 | # def __init__(self): 48 | # super(DualMatchTest, self).__init__() 49 | # 50 | # def _dualmatch(self, q, g): 51 | # n, c, l = q.size() 52 | # q_expand = q.view(n, c, l, 1).expand(n, c, l, l).contiguous() 53 | # g_expand = g.view(n, c, 1, l).expand(n, c, l, l).contiguous() 54 | # qg_sim = q_expand.sub(g_expand).pow(2).sum(1, keepdim=True) 55 | # qg_sim = qg_sim.view(n, l, l) 56 | # qg_sim_weight = F.softmax(qg_sim.view(n*l, l).mul(-10)) 57 | # qg_sim_weight = qg_sim_weight.view(n, l, l) 58 | # qg_sim = qg_sim.mul(qg_sim_weight).sum(2, keepdim=True) 59 | # qg_sim = qg_sim.view(n, -1) 60 | # sim_output = qg_sim.mean(1, keepdim=True) 61 | # return sim_output 62 | # 63 | # def forward(self, q, g): 64 | # sim_output = self._dualmatch(q, g) 65 | # return sim_output 66 | 67 | 68 | class MultiPartNPairLoss(nn.Module): 69 | def __init__(self): 70 | super(MultiPartNPairLoss, self).__init__() 71 | 72 | def _forward(self, Ws, targets): 73 | labels = targets.data.cpu().numpy() 74 | sim_mat, score_mat = tuple(Ws.split(1, 0)) 75 | sim_mat = sim_mat.view(sim_mat.size(1), sim_mat.size(2), sim_mat.size(3)) 76 | score_mat = score_mat.view(score_mat.size(1), score_mat.size(2), score_mat.size(3)) 77 | Ws = sim_mat 78 | # Wg = sim_mat.mul(score_mat).sum(2, keepdim=True).view(Ws.size(0), 
Ws.size(1)) 79 | Wg = Ws.sum(2, keepdim=True).view(Ws.size(0), Ws.size(1)) 80 | 81 | part_losses = [] 82 | Ws = Ws.split(1, 2) 83 | for W in Ws: 84 | part_loss = self._calc_loss(W.contiguous().view(W.size(0), W.size(1)), Wg, labels) 85 | part_losses.append(part_loss) 86 | 87 | return part_losses 88 | 89 | def forward(self, Ws, targets): 90 | losses = self._forward(Ws, targets) 91 | return losses 92 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/loss/oim.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | from torch import nn, autograd 6 | 7 | 8 | class OIM(autograd.Function): 9 | def __init__(self, lut, momentum=0.5): 10 | super(OIM, self).__init__() 11 | self.lut = lut 12 | self.momentum = momentum 13 | 14 | def forward(self, inputs, targets): 15 | self.save_for_backward(inputs, targets) 16 | outputs = inputs.mm(self.lut.t()) 17 | return outputs 18 | 19 | def backward(self, grad_outputs): 20 | inputs, targets = self.saved_tensors 21 | grad_inputs = None 22 | if self.needs_input_grad[0]: 23 | grad_inputs = grad_outputs.mm(self.lut) 24 | for x, y in zip(inputs, targets): 25 | self.lut[y] = self.momentum * self.lut[y] + (1. - self.momentum) * x 26 | self.lut[y] /= self.lut[y].norm() 27 | return grad_inputs, None 28 | 29 | 30 | def oim(inputs, targets, lut, momentum=0.5): 31 | return OIM(lut, momentum=momentum)(inputs, targets) 32 | 33 | 34 | class OIMLoss(nn.Module): 35 | def __init__(self, num_features, num_classes, scalar=1.0, momentum=0.5, 36 | weight=None, size_average=True): 37 | super(OIMLoss, self).__init__() 38 | self.num_features = num_features 39 | self.num_classes = num_classes 40 | self.momentum = momentum 41 | self.scalar = scalar 42 | self.weight = weight 43 | self.size_average = size_average 44 | 45 | self.register_buffer('lut', torch.zeros(num_classes, num_features)) 46 | 47 | def forward(self, inputs, targets): 48 | inputs = oim(inputs, targets, self.lut, momentum=self.momentum) 49 | inputs *= self.scalar 50 | loss = F.cross_entropy(inputs, targets, weight=self.weight, 51 | size_average=self.size_average) 52 | return loss, inputs 53 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/metric_learning/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from metric_learn import (ITML_Supervised, LMNN, LSML_Supervised, 4 | SDML_Supervised, NCA, LFDA, RCA_Supervised) 5 | 6 | from .euclidean import Euclidean 7 | from .kissme import KISSME 8 | 9 | __factory = { 10 | 'euclidean': Euclidean, 11 | 'kissme': KISSME, 12 | 'itml': ITML_Supervised, 13 | 'lmnn': LMNN, 14 | 'lsml': LSML_Supervised, 15 | 'sdml': SDML_Supervised, 16 | 'nca': NCA, 17 | 'lfda': LFDA, 18 | 'rca': RCA_Supervised, 19 | } 20 | 21 | 22 | def get_metric(algorithm, *args, **kwargs): 23 | if algorithm not in __factory: 24 | raise KeyError("Unknown metric:", algorithm) 25 | return __factory[algorithm](*args, **kwargs) 26 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/metric_learning/euclidean.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 
5 | 6 | 7 | class Euclidean(BaseMetricLearner): 8 | def __init__(self): 9 | self.M_ = None 10 | 11 | def metric(self): 12 | return self.M_ 13 | 14 | def fit(self, X): 15 | self.M_ = np.eye(X.shape[1]) 16 | self.X_ = X 17 | 18 | def transform(self, X=None): 19 | if X is None: 20 | return self.X_ 21 | return X 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/metric_learning/kissme.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 5 | 6 | 7 | def validate_cov_matrix(M): 8 | M = (M + M.T) * 0.5 9 | k = 0 10 | I = np.eye(M.shape[0]) 11 | while True: 12 | try: 13 | _ = np.linalg.cholesky(M) 14 | break 15 | except np.linalg.LinAlgError: 16 | # Find the nearest positive definite matrix for M. Modified from 17 | # http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd 18 | # Might take several minutes 19 | k += 1 20 | w, v = np.linalg.eig(M) 21 | min_eig = w.min() # w holds the eigenvalues, v the eigenvectors 22 | M += (-min_eig * k * k + np.spacing(min_eig)) * I 23 | return M 24 | 25 | 26 | class KISSME(BaseMetricLearner): 27 | def __init__(self): 28 | self.M_ = None 29 | 30 | def metric(self): 31 | return self.M_ 32 | 33 | def fit(self, X, y=None): 34 | n = X.shape[0] 35 | if y is None: 36 | y = np.arange(n) 37 | X1, X2 = np.meshgrid(np.arange(n), np.arange(n)) 38 | X1, X2 = X1[X1 < X2], X2[X1 < X2] 39 | matches = (y[X1] == y[X2]) 40 | num_matches = matches.sum() 41 | num_non_matches = len(matches) - num_matches 42 | idxa = X1[matches] 43 | idxb = X2[matches] 44 | S = X[idxa] - X[idxb] 45 | C1 = S.transpose().dot(S) / num_matches 46 | p = np.random.choice(num_non_matches, num_matches, replace=False) 47 | idxa = X1[~matches] 48 | idxb = X2[~matches] 49 | idxa = idxa[p] 50 | idxb = idxb[p] 51 | S = X[idxa] - X[idxb] 52 | C0 = S.transpose().dot(S) / num_matches 53 | self.M_ = np.linalg.inv(C1) - np.linalg.inv(C0) 54 | self.M_ = validate_cov_matrix(self.M_) 55 | self.X_ = X 56 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/models/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .resnet import * 4 | from .multi_attribute_3 import * 5 | from .cross_entropy_trihard import * 6 | from .cross_trihard_senet import * 7 | from .cross_trihard_se_resnet import * 8 | from .direction import * 9 | 10 | 11 | __factory = { 12 | 'multi_attribute_3_resnet50':multi_attribute_3_resnet50, 13 | 'cross_entropy_trihard_resnet101':cross_entropy_trihard_resnet101, 14 | 'cross_trihard_senet101':cross_trihard_senet101, 15 | 'cross_trihard_se_resnet152':cross_trihard_se_resnet152, 16 | 'direction_resnet50':direction_resnet50, 17 | } 18 | 19 | 20 | def names(): 21 | return sorted(__factory.keys()) 22 | 23 | 24 | def create(name, *args, **kwargs): 25 | """ 26 | Create a model instance. 27 | 28 | Parameters 29 | ---------- 30 | name : str 31 | Model name. Can be one of the keys registered in __factory, e.g. 32 | 'multi_attribute_3_resnet50' or 'cross_trihard_senet101'. 33 | pretrained : bool, optional 34 | Only applied for 'resnet*' models. If True, will use ImageNet pretrained 35 | model. Default: True 36 | cut_at_pooling : bool, optional 37 | If True, will cut the model before the last global pooling layer and 38 | ignore the remaining kwargs. 
Default: False 39 | num_features : int, optional 40 | If positive, will append a Linear layer after the global pooling layer, 41 | with this number of output units, followed by a BatchNorm layer. 42 | Otherwise these layers will not be appended. Default: 256 for 43 | 'inception', 0 for 'resnet*' 44 | norm : bool, optional 45 | If True, will normalize the feature to be unit L2-norm for each sample. 46 | Otherwise will append a ReLU layer after the above Linear layer if 47 | num_features > 0. Default: False 48 | dropout : float, optional 49 | If positive, will append a Dropout layer with this dropout rate. 50 | Default: 0 51 | num_classes : int, optional 52 | If positive, will append a Linear layer at the end as the classifier 53 | with this number of output units. Default: 0 54 | """ 55 | if name not in __factory: 56 | raise KeyError("Unknown model:", name) 57 | return __factory[name](*args, **kwargs) 58 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/models/cross_trihard_se_resnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | from torch.nn import functional as F 5 | import math 6 | from .senet import * 7 | 8 | __all__ = ['cross_trihard_se_resnet152'] 9 | 10 | class Cross_Trihard_Seresnet(nn.Module): 11 | 12 | def __init__(self, num_classes, num_features): 13 | super(Cross_Trihard_Seresnet, self).__init__() 14 | self.base = SENet(block=SEResNetBottleneck, 15 | layers=[3, 8, 36, 3], 16 | groups=1, 17 | reduction=16, 18 | dropout_p=None, 19 | inplanes=64, 20 | input_3x3=False, 21 | downsample_kernel_size=1, 22 | downsample_padding=0, 23 | last_stride=1) 24 | self.num_classes = num_classes 25 | self.num_features = num_features 26 | 27 | self.dropout = nn.Dropout() 28 | self.dim_red_conv = nn.Conv2d(512 * SEResNetBottleneck.expansion, self.num_features, 1, bias=False) 29 | nn.init.kaiming_normal_(self.dim_red_conv.weight.data, mode='fan_out') 30 | 31 | self.dim_red_bn = nn.BatchNorm2d(self.num_features) 32 | self.dim_red_bn.weight.data.fill_(1) 33 | self.dim_red_bn.bias.data.zero_() 34 | 35 | self.fc = nn.Linear(self.num_features, self.num_classes, True) 36 | nn.init.normal_(self.fc.weight, std=0.001) 37 | nn.init.constant_(self.fc.bias, 0) 38 | 39 | def forward(self, x): 40 | x = self.base(x) 41 | x = nn.functional.avg_pool2d(x, x.size()[2:]) 42 | x = self.dropout(x) 43 | x = self.dim_red_conv(x) 44 | if self.training: 45 | x_g = x 46 | x = self.dim_red_bn(x) 47 | x = x.contiguous().view(-1, self.num_features) 48 | x = self.fc(x) 49 | return x, x_g 50 | else: 51 | x = x.div(x.norm(2, 1, keepdim=True).add(1e-8).expand_as(x)) 52 | x = x.view(x.size(0), -1) 53 | return x 54 | 55 | def cross_trihard_se_resnet152(*args, **kwargs): 56 | print('se_resnet152') 57 | model = Cross_Trihard_Seresnet(*args, **kwargs) 58 | return model 59 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/models/cross_trihard_senet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | from torch.nn import functional as F 5 | import math 6 | from .senet import * 7 | 8 | __all__ = ['cross_trihard_senet101'] 9 | 10 | class Cross_Trihard_Senet(nn.Module): 11 | 12 | def __init__(self, num_classes, num_features): 13 | super(Cross_Trihard_Senet, self).__init__() 14 
| self.base = SENet(block=SEResNeXtBottleneck, 15 | layers=[3, 4, 23, 3], 16 | groups=32, 17 | reduction=16, 18 | dropout_p=None, 19 | inplanes=64, 20 | input_3x3=False, 21 | downsample_kernel_size=1, 22 | downsample_padding=0, 23 | last_stride=1) 24 | self.num_classes = num_classes 25 | self.num_features = num_features 26 | 27 | self.dropout = nn.Dropout() 28 | self.dim_red_conv = nn.Conv2d(512 * SEResNeXtBottleneck.expansion, self.num_features, 1, bias=False) 29 | nn.init.kaiming_normal_(self.dim_red_conv.weight.data, mode='fan_out') 30 | 31 | self.dim_red_bn = nn.BatchNorm2d(self.num_features) 32 | self.dim_red_bn.weight.data.fill_(1) 33 | self.dim_red_bn.bias.data.zero_() 34 | 35 | self.fc = nn.Linear(self.num_features, self.num_classes, True) 36 | nn.init.normal_(self.fc.weight, std=0.001) 37 | nn.init.constant_(self.fc.bias, 0) 38 | 39 | def forward(self, x): 40 | x = self.base(x) 41 | x = nn.functional.avg_pool2d(x, x.size()[2:]) 42 | x = self.dropout(x) 43 | x = self.dim_red_conv(x) 44 | if self.training: 45 | x_g = x 46 | x = self.dim_red_bn(x) 47 | x = x.contiguous().view(-1, self.num_features) 48 | x = self.fc(x) 49 | return x, x_g 50 | else: 51 | x = x.div(x.norm(2, 1, keepdim=True).add(1e-8).expand_as(x)) 52 | x = x.view(x.size(0), -1) 53 | return x 54 | 55 | def cross_trihard_senet101(*args, **kwargs): 56 | model = Cross_Trihard_Senet(*args, **kwargs) 57 | return model 58 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | 6 | def to_numpy(tensor): 7 | if torch.is_tensor(tensor): 8 | return tensor.cpu().numpy() 9 | elif type(tensor).__module__ != 'numpy': 10 | raise ValueError("Cannot convert {} to numpy array" 11 | .format(type(tensor))) 12 | return tensor 13 | 14 | 15 | def to_torch(ndarray): 16 | if type(ndarray).__module__ == 'numpy': 17 | return torch.from_numpy(ndarray) 18 | elif not torch.is_tensor(ndarray): 19 | raise ValueError("Cannot convert {} to torch tensor" 20 | .format(type(ndarray))) 21 | return ndarray 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/data/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .dataset import Dataset 4 | from .attribute_dataset import Attribute_Dataset 5 | from .preprocessor import Preprocessor, Attribute_Preprocessor, Flip_Preprocessor, Direct_Preprocessor 6 | from .sampler import RandomIdentitySampler, RandomIdentityAttributeSampler 7 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/logging.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import sys 4 | 5 | from .osutils import mkdir_if_missing 6 | 7 | 8 | class Logger(object): 9 | def __init__(self, fpath=None): 10 | self.console = sys.stdout 11 | self.file = None 12 | if fpath is not None: 13 | mkdir_if_missing(os.path.dirname(fpath)) 14 | self.file = open(fpath, 'w') 15 | 16 | def __del__(self): 17 | self.close() 18 | 19 | def __enter__(self): 20 | pass 21 | 22 | def __exit__(self, *args): 23 | self.close() 24 | 25 | def write(self, msg): 26 | self.console.write(msg) 27 | if self.file is not None: 28 | 
self.file.write(msg) 29 | 30 | def flush(self): 31 | self.console.flush() 32 | if self.file is not None: 33 | self.file.flush() 34 | os.fsync(self.file.fileno()) 35 | 36 | def close(self): 37 | self.console.close() 38 | if self.file is not None: 39 | self.file.close() 40 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/meters.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class AverageMeter(object): 5 | """Computes and stores the average and current value""" 6 | 7 | def __init__(self): 8 | self.val = 0 9 | self.avg = 0 10 | self.sum = 0 11 | self.count = 0 12 | 13 | def reset(self): 14 | self.val = 0 15 | self.avg = 0 16 | self.sum = 0 17 | self.count = 0 18 | 19 | def update(self, val, n=1): 20 | self.val = val 21 | self.sum += val * n 22 | self.count += n 23 | self.avg = self.sum / self.count 24 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/osutils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import errno 4 | 5 | 6 | def mkdir_if_missing(dir_path): 7 | try: 8 | os.makedirs(dir_path) 9 | except OSError as e: 10 | if e.errno != errno.EEXIST: 11 | raise 12 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/reid/utils/serialization.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import json 3 | import os.path as osp 4 | import shutil 5 | 6 | import torch 7 | from torch.nn import Parameter 8 | 9 | from .osutils import mkdir_if_missing 10 | 11 | 12 | def read_json(fpath): 13 | with open(fpath, 'r') as f: 14 | obj = json.load(f) 15 | return obj 16 | 17 | 18 | def write_json(obj, fpath): 19 | mkdir_if_missing(osp.dirname(fpath)) 20 | with open(fpath, 'w') as f: 21 | json.dump(obj, f, indent=4, separators=(',', ': ')) 22 | 23 | 24 | def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'): 25 | mkdir_if_missing(osp.dirname(fpath)) 26 | torch.save(state, fpath) 27 | if is_best: 28 | shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar')) 29 | 30 | 31 | def load_checkpoint(fpath): 32 | if osp.isfile(fpath): 33 | checkpoint = torch.load(fpath) 34 | print("=> Loaded checkpoint '{}'".format(fpath)) 35 | return checkpoint 36 | else: 37 | raise ValueError("=> No checkpoint found at '{}'".format(fpath)) 38 | 39 | 40 | def copy_state_dict(state_dict, model, strip=None): 41 | tgt_state = model.state_dict() 42 | copied_names = set() 43 | for name, param in state_dict.items(): 44 | if strip is not None and name.startswith(strip): 45 | name = name[len(strip):] 46 | if name not in tgt_state: 47 | continue 48 | if isinstance(param, Parameter): 49 | param = param.data 50 | if param.size() != tgt_state[name].size(): 51 | print('mismatch:', name, param.size(), tgt_state[name].size()) 52 | continue 53 | tgt_state[name].copy_(param) 54 | copied_names.add(name) 55 | 56 | missing = set(tgt_state.keys()) - copied_names 57 | if len(missing) > 0: 58 | print("missing keys in state_dict:", missing) 59 | 60 | return model 61 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/run_test_attribute.sh: 
-------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=8,9 \ 2 | python examples/test_extract_attribute.py \ 3 | -a multi_attribute_3_resnet50 \ 4 | -b 32 \ 5 | -d aicity_attribute \ 6 | --combine-trainval \ 7 | --weights ./logs/attribute_model/the_trained_model_weights_file \ 8 | --logs-dir ./logs/attribute_model \ 9 | --lr 0.01 \ 10 | --weight-decay 0.0005 \ 11 | --epochs 100 \ 12 | --step_size 70 \ 13 | --lr_mult 1.0 \ 14 | --metric_loss_weight 0.02 \ 15 | --height 288 \ 16 | --width 384 \ 17 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/train_direction.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=14,15 \ 2 | python examples/train_direction.py \ 3 | -a direction_resnet50 \ 4 | -b 32 \ 5 | --weights ./pretrain_models/resnet50-19c8e357.pth \ 6 | --logs-dir ./logs/direction_output/ \ 7 | --optimizer 1 \ 8 | --lr 1e-3 \ 9 | --weight-decay 0.0005 \ 10 | --epochs 20 \ 11 | --step_size 50 \ 12 | --step_size2 70 \ 13 | --step_size3 90 \ 14 | --lr_mult 1.0 \ 15 | --big_height 310 \ 16 | --big_width 414 \ 17 | --target_height 288 \ 18 | --target_width 384 \ 19 | --epoch_inter 2 \ 20 | --start_save 0 \ 21 | --dense_evaluate 100 \ 22 | --warm_up_ep 10 \ 23 | 24 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/train_multi_attribute.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=12,13 \ 2 | python examples/multi_attribute.py \ 3 | -a multi_attribute_3_resnet50 \ 4 | -b 64 \ 5 | -d aicity_attribute \ 6 | --combine-trainval \ 7 | --weights ./pretrain_models/resnet50-19c8e357.pth \ 8 | --logs-dir ./logs/adam_new_attribute/ \ 9 | --optimizer 1 \ 10 | --lr 0.01 \ 11 | --weight-decay 0.0005 \ 12 | --epochs 800 \ 13 | --step_size 400 \ 14 | --step_size2 700 \ 15 | --step_size3 750 \ 16 | --lr_mult 1.0 \ 17 | --metric_loss_weight 0.02 \ 18 | --big_height 310 \ 19 | --big_width 414 \ 20 | --target_height 288 \ 21 | --target_width 384 \ 22 | --epoch_inter 100 \ 23 | --start_save 0 \ 24 | --dense_evaluate 800 \ 25 | --warm_up_ep 100 \ 26 | --is_cls 1 \ 27 | 28 | -------------------------------------------------------------------------------- /Track2(ReID)/part1_model/train_reid_model.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=4,5,6,7 \ 2 | python examples/cross_trihard_with_crop.py \ 3 | -a cross_trihard_senet101 \ 4 | -b 128 \ 5 | -d complete_aicity_car196 \ 6 | --combine-trainval \ 7 | --weights ./pretrain_models/se_resnext101_32x4d-3b2fe3d8.pth \ 8 | --logs-dir ./output/cross_trihard_senet101/ \ 9 | --optimizer 1 \ 10 | --lr 3e-4 \ 11 | --weight-decay 0.0005 \ 12 | --epochs 100 \ 13 | --step_size 50 \ 14 | --step_size2 70 \ 15 | --step_size3 90 \ 16 | --lr_mult 1.0 \ 17 | --metric_loss_weight 0.02 \ 18 | --big_height 310 \ 19 | --big_width 414 \ 20 | --target_height 288 \ 21 | --target_width 384 \ 22 | --epoch_inter 10 \ 23 | --start_save 20 \ 24 | --dense_evaluate 100 \ 25 | --warm_up_ep 20 \ 26 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/readme.md: -------------------------------------------------------------------------------- 1 | This part contains the source codes of training three vehicle reid models including resnet50_sac, hrnet and MGN. 
As for feature extraction, the related code in 'part1_model' can be used. 2 | 3 | ## Preparing the dataset 4 | The dataset definition is the same as the one in [open-reid](https://github.com/Cysu/open-reid). 5 | Users should define their own datasets in the './reid/datasets' folder. 6 | The data should be put into the folder './examples/data'. 7 | 8 | ## Training 9 | ``` 10 | sh run.sh 11 | ``` 12 | The example code trains the resnet50_sac network (proposed in [self-attention learning for person re-identification](http://www.bmva.org/bmvc/2018/contents/papers/0613.pdf)); other supported networks can be found in the './reid/models' folder. 13 | The meanings of the key parameters are explained in './examples/train.py'. 14 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import datasets 4 | from . import evaluation_metrics 5 | from . import feature_extraction 6 | from . import loss 7 | from . import metric_learning 8 | from . import models 9 | from . import utils 10 | from . import dist_metric 11 | from . import evaluators 12 | from . import trainers 13 | 14 | __version__ = '1.0.0' 15 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import warnings 3 | 4 | from .cuhk01 import CUHK01 5 | from .cuhk03 import CUHK03 6 | from .dukemtmc import DukeMTMC 7 | from .market1501 import Market1501 8 | from .viper import VIPeR 9 | from .small_vehicle import Small_Vehicle 10 | from .aicity_car196 import Aicity_Car196 11 | from .complete_aicity_car196 import Complete_Aicity_Car196 12 | from .gao_crop_train import Gao_Crop_Train 13 | from .new_complete_aicity_car196 import New_Complete_Aicity_Car196 14 | from .aicity_attribute import Aicity_Attribute 15 | from .new_train import New_Train 16 | 17 | 18 | __factory = { 19 | 'viper': VIPeR, 20 | 'cuhk01': CUHK01, 21 | 'cuhk03': CUHK03, 22 | 'market1501': Market1501, 23 | 'dukemtmc': DukeMTMC, 24 | 'small_vehicle': Small_Vehicle, 25 | 'aicity_car196': Aicity_Car196, 26 | 'complete_aicity_car196': Complete_Aicity_Car196, 27 | 'gao_crop_train': Gao_Crop_Train, 28 | 'new_complete_aicity_car196': New_Complete_Aicity_Car196, 29 | 'aicity_attribute': Aicity_Attribute, 30 | 'new_train': New_Train, 31 | } 32 | 33 | 34 | def names(): 35 | return sorted(__factory.keys()) 36 | 37 | 38 | def create(name, root, *args, **kwargs): 39 | """ 40 | Create a dataset instance. 41 | 42 | Parameters 43 | ---------- 44 | name : str 45 | The dataset name. Can be one of the keys registered in __factory, 46 | e.g. 'market1501' or 'aicity_car196'. 47 | root : str 48 | The path to the dataset directory. 49 | split_id : int, optional 50 | The index of data split. Default: 0 51 | num_val : int or float, optional 52 | When int, it means the number of validation identities. When float, 53 | it means the proportion of validation to all the trainval. Default: 100 54 | download : bool, optional 55 | If True, will download the dataset. Default: False 56 | """ 57 | if name not in __factory: 58 | raise KeyError("Unknown dataset:", name) 59 | return __factory[name](root, *args, **kwargs) 60 | 61 | 62 | def get_dataset(name, root, *args, **kwargs): 63 | warnings.warn("get_dataset is deprecated. 
Use create instead.") 64 | return create(name, root, *args, **kwargs) 65 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/aicity_car196.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | from ..utils.osutils import mkdir_if_missing 6 | from ..utils.serialization import write_json 7 | 8 | 9 | class Aicity_Car196(Dataset): 10 | url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view' 11 | md5 = '65005ab7d12ec1c44de4eeafe813e68a' 12 | 13 | def __init__(self, root, split_id=0, num_val=100, download=True): 14 | super(Aicity_Car196, self).__init__(root, split_id=split_id) 15 | 16 | if download: 17 | self.download() 18 | 19 | if not self._check_integrity(): 20 | raise RuntimeError("Dataset not found or corrupted. " + 21 | "You can use download=True to download it.") 22 | 23 | self.load(num_val) 24 | 25 | def download(self): 26 | if self._check_integrity(): 27 | print("Files already downloaded and verified") 28 | return 29 | 30 | import re 31 | import hashlib 32 | import shutil 33 | from glob import glob 34 | from zipfile import ZipFile 35 | 36 | raw_dir = osp.join(self.root, 'raw') 37 | mkdir_if_missing(raw_dir) 38 | 39 | # Download the raw zip file 40 | fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip') 41 | if osp.isfile(fpath) and \ 42 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 43 | print("Using downloaded file: " + fpath) 44 | else: 45 | raise RuntimeError("Please download the dataset manually from {} " 46 | "to {}".format(self.url, fpath)) 47 | 48 | # Extract the file 49 | exdir = osp.join(raw_dir, 'Market-1501-v15.09.15') 50 | if not osp.isdir(exdir): 51 | print("Extracting zip file") 52 | with ZipFile(fpath) as z: 53 | z.extractall(path=raw_dir) 54 | 55 | # Format 56 | images_dir = osp.join(self.root, 'images') 57 | mkdir_if_missing(images_dir) 58 | 59 | # 1501 identities (+1 for background) with 6 camera views each 60 | identities = [[[] for _ in range(6)] for _ in range(1502)] 61 | 62 | def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')): 63 | fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg'))) 64 | pids = set() 65 | for fpath in fpaths: 66 | fname = osp.basename(fpath) 67 | pid, cam = map(int, pattern.search(fname).groups()) 68 | if pid == -1: continue # junk images are just ignored 69 | assert 0 <= pid <= 1501 # pid == 0 means background 70 | assert 1 <= cam <= 6 71 | cam -= 1 72 | pids.add(pid) 73 | fname = ('{:08d}_{:02d}_{:04d}.jpg' 74 | .format(pid, cam, len(identities[pid][cam]))) 75 | identities[pid][cam].append(fname) 76 | shutil.copy(fpath, osp.join(images_dir, fname)) 77 | return pids 78 | 79 | trainval_pids = register('bounding_box_train') 80 | gallery_pids = register('bounding_box_test') 81 | query_pids = register('query') 82 | assert query_pids <= gallery_pids 83 | assert trainval_pids.isdisjoint(gallery_pids) 84 | 85 | # Save meta information into a json file 86 | meta = {'name': 'Aicity_Car196', 'shot': 'multiple', 'num_cameras': 41, 87 | 'identities': identities} 88 | write_json(meta, osp.join(self.root, 'meta.json')) 89 | 90 | # Save the only training / test split 91 | splits = [{ 92 | 'trainval': sorted(list(trainval_pids)), 93 | 'query': sorted(list(query_pids)), 94 | 'gallery': sorted(list(gallery_pids))}] 95 | write_json(splits, osp.join(self.root, 'splits.json')) 96 | 
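A minimal usage sketch of the dataset factory above (illustrative only, not a file from the repository): the root path below is an assumption, and `download=True` still requires the raw zip to already sit in `<root>/raw`, since the `download()` methods above only verify and extract it.

```python
from reid import datasets

# All registered dataset keys, e.g. 'aicity_car196', 'market1501', ...
print(datasets.names())

# create() runs download()/formatting on first use and then load(num_val);
# the instance then exposes trainval/query/gallery lists of
# (filename, pid, camid) tuples referring to images under <root>/images.
dataset = datasets.create('aicity_car196', './examples/data/aicity_car196',
                          split_id=0, num_val=100, download=True)
print(len(dataset.trainval), 'trainval samples')
```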
-------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/cuhk01.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | import numpy as np 5 | 6 | from ..utils.data import Dataset 7 | from ..utils.osutils import mkdir_if_missing 8 | from ..utils.serialization import write_json 9 | 10 | 11 | class CUHK01(Dataset): 12 | url = 'https://docs.google.com/spreadsheet/viewform?formkey=dF9pZ1BFZkNiMG1oZUdtTjZPalR0MGc6MA' 13 | md5 = 'e6d55c0da26d80cda210a2edeb448e98' 14 | 15 | def __init__(self, root, split_id=0, num_val=100, download=True): 16 | super(CUHK01, self).__init__(root, split_id=split_id) 17 | 18 | if download: 19 | self.download() 20 | 21 | if not self._check_integrity(): 22 | raise RuntimeError("Dataset not found or corrupted. " + 23 | "You can use download=True to download it.") 24 | 25 | self.load(num_val) 26 | 27 | def download(self): 28 | if self._check_integrity(): 29 | print("Files already downloaded and verified") 30 | return 31 | 32 | import hashlib 33 | import shutil 34 | from glob import glob 35 | from zipfile import ZipFile 36 | 37 | raw_dir = osp.join(self.root, 'raw') 38 | mkdir_if_missing(raw_dir) 39 | 40 | # Download the raw zip file 41 | fpath = osp.join(raw_dir, 'CUHK01.zip') 42 | if osp.isfile(fpath) and \ 43 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 44 | print("Using downloaded file: " + fpath) 45 | else: 46 | raise RuntimeError("Please download the dataset manually from {} " 47 | "to {}".format(self.url, fpath)) 48 | 49 | # Extract the file 50 | exdir = osp.join(raw_dir, 'campus') 51 | if not osp.isdir(exdir): 52 | print("Extracting zip file") 53 | with ZipFile(fpath) as z: 54 | z.extractall(path=raw_dir) 55 | 56 | # Format 57 | images_dir = osp.join(self.root, 'images') 58 | mkdir_if_missing(images_dir) 59 | 60 | identities = [[[] for _ in range(2)] for _ in range(971)] 61 | 62 | files = sorted(glob(osp.join(exdir, '*.png'))) 63 | for fpath in files: 64 | fname = osp.basename(fpath) 65 | pid, cam = int(fname[:4]), int(fname[4:7]) 66 | assert 1 <= pid <= 971 67 | assert 1 <= cam <= 4 68 | pid, cam = pid - 1, (cam - 1) // 2 69 | fname = ('{:08d}_{:02d}_{:04d}.png' 70 | .format(pid, cam, len(identities[pid][cam]))) 71 | identities[pid][cam].append(fname) 72 | shutil.copy(fpath, osp.join(images_dir, fname)) 73 | 74 | # Save meta information into a json file 75 | meta = {'name': 'cuhk01', 'shot': 'multiple', 'num_cameras': 2, 76 | 'identities': identities} 77 | write_json(meta, osp.join(self.root, 'meta.json')) 78 | 79 | # Randomly create ten training and test split 80 | num = len(identities) 81 | splits = [] 82 | for _ in range(10): 83 | pids = np.random.permutation(num).tolist() 84 | trainval_pids = sorted(pids[:num // 2]) 85 | test_pids = sorted(pids[num // 2:]) 86 | split = {'trainval': trainval_pids, 87 | 'query': test_pids, 88 | 'gallery': test_pids} 89 | splits.append(split) 90 | write_json(splits, osp.join(self.root, 'splits.json')) 91 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/market1501.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | from ..utils.osutils import mkdir_if_missing 6 | from ..utils.serialization 
import write_json 7 | 8 | 9 | class Market1501(Dataset): 10 | url = 'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view' 11 | md5 = '65005ab7d12ec1c44de4eeafe813e68a' 12 | 13 | def __init__(self, root, split_id=0, num_val=100, download=True): 14 | super(Market1501, self).__init__(root, split_id=split_id) 15 | 16 | if download: 17 | self.download() 18 | 19 | if not self._check_integrity(): 20 | raise RuntimeError("Dataset not found or corrupted. " + 21 | "You can use download=True to download it.") 22 | 23 | self.load(num_val) 24 | 25 | def download(self): 26 | if self._check_integrity(): 27 | print("Files already downloaded and verified") 28 | return 29 | 30 | import re 31 | import hashlib 32 | import shutil 33 | from glob import glob 34 | from zipfile import ZipFile 35 | 36 | raw_dir = osp.join(self.root, 'raw') 37 | mkdir_if_missing(raw_dir) 38 | 39 | # Download the raw zip file 40 | fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip') 41 | if osp.isfile(fpath) and \ 42 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 43 | print("Using downloaded file: " + fpath) 44 | else: 45 | raise RuntimeError("Please download the dataset manually from {} " 46 | "to {}".format(self.url, fpath)) 47 | 48 | # Extract the file 49 | exdir = osp.join(raw_dir, 'Market-1501-v15.09.15') 50 | if not osp.isdir(exdir): 51 | print("Extracting zip file") 52 | with ZipFile(fpath) as z: 53 | z.extractall(path=raw_dir) 54 | 55 | # Format 56 | images_dir = osp.join(self.root, 'images') 57 | mkdir_if_missing(images_dir) 58 | 59 | # 1501 identities (+1 for background) with 6 camera views each 60 | identities = [[[] for _ in range(6)] for _ in range(1502)] 61 | 62 | def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')): 63 | fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg'))) 64 | pids = set() 65 | for fpath in fpaths: 66 | fname = osp.basename(fpath) 67 | pid, cam = map(int, pattern.search(fname).groups()) 68 | if pid == -1: continue # junk images are just ignored 69 | assert 0 <= pid <= 1501 # pid == 0 means background 70 | assert 1 <= cam <= 6 71 | cam -= 1 72 | pids.add(pid) 73 | fname = ('{:08d}_{:02d}_{:04d}.jpg' 74 | .format(pid, cam, len(identities[pid][cam]))) 75 | identities[pid][cam].append(fname) 76 | shutil.copy(fpath, osp.join(images_dir, fname)) 77 | return pids 78 | 79 | trainval_pids = register('bounding_box_train') 80 | gallery_pids = register('bounding_box_test') 81 | query_pids = register('query') 82 | assert query_pids <= gallery_pids 83 | assert trainval_pids.isdisjoint(gallery_pids) 84 | 85 | # Save meta information into a json file 86 | meta = {'name': 'Market1501', 'shot': 'multiple', 'num_cameras': 6, 87 | 'identities': identities} 88 | write_json(meta, osp.join(self.root, 'meta.json')) 89 | 90 | # Save the only training / test split 91 | splits = [{ 92 | 'trainval': sorted(list(trainval_pids)), 93 | 'query': sorted(list(query_pids)), 94 | 'gallery': sorted(list(gallery_pids))}] 95 | write_json(splits, osp.join(self.root, 'splits.json')) 96 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/new_train.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | from ..utils.osutils import mkdir_if_missing 6 | from ..utils.serialization import write_json 7 | 8 | 9 | class New_Train(Dataset): 10 | url = 
'https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view' 11 | md5 = '65005ab7d12ec1c44de4eeafe813e68a' 12 | 13 | def __init__(self, root, split_id=0, num_val=100, download=True): 14 | super(New_Train, self).__init__(root, split_id=split_id) 15 | 16 | if download: 17 | self.download() 18 | 19 | if not self._check_integrity(): 20 | raise RuntimeError("Dataset not found or corrupted. " + 21 | "You can use download=True to download it.") 22 | 23 | self.load(num_val) 24 | 25 | def download(self): 26 | if self._check_integrity(): 27 | print("Files already downloaded and verified") 28 | return 29 | 30 | import re 31 | import hashlib 32 | import shutil 33 | from glob import glob 34 | from zipfile import ZipFile 35 | 36 | raw_dir = osp.join(self.root, 'raw') 37 | mkdir_if_missing(raw_dir) 38 | 39 | # Download the raw zip file 40 | fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip') 41 | if osp.isfile(fpath) and \ 42 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 43 | print("Using downloaded file: " + fpath) 44 | else: 45 | raise RuntimeError("Please download the dataset manually from {} " 46 | "to {}".format(self.url, fpath)) 47 | 48 | # Extract the file 49 | exdir = osp.join(raw_dir, 'Market-1501-v15.09.15') 50 | if not osp.isdir(exdir): 51 | print("Extracting zip file") 52 | with ZipFile(fpath) as z: 53 | z.extractall(path=raw_dir) 54 | 55 | # Format 56 | images_dir = osp.join(self.root, 'images') 57 | mkdir_if_missing(images_dir) 58 | 59 | # 1501 identities (+1 for background) with 6 camera views each 60 | identities = [[[] for _ in range(6)] for _ in range(1502)] 61 | 62 | def register(subdir, pattern=re.compile(r'([-\d]+)_c(\d)')): 63 | fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg'))) 64 | pids = set() 65 | for fpath in fpaths: 66 | fname = osp.basename(fpath) 67 | pid, cam = map(int, pattern.search(fname).groups()) 68 | if pid == -1: continue # junk images are just ignored 69 | assert 0 <= pid <= 1501 # pid == 0 means background 70 | assert 1 <= cam <= 6 71 | cam -= 1 72 | pids.add(pid) 73 | fname = ('{:08d}_{:02d}_{:04d}.jpg' 74 | .format(pid, cam, len(identities[pid][cam]))) 75 | identities[pid][cam].append(fname) 76 | shutil.copy(fpath, osp.join(images_dir, fname)) 77 | return pids 78 | 79 | trainval_pids = register('bounding_box_train') 80 | gallery_pids = register('bounding_box_test') 81 | query_pids = register('query') 82 | assert query_pids <= gallery_pids 83 | assert trainval_pids.isdisjoint(gallery_pids) 84 | 85 | # Save meta information into a json file 86 | meta = {'name': 'New_Train', 'shot': 'multiple', 'num_cameras': 41, 87 | 'identities': identities} 88 | write_json(meta, osp.join(self.root, 'meta.json')) 89 | 90 | # Save the only training / test split 91 | splits = [{ 92 | 'trainval': sorted(list(trainval_pids)), 93 | 'query': sorted(list(query_pids)), 94 | 'gallery': sorted(list(gallery_pids))}] 95 | write_json(splits, osp.join(self.root, 'splits.json')) 96 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/datasets/viper.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | import numpy as np 5 | 6 | from ..utils.data import Dataset 7 | from ..utils.osutils import mkdir_if_missing 8 | from ..utils.serialization import write_json 9 | 10 | 11 | class VIPeR(Dataset): 12 | url = 'http://users.soe.ucsc.edu/~manduchi/VIPeR.v1.0.zip' 13 | md5 = 
'1c2d9fc1cc800332567a0da25a1ce68c' 14 | 15 | def __init__(self, root, split_id=0, num_val=100, download=True): 16 | super(VIPeR, self).__init__(root, split_id=split_id) 17 | 18 | if download: 19 | self.download() 20 | 21 | if not self._check_integrity(): 22 | raise RuntimeError("Dataset not found or corrupted. " + 23 | "You can use download=True to download it.") 24 | 25 | self.load(num_val) 26 | 27 | def download(self): 28 | if self._check_integrity(): 29 | print("Files already downloaded and verified") 30 | return 31 | 32 | import hashlib 33 | from glob import glob 34 | from scipy.misc import imsave, imread 35 | from six.moves import urllib 36 | from zipfile import ZipFile 37 | 38 | raw_dir = osp.join(self.root, 'raw') 39 | mkdir_if_missing(raw_dir) 40 | 41 | # Download the raw zip file 42 | fpath = osp.join(raw_dir, 'VIPeR.v1.0.zip') 43 | if osp.isfile(fpath) and \ 44 | hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5: 45 | print("Using downloaded file: " + fpath) 46 | else: 47 | print("Downloading {} to {}".format(self.url, fpath)) 48 | urllib.request.urlretrieve(self.url, fpath) 49 | 50 | # Extract the file 51 | exdir = osp.join(raw_dir, 'VIPeR') 52 | if not osp.isdir(exdir): 53 | print("Extracting zip file") 54 | with ZipFile(fpath) as z: 55 | z.extractall(path=raw_dir) 56 | 57 | # Format 58 | images_dir = osp.join(self.root, 'images') 59 | mkdir_if_missing(images_dir) 60 | cameras = [sorted(glob(osp.join(exdir, 'cam_a', '*.bmp'))), 61 | sorted(glob(osp.join(exdir, 'cam_b', '*.bmp')))] 62 | assert len(cameras[0]) == len(cameras[1]) 63 | identities = [] 64 | for pid, (cam1, cam2) in enumerate(zip(*cameras)): 65 | images = [] 66 | # view-0 67 | fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, 0, 0) 68 | imsave(osp.join(images_dir, fname), imread(cam1)) 69 | images.append([fname]) 70 | # view-1 71 | fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, 1, 0) 72 | imsave(osp.join(images_dir, fname), imread(cam2)) 73 | images.append([fname]) 74 | identities.append(images) 75 | 76 | # Save meta information into a json file 77 | meta = {'name': 'VIPeR', 'shot': 'single', 'num_cameras': 2, 78 | 'identities': identities} 79 | write_json(meta, osp.join(self.root, 'meta.json')) 80 | 81 | # Randomly create ten training and test split 82 | num = len(identities) 83 | splits = [] 84 | for _ in range(10): 85 | pids = np.random.permutation(num).tolist() 86 | trainval_pids = sorted(pids[:num // 2]) 87 | test_pids = sorted(pids[num // 2:]) 88 | split = {'trainval': trainval_pids, 89 | 'query': test_pids, 90 | 'gallery': test_pids} 91 | splits.append(split) 92 | write_json(splits, osp.join(self.root, 'splits.json')) 93 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/dist_metric.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | from .evaluators import extract_features 6 | from .metric_learning import get_metric 7 | 8 | 9 | class DistanceMetric(object): 10 | def __init__(self, algorithm='euclidean', *args, **kwargs): 11 | super(DistanceMetric, self).__init__() 12 | self.algorithm = algorithm 13 | self.metric = get_metric(algorithm, *args, **kwargs) 14 | 15 | def train(self, model, data_loader): 16 | if self.algorithm == 'euclidean': return 17 | features, labels = extract_features(model, data_loader) 18 | features = torch.stack(features.values()).numpy() 19 | labels = torch.Tensor(list(labels.values())).numpy() 20 | 
self.metric.fit(features, labels) 21 | 22 | def transform(self, X): 23 | if torch.is_tensor(X): 24 | X = X.numpy() 25 | X = self.metric.transform(X) 26 | X = torch.from_numpy(X) 27 | else: 28 | X = self.metric.transform(X) 29 | return X 30 | 31 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/evaluation_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .classification import accuracy 4 | from .ranking import cmc, mean_ap 5 | 6 | __all__ = [ 7 | 'accuracy', 8 | 'cmc', 9 | 'mean_ap', 10 | ] 11 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/evaluation_metrics/classification.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from ..utils import to_torch 4 | 5 | 6 | def accuracy(output, target, topk=(1,)): 7 | output, target = to_torch(output), to_torch(target) 8 | maxk = max(topk) 9 | batch_size = target.size(0) 10 | 11 | _, pred = output.topk(maxk, 1, True, True) 12 | pred = pred.t() 13 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 14 | 15 | ret = [] 16 | for k in topk: 17 | correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True) 18 | ret.append(correct_k.mul_(1. / batch_size)) 19 | return ret 20 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/feature_extraction/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .cnn import extract_cnn_feature 4 | 5 | 6 | __all__ = [ 7 | 'extract_cnn_feature', 8 | ] 9 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/feature_extraction/cnn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from collections import OrderedDict 3 | import torch 4 | from torch.autograd import Variable 5 | 6 | from ..utils import to_torch 7 | 8 | def extract_cnn_feature(model, inputs): 9 | model.eval() 10 | with torch.no_grad(): 11 | inputs = Variable(inputs).cuda() 12 | outputs = model(inputs) 13 | final_fea = outputs[-1] 14 | return final_fea.data.cpu() 15 | 16 | 17 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/feature_extraction/database.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import h5py 4 | import numpy as np 5 | from torch.utils.data import Dataset 6 | 7 | 8 | class FeatureDatabase(Dataset): 9 | def __init__(self, *args, **kwargs): 10 | super(FeatureDatabase, self).__init__() 11 | self.fid = h5py.File(*args, **kwargs) 12 | 13 | def __enter__(self): 14 | return self 15 | 16 | def __exit__(self, exc_type, exc_val, exc_tb): 17 | self.close() 18 | 19 | def __getitem__(self, keys): 20 | if isinstance(keys, (tuple, list)): 21 | return [self._get_single_item(k) for k in keys] 22 | return self._get_single_item(keys) 23 | 24 | def _get_single_item(self, key): 25 | return np.asarray(self.fid[key]) 26 | 27 | def __setitem__(self, key, value): 28 | if key in self.fid: 29 | if self.fid[key].shape == value.shape and \ 30 | self.fid[key].dtype == value.dtype: 31 | self.fid[key][...] 
= value 32 | else: 33 | del self.fid[key] 34 | self.fid.create_dataset(key, data=value) 35 | else: 36 | self.fid.create_dataset(key, data=value) 37 | 38 | def __delitem__(self, key): 39 | del self.fid[key] 40 | 41 | def __len__(self): 42 | return len(self.fid) 43 | 44 | def __iter__(self): 45 | return iter(self.fid) 46 | 47 | def flush(self): 48 | self.fid.flush() 49 | 50 | def close(self): 51 | self.fid.close() 52 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .mgn_loss import MGN_loss 4 | from .xentropy_sac import XentropyLoss_SAC 5 | 6 | __all__ = ['MGN_loss', 'XentropyLoss_SAC'] 7 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/loss/mgn_loss.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | from torch.autograd import Variable 7 | import pdb 8 | from ..evaluation_metrics import accuracy 9 | 10 | class MGN_loss(nn.Module): 11 | def __init__(self, margin1=1.2, num_instances=4, alpha=1.0, gamma=1.0, theta=0.1, has_trip=False): 12 | super(MGN_loss, self).__init__() 13 | self.margin1 = margin1 14 | self.num_instances = num_instances 15 | self.alpha = alpha 16 | # self.beta = beta 17 | self.gamma = gamma 18 | self.theta = theta 19 | self.has_trip = has_trip 20 | 21 | print(self.alpha, self.gamma, self.theta) 22 | # self.xentropy_loss = nn.CrossEntropyLoss() 23 | 24 | 25 | def forward(self, inputs, targets): 26 | softmax_out = inputs[0] 27 | trip_out = inputs[1] 28 | 29 | sf_num = len(softmax_out) 30 | total_cls_loss = 0 31 | for i in range(0, sf_num): 32 | # total_cls_loss += self.xentropy_loss(softmax_out[i],targets) 33 | total_cls_loss += F.cross_entropy(softmax_out[i], targets) 34 | 35 | trip_num = len(trip_out) 36 | total_trip_loss = 0 37 | if self.has_trip: 38 | for i in range(0, trip_num): 39 | input_fea = trip_out[i] 40 | n = input_fea.size(0) 41 | num_person = n // self.num_instances 42 | # Compute pairwise distance, replace by the official when merged 43 | dist = torch.pow(input_fea, 2).sum(1).expand(n, n) 44 | dist = dist + dist.t() 45 | dist.addmm_(input_fea, input_fea.t(), beta=1, alpha=-2)  # keyword form; the positional (1, -2, ...) signature is deprecated 46 | dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability 47 | # For each anchor, find the hardest positive and negative 48 | mask = targets.expand(n, n).eq(targets.expand(n, n).t()) 49 | 50 | dist_ap, dist_an = [], [] 51 | for j in range(n):  # j, not i: avoid shadowing the outer loop variable 52 | hard_positive = dist[j][mask[j]].max() 53 | dist_ap.append(hard_positive.unsqueeze(0))  # keep 1-dim so torch.cat works on 0-dim results 54 | 55 | hard_negative = dist[j][mask[j] == 0].min() 56 | dist_an.append(hard_negative.unsqueeze(0)) 57 | dist_ap = torch.cat(dist_ap) 58 | dist_an = torch.cat(dist_an) 59 | # Compute ranking hinge loss 60 | 61 | y = dist_an.data.new() 62 | y.resize_as_(dist_an.data) 63 | y.fill_(1) 64 | y = Variable(y) 65 | temp_trip_loss = F.margin_ranking_loss(dist_an, dist_ap, y, self.margin1) 66 | total_trip_loss += temp_trip_loss 67 | loss = self.gamma * total_cls_loss + self.alpha * total_trip_loss 68 | accuracy_val, = accuracy(softmax_out[0].data, targets.data) 69 | prec = accuracy_val[0] 70 | return loss, prec 71 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/loss/xentropy_sac.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | from torch import nn 5 | from torch.autograd import Variable 6 | import pdb 7 | from ..evaluation_metrics import accuracy 8 | 9 | class XentropyLoss_SAC(nn.Module): 10 | def __init__(self,alpha=1.0,gamma=1.0,theta=0.0): 11 | super(XentropyLoss_SAC, self).__init__() 12 | self.xentropy_loss = nn.CrossEntropyLoss() 13 | self.alpha = alpha 14 | self.gamma = gamma 15 | self.theta = theta 16 | 17 | 18 | def forward(self, inputs, targets): 19 | inputs_fea=inputs[0] 20 | l2_side = inputs[2] 21 | l3_side = inputs[3] 22 | l4_side = inputs[4] 23 | 24 | xentropy = self.xentropy_loss(inputs_fea,targets) 25 | # pdb.set_trace() 26 | loss42 = torch.sqrt((l4_side-l2_side).pow(2).sum()) 27 | loss43 = torch.sqrt((l4_side-l3_side).pow(2).sum()) 28 | loss = self.theta*(loss42+loss43)+ self.gamma *xentropy 29 | accuracy_val, = accuracy(inputs_fea.data, targets.data) 30 | prec = accuracy_val[0] 31 | 32 | return loss, prec 33 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | @author: liaoxingyu 4 | @contact: sherlockliao01@gmail.com 5 | """ 6 | from bisect import bisect_right 7 | import torch 8 | 9 | 10 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 11 | # separating MultiStepLR with WarmupLR 12 | # but the current LRScheduler design doesn't allow it 13 | 14 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 15 | def __init__( 16 | self, 17 | optimizer, 18 | milestones, 19 | gamma=0.1, 20 | warmup_factor=1.0 / 3, 21 | warmup_iters=500, 22 | warmup_method="linear", 23 | last_epoch=-1, 24 | ): 25 | if not list(milestones) == sorted(milestones): 26 | raise ValueError( 27 | "Milestones should be a list of" " increasing integers. 
Got {}", 28 | milestones, 29 | ) 30 | 31 | if warmup_method not in ("constant", "linear"): 32 | raise ValueError( 33 | "Only 'constant' or 'linear' warmup_method accepted" 34 | "got {}".format(warmup_method) 35 | ) 36 | self.milestones = milestones 37 | self.gamma = gamma 38 | self.warmup_factor = warmup_factor 39 | self.warmup_iters = warmup_iters 40 | self.warmup_method = warmup_method 41 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 42 | 43 | def get_lr(self): 44 | warmup_factor = 1 45 | if self.last_epoch < self.warmup_iters: 46 | if self.warmup_method == "constant": 47 | warmup_factor = self.warmup_factor 48 | elif self.warmup_method == "linear": 49 | alpha = float(self.last_epoch) / self.warmup_iters 50 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 51 | return [ 52 | base_lr 53 | * warmup_factor 54 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 55 | for base_lr in self.base_lrs 56 | ] 57 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/metric_learning/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from metric_learn import (ITML_Supervised, LMNN, LSML_Supervised, 4 | SDML_Supervised, NCA, LFDA, RCA_Supervised) 5 | 6 | from .euclidean import Euclidean 7 | from .kissme import KISSME 8 | 9 | __factory = { 10 | 'euclidean': Euclidean, 11 | 'kissme': KISSME, 12 | 'itml': ITML_Supervised, 13 | 'lmnn': LMNN, 14 | 'lsml': LSML_Supervised, 15 | 'sdml': SDML_Supervised, 16 | 'nca': NCA, 17 | 'lfda': LFDA, 18 | 'rca': RCA_Supervised, 19 | } 20 | 21 | 22 | def get_metric(algorithm, *args, **kwargs): 23 | if algorithm not in __factory: 24 | raise KeyError("Unknown metric:", algorithm) 25 | return __factory[algorithm](*args, **kwargs) 26 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/metric_learning/euclidean.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 5 | 6 | 7 | class Euclidean(BaseMetricLearner): 8 | def __init__(self): 9 | self.M_ = None 10 | 11 | def metric(self): 12 | return self.M_ 13 | 14 | def fit(self, X): 15 | self.M_ = np.eye(X.shape[1]) 16 | self.X_ = X 17 | 18 | def transform(self, X=None): 19 | if X is None: 20 | return self.X_ 21 | return X 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/metric_learning/kissme.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 5 | 6 | 7 | def validate_cov_matrix(M): 8 | M = (M + M.T) * 0.5 9 | k = 0 10 | I = np.eye(M.shape[0]) 11 | while True: 12 | try: 13 | _ = np.linalg.cholesky(M) 14 | break 15 | except np.linalg.LinAlgError: 16 | # Find the nearest positive definite matrix for M. 
Modified from 17 | # http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd 18 | # Might take several minutes 19 | k += 1 20 | w, v = np.linalg.eig(M) 21 | min_eig = w.min()  # w holds the eigenvalues (v the eigenvectors) 22 | M += (-min_eig * k * k + np.spacing(min_eig)) * I 23 | return M 24 | 25 | 26 | class KISSME(BaseMetricLearner): 27 | def __init__(self): 28 | self.M_ = None 29 | 30 | def metric(self): 31 | return self.M_ 32 | 33 | def fit(self, X, y=None): 34 | n = X.shape[0] 35 | if y is None: 36 | y = np.arange(n) 37 | X1, X2 = np.meshgrid(np.arange(n), np.arange(n)) 38 | X1, X2 = X1[X1 < X2], X2[X1 < X2] 39 | matches = (y[X1] == y[X2]) 40 | num_matches = matches.sum() 41 | num_non_matches = len(matches) - num_matches 42 | idxa = X1[matches] 43 | idxb = X2[matches] 44 | S = X[idxa] - X[idxb] 45 | C1 = S.transpose().dot(S) / num_matches 46 | p = np.random.choice(num_non_matches, num_matches, replace=False) 47 | idxa = X1[~matches] 48 | idxb = X2[~matches] 49 | idxa = idxa[p] 50 | idxb = idxb[p] 51 | S = X[idxa] - X[idxb] 52 | C0 = S.transpose().dot(S) / num_matches 53 | self.M_ = np.linalg.inv(C1) - np.linalg.inv(C0) 54 | self.M_ = validate_cov_matrix(self.M_) 55 | self.X_ = X 56 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/models/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | from .resnet_reid import ResNet_reid_50 5 | from .resnet_reid import ResNet_reid_101 6 | from .resnet_mgn import ResNet50_mgn_lr 7 | from .resnet_mgn import ResNet101_mgn_lr 8 | from .hrnet import HighResolutionNet_reid 9 | 10 | __factory = { 11 | 'ResNet_reid_50': ResNet_reid_50, 12 | 'ResNet_reid_101': ResNet_reid_101, 13 | 'ResNet50_mgn_lr': ResNet50_mgn_lr, 14 | 'ResNet101_mgn_lr': ResNet101_mgn_lr, 15 | 'HighResolutionNet_reid': HighResolutionNet_reid, 16 | } 17 | 18 | 19 | def names(): 20 | return sorted(__factory.keys()) 21 | 22 | 23 | def create(name, *args, **kwargs): 24 | """ 25 | Create a model instance. 26 | 27 | Parameters 28 | ---------- 29 | name : str 30 | Model name. Can be one of 'ResNet_reid_50', 'ResNet_reid_101', 31 | 'ResNet50_mgn_lr', 'ResNet101_mgn_lr', and 'HighResolutionNet_reid'. 32 | pretrained : bool, optional 33 | Only applied for 'resnet*' models. If True, will use ImageNet pretrained 34 | model. Default: True 35 | cut_at_pooling : bool, optional 36 | If True, will cut the model before the last global pooling layer and 37 | ignore the remaining kwargs. Default: False 38 | num_features : int, optional 39 | If positive, will append a Linear layer after the global pooling layer, 40 | with this number of output units, followed by a BatchNorm layer. 41 | Otherwise these layers will not be appended. Default: 256 for 42 | 'inception', 0 for 'resnet*' 43 | norm : bool, optional 44 | If True, will normalize the feature to be unit L2-norm for each sample. 45 | Otherwise will append a ReLU layer after the above Linear layer if 46 | num_features > 0. Default: False 47 | dropout : float, optional 48 | If positive, will append a Dropout layer with this dropout rate. 49 | Default: 0 50 | num_classes : int, optional 51 | If positive, will append a Linear layer at the end as the classifier 52 | with this number of output units. 
Default: 0 53 | """ 54 | if name not in __factory: 55 | raise KeyError("Unknown model:", name) 56 | return __factory[name](*args, **kwargs) 57 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/trainers.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import time 3 | 4 | import torch 5 | from torch.autograd import Variable 6 | 7 | from .evaluation_metrics import accuracy 8 | from .loss import MGN_loss, XentropyLoss_SAC 9 | from .utils.meters import AverageMeter 10 | 11 | 12 | class BaseTrainer(object): 13 | def __init__(self, model, criterion): 14 | super(BaseTrainer, self).__init__() 15 | self.model = model 16 | self.criterion = criterion 17 | 18 | def train(self, epoch, data_loader, optimizer, print_freq=1): 19 | self.model.train() 20 | lr = optimizer.param_groups[0].get('lr') 21 | 22 | batch_time = AverageMeter() 23 | data_time = AverageMeter() 24 | losses = AverageMeter() 25 | precisions = AverageMeter() 26 | 27 | end = time.time() 28 | for i, inputs in enumerate(data_loader): 29 | data_time.update(time.time() - end) 30 | 31 | inputs, targets = self._parse_data(inputs) 32 | loss, prec1 = self._forward(inputs, targets) 33 | 34 | losses.update(loss.item(), targets.size(0)) 35 | precisions.update(prec1, targets.size(0)) 36 | 37 | optimizer.zero_grad() 38 | loss.backward() 39 | optimizer.step() 40 | 41 | batch_time.update(time.time() - end) 42 | end = time.time() 43 | 44 | if (i + 1) % print_freq == 0: 45 | print('Epoch: [{}][{}/{}]\t' 46 | 'Base_Lr: {:0.5f} \t' 47 | 'Time {:.3f} ({:.3f})\t' 48 | 'Data {:.3f} ({:.3f})\t' 49 | 'Loss {:.3f} ({:.3f})\t' 50 | 'Prec {:.2%} ({:.2%})\t' 51 | .format(epoch, i + 1, len(data_loader), 52 | lr, 53 | batch_time.val, batch_time.avg, 54 | data_time.val, data_time.avg, 55 | losses.val, losses.avg, 56 | precisions.val, precisions.avg)) 57 | 58 | def _parse_data(self, inputs): 59 | raise NotImplementedError 60 | 61 | def _forward(self, inputs, targets): 62 | raise NotImplementedError 63 | 64 | 65 | class Trainer(BaseTrainer): 66 | def _parse_data(self, inputs): 67 | imgs, _, pids, _ = inputs 68 | inputs = [Variable(imgs)] 69 | targets = Variable(pids.cuda()) 70 | return inputs, targets 71 | def _forward(self, inputs, targets): 72 | outputs = self.model(*inputs) 73 | softmax_out = outputs[0] 74 | if isinstance(self.criterion, torch.nn.CrossEntropyLoss): 75 | loss = self.criterion(softmax_out, targets) 76 | prec, = accuracy(softmax_out.data, targets.data) 77 | prec = prec[0] 78 | elif isinstance(self.criterion, MGN_loss): 79 | loss, prec = self.criterion(outputs, targets) 80 | elif isinstance(self.criterion, XentropyLoss_SAC): 81 | loss, prec = self.criterion(outputs, targets) 82 | 83 | else: 84 | raise ValueError("Unsupported loss:", self.criterion) 85 | return loss, prec 86 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | 6 | def to_numpy(tensor): 7 | if torch.is_tensor(tensor): 8 | return tensor.cpu().numpy() 9 | elif type(tensor).__module__ != 'numpy': 10 | raise ValueError("Cannot convert {} to numpy array" 11 | .format(type(tensor))) 12 | return tensor 13 | 14 | 15 | def to_torch(ndarray): 16 | if type(ndarray).__module__ == 'numpy': 17 | return 
torch.from_numpy(ndarray) 18 | elif not torch.is_tensor(ndarray): 19 | raise ValueError("Cannot convert {} to torch tensor" 20 | .format(type(ndarray))) 21 | return ndarray 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/data/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .dataset import Dataset 4 | from .attribute_dataset import Attribute_Dataset 5 | from .preprocessor import Preprocessor, Attribute_Preprocessor 6 | from .sampler import RandomIdentitySampler, RandomIdentityAttributeSampler 7 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/data/dataset.py.bak: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os.path as osp 3 | 4 | import numpy as np 5 | 6 | from ..serialization import read_json 7 | 8 | 9 | def _pluck(identities, indices, relabel=False): 10 | ret = [] 11 | for index, pid in enumerate(indices): 12 | pid_images = identities[pid] 13 | for camid, cam_images in enumerate(pid_images): 14 | for fname in cam_images: 15 | name = osp.splitext(fname)[0] 16 | x, y, _ = map(int, name.split('_')) 17 | assert pid == x and camid == y 18 | if relabel: 19 | ret.append((fname, index, camid)) 20 | else: 21 | ret.append((fname, pid, camid)) 22 | return ret 23 | 24 | 25 | class Dataset(object): 26 | def __init__(self, root, split_id=0): 27 | self.root = root 28 | self.split_id = split_id 29 | self.meta = None 30 | self.split = None 31 | self.train, self.val, self.trainval = [], [], [] 32 | self.query, self.gallery = [], [] 33 | self.num_train_ids, self.num_val_ids, self.num_trainval_ids = 0, 0, 0 34 | 35 | @property 36 | def images_dir(self): 37 | return osp.join(self.root, 'images') 38 | 39 | def load(self, num_val=0.3, verbose=True): 40 | splits = read_json(osp.join(self.root, 'splits.json')) 41 | if self.split_id >= len(splits): 42 | raise ValueError("split_id exceeds total splits {}" 43 | .format(len(splits))) 44 | self.split = splits[self.split_id] 45 | 46 | # Randomly split train / val 47 | trainval_pids = np.asarray(self.split['trainval']) 48 | np.random.shuffle(trainval_pids) 49 | num = len(trainval_pids) 50 | if isinstance(num_val, float): 51 | num_val = int(round(num * num_val)) 52 | if num_val >= num or num_val < 0: 53 | raise ValueError("num_val exceeds total identities {}" 54 | .format(num)) 55 | train_pids = sorted(trainval_pids[:-num_val]) 56 | val_pids = sorted(trainval_pids[-num_val:]) 57 | 58 | self.meta = read_json(osp.join(self.root, 'meta.json')) 59 | identities = self.meta['identities'] 60 | self.train = _pluck(identities, train_pids, relabel=True) 61 | self.val = _pluck(identities, val_pids, relabel=True) 62 | self.trainval = _pluck(identities, trainval_pids, relabel=True) 63 | self.query = _pluck(identities, self.split['query']) 64 | self.gallery = _pluck(identities, self.split['gallery']) 65 | self.num_train_ids = len(train_pids) 66 | self.num_val_ids = len(val_pids) 67 | self.num_trainval_ids = len(trainval_pids) 68 | 69 | if verbose: 70 | print(self.__class__.__name__, "dataset loaded") 71 | print(" subset | # ids | # images") 72 | print(" ---------------------------") 73 | print(" train | {:5d} | {:8d}" 74 | .format(self.num_train_ids, len(self.train))) 75 | print(" val | {:5d} | {:8d}" 76 | .format(self.num_val_ids, len(self.val))) 77 | 
print(" trainval | {:5d} | {:8d}" 78 | .format(self.num_trainval_ids, len(self.trainval))) 79 | print(" query | {:5d} | {:8d}" 80 | .format(len(self.split['query']), len(self.query))) 81 | print(" gallery | {:5d} | {:8d}" 82 | .format(len(self.split['gallery']), len(self.gallery))) 83 | 84 | def _check_integrity(self): 85 | return osp.isdir(osp.join(self.root, 'images')) and \ 86 | osp.isfile(osp.join(self.root, 'meta.json')) and \ 87 | osp.isfile(osp.join(self.root, 'splits.json')) 88 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/data/preprocessor.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os.path as osp 3 | 4 | from PIL import Image 5 | 6 | 7 | class Preprocessor(object): 8 | def __init__(self, dataset, root=None, transform=None): 9 | super(Preprocessor, self).__init__() 10 | self.dataset = dataset 11 | self.root = root 12 | self.transform = transform 13 | 14 | def __len__(self): 15 | return len(self.dataset) 16 | 17 | def __getitem__(self, indices): 18 | if isinstance(indices, (tuple, list)): 19 | return [self._get_single_item(index) for index in indices] 20 | return self._get_single_item(indices) 21 | 22 | def _get_single_item(self, index): 23 | fname, pid, camid = self.dataset[index] 24 | fpath = fname 25 | if self.root is not None: 26 | fpath = osp.join(self.root, fname) 27 | img = Image.open(fpath).convert('RGB') 28 | if self.transform is not None: 29 | img = self.transform(img) 30 | return img, fname, pid, camid 31 | 32 | 33 | class Attribute_Preprocessor(object): 34 | def __init__(self, dataset, root=None, transform=None): 35 | super(Attribute_Preprocessor, self).__init__() 36 | self.dataset = dataset 37 | self.root = root 38 | self.transform = transform 39 | 40 | def __len__(self): 41 | return len(self.dataset) 42 | 43 | def __getitem__(self, indices): 44 | if isinstance(indices, (tuple, list)): 45 | return [self._get_single_item(index) for index in indices] 46 | return self._get_single_item(indices) 47 | 48 | def _get_single_item(self, index): 49 | fname, pid, camid, color, car_type, roof, window, logo = self.dataset[index] 50 | fpath = fname 51 | if self.root is not None: 52 | fpath = osp.join(self.root, fname) 53 | img = Image.open(fpath).convert('RGB') 54 | if self.transform is not None: 55 | img = self.transform(img) 56 | return img, fname, pid, camid, color, car_type, roof, window, logo 57 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/logging.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import sys 4 | 5 | from .osutils import mkdir_if_missing 6 | 7 | 8 | class Logger(object): 9 | def __init__(self, fpath=None): 10 | self.console = sys.stdout 11 | self.file = None 12 | if fpath is not None: 13 | mkdir_if_missing(os.path.dirname(fpath)) 14 | self.file = open(fpath, 'w') 15 | 16 | def __del__(self): 17 | self.close() 18 | 19 | def __enter__(self): 20 | pass 21 | 22 | def __exit__(self, *args): 23 | self.close() 24 | 25 | def write(self, msg): 26 | self.console.write(msg) 27 | if self.file is not None: 28 | self.file.write(msg) 29 | 30 | def flush(self): 31 | self.console.flush() 32 | if self.file is not None: 33 | self.file.flush() 34 | os.fsync(self.file.fileno()) 35 | 36 | def close(self): 37 | self.console.close() 38 | if self.file is 
not None: 39 | self.file.close() 40 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/meters.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class AverageMeter(object): 5 | """Computes and stores the average and current value""" 6 | 7 | def __init__(self): 8 | self.val = 0 9 | self.avg = 0 10 | self.sum = 0 11 | self.count = 0 12 | 13 | def reset(self): 14 | self.val = 0 15 | self.avg = 0 16 | self.sum = 0 17 | self.count = 0 18 | 19 | def update(self, val, n=1): 20 | self.val = val 21 | self.sum += val * n 22 | self.count += n 23 | self.avg = self.sum / self.count 24 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/osutils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import errno 4 | 5 | 6 | def mkdir_if_missing(dir_path): 7 | try: 8 | os.makedirs(dir_path) 9 | except OSError as e: 10 | if e.errno != errno.EEXIST: 11 | raise 12 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/reid/utils/serialization.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import json 3 | import os.path as osp 4 | import shutil 5 | 6 | import torch 7 | from torch.nn import Parameter 8 | 9 | from .osutils import mkdir_if_missing 10 | 11 | 12 | def read_json(fpath): 13 | with open(fpath, 'r') as f: 14 | obj = json.load(f) 15 | return obj 16 | 17 | 18 | def write_json(obj, fpath): 19 | mkdir_if_missing(osp.dirname(fpath)) 20 | with open(fpath, 'w') as f: 21 | json.dump(obj, f, indent=4, separators=(',', ': ')) 22 | 23 | 24 | def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'): 25 | mkdir_if_missing(osp.dirname(fpath)) 26 | torch.save(state, fpath) 27 | if is_best: 28 | shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar')) 29 | 30 | 31 | def load_checkpoint(fpath): 32 | if osp.isfile(fpath): 33 | checkpoint = torch.load(fpath) 34 | print("=> Loaded checkpoint '{}'".format(fpath)) 35 | return checkpoint 36 | else: 37 | raise ValueError("=> No checkpoint found at '{}'".format(fpath)) 38 | 39 | 40 | def copy_state_dict(state_dict, model, strip=None): 41 | tgt_state = model.state_dict() 42 | copied_names = set() 43 | for name, param in state_dict.items(): 44 | if strip is not None and name.startswith(strip): 45 | name = name[len(strip):] 46 | if name not in tgt_state: 47 | continue 48 | if isinstance(param, Parameter): 49 | param = param.data 50 | if param.size() != tgt_state[name].size(): 51 | print('mismatch:', name, param.size(), tgt_state[name].size()) 52 | continue 53 | tgt_state[name].copy_(param) 54 | copied_names.add(name) 55 | 56 | missing = set(tgt_state.keys()) - copied_names 57 | if len(missing) > 0: 58 | print("missing keys in state_dict:", missing) 59 | 60 | return model 61 | -------------------------------------------------------------------------------- /Track2(ReID)/part2_model/run_train.sh: -------------------------------------------------------------------------------- 1 | export CUDA_VISIBLE_DEVICES=0,1,2,3 2 | python examples/train.py \ 3 | -a HighResolutionNet_reid \ 4 | -b 32 \ 5 | -d gao_crop_train \ 6 | --combine-trainval \ 7 | --frozen_sublayer True \ 8 | --weights 
./weights/hrnetv2_w32_imagenet_pretrained.pth.tar \ 9 | --logs-dir ./logs/model_288_384/ \ 10 | --lr 0.01 \ 11 | --gamma 0.1 \ 12 | --weight-decay 0.0005 \ 13 | --warm_up_factor 0.01 \ 14 | --warm_up_iter 100 \ 15 | --step_epoch 600 900 1000 \ 16 | --epochs 1100 \ 17 | --lr_mult 1.0 \ 18 | --metric_loss_weight 0.5 \ 19 | --big_height 288 \ 20 | --big_width 384 \ 21 | --target_height 288 \ 22 | --target_width 384 \ 23 | --epoch_inter 1 \ 24 | --start_save 50 \ 25 | --dense_evaluate 1090 \ 26 | --num_instances 4 \ 27 | 28 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/README.md: -------------------------------------------------------------------------------- 1 | # Vehicle ReID Using Keypoints 2 | 3 | ## Pipeline 4 | 5 | ### Data 6 | We labeled keypoints on the training data of Track 2, following the definition of vehicle keypoints in "Orientation Invariant Feature Embedding and Spatial Temporal Regularization for Vehicle Re-identification". The labeled data are keypoint_train.txt (for training) and keypoint_test.txt (for validation) in vehicle-keypoint/data/aicity_keypoint. Note that the keypoint annotations correspond to the renamed AICity training set, which can be found in complete_aicity_car196.tar.gz mentioned in the main README file. 7 | 8 | ### Training 9 | 1. Run vehicle-keypoint/train.sh to train the vehicle keypoint model. 10 | 2. Run vehicle-keypoint/infer.sh to generate heatmaps for the training data. 11 | 3. Run vehicle-reid-keypoint/train.sh to train the vehicle ReID model. 12 | 13 | ### Testing 14 | 1. Run vehicle-keypoint/infer.sh to generate heatmaps for the testing data. 15 | 2. Run vehicle-reid-keypoint/infer.sh to generate ReID features for retrieval. 16 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | parts/ 18 | sdist/ 19 | var/ 20 | wheels/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | MANIFEST 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *.cover 45 | .hypothesis/ 46 | .pytest_cache/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | db.sqlite3 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # Environments 83 | .env 84 | .venv 85 | env/ 86 | venv/ 87 | ENV/ 88 | env.bak/ 89 | venv.bak/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | .spyproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | 98 | # mkdocs documentation 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | /data 105 | /output 106 | /models 107 | /log 108 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/experiments/veri/resnet50/256x256_d256x3_adam_lr1e-3.yaml: -------------------------------------------------------------------------------- 1 | GPUS: '0,1,2,3' 2 | DATA_DIR: '' 3 | OUTPUT_DIR: 'output' 4 | LOG_DIR: 'log' 5 | WORKERS: 8 6 | PRINT_FREQ: 100 7 | 8 | CUDNN: 9 | BENCHMARK: True 10 | DETERMINISTIC: False 11 | ENABLED: True 12 | DATASET: 13 | DATASET: 'veri' 14 | ROOT: 'data/aicity_keypoint/' 15 | TEST_SET: 'keypoint_test.txt' 16 | TRAIN_SET: 'keypoint_train.txt' 17 | FLIP: true 18 | ROT_FACTOR: 40 19 | SCALE_FACTOR: 0.3 20 | MODEL: 21 | NAME: 'pose_resnet' 22 | PRETRAINED: 'models/pytorch/coco/pose_resnet_50_256x192.pth.tar' 23 | IMAGE_SIZE: 24 | - 256 25 | - 256 26 | NUM_JOINTS: 20 27 | EXTRA: 28 | TARGET_TYPE: 'gaussian' 29 | HEATMAP_SIZE: 30 | - 64 31 | - 64 32 | SIGMA: 2 33 | FINAL_CONV_KERNEL: 1 34 | DECONV_WITH_BIAS: false 35 | NUM_DECONV_LAYERS: 3 36 | NUM_DECONV_FILTERS: 37 | - 256 38 | - 256 39 | - 256 40 | NUM_DECONV_KERNELS: 41 | - 4 42 | - 4 43 | - 4 44 | NUM_LAYERS: 50 45 | LOSS: 46 | USE_TARGET_WEIGHT: true 47 | TRAIN: 48 | BATCH_SIZE: 32 49 | SHUFFLE: true 50 | BEGIN_EPOCH: 0 51 | END_EPOCH: 140 52 | RESUME: false 53 | OPTIMIZER: 'adam' 54 | LR: 0.001 55 | LR_FACTOR: 0.1 56 | LR_STEP: 57 | - 90 58 | - 120 59 | WD: 0.0001 60 | GAMMA1: 0.99 61 | GAMMA2: 0.0 62 | MOMENTUM: 0.9 63 | NESTEROV: false 64 | TEST: 65 | BATCH_SIZE: 32 66 | FLIP_TEST: false 67 | MODEL_FILE: '' 68 | DEBUG: 69 | DEBUG: false 70 | SAVE_BATCH_IMAGES_GT: true 71 | SAVE_BATCH_IMAGES_PRED: true 72 | SAVE_HEATMAPS_GT: true 73 | SAVE_HEATMAPS_PRED: true 74 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/infer.sh: -------------------------------------------------------------------------------- 1 | input_dir="" # input training/testing images 2 | image_list="" # list of image names 3 | output_dir="" # output dir of the keypoint heatmap of input images 4 | 5 | # for example 6 | # input_dir="data/aicity_keypoint/images" 7 | # image_list="./images.txt" 8 | # output_dir="../vehicle-reid-keypoint/examples/data/small_vehicle/masks" 9 | 10 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 11 | python pose_estimation/infer.py \ 12 | --cfg 
experiments/veri/resnet50/256x256_d256x3_adam_lr1e-3.yaml \ 13 | --gpus 0,1,2,3 \ 14 | --batch-size 64 \ 15 | --input-dir $input_dir \ 16 | --input-list $image_list \ 17 | --model-file model_best.pth.tar \ 18 | --output-dir $output_dir \ 19 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | cd nms; python setup.py build_ext --inplace; rm -rf build; cd ../../ 3 | clean: 4 | cd nms; rm *.so; cd ../../ 5 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-keypoint/lib/core/__init__.py -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/core/evaluate.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import numpy as np 12 | 13 | from core.inference import get_max_preds 14 | 15 | 16 | def calc_dists(preds, target, normalize): 17 | preds = preds.astype(np.float32) 18 | target = target.astype(np.float32) 19 | dists = np.zeros((preds.shape[1], preds.shape[0])) 20 | for n in range(preds.shape[0]): 21 | for c in range(preds.shape[1]): 22 | if target[n, c, 0] > 1 and target[n, c, 1] > 1: 23 | normed_preds = preds[n, c, :] / normalize[n] 24 | normed_targets = target[n, c, :] / normalize[n] 25 | dists[c, n] = np.linalg.norm(normed_preds - normed_targets) 26 | else: 27 | dists[c, n] = -1 28 | return dists 29 | 30 | 31 | def dist_acc(dists, thr=0.5): 32 | ''' Return percentage below threshold while ignoring values with a -1 ''' 33 | dist_cal = np.not_equal(dists, -1) 34 | num_dist_cal = dist_cal.sum() 35 | if num_dist_cal > 0: 36 | return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal 37 | else: 38 | return -1 39 | 40 | 41 | def accuracy(output, target, hm_type='gaussian', thr=0.5): 42 | ''' 43 | Calculate accuracy according to PCK, 44 | but uses ground truth heatmap rather than x,y locations 45 | First value to be returned is average accuracy across 'idxs', 46 | followed by individual accuracies 47 | ''' 48 | idx = list(range(output.shape[1])) 49 | norm = 1.0 50 | if hm_type == 'gaussian': 51 | pred, _ = get_max_preds(output) 52 | target, _ = get_max_preds(target) 53 | h = output.shape[2] 54 | w = output.shape[3] 55 | norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10 56 | dists = calc_dists(pred, target, norm) 57 | 58 | acc = np.zeros((len(idx) + 1)) 59 | avg_acc = 0 60 | cnt = 0 61 | 62 | for i in range(len(idx)): 63 | acc[i + 1] = dist_acc(dists[idx[i]]) 64 | if acc[i + 1] >= 0: 65 | avg_acc = avg_acc + acc[i + 1] 66 | cnt += 1 67 | 68 | avg_acc = avg_acc / cnt if cnt != 0 else 0 69 | if cnt != 0: 70 | acc[0] = avg_acc 71 | return acc, avg_acc, cnt, pred 72 | 
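As a quick sanity check of the PCK-style accuracy function above, the following minimal sketch (not part of the repository; it assumes lib/ is on sys.path, which pose_estimation/_init_paths.py arranges) compares a random heatmap batch against itself, so every recovered joint distance is zero and the averaged PCK comes out as 1.0:

import numpy as np
from core.evaluate import accuracy  # resolvable once lib/ is on sys.path

# Hypothetical batch: 2 images, 20 joints (NUM_JOINTS in the VERI yaml), 64x64 heatmaps
hm = np.random.rand(2, 20, 64, 64).astype(np.float32)
acc, avg_acc, cnt, pred = accuracy(hm, hm)
# Joints whose argmax lands at x <= 1 or y <= 1 are skipped by calc_dists (dist = -1);
# every counted joint has distance 0, so the average accuracy is 1.0.
assert cnt == 0 or avg_acc == 1.0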
-------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/core/inference.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import math 12 | 13 | import numpy as np 14 | 15 | from utils.transforms import transform_preds 16 | 17 | 18 | def get_max_preds(batch_heatmaps): 19 | ''' 20 | get predictions from score maps 21 | heatmaps: numpy.ndarray([batch_size, num_joints, height, width]) 22 | ''' 23 | assert isinstance(batch_heatmaps, np.ndarray), \ 24 | 'batch_heatmaps should be numpy.ndarray' 25 | assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim' 26 | 27 | batch_size = batch_heatmaps.shape[0] 28 | num_joints = batch_heatmaps.shape[1] 29 | width = batch_heatmaps.shape[3] 30 | heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1)) 31 | idx = np.argmax(heatmaps_reshaped, 2) 32 | maxvals = np.amax(heatmaps_reshaped, 2) 33 | 34 | maxvals = maxvals.reshape((batch_size, num_joints, 1)) 35 | idx = idx.reshape((batch_size, num_joints, 1)) 36 | 37 | preds = np.tile(idx, (1, 1, 2)).astype(np.float32) 38 | 39 | preds[:, :, 0] = (preds[:, :, 0]) % width 40 | preds[:, :, 1] = np.floor((preds[:, :, 1]) / width) 41 | 42 | pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)) 43 | pred_mask = pred_mask.astype(np.float32) 44 | 45 | preds *= pred_mask 46 | return preds, maxvals 47 | 48 | 49 | def get_final_preds(config, batch_heatmaps, center, scale): 50 | coords, maxvals = get_max_preds(batch_heatmaps) 51 | 52 | heatmap_height = batch_heatmaps.shape[2] 53 | heatmap_width = batch_heatmaps.shape[3] 54 | 55 | # post-processing 56 | if config.TEST.POST_PROCESS: 57 | for n in range(coords.shape[0]): 58 | for p in range(coords.shape[1]): 59 | hm = batch_heatmaps[n][p] 60 | px = int(math.floor(coords[n][p][0] + 0.5)) 61 | py = int(math.floor(coords[n][p][1] + 0.5)) 62 | if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1: 63 | diff = np.array([hm[py][px+1] - hm[py][px-1], 64 | hm[py+1][px]-hm[py-1][px]]) 65 | coords[n][p] += np.sign(diff) * .25 66 | 67 | preds = coords.copy() 68 | 69 | # Transform back 70 | for i in range(coords.shape[0]): 71 | preds[i] = transform_preds(coords[i], center[i], scale[i], 72 | [heatmap_width, heatmap_height]) 73 | 74 | return preds, maxvals 75 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/core/loss.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import torch.nn as nn 12 | 13 | 14 | class JointsMSELoss(nn.Module): 15 | def __init__(self, use_target_weight): 16 | super(JointsMSELoss, self).__init__() 17 | # self.criterion = nn.MSELoss(size_average=True) 18 | self.criterion = nn.MSELoss(reduction='mean') 19 | self.use_target_weight = use_target_weight 20 | 21 | def forward(self, output, target, target_weight): 22 | batch_size = output.size(0) 23 | num_joints = output.size(1) 24 | heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1) 25 | heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1) 26 | loss = 0 27 | 28 | for idx in range(num_joints): 29 | heatmap_pred = heatmaps_pred[idx].squeeze() 30 | heatmap_gt = heatmaps_gt[idx].squeeze() 31 | if self.use_target_weight: 32 | loss += 0.5 * self.criterion( 33 | heatmap_pred.mul(target_weight[:, idx]), 34 | heatmap_gt.mul(target_weight[:, idx]) 35 | ) 36 | else: 37 | loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt) 38 | 39 | return loss / num_joints 40 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | #from .mpii import MPIIDataset as mpii 12 | #from .coco import COCODataset as coco 13 | from .veri import VERIDataset as veri 14 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/dataset/veri.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import logging 12 | import os 13 | import pickle 14 | from collections import defaultdict 15 | from collections import OrderedDict 16 | 17 | import json_tricks as json 18 | import numpy as np 19 | 20 | from dataset.JointsDataset import JointsDataset 21 | from nms.nms import oks_nms 22 | 23 | 24 | logger = logging.getLogger(__name__) 25 | 26 | 27 | class VERIDataset(JointsDataset): 28 | def __init__(self, cfg, root, image_set, is_train, transform=None): 29 | super(VERIDataset, self).__init__(cfg, root, image_set, is_train, transform) 30 | self.image_width = cfg.MODEL.IMAGE_SIZE[0] 31 | self.image_height = cfg.MODEL.IMAGE_SIZE[1] 32 | self.pixel_std = 200.0 33 | 34 | self.num_joints = 20 35 | self.db = self._get_db() 36 | 37 | if is_train and cfg.DATASET.SELECT_DATA: 38 | self.db = self.select_data(self.db) 39 | 40 | logger.info('=> load {} samples'.format(len(self.db))) 41 | 42 | def _get_db(self): 43 | db = [] 44 | 45 | with open(os.path.join(self.root, self.image_set), 'r') as fp: 46 | for line in fp: 47 | tokens = line.strip().split() 48 | image_path = os.path.join(self.root, tokens[0]) 49 | anno_joints = tokens[1:41] 50 | joints_3d = np.zeros((self.num_joints, 3), dtype=np.float64) 51 | joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float64) 52 | for ipt in range(self.num_joints): 53 | x = float(anno_joints[ipt * 2 + 0])  # tokens are strings; cast so the visibility check below compares numbers 54 | y = float(anno_joints[ipt * 2 + 1]) 55 | joints_3d[ipt, 0] = x 56 | joints_3d[ipt, 1] = y 57 | joints_3d[ipt, 2] = 0 58 | t_vis = 0 59 | if x >= 0 and y >= 0: 60 | t_vis = 1 61 | joints_3d_vis[ipt, 0] = t_vis 62 | joints_3d_vis[ipt, 1] = t_vis 63 | joints_3d_vis[ipt, 2] = 0 64 | 65 | center = np.array( \ 66 | [self.image_width * 0.5, self.image_height * 0.5], \ 67 | dtype=np.float32) 68 | scale = np.array( \ 69 | [self.image_width / self.pixel_std, self.image_height / self.pixel_std], \ 70 | dtype=np.float32) 71 | 72 | db.append({ 73 | 'image': image_path, 74 | 'center': center, 75 | 'scale': scale, 76 | 'joints_3d': joints_3d, 77 | 'joints_3d_vis': joints_3d_vis, 78 | 'filename': '', 79 | 'imgnum': 0, 80 | }) 81 | 82 | return db 83 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/models/__init__.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import models.pose_resnet 12 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-keypoint/lib/nms/__init__.py -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/nms/cpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn) 5 | # ------------------------------------------------------------------------------ 6 | 7 | import numpy as np 8 | cimport numpy as np 9 | 10 | cdef inline np.float32_t max(np.float32_t a, np.float32_t b): 11 | return a if a >= b else b 12 | 13 | cdef inline np.float32_t min(np.float32_t a, np.float32_t b): 14 | return a if a <= b else b 15 | 16 | def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh): 17 | cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0] 18 | cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1] 19 | cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2] 20 | cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3] 21 | cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4] 22 | 23 | cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) 24 | cdef np.ndarray[np.int_t, ndim=1] order = scores.argsort()[::-1].astype('i') 25 | 26 | cdef int ndets = dets.shape[0] 27 | cdef np.ndarray[np.int_t, ndim=1] suppressed = \ 28 | np.zeros((ndets), dtype=np.int) 29 | 30 | # nominal indices 31 | cdef int _i, _j 32 | # sorted indices 33 | cdef int i, j 34 | # temp variables for box i's (the box currently under consideration) 35 | cdef np.float32_t ix1, iy1, ix2, iy2, iarea 36 | # variables for computing overlap with box j (lower scoring box) 37 | cdef np.float32_t xx1, yy1, xx2, yy2 38 | cdef np.float32_t w, h 39 | cdef np.float32_t inter, ovr 40 | 41 | keep = [] 42 | for _i in range(ndets): 43 | i = order[_i] 44 | if suppressed[i] == 1: 45 | continue 46 | keep.append(i) 47 | ix1 = x1[i] 48 | iy1 = y1[i] 49 | ix2 = x2[i] 50 | iy2 = y2[i] 51 | iarea = areas[i] 52 | for _j in range(_i + 1, ndets): 53 | j = order[_j] 54 | if suppressed[j] == 1: 55 | continue 56 | xx1 = max(ix1, x1[j]) 57 | yy1 = max(iy1, y1[j]) 58 | xx2 = min(ix2, x2[j]) 59 | yy2 = min(iy2, y2[j]) 60 | w = max(0.0, xx2 - xx1 + 1) 61 | h = max(0.0, yy2 - yy1 + 1) 62 | inter = w * h 63 | ovr = inter / (iarea + areas[j] - inter) 64 | if ovr >= thresh: 65 | suppressed[j] = 1 66 | 67 | return keep 68 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/nms/gpu_nms.hpp: -------------------------------------------------------------------------------- 1 | void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, 2 | int boxes_dim, float nms_overlap_thresh, int device_id); 3 | 
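For reference, the greedy IoU suppression that cpu_nms.pyx compiles is the classic Fast R-CNN NMS; a dependency-free NumPy sketch of the same logic (a hypothetical helper, not part of the repo, but handy for cross-checking the built Cython extension) looks like this:

import numpy as np

def nms_numpy(dets, thresh):
    # dets: (N, 5) array of [x1, y1, x2, y2, score], same convention as cpu_nms
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Intersection of the top box with every remaining box
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes below the overlap threshold, mirroring the ovr >= thresh suppression above
        order = order[1:][ovr < thresh]
    return keep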
-------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/nms/gpu_nms.pyx: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn) 5 | # ------------------------------------------------------------------------------ 6 | 7 | import numpy as np 8 | cimport numpy as np 9 | 10 | assert sizeof(int) == sizeof(np.int32_t) 11 | 12 | cdef extern from "gpu_nms.hpp": 13 | void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) 14 | 15 | def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, 16 | np.int32_t device_id=0): 17 | cdef int boxes_num = dets.shape[0] 18 | cdef int boxes_dim = dets.shape[1] 19 | cdef int num_out 20 | cdef np.ndarray[np.int32_t, ndim=1] \ 21 | keep = np.zeros(boxes_num, dtype=np.int32) 22 | cdef np.ndarray[np.float32_t, ndim=1] \ 23 | scores = dets[:, 4] 24 | cdef np.ndarray[np.int32_t, ndim=1] \ 25 | order = scores.argsort()[::-1].astype(np.int32) 26 | cdef np.ndarray[np.float32_t, ndim=2] \ 27 | sorted_dets = dets[order, :] 28 | _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) 29 | keep = keep[:num_out] 30 | return list(order[keep]) 31 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-keypoint/lib/utils/__init__.py -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/utils/utils.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import os 12 | import logging 13 | import time 14 | from pathlib import Path 15 | 16 | import torch 17 | import torch.optim as optim 18 | 19 | from core.config import get_model_name 20 | 21 | 22 | def create_logger(cfg, cfg_name, phase='train'): 23 | root_output_dir = Path(cfg.OUTPUT_DIR) 24 | # set up logger 25 | if not root_output_dir.exists(): 26 | print('=> creating {}'.format(root_output_dir)) 27 | root_output_dir.mkdir() 28 | 29 | dataset = cfg.DATASET.DATASET + '_' + cfg.DATASET.HYBRID_JOINTS_TYPE \ 30 | if cfg.DATASET.HYBRID_JOINTS_TYPE else cfg.DATASET.DATASET 31 | dataset = dataset.replace(':', '_') 32 | model, _ = get_model_name(cfg) 33 | cfg_name = os.path.basename(cfg_name).split('.')[0] 34 | 35 | final_output_dir = root_output_dir / dataset / model / cfg_name 36 | 37 | print('=> creating {}'.format(final_output_dir)) 38 | final_output_dir.mkdir(parents=True) 39 | 40 | time_str = time.strftime('%Y-%m-%d-%H-%M') 41 | log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase) 42 | final_log_file = final_output_dir / log_file 43 | head = '%(asctime)-15s %(message)s' 44 | logging.basicConfig(filename=str(final_log_file), 45 | format=head) 46 | logger = logging.getLogger() 47 | logger.setLevel(logging.INFO) 48 | console = logging.StreamHandler() 49 | logging.getLogger('').addHandler(console) 50 | 51 | tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \ 52 | (cfg_name + '_' + time_str) 53 | print('=> creating {}'.format(tensorboard_log_dir)) 54 | tensorboard_log_dir.mkdir(parents=True) 55 | 56 | return logger, str(final_output_dir), str(tensorboard_log_dir) 57 | 58 | 59 | def get_optimizer(cfg, model): 60 | optimizer = None 61 | if cfg.TRAIN.OPTIMIZER == 'sgd': 62 | optimizer = optim.SGD( 63 | model.parameters(), 64 | lr=cfg.TRAIN.LR, 65 | momentum=cfg.TRAIN.MOMENTUM, 66 | weight_decay=cfg.TRAIN.WD, 67 | nesterov=cfg.TRAIN.NESTEROV 68 | ) 69 | elif cfg.TRAIN.OPTIMIZER == 'adam': 70 | optimizer = optim.Adam( 71 | model.parameters(), 72 | lr=cfg.TRAIN.LR 73 | ) 74 | 75 | return optimizer 76 | 77 | 78 | def save_checkpoint(states, is_best, output_dir, 79 | filename='checkpoint.pth.tar'): 80 | torch.save(states, os.path.join(output_dir, filename)) 81 | if is_best and 'state_dict' in states: 82 | torch.save(states['state_dict'], 83 | os.path.join(output_dir, 'model_best.pth.tar')) 84 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/lib/utils/zipreader.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import os 12 | import zipfile 13 | import xml.etree.ElementTree as ET 14 | 15 | import cv2 16 | import numpy as np 17 | 18 | _im_zfile = [] 19 | _xml_path_zip = [] 20 | _xml_zfile = [] 21 | 22 | 23 | def imread(filename, flags=cv2.IMREAD_COLOR): 24 | global _im_zfile 25 | path = filename 26 | pos_at = path.find('@')  # find() returns -1 when missing; index() would raise before the check below 27 | if pos_at == -1: 28 | print("character '@' is not found from the given path '%s'"%(path)) 29 | assert 0 30 | path_zip = path[0: pos_at] 31 | path_img = path[pos_at + 2:] 32 | if not os.path.isfile(path_zip): 33 | print("zip file '%s' is not found"%(path_zip)) 34 | assert 0 35 | for i in range(len(_im_zfile)): 36 | if _im_zfile[i]['path'] == path_zip: 37 | data = _im_zfile[i]['zipfile'].read(path_img) 38 | return cv2.imdecode(np.frombuffer(data, np.uint8), flags) 39 | 40 | _im_zfile.append({ 41 | 'path': path_zip, 42 | 'zipfile': zipfile.ZipFile(path_zip, 'r') 43 | }) 44 | data = _im_zfile[-1]['zipfile'].read(path_img) 45 | 46 | return cv2.imdecode(np.frombuffer(data, np.uint8), flags) 47 | 48 | 49 | def xmlread(filename): 50 | global _xml_path_zip 51 | global _xml_zfile 52 | path = filename 53 | pos_at = path.find('@')  # same fix as in imread 54 | if pos_at == -1: 55 | print("character '@' is not found from the given path '%s'"%(path)) 56 | assert 0 57 | path_zip = path[0: pos_at] 58 | path_xml = path[pos_at + 2:] 59 | if not os.path.isfile(path_zip): 60 | print("zip file '%s' is not found"%(path_zip)) 61 | assert 0 62 | for i in range(len(_xml_path_zip)): 63 | if _xml_path_zip[i] == path_zip: 64 | data = _xml_zfile[i].open(path_xml) 65 | return ET.fromstring(data.read()) 66 | _xml_path_zip.append(path_zip) 67 | print("read new xml file '%s'"%(path_zip)) 68 | _xml_zfile.append(zipfile.ZipFile(path_zip, 'r')) 69 | data = _xml_zfile[-1].open(path_xml) 70 | return ET.fromstring(data.read()) 71 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/pose_estimation/_init_paths.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. 
4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import os.path as osp 12 | import sys 13 | 14 | 15 | def add_path(path): 16 | if path not in sys.path: 17 | sys.path.insert(0, path) 18 | 19 | 20 | this_dir = osp.dirname(__file__) 21 | 22 | lib_path = osp.join(this_dir, '..', 'lib') 23 | add_path(lib_path) 24 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-keypoint/train.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 2 | python pose_estimation/train.py \ 3 | --cfg experiments/veri/resnet50/256x256_d256x3_adam_lr1e-3.yaml \ 4 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/examples/data/small_vehicle/images/dummy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/examples/data/small_vehicle/images/dummy.jpg -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/examples/data/small_vehicle/masks/dummy.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/examples/data/small_vehicle/masks/dummy.pkl -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/infer.sh: -------------------------------------------------------------------------------- 1 | image_dir="" # dir of input testing images 2 | masks_dir="" # dir of the keypoint heatmap of testing images generated from vehicle-keypoint/infer.sh 3 | image_list="" # list of image names 4 | model_dir="" # final reid model 5 | output_dir="" # dir of output file 6 | output_pkl="" # name of output file 7 | 8 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 9 | python examples/infer.py \ 10 | --image-dir $image_dir \ 11 | --masks-dir $masks_dir \ 12 | --input-list $image_list \ 13 | --output-dir $output_dir \ 14 | --output-pkl $output_pkl \ 15 | -a aicity_masks_seresnext101 \ 16 | --model-type 'masks' \ 17 | --weights $model_dir/model_best.pth.tar \ 18 | --num-m-features 128 \ 19 | --masks 20 \ 20 | --height 288 \ 21 | --width 384 \ 22 | --batch-size 32 \ 23 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/pretrain_models/seresnext101_base.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/pretrain_models/seresnext101_base.pth.tar -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/.trainers.py.un~: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/.trainers.py.un~ -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import datasets 4 | from . import evaluation_metrics 5 | from . import feature_extraction 6 | from . import loss 7 | from . import metric_learning 8 | from . import models 9 | from . import utils 10 | from . import dist_metric 11 | from . import evaluators 12 | from . import trainers 13 | 14 | __version__ = '0.2.0' 15 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import warnings 3 | 4 | from .aicity import * 5 | 6 | 7 | __factory = { 8 | 'small_vehicle': Small_Vehicle, 9 | } 10 | 11 | 12 | def names(): 13 | return sorted(__factory.keys()) 14 | 15 | 16 | def create(name, root, *args, **kwargs): 17 | """ 18 | Create a dataset instance. 19 | 20 | Parameters 21 | ---------- 22 | name : str 23 | The dataset name. Can be one of the names returned by names(); 24 | currently only 'small_vehicle' is registered. 25 | root : str 26 | The path to the dataset directory. 27 | split_id : int, optional 28 | The index of data split. Default: 0 29 | num_val : int or float, optional 30 | When int, it means the number of validation identities. When float, 31 | it means the proportion of validation to all the trainval. Default: 100 32 | download : bool, optional 33 | If True, will download the dataset. Default: False 34 | """ 35 | if name not in __factory: 36 | raise KeyError("Unknown dataset:", name) 37 | return __factory[name](root, *args, **kwargs) 38 | 39 | 40 | def get_dataset(name, root, *args, **kwargs): 41 | warnings.warn("get_dataset is deprecated. Use create instead.") 42 | return create(name, root, *args, **kwargs) 43 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/datasets/aicity.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import os.path as osp 3 | 4 | from ..utils.data import Dataset 5 | 6 | 7 | class Small_Vehicle(Dataset): 8 | def __init__(self, root, split_id=0, num_val=100, download=True): 9 | super(Small_Vehicle, self).__init__(root, split_id=split_id) 10 | 11 | if not self._check_integrity(): 12 | raise RuntimeError("Dataset not found or corrupted. 
" + 13 | "You can use download=True to download it.") 14 | 15 | self.load(num_val) 16 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/dist_metric.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | from .evaluators import extract_features 6 | from .metric_learning import get_metric 7 | 8 | 9 | class DistanceMetric(object): 10 | def __init__(self, algorithm='euclidean', *args, **kwargs): 11 | super(DistanceMetric, self).__init__() 12 | self.algorithm = algorithm 13 | self.metric = get_metric(algorithm, *args, **kwargs) 14 | 15 | def train(self, model, data_loader): 16 | if self.algorithm == 'euclidean': return 17 | features, labels = extract_features(model, data_loader) 18 | features = torch.stack(features.values()).numpy() 19 | labels = torch.Tensor(list(labels.values())).numpy() 20 | self.metric.fit(features, labels) 21 | 22 | def transform(self, X): 23 | if torch.is_tensor(X): 24 | X = X.numpy() 25 | X = self.metric.transform(X) 26 | X = torch.from_numpy(X) 27 | else: 28 | X = self.metric.transform(X) 29 | return X 30 | 31 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/evaluation_metrics/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .classification import accuracy 4 | from .ranking import cmc, mean_ap 5 | 6 | __all__ = [ 7 | 'accuracy', 8 | 'cmc', 9 | 'mean_ap', 10 | ] 11 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/evaluation_metrics/classification.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from ..utils import to_torch 4 | 5 | 6 | def accuracy(output, target, topk=(1,)): 7 | output, target = to_torch(output), to_torch(target) 8 | maxk = max(topk) 9 | batch_size = target.size(0) 10 | 11 | _, pred = output.topk(maxk, 1, True, True) 12 | pred = pred.t() 13 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 14 | 15 | ret = [] 16 | for k in topk: 17 | correct_k = correct[:k].view(-1).float().sum(dim=0, keepdim=True) 18 | ret.append(correct_k.mul_(1. 
/ batch_size)) 19 | return ret 20 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/feature_extraction/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .cnn import extract_cnn_feature 4 | from .rerank import re_ranking 5 | from .database import FeatureDatabase 6 | 7 | __all__ = [ 8 | 'extract_cnn_feature', 9 | 're_ranking', 10 | 'FeatureDatabase', 11 | ] 12 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/feature_extraction/cnn.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from collections import OrderedDict 3 | 4 | from torch.autograd import Variable 5 | 6 | from ..utils import to_torch 7 | 8 | 9 | def extract_cnn_feature(model, inputs, modules=None): 10 | model.eval() 11 | assert (len(inputs) == 1 or len(inputs) == 2) 12 | has_mask = (len(inputs) == 2) 13 | if has_mask: 14 | inputs, masks = inputs 15 | else: 16 | inputs = inputs[0] 17 | inputs = to_torch(inputs) 18 | inputs = Variable(inputs) 19 | if has_mask: 20 | masks = to_torch(masks) 21 | masks = Variable(masks, requires_grad=False) 22 | inputs = [inputs, masks] 23 | else: 24 | inputs = [inputs, None] 25 | if modules is None: 26 | outputs = model(*inputs) 27 | outputs = outputs.data.cpu() 28 | return outputs 29 | # Register forward hook for each module 30 | outputs = OrderedDict() 31 | handles = [] 32 | for m in modules: 33 | outputs[id(m)] = None 34 | def func(m, i, o): outputs[id(m)] = o.data.cpu() 35 | handles.append(m.register_forward_hook(func)) 36 | model(*inputs)  # unpack [inputs, masks], matching the call in the modules-is-None branch 37 | for h in handles: 38 | h.remove() 39 | return list(outputs.values()) 40 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/feature_extraction/database.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import h5py 4 | import numpy as np 5 | from torch.utils.data import Dataset 6 | 7 | 8 | class FeatureDatabase(Dataset): 9 | def __init__(self, *args, **kwargs): 10 | super(FeatureDatabase, self).__init__() 11 | self.fid = h5py.File(*args, **kwargs) 12 | 13 | def __enter__(self): 14 | return self 15 | 16 | def __exit__(self, exc_type, exc_val, exc_tb): 17 | self.close() 18 | 19 | def __getitem__(self, keys): 20 | if isinstance(keys, (tuple, list)): 21 | return [self._get_single_item(k) for k in keys] 22 | return self._get_single_item(keys) 23 | 24 | def _get_single_item(self, key): 25 | return np.asarray(self.fid[key]) 26 | 27 | def __setitem__(self, key, value): 28 | if key in self.fid: 29 | if self.fid[key].shape == value.shape and \ 30 | self.fid[key].dtype == value.dtype: 31 | self.fid[key][...] 
= value 32 | else: 33 | del self.fid[key] 34 | self.fid.create_dataset(key, data=value) 35 | else: 36 | self.fid.create_dataset(key, data=value) 37 | 38 | def __delitem__(self, key): 39 | del self.fid[key] 40 | 41 | def __len__(self): 42 | return len(self.fid) 43 | 44 | def __iter__(self): 45 | return iter(self.fid) 46 | 47 | def flush(self): 48 | self.fid.flush() 49 | 50 | def close(self): 51 | self.fid.close() 52 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .triplet import TripletLoss 4 | 5 | __all__ = [ 6 | 'TripletLoss', 7 | ] 8 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/metric_learning/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from metric_learn import (ITML_Supervised, LMNN, LSML_Supervised, 4 | SDML_Supervised, NCA, LFDA, RCA_Supervised) 5 | 6 | from .euclidean import Euclidean 7 | from .kissme import KISSME 8 | 9 | __factory = { 10 | 'euclidean': Euclidean, 11 | 'kissme': KISSME, 12 | 'itml': ITML_Supervised, 13 | 'lmnn': LMNN, 14 | 'lsml': LSML_Supervised, 15 | 'sdml': SDML_Supervised, 16 | 'nca': NCA, 17 | 'lfda': LFDA, 18 | 'rca': RCA_Supervised, 19 | } 20 | 21 | 22 | def get_metric(algorithm, *args, **kwargs): 23 | if algorithm not in __factory: 24 | raise KeyError("Unknown metric:", algorithm) 25 | return __factory[algorithm](*args, **kwargs) 26 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/metric_learning/euclidean.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 5 | 6 | 7 | class Euclidean(BaseMetricLearner): 8 | def __init__(self): 9 | self.M_ = None 10 | 11 | def metric(self): 12 | return self.M_ 13 | 14 | def fit(self, X): 15 | self.M_ = np.eye(X.shape[1]) 16 | self.X_ = X 17 | 18 | def transform(self, X=None): 19 | if X is None: 20 | return self.X_ 21 | return X 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/metric_learning/kissme.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | from metric_learn.base_metric import BaseMetricLearner 5 | 6 | 7 | def validate_cov_matrix(M): 8 | M = (M + M.T) * 0.5 9 | k = 0 10 | I = np.eye(M.shape[0]) 11 | while True: 12 | try: 13 | _ = np.linalg.cholesky(M) 14 | break 15 | except np.linalg.LinAlgError: 16 | # Find the nearest positive definite matrix for M. 
Modified from 17 | # http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd 18 | # Might take several minutes 19 | k += 1 20 | w, v = np.linalg.eig(M) 21 | min_eig = w.min()  # w holds the eigenvalues; v are the eigenvectors 22 | M += (-min_eig * k * k + np.spacing(min_eig)) * I 23 | return M 24 | 25 | 26 | class KISSME(BaseMetricLearner): 27 | def __init__(self): 28 | self.M_ = None 29 | 30 | def metric(self): 31 | return self.M_ 32 | 33 | def fit(self, X, y=None): 34 | n = X.shape[0] 35 | if y is None: 36 | y = np.arange(n) 37 | X1, X2 = np.meshgrid(np.arange(n), np.arange(n)) 38 | X1, X2 = X1[X1 < X2], X2[X1 < X2] 39 | matches = (y[X1] == y[X2]) 40 | num_matches = matches.sum() 41 | num_non_matches = len(matches) - num_matches 42 | idxa = X1[matches] 43 | idxb = X2[matches] 44 | S = X[idxa] - X[idxb] 45 | C1 = S.transpose().dot(S) / num_matches 46 | p = np.random.choice(num_non_matches, num_matches, replace=False) 47 | idxa = X1[~matches] 48 | idxb = X2[~matches] 49 | idxa = idxa[p] 50 | idxb = idxb[p] 51 | S = X[idxa] - X[idxb] 52 | C0 = S.transpose().dot(S) / num_matches 53 | self.M_ = np.linalg.inv(C1) - np.linalg.inv(C0) 54 | self.M_ = validate_cov_matrix(self.M_) 55 | self.X_ = X 56 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/models/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .aicity_masks_senet import * 4 | 5 | 6 | __factory = { 7 | 'aicity_masks_seresnext101': aicity_masks_seresnext101, 8 | } 9 | 10 | 11 | def names(): 12 | return sorted(__factory.keys()) 13 | 14 | 15 | def create(name, *args, **kwargs): 16 | """ 17 | Create a model instance. 18 | 19 | Parameters 20 | ---------- 21 | name : str 22 | Model name. Can be one of the names returned by names(); 23 | currently only 'aicity_masks_seresnext101' is registered. 24 | pretrained : bool, optional 25 | Only applied for 'resnet*' models. If True, will use ImageNet pretrained 26 | model. Default: True 27 | cut_at_pooling : bool, optional 28 | If True, will cut the model before the last global pooling layer and 29 | ignore the remaining kwargs. Default: False 30 | num_features : int, optional 31 | If positive, will append a Linear layer after the global pooling layer, 32 | with this number of output units, followed by a BatchNorm layer. 33 | Otherwise these layers will not be appended. Default: 256 for 34 | 'inception', 0 for 'resnet*' 35 | norm : bool, optional 36 | If True, will normalize the feature to be unit L2-norm for each sample. 37 | Otherwise will append a ReLU layer after the above Linear layer if 38 | num_features > 0. Default: False 39 | dropout : float, optional 40 | If positive, will append a Dropout layer with this dropout rate. 41 | Default: 0 42 | num_classes : int, optional 43 | If positive, will append a Linear layer at the end as the classifier 44 | with this number of output units. 
Default: 0 45 | """ 46 | if name not in __factory: 47 | raise KeyError("Unknown model:", name) 48 | return __factory[name](*args, **kwargs) 49 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/.serialization.py.un~: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/.serialization.py.un~ -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | 5 | 6 | def to_numpy(tensor): 7 | if torch.is_tensor(tensor): 8 | return tensor.cpu().numpy() 9 | elif type(tensor).__module__ != 'numpy': 10 | raise ValueError("Cannot convert {} to numpy array" 11 | .format(type(tensor))) 12 | return tensor 13 | 14 | 15 | def to_torch(ndarray): 16 | if type(ndarray).__module__ == 'numpy': 17 | return torch.from_numpy(ndarray) 18 | elif not torch.is_tensor(ndarray): 19 | raise ValueError("Cannot convert {} to torch tensor" 20 | .format(type(ndarray))) 21 | return ndarray 22 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/data/.preprocessor.py.un~: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/data/.preprocessor.py.un~ -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/data/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .dataset import Dataset 4 | from .preprocessor import Preprocessor, PreprocessorWithMasks 5 | from .sampler import RandomIdentitySampler, RandomIdentityAndCameraSampler 6 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/data/preprocessor.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import os.path as osp 4 | import math 5 | import random 6 | import numpy as np 7 | 8 | from PIL import Image 9 | import cPickle 10 | 11 | import torch 12 | 13 | 14 | class Preprocessor(object): 15 | def __init__(self, dataset, root=None, transform=None): 16 | super(Preprocessor, self).__init__() 17 | self.dataset = dataset 18 | self.root = root 19 | self.transform = transform 20 | 21 | def __len__(self): 22 | return len(self.dataset) 23 | 24 | def __getitem__(self, indices): 25 | if isinstance(indices, (tuple, list)): 26 | return [self._get_single_item(index) for index in indices] 27 | return self._get_single_item(indices) 28 | 29 | def _get_single_item(self, index): 30 | fname, pid, camid = self.dataset[index] 31 | fpath = fname 32 | if self.root is not None: 33 | fpath = osp.join(self.root, fname) 34 | img = Image.open(fpath).convert('RGB') 35 | if self.transform is not None: 36 | img = self.transform(img) 37 | return img, fname, pid, camid 38 | 39 | 40 | def do_resize(img, 
height, width): 41 | w, h = img.size 42 | if h == height and w == width: 43 | return img 44 | return img.resize((width, height), Image.BILINEAR) 45 | 46 | 47 | class PreprocessorWithMasks(object): 48 | def __init__(self, dataset, root=None, masks_root=None, 49 | height=None, width=None, num_masks=None, 50 | transform=None, is_training=True): 51 | super(PreprocessorWithMasks, self).__init__() 52 | self.dataset = dataset 53 | self.root = root 54 | self.masks_root = masks_root 55 | self.height = height 56 | self.width = width 57 | self.num_masks = num_masks 58 | self.transform = transform 59 | self.is_training = is_training 60 | 61 | def __len__(self): 62 | return len(self.dataset) 63 | 64 | def __getitem__(self, indices): 65 | if isinstance(indices, (tuple, list)): 66 | return [self._get_single_item(index) for index in indices] 67 | return self._get_single_item(indices) 68 | 69 | def _get_single_item(self, index): 70 | fname, pid, camid = self.dataset[index] 71 | fpath = fname; masks_fpath = None 72 | if self.root is not None: 73 | fpath = osp.join(self.root, fname) 74 | if self.masks_root is not None: 75 | masks_fpath = osp.join(self.masks_root, fname+'.pkl') 76 | is_hf = False 77 | if self.is_training: 78 | is_hf = random.random() < 0.5 79 | img = Image.open(fpath).convert('RGB') 80 | img = do_resize(img, self.height, self.width) 81 | if is_hf: 82 | img = img.transpose(Image.FLIP_LEFT_RIGHT) 83 | if masks_fpath is not None: 84 | with open(masks_fpath, 'r') as fp: 85 | masks = cPickle.load(fp) 86 | c, h, w = masks.shape 87 | if is_hf: 88 | masks = masks.reshape((-1, w)) 89 | for i in range(c*h): 90 | masks[i] = masks[i][::-1] 91 | masks = masks.reshape(c, h, w) 92 | if self.transform is not None: 93 | img = self.transform(img) 94 | masks = torch.from_numpy(masks).type_as(img) 95 | return img, masks, fname, pid, camid 96 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/data/sampler.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from collections import defaultdict 3 | 4 | import os 5 | import random 6 | import numpy as np 7 | import torch 8 | from torch.utils.data.sampler import ( 9 | Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler, 10 | WeightedRandomSampler) 11 | 12 | 13 | class RandomIdentitySampler(Sampler): 14 | def __init__(self, data_source, num_instances=1): 15 | self.data_source = data_source 16 | self.num_instances = num_instances 17 | self.index_dic = defaultdict(list) 18 | for index, (_, pid, _) in enumerate(data_source): 19 | self.index_dic[pid].append(index) 20 | self.pids = list(self.index_dic.keys()) 21 | self.num_samples = len(self.pids) 22 | 23 | def __len__(self): 24 | return self.num_samples * self.num_instances 25 | 26 | def __iter__(self): 27 | indices = torch.randperm(self.num_samples) 28 | ret = [] 29 | for i in indices: 30 | pid = self.pids[i] 31 | t = self.index_dic[pid] 32 | if len(t) >= self.num_instances: 33 | t = np.random.choice(t, size=self.num_instances, replace=False) 34 | else: 35 | t = np.random.choice(t, size=self.num_instances, replace=True) 36 | ret.extend(t) 37 | return iter(ret) 38 | 39 | 40 | class RandomIdentityAndCameraSampler(Sampler): 41 | def __init__(self, data_source, num_instances=1): 42 | self.data_source = data_source 43 | self.num_instances = num_instances 44 | self.index_dic = {} 45 | for index, (_, pid, camid) in enumerate(data_source): 46 | if pid 
not in self.index_dic: 47 | self.index_dic[pid] = {} 48 | if camid not in self.index_dic[pid]: 49 | self.index_dic[pid][camid] = [] 50 | self.index_dic[pid][camid].append(index) 51 | self.pids = list(self.index_dic.keys()) 52 | self.num_samples = len(self.pids) 53 | 54 | def __len__(self): 55 | return self.num_samples * self.num_instances 56 | 57 | def __iter__(self): 58 | indices = torch.randperm(self.num_samples) 59 | ret = [] 60 | for i in indices: 61 | pid = self.pids[i] 62 | camids = self.index_dic[pid].keys() 63 | if len(camids) >= self.num_instances: 64 | camids = np.random.choice(camids, size=self.num_instances, replace=False) 65 | else: 66 | new_camids = [] 67 | while (self.num_instances-len(new_camids) >= len(camids)): 68 | new_camids.extend(camids) 69 | if self.num_instances-len(new_camids) != 0: 70 | supl_camids = np.random.choice(camids, size=(self.num_instances-len(new_camids)), replace=False) 71 | new_camids.extend(supl_camids) 72 | camids = new_camids 73 | t = [] 74 | for camid in camids: 75 | index = np.random.choice(self.index_dic[pid][camid], size=1, replace=False) 76 | t.append(index[0]) 77 | ret.extend(t) 78 | return iter(ret) 79 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/logging.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import sys 4 | 5 | from .osutils import mkdir_if_missing 6 | 7 | 8 | class Logger(object): 9 | def __init__(self, fpath=None): 10 | self.console = sys.stdout 11 | self.file = None 12 | if fpath is not None: 13 | mkdir_if_missing(os.path.dirname(fpath)) 14 | self.file = open(fpath, 'w') 15 | 16 | def __del__(self): 17 | self.close() 18 | 19 | def __enter__(self): 20 | pass 21 | 22 | def __exit__(self, *args): 23 | self.close() 24 | 25 | def write(self, msg): 26 | self.console.write(msg) 27 | if self.file is not None: 28 | self.file.write(msg) 29 | 30 | def flush(self): 31 | self.console.flush() 32 | if self.file is not None: 33 | self.file.flush() 34 | os.fsync(self.file.fileno()) 35 | 36 | def close(self): 37 | self.console.close() 38 | if self.file is not None: 39 | self.file.close() 40 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/meters.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | 4 | class AverageMeter(object): 5 | """Computes and stores the average and current value""" 6 | 7 | def __init__(self): 8 | self.val = 0 9 | self.avg = 0 10 | self.sum = 0 11 | self.count = 0 12 | 13 | def reset(self): 14 | self.val = 0 15 | self.avg = 0 16 | self.sum = 0 17 | self.count = 0 18 | 19 | def update(self, val, n=1): 20 | self.val = val 21 | self.sum += val * n 22 | self.count += n 23 | self.avg = self.sum / self.count 24 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/osutils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import os 3 | import errno 4 | 5 | 6 | def mkdir_if_missing(dir_path): 7 | try: 8 | os.makedirs(dir_path) 9 | except OSError as e: 10 | if e.errno != errno.EEXIST: 11 | raise 12 | -------------------------------------------------------------------------------- 
/Track2(ReID)/part3_model/vehicle-reid-keypoint/reid/utils/serialization.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import json 3 | import os.path as osp 4 | import shutil 5 | 6 | import torch 7 | from torch.nn import Parameter 8 | 9 | from .osutils import mkdir_if_missing 10 | 11 | 12 | def read_json(fpath): 13 | with open(fpath, 'r') as f: 14 | obj = json.load(f) 15 | return obj 16 | 17 | 18 | def write_json(obj, fpath): 19 | mkdir_if_missing(osp.dirname(fpath)) 20 | with open(fpath, 'w') as f: 21 | json.dump(obj, f, indent=4, separators=(',', ': ')) 22 | 23 | 24 | def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'): 25 | # mkdir_if_missing(osp.dirname(fpath)) 26 | # torch.save(state, fpath) 27 | # if is_best: 28 | # shutil.copy(fpath, osp.join(osp.dirname(fpath), 'model_best.pth.tar')) 29 | mkdir_if_missing(osp.dirname(fpath)) 30 | if is_best: 31 | torch.save(state, fpath) 32 | 33 | 34 | def load_checkpoint(fpath): 35 | if osp.isfile(fpath): 36 | checkpoint = torch.load(fpath) 37 | print("=> Loaded checkpoint '{}'".format(fpath)) 38 | return checkpoint 39 | else: 40 | raise ValueError("=> No checkpoint found at '{}'".format(fpath)) 41 | 42 | 43 | def copy_state_dict(state_dict, model, strip=None): 44 | tgt_state = model.state_dict() 45 | copied_names = set() 46 | for name, param in state_dict.items(): 47 | if strip is not None and name.startswith(strip): 48 | name = name[len(strip):] 49 | if name not in tgt_state: 50 | continue 51 | if isinstance(param, Parameter): 52 | param = param.data 53 | if param.size() != tgt_state[name].size(): 54 | print('mismatch:', name, param.size(), tgt_state[name].size()) 55 | continue 56 | tgt_state[name].copy_(param) 57 | copied_names.add(name) 58 | 59 | missing = set(tgt_state.keys()) - copied_names 60 | if len(missing) > 0: 61 | print("missing keys in state_dict:", missing) 62 | 63 | return model 64 | -------------------------------------------------------------------------------- /Track2(ReID)/part3_model/vehicle-reid-keypoint/train.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 2 | python examples/train.py \ 3 | -a aicity_masks_seresnext101 \ 4 | --model-type 'masks' \ 5 | --weights ./pretrain_models/seresnext101_base.pth.tar \ 6 | --num-m-features 128 \ 7 | --masks 20 \ 8 | -d small_vehicle \ 9 | --combine-trainval \ 10 | --height 288 \ 11 | --width 384 \ 12 | -b 64 \ 13 | --lr 0.03 \ 14 | --lrm 0.1 \ 15 | --weight-decay 0.0005 \ 16 | --epochs 250 \ 17 | --warm-up-ep 50 \ 18 | --step-size 100 \ 19 | --start-save 200 \ 20 | --seed 0 \ 21 | --logs-dir ./logs 22 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/concat_feature.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import numpy as np 4 | 5 | def load_features(pickle_file_path): 6 | pickle_file = open(pickle_file_path, 'rb') 7 | features = pickle.load(pickle_file) 8 | pickle_file.close() 9 | return features 10 | 11 | if __name__ == '__main__': 12 | print('running...') 13 | root_path = os.getcwd() 14 | query_file_list = ['query_avg_res101_complete.pkl', 'query_kp_features_seresnext101_288by384_init1e1_lr3e2.pkl', 'query_se_res152_multi.pkl', 'query_avg_se_complete.pkl', 'query_mgn101_288_384_combine_fea_multi.pkl', 
'query_hrnet_288_384_combine_fea_multi.pkl', 'query_sac101_288_384_combine_fea_multi.pkl'] 15 | test_file_list = ['gallery_avg_res101_complete.pkl', 'gallery_kp_features_seresnext101_288by384_init1e1_lr3e2.pkl', 'gallery_se_res152.pkl', 'gallery_avg_se_complete.pkl', 'gallery_mgn101_288_384_combine_fea.pkl', 'gallery_hrnet_288_384_combine_fea.pkl', 'gallery_sac101_288_384_combine_fea.pkl'] 16 | query_feature_list = [] 17 | test_feature_list = [] 18 | for i in range(len(query_file_list)): 19 | query_feature = load_features(root_path + '/pickle_file/pure_features/' + query_file_list[i]) 20 | query_feature_list.append(query_feature) 21 | test_feature = load_features(root_path + '/pickle_file/pure_features/' + test_file_list[i]) 22 | test_feature_list.append(test_feature) 23 | 24 | concat_query_features = {} 25 | for k in query_feature_list[0].keys(): 26 | concat_query_features[k] = np.concatenate([query_feature_list[i][k] for i in range(len(query_feature_list))]) 27 | 28 | concat_test_features = {} 29 | for k in test_feature_list[0].keys(): 30 | concat_test_features[k] = np.concatenate([test_feature_list[i][k] for i in range(len(test_feature_list))]) 31 | 32 | query_res_file = open(root_path + '/pickle_file/query_newbig7_cat_complete.pkl', 'wb') 33 | pickle.dump(concat_query_features, query_res_file) 34 | query_res_file.close() 35 | test_res_file = open(root_path + '/pickle_file/test_newbig7_cat_complete.pkl', 'wb') 36 | pickle.dump(concat_test_features, test_res_file) 37 | test_res_file.close() 38 | print('Done') 39 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/direction_similarity_generation.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import os 3 | import numpy as np 4 | 5 | 6 | if __name__ == '__main__': 7 | print('running...') 8 | root_path = os.getcwd() 9 | 10 | query_direct_pfile = open(root_path + '/pickle_file/direction_file/query_direction.pkl', 'rb') 11 | query_direct = pickle.load(query_direct_pfile) 12 | query_direct_pfile.close() 13 | gallery_direct_pfile = open(root_path + '/pickle_file/direction_file/gallery_direction.pkl', 'rb') 14 | gallery_direct = pickle.load(gallery_direct_pfile) 15 | gallery_direct_pfile.close() 16 | 17 | query_keys = list(query_direct.keys()) 18 | query_keys.sort() 19 | query_direct = np.concatenate([np.expand_dims(query_direct[f], 0) for f in query_keys]) 20 | gallery_keys = list(gallery_direct.keys()) 21 | gallery_keys.sort() 22 | gallery_direct = np.concatenate([np.expand_dims(gallery_direct[f], 0) for f in gallery_keys]) 23 | gallery_direct = np.transpose(gallery_direct) 24 | 25 | q_g_direct_sim = np.dot(query_direct, gallery_direct) 26 | with open(root_path + '/pickle_file/q_g_direct_sim.pkl', 'wb') as p_f: 27 | pickle.dump(q_g_direct_sim, p_f, protocol=2) 28 | 29 | q_q_direct_sim = np.dot(query_direct, np.transpose(query_direct)) 30 | with open(root_path + '/pickle_file/q_q_direct_sim.pkl', 'wb') as p_f: 31 | pickle.dump(q_q_direct_sim, p_f, protocol=2) 32 | 33 | g_g_direct_sim = np.dot(gallery_direct, np.transpose(gallery_direct)) 34 | with open(root_path + '/pickle_file/g_g_direct_sim.pkl', 'wb') as p_f: 35 | pickle.dump(g_g_direct_sim, p_f, protocol=2) 36 | 37 | print('Done') 38 | 39 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/gallery_track_info_combine_official_track.py: 
-------------------------------------------------------------------------------- 1 | import shutil 2 | import pickle 3 | import pdb 4 | import numpy as np 5 | import os 6 | 7 | 8 | with open('./gallery_track_info.pkl','r') as fid: 9 | gallery_track_info = pickle.load(fid) 10 | print('load gallery_track_info') 11 | 12 | 13 | with open('gallery_cid_info.pkl','r') as fid: 14 | gallery_cid_track_info = pickle.load(fid) 15 | 16 | with open('group_gallery_info_by_ori_track_info.pkl','r') as fid: 17 | group_gallery_info_ori_track_info = pickle.load(fid) 18 | 19 | new_gallery_track_info = {} 20 | for i in range(18290): 21 | gallery_name = str(i+1).zfill(6)+'.jpg' 22 | new_gallery_track_info[gallery_name]={} 23 | new_gallery_track_info[gallery_name]['cam'] = '' 24 | new_gallery_track_info[gallery_name]['id'] ='' 25 | new_gallery_track_info[gallery_name]['start_time'] ='' 26 | new_gallery_track_info[gallery_name]['end_time'] ='' 27 | new_gallery_track_info[gallery_name]['group_id'] ='' 28 | new_gallery_track_info[gallery_name]['cam_set'] = set() 29 | 30 | count = 0 31 | for i in range(18290): 32 | gallery_name = str(i+1).zfill(6)+'.jpg' 33 | gallery_cam = gallery_track_info[gallery_name]['cam'] 34 | gallery_id = gallery_track_info[gallery_name]['id'] 35 | gallery_stime = gallery_track_info[gallery_name]['start_time'] 36 | gallery_etime = gallery_track_info[gallery_name]['end_time'] 37 | gallery_frame = gallery_track_info[gallery_name]['frame'] 38 | 39 | gallery_group_id = gallery_cid_track_info[gallery_name]['groupid'] 40 | gallery_cam_set = gallery_cid_track_info[gallery_name]['cams'] 41 | 42 | 43 | gallery_group_imgs = group_gallery_info_ori_track_info[gallery_group_id]['pic'] 44 | 45 | new_stime = gallery_stime 46 | new_etime = gallery_etime 47 | for each_img in gallery_group_imgs: 48 | img_cam = gallery_track_info[each_img]['cam'] 49 | if img_cam != gallery_cam: 50 | continue 51 | img_stime = gallery_track_info[each_img]['start_time'] 52 | img_etime = gallery_track_info[each_img]['end_time'] 53 | max_stime = max(gallery_stime, img_stime) 54 | min_etime = min(gallery_etime, img_etime) 55 | if max_stime-min_etime>1000: 56 | continue 57 | new_stime = min(new_stime, img_stime) 58 | new_etime = max(new_etime, img_etime) 59 | 60 | new_gallery_track_info[gallery_name]['cam'] = gallery_cam 61 | new_gallery_track_info[gallery_name]['id'] = gallery_id 62 | new_gallery_track_info[gallery_name]['frame'] = gallery_frame 63 | new_gallery_track_info[gallery_name]['start_time'] = new_stime 64 | new_gallery_track_info[gallery_name]['end_time'] = new_etime 65 | new_gallery_track_info[gallery_name]['group_id'] = gallery_group_id 66 | new_gallery_track_info[gallery_name]['cam_set'] = gallery_cam_set 67 | if (new_etime-new_stime)-(gallery_etime-gallery_stime)>100: 68 | count+=1 69 | print(gallery_name,gallery_cam,gallery_id,gallery_cam_set,new_stime,new_etime,gallery_stime,gallery_etime) 70 | print(count) 71 | 72 | 73 | with open('new_gallery_track_info.pkl','w') as fid: 74 | pickle.dump(new_gallery_track_info,fid) 75 | pdb.set_trace() 76 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/generate_result.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import time 3 | from collections import OrderedDict 4 | from collections import defaultdict 5 | from sklearn.metrics import average_precision_score 6 | import numpy as np 7 | import torch 
8 | import pickle 9 | import os 10 | 11 | 12 | def to_numpy(tensor): 13 | if torch.is_tensor(tensor): 14 | return tensor.cpu().numpy() 15 | elif type(tensor).__module__ != 'numpy': 16 | raise ValueError("Cannot convert {} to numpy array" 17 | .format(type(tensor))) 18 | return tensor 19 | 20 | def extract_features(pickle_file): 21 | f = open(pickle_file, 'rb') 22 | features = pickle.load(f) 23 | f.close() 24 | return features 25 | 26 | 27 | 28 | def vehicle_pairwise_distance(query_features, test_features, query, gallery): 29 | x = torch.cat([torch.from_numpy(query_features[f]).unsqueeze(0) for f in query], 0) 30 | y = torch.cat([torch.from_numpy(test_features[f]).unsqueeze(0) for f in gallery], 0) 31 | m, n = x.size(0), y.size(0) 32 | x = x.view(m, -1) 33 | y = y.view(n, -1) 34 | dist = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \ 35 | torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t() 36 | dist.addmm_(1, -2, x, y.t()) 37 | return dist 38 | 39 | 40 | if __name__ == '__main__': 41 | print('running...') 42 | root_path = os.getcwd() 43 | 44 | 45 | distmat_pfile = open(root_path + '/reid/pickle_file/dist_file/may_9_gd1_twf2_rerank_type_direct.pkl', 'r') 46 | distmat = pickle.load(distmat_pfile) 47 | distmat_pfile.close() 48 | 49 | sort_distmat_index = np.argsort(distmat, axis=1) 50 | with open('track2.txt', 'w') as f: 51 | for item in sort_distmat_index: 52 | for i in range(99): 53 | f.write(str(item[i] + 1) + ' ') 54 | f.write(str(item[99] + 1) + '\n') 55 | 56 | print('Done') 57 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/get_gallery_cam_trackid_from_ori_track_info.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | import shutil 4 | import pdb 5 | import numpy as np 6 | import copy 7 | 8 | with open('test_track.txt','r') as fid: 9 | test_track_ori = fid.readlines() 10 | print('load given gallery track info') 11 | 12 | 13 | with open('gallery_track_info.pkl','r') as fid: 14 | gallery_track_info = pickle.load(fid) 15 | print('load my gallery group res') 16 | 17 | 18 | 19 | ori_gallery_group = {} 20 | count = 0 21 | groups_info_each_gallery_ori = {} 22 | 23 | for line in test_track_ori: 24 | track_contain_id = line.strip() 25 | track_ids = track_contain_id.split(' ') 26 | ori_gallery_group[count] = track_ids 27 | 28 | for each_gallery in track_ids: 29 | groups_info_each_gallery_ori[each_gallery]=count 30 | count +=1 31 | # pdb.set_trace() 32 | gallery_combine_ori_track_info = {} 33 | for i in range(18290): 34 | gallery_name = str(i+1).zfill(6)+'.jpg' 35 | print(gallery_name) 36 | 37 | gallery_cam_extend = set() 38 | gallery_cam = gallery_track_info[gallery_name]['cam'] 39 | gallery_id = gallery_track_info[gallery_name]['id'] 40 | gallery_cid = gallery_cam +'_'+ str(gallery_id) 41 | gallery_cam_extend.add(gallery_cid) 42 | 43 | group_id = groups_info_each_gallery_ori[gallery_name] 44 | group_imgs = ori_gallery_group[group_id] 45 | for each_img in group_imgs: 46 | cur_img_cam = gallery_track_info[each_img]['cam'] 47 | cur_img_id = gallery_track_info[each_img]['id'] 48 | cur_cid = cur_img_cam +'_'+str(cur_img_id) 49 | gallery_cam_extend.add(cur_cid) 50 | gallery_combine_ori_track_info[gallery_name] = {} 51 | gallery_combine_ori_track_info[gallery_name]['cam'] = gallery_cam 52 | gallery_combine_ori_track_info[gallery_name]['id'] = gallery_id 53 | gallery_combine_ori_track_info[gallery_name]['cams'] = gallery_cam_extend 54 
| gallery_combine_ori_track_info[gallery_name]['groupid'] = group_id 55 | pdb.set_trace() 56 | with open('gallery_cid_info.pkl','w') as fid: 57 | pickle.dump(gallery_combine_ori_track_info,fid) 58 | 59 | group_gallery_info_ori_track_info = {} 60 | for i in range(count): 61 | print(i) 62 | group_gallery_info_ori_track_info[i] = {} 63 | group_gallery_info_ori_track_info[i]['pic'] = ori_gallery_group[i] 64 | group_gallery_info_ori_track_info[i]['cam'] = gallery_combine_ori_track_info[ ori_gallery_group[i][0] ]['cams'] 65 | with open('group_gallery_info_by_ori_track_info.pkl','w') as fid: 66 | pickle.dump(group_gallery_info_ori_track_info,fid) 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/group_dis_after_rerank.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pdb 3 | import shutil 4 | import numpy as np 5 | import pickle 6 | 7 | 8 | with open('qg_dist.pkl','r') as fid: 9 | qg_dist = pickle.load(fid) 10 | 11 | with open('group_gallery_info_by_ori_track_info.pkl','r') as fid: 12 | group_gallery_info_ori_track_info = pickle.load(fid) 13 | 14 | 15 | group_dis = np.zeros((1052,18290)) 16 | for each_group in group_gallery_info_ori_track_info.keys(): 17 | pics = group_gallery_info_ori_track_info[each_group]['pic'] 18 | pic_index = [int(ii.split('.')[0])-1 for ii in pics] 19 | group_all_dis = qg_dist[:,pic_index] 20 | group_min_dis = np.min(group_all_dis,axis=1) 21 | for ii in pic_index: 22 | group_dis[:,ii] = group_min_dis 23 | with open('group_dis.pkl','w') as fid: 24 | pickle.dump(group_dis,fid) 25 | 26 | cur_dist = qg_dist + group_dis 27 | with open('new_q_g_dist.pkl','w') as fid: 28 | pickle.dump(cur_dist,fid) 29 | print("Done!") 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/merge_feature.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | 5 | def load_features(pickle_file_path): 6 | pickle_file = open(pickle_file_path, 'rb') 7 | features = pickle.load(pickle_file) 8 | pickle_file.close() 9 | return features 10 | 11 | if __name__ == '__main__': 12 | print('running...') 13 | root_path = os.getcwd() 14 | res_file = open(root_path + '/pickle_file/save_file_name.pickle', 'wb') 15 | features1 = load_features(root_path + '/pickle_file/your_feature_pickle_file1.pickle') 16 | features2 = load_features(root_path + '/pickle_file/your_feature_pickle_file2.pickle') 17 | final_features = {} 18 | for k in features1.keys(): 19 | final_features[k] = (features1[k] + features2[k]) / 2 20 | pickle.dump(final_features, res_file) 21 | res_file.close() 22 | print('Done') 23 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/readme.md: -------------------------------------------------------------------------------- 1 | ## Utility and Constraint Related Codes ## 2 | 3 | **Please modify related paths before running the codes.** 4 | 5 | * merge_feature.py 6 | Compute the average feature of two feature files. 7 | 8 | * concat_feature.py 9 | Concatenate features. 10 | 11 | * gallery_track_info_combine_official_track.py 12 | Combine original track info with tracking algorithm results. 
13 | 14 | * get_gallery_cam_trackid_from_ori_track_info.py 15 | Get gallery cam and trackid info from the official test_track.txt. 16 | 17 | * direction_similarity_generation.py 18 | Compute vehicle orientation similarity. 19 | 20 | * type_similarity_generation.py 21 | Compute vehicle type similarity. 22 | 23 | * run_add_dist.py 24 | Add cam_id and direction constraint punishment. Refer to our paper for more details. It contains the function for calculating the euclidean distance matrix. 25 | 26 | * type_punish.py 27 | Add vehicle type constraint punishment. 28 | 29 | * twf_v2_before_rerank.py 30 | Add time window filter (temporal constraint) before rerank. 31 | 32 | * run_rerank.py 33 | Run the reranking algorithm. 34 | 35 | * twf_v2_after_rerank.py 36 | Add time window filter (temporal constraint) after rerank. 37 | 38 | * group_dis_after_rerank.py 39 | Add group distance on the original q_g_dist. 40 | 41 | * generate_result.py 42 | Generate the final ranking results. 43 | 44 | 45 | In order to get the final ranking results, the user should: 46 | 1. Train all reid models and get the merged and concatenated features. 47 | 2. Calculate the original euclidean distance matrix. 48 | 3. Use run_add_dist.py, type_punish.py and twf_v2_before_rerank.py to add constraint punishment. 49 | 4. Run run_rerank.py to conduct reranking. 50 | 5. Use run_add_dist.py, type_punish.py and twf_v2_after_rerank.py to add constraint punishment again. 51 | 6. Run group_dis_after_rerank.py to add group distance. 52 | 7. Run generate_result.py to get the final ranking results. 53 | Note: tracking and other auxiliary information will be used in this procedure. A minimal sketch of the euclidean distance computation in step 2 is given at the end of this file. 54 | 55 | ## Some Important Pickle Files Related to This Part ## 56 | 57 | Several important pickle files are available at [pickle_file](https://pan.baidu.com/s/1u6d6dX0uPvyrqgOB0O4Qyg) (extract code: p3fg). Some files related to this part are listed below. The remaining files are introduced in other parts. 58 | 59 | * gallery_cid_info.pkl 60 | This file contains each gallery image’s cam, trackid, groupid and all (cams_trackids) in this group. 61 | 62 | * group_gallery_info_by_ori_track_info.pkl 63 | This file contains each group’s gallery images and all (cam_trackids) in this group. 64 | 65 | * query_track_info.pkl and gallery_track_info.pkl 66 | These two files contain cam, trackid, track_start_time and track_end_time information of each query and gallery image. All this information is obtained from our tracking algorithm. 67 | 68 | * new_gallery_track_info.pkl 69 | This file contains each gallery image’s cam, trackid, track_start_time, track_end_time, etc. This info is combined with the tracking algorithm results (gallery_track_info). 70 | 71 | * gallery_track_map.pkl 72 | This file contains the map between gallery image name and the corresponding trackid. 73 | 74 | * may_9_gd1_twf2_rerank_type_direct.pkl 75 | The final distance matrix used to generate ranking results. 
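## Sketch: Euclidean Distance Matrix (Step 2) ##

A minimal sketch of step 2 above, not the actual run_add_dist.py implementation. It assumes the feature pickles map image names to numpy feature vectors, as produced by merge_feature.py and concat_feature.py; the helper name euclidean_distmat is hypothetical.

```python
import pickle
import numpy as np


def euclidean_distmat(query_features, gallery_features):
    # Stack features in sorted image-name order ('000001.jpg', ...),
    # matching the index-based ordering used in generate_result.py.
    q_names = sorted(query_features.keys())
    g_names = sorted(gallery_features.keys())
    x = np.stack([query_features[f].ravel() for f in q_names])
    y = np.stack([gallery_features[f].ravel() for f in g_names])
    # Squared euclidean distance: ||x||^2 + ||y||^2 - 2*x.y, the same
    # quantity computed by vehicle_pairwise_distance in generate_result.py.
    # It is monotonic in the true distance, so rankings are unaffected.
    return (x ** 2).sum(1, keepdims=True) + (y ** 2).sum(1) - 2.0 * x.dot(y.T)


with open('pickle_file/query_newbig7_cat_complete.pkl', 'rb') as f:
    query_features = pickle.load(f)
with open('pickle_file/test_newbig7_cat_complete.pkl', 'rb') as f:
    gallery_features = pickle.load(f)

distmat = euclidean_distmat(query_features, gallery_features)  # (1052, 18290)
```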
76 | -------------------------------------------------------------------------------- /Track2(ReID)/utility_and_constraint_related_codes/type_punish.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import os 3 | import numpy as np 4 | import pdb 5 | 6 | 7 | if __name__ == '__main__': 8 | print('running...') 9 | root_path = os.getcwd() 10 | 11 | q_track_file = open(root_path + '/reid/pickle_file/track_info/query_track_info.pkl', 'rb') 12 | g_track_file = open(root_path + '/reid/pickle_file/track_info/gallery_track_info.pkl', 'rb') 13 | q_track_info = pickle.load(q_track_file) 14 | g_track_info = pickle.load(g_track_file) 15 | q_track_file.close() 16 | g_track_file.close() 17 | 18 | 19 | type_sim_file = open(root_path + '/reid/pickle_file/track_info/type_sim.pkl', 'rb') 20 | type_sim = pickle.load(type_sim_file) 21 | type_sim_file.close() 22 | 23 | dist_name = 'distmat_strategy' 24 | distmat_pfile = open(root_path + '/reid/pickle_file/dist_file/' + dist_name + '.pkl', 'rb') 25 | distmat = pickle.load(distmat_pfile) 26 | distmat_pfile.close() 27 | for i in range(1, 1053): 28 | q_name = '%06d' %i 29 | q_name = q_name + '.jpg' 30 | q_cam = q_track_info[q_name]['cam'] 31 | for j in range(1, 18291): 32 | g_name = '%06d' %(j) 33 | g_name = g_name + '.jpg' 34 | g_cam = g_track_info[g_name]['cam'] 35 | if q_cam == 'c035' or g_cam == 'c035': 36 | if distmat[i-1][j-1] > 3.0 and type_sim[i-1][j-1] < 0.1: 37 | distmat[i-1][j-1] += 5.0 38 | 39 | dist_pf = open(root_path + '/reid/pickle_file/distmat_with_type_punish.pkl', 'wb') 40 | pickle.dump(distmat, dist_pf) 41 | dist_pf.close() 42 | 43 | print('Done') 44 | 45 | -------------------------------------------------------------------------------- /quali_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/quali_1.jpg -------------------------------------------------------------------------------- /quali_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wzgwzg/AICity/c52d5d478cb4bb4d86251dc3b1379d94f378659e/quali_2.jpg --------------------------------------------------------------------------------