├── .idea
│   ├── .gitignore
│   ├── SW_Faster_ICR_CCR.iml
│   ├── inspectionProfiles
│   │   ├── Project_Default.xml
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── README.md
├── __pycache__
│   ├── _init_paths.cpython-36.pyc
│   └── pseudo.cpython-36.pyc
├── _init_paths.py
├── cfgs
│   ├── res101.yml
│   ├── res101_ls.yml
│   ├── res50.yml
│   └── vgg16.yml
├── dafaster_train_net.py
├── data
│   └── cache
│       ├── cityscape_2007_test_t_gt_roidb.pkl
│       ├── cityscape_2007_train_s_gt_roidb.pkl
│       └── cityscape_2007_train_t_gt_roidb.pkl
├── eval
│   ├── __init__.py
│   ├── __pycache__
│   │   └── _init_paths.cpython-36.pyc
│   ├── _init_paths.py
│   ├── test.py
│   ├── test_SW_ICR_CCR.py
│   ├── test_msda.py
│   └── test_strong_weak.py
├── lib
├── build
│ ├── lib.linux-x86_64-3.6
│ │ ├── datasets
│ │ │ ├── __init__.py
│ │ │ ├── boxes.py
│ │ │ ├── cityscape.py
│ │ │ ├── cityscapes_car.py
│ │ │ ├── clipart.py
│ │ │ ├── coco.py
│ │ │ ├── config_dataset.py
│ │ │ ├── convert_cityscapes_to_caronly_coco.py
│ │ │ ├── convert_cityscapes_to_unlabeled_caronly_coco.py
│ │ │ ├── convert_sim10k_to_coco.py
│ │ │ ├── ds_utils.py
│ │ │ ├── factory.py
│ │ │ ├── imagenet.py
│ │ │ ├── imdb.py
│ │ │ ├── pascal_voc.py
│ │ │ ├── pascal_voc_rbg.py
│ │ │ ├── pascal_voc_water.py
│ │ │ ├── rpc.py
│ │ │ ├── rpc_fake.py
│ │ │ ├── segms.py
│ │ │ ├── sim10k_coco.py
│ │ │ ├── vg.py
│ │ │ ├── vg_eval.py
│ │ │ ├── voc_eval.py
│ │ │ ├── voc_eval_no_add_1.py
│ │ │ └── water.py
│ │ ├── model
│ │ │ ├── _C.cpython-36m-x86_64-linux-gnu.so
│ │ │ ├── __init__.py
│ │ │ ├── faster_rcnn
│ │ │ │ ├── __init__.py
│ │ │ │ ├── faster_rcnn.py
│ │ │ │ ├── resnet.py
│ │ │ │ └── vgg16.py
│ │ │ ├── nms
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _ext
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── nms
│ │ │ │ │ │ └── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── nms_cpu.py
│ │ │ │ ├── nms_gpu.py
│ │ │ │ └── nms_wrapper.py
│ │ │ ├── roi_align
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _ext
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_align
│ │ │ │ │ │ └── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── functions
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_align.py
│ │ │ │ └── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_align.py
│ │ │ ├── roi_crop
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _ext
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── crop_resize
│ │ │ │ │ │ └── __init__.py
│ │ │ │ │ └── roi_crop
│ │ │ │ │ │ └── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── functions
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── crop_resize.py
│ │ │ │ │ ├── gridgen.py
│ │ │ │ │ └── roi_crop.py
│ │ │ │ └── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── gridgen.py
│ │ │ │ │ └── roi_crop.py
│ │ │ ├── roi_layers
│ │ │ │ ├── __init__.py
│ │ │ │ ├── nms.py
│ │ │ │ ├── roi_align.py
│ │ │ │ └── roi_pool.py
│ │ │ ├── roi_pooling
│ │ │ │ ├── __init__.py
│ │ │ │ ├── _ext
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_pooling
│ │ │ │ │ │ └── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── functions
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_pool.py
│ │ │ │ └── modules
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── roi_pool.py
│ │ │ ├── rpn
│ │ │ │ ├── __init__.py
│ │ │ │ ├── anchor_target_layer.py
│ │ │ │ ├── bbox_transform.py
│ │ │ │ ├── generate_anchors.py
│ │ │ │ ├── proposal_layer.py
│ │ │ │ ├── proposal_target_layer_cascade.py
│ │ │ │ └── rpn.py
│ │ │ └── utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── blob.py
│ │ │ │ ├── config.py
│ │ │ │ ├── logger.py
│ │ │ │ └── net_utils.py
│ │ ├── roi_da_data_layer
│ │ │ ├── __init__.py
│ │ │ ├── minibatch.py
│ │ │ ├── roibatchLoader.py
│ │ │ └── roidb.py
│ │ └── roi_data_layer
│ │ │ ├── __init__.py
│ │ │ ├── minibatch.py
│ │ │ ├── roibatchLoader.py
│ │ │ └── roidb.py
│   └── temp.linux-x86_64-3.6
│       └── mnt
│           └── data2
│               └── JINSU
│                   └── CR-DA-DET
│                       └── SW_Faster_ICR_CCR
│                           └── lib
│                               └── model
│                                   └── csrc
│                                       ├── cpu
│                                       │   ├── ROIAlign_cpu.o
│                                       │   └── nms_cpu.o
│                                       ├── cuda
│                                       │   ├── ROIAlign_cuda.o
│                                       │   ├── ROIPool_cuda.o
│                                       │   └── nms.o
│                                       └── vision.o
├── datasets.tar
├── datasets
│ ├── KITTI_car.py
│ ├── VOCdevkit-matlab-wrapper
│ │ ├── get_voc_opts.m
│ │ ├── voc_eval.m
│ │ └── xVOCap.m
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── KITTI_car.cpython-36.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── bdd100k_daytime.cpython-36.pyc
│ │ ├── bdd100k_daytime_car.cpython-36.pyc
│ │ ├── bdd100k_dd.cpython-36.pyc
│ │ ├── bdd100k_night.cpython-36.pyc
│ │ ├── bdd_eval.cpython-36.pyc
│ │ ├── cityscape.cpython-36.pyc
│ │ ├── cityscapes_car.cpython-36.pyc
│ │ ├── cityscapes_ms_car.cpython-36.pyc
│ │ ├── clipart.cpython-36.pyc
│ │ ├── coco.cpython-36.pyc
│ │ ├── config_dataset.cpython-36.pyc
│ │ ├── ds_utils.cpython-36.pyc
│ │ ├── factory.cpython-36.pyc
│ │ ├── imagenet.cpython-36.pyc
│ │ ├── imdb.cpython-36.pyc
│ │ ├── pascal_voc.cpython-36.pyc
│ │ ├── pascal_voc_water.cpython-36.pyc
│ │ ├── rpc.cpython-36.pyc
│ │ ├── rpc_fake.cpython-36.pyc
│ │ ├── sim10k_coco.cpython-36.pyc
│ │ ├── vg.cpython-36.pyc
│ │ ├── vg_eval.cpython-36.pyc
│ │ ├── voc_eval.cpython-36.pyc
│ │ └── water.cpython-36.pyc
│ ├── bdd100k_daytime.py
│ ├── bdd100k_daytime_car.py
│ ├── bdd100k_dd.py
│ ├── bdd100k_night.py
│ ├── bdd_eval.py
│ ├── boxes.py
│ ├── cityscape.py
│ ├── cityscapes_car.py
│ ├── cityscapes_ms_car.py
│ ├── clipart.py
│ ├── coco.py
│ ├── config_dataset.py
│ ├── convert_cityscapes_to_caronly_coco.py
│ ├── convert_cityscapes_to_unlabeled_caronly_coco.py
│ ├── convert_sim10k_to_coco.py
│ ├── ds_utils.py
│ ├── factory.py
│ ├── imagenet.py
│ ├── imdb.py
│ ├── pascal_voc.py
│ ├── pascal_voc_rbg.py
│ ├── pascal_voc_water.py
│ ├── rpc.py
│ ├── rpc_fake.py
│ ├── segms.py
│ ├── sim10k_coco.py
│ ├── vg.py
│ ├── vg_eval.py
│ ├── voc_eval.py
│ ├── voc_eval_no_add_1.py
│ └── water.py
├── faster_rcnn.egg-info
│ ├── PKG-INFO
│ ├── SOURCES.txt
│ ├── dependency_links.txt
│ └── top_level.txt
├── model
│ ├── _C.cpython-36m-x86_64-linux-gnu.so
│ ├── __init__.py
│ ├── __pycache__
│ │ └── __init__.cpython-36.pyc
│ ├── csrc
│ │ ├── ROIAlign.h
│ │ ├── ROIPool.h
│ │ ├── cpu
│ │ │ ├── ROIAlign_cpu.cpp
│ │ │ ├── nms_cpu.cpp
│ │ │ └── vision.h
│ │ ├── cuda
│ │ │ ├── ROIAlign_cuda.cu
│ │ │ ├── ROIPool_cuda.cu
│ │ │ ├── nms.cu
│ │ │ └── vision.h
│ │ ├── nms.h
│ │ └── vision.cpp
│ ├── da_faster_rcnn
│ │ ├── DA.py
│ │ ├── LabelResizeLayer.py
│ │ ├── __pycache__
│ │ │ ├── faster_rcnn.cpython-36.pyc
│ │ │ ├── resnet.cpython-36.pyc
│ │ │ └── vgg16.cpython-36.pyc
│ │ ├── faster_rcnn.py
│ │ ├── faster_rcnn_multi_label.py
│ │ ├── resnet.py
│ │ ├── resnet_multi_label.py
│ │ └── vgg16.py
│ ├── da_faster_rcnn_instance_da_weight
│ │ ├── DA.py
│ │ ├── LabelResizeLayer.py
│ │ ├── __pycache__
│ │ │ ├── DA.cpython-36.pyc
│ │ │ ├── LabelResizeLayer.cpython-36.pyc
│ │ │ ├── faster_rcnn.cpython-36.pyc
│ │ │ ├── resnet.cpython-36.pyc
│ │ │ └── vgg16.cpython-36.pyc
│ │ ├── faster_rcnn.py
│ │ ├── resnet.py
│ │ └── vgg16.py
│ ├── faster_rcnn
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── faster_rcnn.cpython-36.pyc
│ │ │ ├── resnet.cpython-36.pyc
│ │ │ └── vgg16.cpython-36.pyc
│ │ ├── faster_rcnn.py
│ │ ├── resnet.py
│ │ └── vgg16.py
│ ├── nms
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── _ext
│ │ │ ├── __init__.py
│ │ │ └── nms
│ │ │ │ └── __init__.py
│ │ ├── build.py
│ │ ├── make.sh
│ │ ├── nms_cpu.py
│ │ ├── nms_gpu.py
│ │ ├── nms_kernel.cu
│ │ ├── nms_wrapper.py
│ │ └── src
│ │ │ ├── nms_cuda.h
│ │ │ ├── nms_cuda_kernel.cu
│ │ │ └── nms_cuda_kernel.h
│ ├── roi_align
│ │ ├── __init__.py
│ │ ├── _ext
│ │ │ ├── __init__.py
│ │ │ └── roi_align
│ │ │ │ └── __init__.py
│ │ ├── build.py
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ └── roi_align.py
│ │ ├── make.sh
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ └── roi_align.py
│ │ └── src
│ │ │ ├── roi_align.c
│ │ │ ├── roi_align.h
│ │ │ ├── roi_align_cuda.c
│ │ │ ├── roi_align_cuda.h
│ │ │ ├── roi_align_kernel.cu
│ │ │ └── roi_align_kernel.h
│ ├── roi_crop
│ │ ├── __init__.py
│ │ ├── _ext
│ │ │ ├── __init__.py
│ │ │ ├── crop_resize
│ │ │ │ └── __init__.py
│ │ │ └── roi_crop
│ │ │ │ └── __init__.py
│ │ ├── build.py
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ ├── crop_resize.py
│ │ │ ├── gridgen.py
│ │ │ └── roi_crop.py
│ │ ├── make.sh
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── gridgen.py
│ │ │ └── roi_crop.py
│ │ └── src
│ │ │ ├── roi_crop.c
│ │ │ ├── roi_crop.h
│ │ │ ├── roi_crop_cuda.c
│ │ │ ├── roi_crop_cuda.h
│ │ │ ├── roi_crop_cuda_kernel.cu
│ │ │ └── roi_crop_cuda_kernel.h
│ ├── roi_layers
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── nms.cpython-36.pyc
│ │ │ ├── roi_align.cpython-36.pyc
│ │ │ └── roi_pool.cpython-36.pyc
│ │ ├── nms.py
│ │ ├── roi_align.py
│ │ └── roi_pool.py
│ ├── roi_pooling
│ │ ├── __init__.py
│ │ ├── _ext
│ │ │ ├── __init__.py
│ │ │ └── roi_pooling
│ │ │ │ └── __init__.py
│ │ ├── build.py
│ │ ├── functions
│ │ │ ├── __init__.py
│ │ │ └── roi_pool.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ └── roi_pool.py
│ │ └── src
│ │ │ ├── roi_pooling.c
│ │ │ ├── roi_pooling.h
│ │ │ ├── roi_pooling_cuda.c
│ │ │ ├── roi_pooling_cuda.h
│ │ │ ├── roi_pooling_kernel.cu
│ │ │ └── roi_pooling_kernel.h
│ ├── rpn
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── anchor_target_layer.cpython-36.pyc
│ │ │ ├── bbox_transform.cpython-36.pyc
│ │ │ ├── generate_anchors.cpython-36.pyc
│ │ │ ├── proposal_layer.cpython-36.pyc
│ │ │ ├── proposal_target_layer_cascade.cpython-36.pyc
│ │ │ └── rpn.cpython-36.pyc
│ │ ├── anchor_target_layer.py
│ │ ├── bbox_transform.py
│ │ ├── generate_anchors.py
│ │ ├── proposal_layer.py
│ │ ├── proposal_target_layer_cascade.py
│ │ └── rpn.py
│ └── utils
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── blob.cpython-36.pyc
│ │ ├── config.cpython-36.pyc
│ │ └── net_utils.cpython-36.pyc
│ │ ├── bbox.pyx
│ │ ├── blob.py
│ │ ├── config.py
│ │ ├── logger.py
│ │ └── net_utils.py
├── msda
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── faster_rcnn.cpython-36.pyc
│ │ ├── rpn.cpython-36.pyc
│ │ ├── utils.cpython-36.pyc
│ │ └── vgg16.cpython-36.pyc
│ ├── faster_rcnn.py
│ ├── rpn.py
│ ├── utils.py
│ └── vgg16.py
├── roi_da_data_layer
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── minibatch.cpython-36.pyc
│ │ ├── roibatchLoader.cpython-36.pyc
│ │ └── roidb.cpython-36.pyc
│ ├── minibatch.py
│ ├── roibatchLoader.py
│ └── roidb.py
├── roi_data_layer
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ ├── minibatch.cpython-36.pyc
│ │ ├── roibatchLoader.cpython-36.pyc
│ │ └── roidb.cpython-36.pyc
│ ├── minibatch.py
│ ├── roibatchLoader.py
│ └── roidb.py
└── setup.py
├── nohup.out
├── predict_all_boxes.pkl
├── result
│   ├── bicycle_pr.pkl
│   ├── bus_pr.pkl
│   ├── car_pr.pkl
│   ├── eval_result.txt
│   ├── motorcycle_pr.pkl
│   ├── person_pr.pkl
│   ├── rider_pr.pkl
│   ├── train_pr.pkl
│   └── truck_pr.pkl
├── test_msda.py
├── train_msda.py
└── train_msda.sh
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/.idea/SW_Faster_ICR_CCR.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Multi_Source_Domain_Adaptation_for_Object_Detection
2 | This repository is an unofficial PyTorch implementation of Multi-Source Domain Adaptation for Object Detection (ICCV 2021).
3 |
4 | To start training, run `sh train_msda.sh`.
5 |
6 | To test, run `python test_msda.py --net "BACKBONE" --gpus "GPU ID" --model_prefix "SAVED MODEL"`.
7 |
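8 | For example (illustrative values; substitute your own backbone, GPU id, and saved-model prefix):
9 |
10 | `python test_msda.py --net vgg16 --gpus 0 --model_prefix models/msda_vgg16`
11 |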
--------------------------------------------------------------------------------
/__pycache__/_init_paths.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/__pycache__/_init_paths.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/pseudo.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/__pycache__/pseudo.cpython-36.pyc
--------------------------------------------------------------------------------
/_init_paths.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import sys
3 |
4 |
5 | def add_path(path):
6 | if path not in sys.path:
7 | sys.path.insert(0, path)
8 |
9 |
10 | this_dir = osp.dirname(__file__)
11 | print(this_dir)
12 | # Add lib to PYTHONPATH
13 | lib_path = osp.join(this_dir, "lib")
14 | add_path(lib_path)
15 |
16 | coco_path = osp.join(this_dir, "data", "coco", "PythonAPI")
17 | add_path(coco_path)
18 |
--------------------------------------------------------------------------------
/cfgs/res101.yml:
--------------------------------------------------------------------------------
1 | EXP_DIR: res101
2 | TRAIN:
3 | HAS_RPN: True
4 | BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
5 | RPN_POSITIVE_OVERLAP: 0.7
6 | RPN_BATCHSIZE: 256
7 | PROPOSAL_METHOD: gt
8 | BG_THRESH_LO: 0.0
9 | DISPLAY: 20
10 | BATCH_SIZE: 128
11 | WEIGHT_DECAY: 0.0001
12 | DOUBLE_BIAS: False
13 | LEARNING_RATE: 0.001
14 | MAX_SIZE: 1200
15 | TEST:
16 | MAX_SIZE: 1000
17 | HAS_RPN: True
18 | POOLING_SIZE: 7
19 | POOLING_MODE: align
20 | CROP_RESIZE_WITH_MAX_POOL: False
21 |
--------------------------------------------------------------------------------
/cfgs/res101_ls.yml:
--------------------------------------------------------------------------------
1 | EXP_DIR: res101
2 | TRAIN:
3 | HAS_RPN: True
4 | BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
5 | RPN_POSITIVE_OVERLAP: 0.7
6 | RPN_BATCHSIZE: 256
7 | PROPOSAL_METHOD: gt
8 | BG_THRESH_LO: 0.0
9 | DISPLAY: 20
10 | BATCH_SIZE: 128
11 | WEIGHT_DECAY: 0.0001
12 | SCALES: [800]
13 | DOUBLE_BIAS: False
14 | LEARNING_RATE: 0.001
15 | TEST:
16 | HAS_RPN: True
17 | SCALES: [800]
18 | MAX_SIZE: 1200
19 | RPN_POST_NMS_TOP_N: 1000
20 | POOLING_SIZE: 7
21 | POOLING_MODE: align
22 | CROP_RESIZE_WITH_MAX_POOL: False
23 |
--------------------------------------------------------------------------------
/cfgs/res50.yml:
--------------------------------------------------------------------------------
1 | EXP_DIR: res50
2 | TRAIN:
3 | HAS_RPN: True
4 | # IMS_PER_BATCH: 1
5 | BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
6 | RPN_POSITIVE_OVERLAP: 0.7
7 | RPN_BATCHSIZE: 256
8 | PROPOSAL_METHOD: gt
9 | BG_THRESH_LO: 0.0
10 | DISPLAY: 20
11 | BATCH_SIZE: 256
12 | WEIGHT_DECAY: 0.0001
13 | DOUBLE_BIAS: False
14 | SNAPSHOT_PREFIX: res50_faster_rcnn
15 | TEST:
16 | HAS_RPN: True
17 | POOLING_MODE: crop
18 |
--------------------------------------------------------------------------------
/cfgs/vgg16.yml:
--------------------------------------------------------------------------------
1 | EXP_DIR: vgg16
2 | TRAIN:
3 | HAS_RPN: True
4 | BBOX_NORMALIZE_TARGETS_PRECOMPUTED: True
5 | RPN_POSITIVE_OVERLAP: 0.7
6 | RPN_BATCHSIZE: 256
7 | RPN_NMS_THRESH: 0.7
8 | PROPOSAL_METHOD: gt
9 | BG_THRESH_LO: 0.0
10 | BATCH_SIZE: 256
11 | LEARNING_RATE: 0.001
12 | MAX_SIZE: 1200
13 | TEST:
14 | MAX_SIZE: 1000
15 | HAS_RPN: True
16 | RPN_NMS_THRESH: 0.7
17 | POOLING_MODE: align
18 | CROP_RESIZE_WITH_MAX_POOL: False
19 | ANCHOR_SCALES: [8, 16, 32]
20 | ANCHOR_RATIOS: [0.5, 1, 2]
21 | MAX_NUM_GT_BOXES: 30
22 |
--------------------------------------------------------------------------------
/data/cache/cityscape_2007_test_t_gt_roidb.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/data/cache/cityscape_2007_test_t_gt_roidb.pkl
--------------------------------------------------------------------------------
/data/cache/cityscape_2007_train_s_gt_roidb.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/data/cache/cityscape_2007_train_s_gt_roidb.pkl
--------------------------------------------------------------------------------
/data/cache/cityscape_2007_train_t_gt_roidb.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/data/cache/cityscape_2007_train_t_gt_roidb.pkl
--------------------------------------------------------------------------------
/eval/__init__.py:
--------------------------------------------------------------------------------
1 | import _init_paths
2 |
--------------------------------------------------------------------------------
/eval/__pycache__/_init_paths.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/eval/__pycache__/_init_paths.cpython-36.pyc
--------------------------------------------------------------------------------
/eval/_init_paths.py:
--------------------------------------------------------------------------------
1 | import os.path as osp
2 | import sys
3 |
4 |
5 | def add_path(path):
6 | if path not in sys.path:
7 | sys.path.insert(0, path)
8 |
9 |
10 | this_dir = osp.dirname(__file__)
11 | print(this_dir)
12 | # Add lib to PYTHONPATH
13 | lib_path = osp.join(this_dir, "lib")
14 | lib_path = osp.join("/data/code/DA/strong_weak_image_level", "lib")
15 | add_path(lib_path)
16 |
17 | coco_path = osp.join(this_dir, "data", "coco", "PythonAPI")
18 | add_path(coco_path)
19 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/datasets/boxes.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017-present, Facebook, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | ##############################################################################
15 | #
16 | # Based on:
17 | # --------------------------------------------------------
18 | # Fast/er R-CNN
19 | # Licensed under The MIT License [see LICENSE for details]
20 | # Written by Ross Girshick
21 | # --------------------------------------------------------
22 |
23 | """Box manipulation functions. The internal Detectron box format is
24 | [x1, y1, x2, y2] where (x1, y1) specify the top-left box corner and (x2, y2)
25 | specify the bottom-right box corner. Boxes from external sources, e.g.,
26 | datasets, may be in other formats (such as [x, y, w, h]) and require conversion.
27 |
28 | This module uses a convention that may seem strange at first: the width of a box
29 | is computed as x2 - x1 + 1 (likewise for height). The "+ 1" dates back to old
30 | object detection days when the coordinates were integer pixel indices, rather
31 | than floating point coordinates in a subpixel coordinate frame. A box with x2 =
32 | x1 and y2 = y1 was taken to include a single pixel, having a width of 1, and
33 | hence requiring the "+ 1". Now, most datasets will likely provide boxes with
34 | floating point coordinates and the width should be more reasonably computed as
35 | x2 - x1.
36 |
37 | In practice, as long as a model is trained and tested with a consistent
38 | convention either decision seems to be ok (at least in our experience on COCO).
39 | Since we have a long history of training models with the "+ 1" convention, we
40 | are reluctant to change it even if our modern tastes prefer not to use it.
41 | """
42 |
43 | from __future__ import absolute_import, division, print_function, unicode_literals
44 |
45 | import numpy as np
46 |
47 |
48 | def xywh_to_xyxy(xywh):
49 | """Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format."""
50 | if isinstance(xywh, (list, tuple)):
51 | # Single box given as a list of coordinates
52 | assert len(xywh) == 4
53 | x1, y1 = xywh[0], xywh[1]
54 | x2 = x1 + np.maximum(0.0, xywh[2] - 1.0)
55 | y2 = y1 + np.maximum(0.0, xywh[3] - 1.0)
56 | return (x1, y1, x2, y2)
57 | elif isinstance(xywh, np.ndarray):
58 | # Multiple boxes given as a 2D ndarray
59 | return np.hstack((xywh[:, 0:2], xywh[:, 0:2] + np.maximum(0, xywh[:, 2:4] - 1)))
60 | else:
61 | raise TypeError("Argument xywh must be a list, tuple, or numpy array.")
62 |
63 |
64 | def xyxy_to_xywh(xyxy):
65 | """Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
66 | if isinstance(xyxy, (list, tuple)):
67 | # Single box given as a list of coordinates
68 | assert len(xyxy) == 4
69 | x1, y1 = xyxy[0], xyxy[1]
70 | w = xyxy[2] - x1 + 1
71 | h = xyxy[3] - y1 + 1
72 | return (x1, y1, w, h)
73 | elif isinstance(xyxy, np.ndarray):
74 | # Multiple boxes given as a 2D ndarray
75 | return np.hstack((xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1))
76 | else:
77 | raise TypeError("Argument xyxy must be a list, tuple, or numpy array.")
78 |
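79 | # Illustrative examples of the "+ 1" convention described in the docstring:
80 | #   xywh_to_xyxy((0.0, 0.0, 10.0, 10.0)) -> (0.0, 0.0, 9.0, 9.0)
81 | #   xyxy_to_xywh((0.0, 0.0, 9.0, 9.0)) -> (0.0, 0.0, 10.0, 10.0)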
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/datasets/config_dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import numpy as np
4 |
5 | # `pip install easydict` if you don't have it
6 | from easydict import EasyDict as edict
7 |
8 | __D = edict()
9 | # Consumers can get config by:
10 | # from fast_rcnn_config import cfg
11 | cfg_d = __D
12 | #
13 | # Training options
14 | # For PASCAL, the directories under the path will be ./VOC2007 and ./VOC2012.
15 | __D.PASCAL = "/data/datasets/DA_Detection/VOCdevkit"
16 | __D.PASCALCLIP = ""
17 | __D.PASCALWATER = "/data/datasets/DA_Detection/VOCdevkit"
18 | __D.PASCALRPCFAKE = "/data/GeneralDataset/DomainAdaptation/rpc/voc_format_fake4-2"
19 | __D.PASCALRPC = "/data/GeneralDataset/DomainAdaptation/rpc/voc_format_rpc-2"
20 |
21 | # For these datasets, the directories under the path will be Annotations, ImageSets, and JPEGImages.
22 | __D.CLIPART = "/data/datasets/DA_Detection/clipart"
23 | __D.WATER = "/data/datasets/DA_Detection/watercolor"
24 |
25 |
26 | def _merge_a_into_b(a, b):
27 | """Merge config dictionary a into config dictionary b, clobbering the
28 | options in b whenever they are also specified in a.
29 | """
30 | if type(a) is not edict:
31 | return
32 |
33 | for k, v in a.items():
34 | # a must specify keys that are in b
35 | if k not in b:
36 | raise KeyError("{} is not a valid config key".format(k))
37 |
38 | # the types must match, too
39 | old_type = type(b[k])
40 | if old_type is not type(v):
41 | if isinstance(b[k], np.ndarray):
42 | v = np.array(v, dtype=b[k].dtype)
43 | else:
44 | raise ValueError(
45 | ("Type mismatch ({} vs. {}) " "for config key: {}").format(
46 | type(b[k]), type(v), k
47 | )
48 | )
49 |
50 | # recursively merge dicts
51 | if type(v) is edict:
52 | try:
53 | _merge_a_into_b(a[k], b[k])
54 | except:
55 | print(("Error under config key: {}".format(k)))
56 | raise
57 | else:
58 | b[k] = v
59 |
60 |
61 | def cfg_from_file(filename):
62 | """Load a config file and merge it into the default options."""
63 | import yaml
64 |
65 | with open(filename, "r") as f:
66 | yaml_cfg = edict(yaml.load(f))
67 |
68 | _merge_a_into_b(yaml_cfg, __D)
69 |
70 |
71 | def cfg_from_list(cfg_list):
72 | """Set config keys via list (e.g., from command line)."""
73 | from ast import literal_eval
74 |
75 | assert len(cfg_list) % 2 == 0
76 | for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
77 | key_list = k.split(".")
78 | d = __D
79 | for subkey in key_list[:-1]:
80 | assert subkey in d
81 | d = d[subkey]
82 | subkey = key_list[-1]
83 | assert subkey in d
84 | try:
85 | value = literal_eval(v)
86 | except:
87 | # handle the case when v is a string literal
88 | value = v
89 | assert type(value) == type(
90 | d[subkey]
91 | ), "type {} does not match original type {}".format(
92 | type(value), type(d[subkey])
93 | )
94 | d[subkey] = value
95 |
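96 | # Illustrative usage of cfg_from_list (keys must already exist in __D and the
97 | # new value must keep the same type as the default):
98 | #   cfg_from_list(["CLIPART", "/my/data/clipart", "WATER", "/my/data/watercolor"])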
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/datasets/ds_utils.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast/er R-CNN
3 | # Licensed under The MIT License [see LICENSE for details]
4 | # Written by Ross Girshick
5 | # --------------------------------------------------------
6 | from __future__ import absolute_import, division, print_function
7 |
8 | import numpy as np
9 |
10 |
11 | def unique_boxes(boxes, scale=1.0):
12 | """Return indices of unique boxes."""
13 | v = np.array([1, 1e3, 1e6, 1e9])
14 | hashes = np.round(boxes * scale).dot(v)
15 | _, index = np.unique(hashes, return_index=True)
16 | return np.sort(index)
17 |
18 |
19 | def xywh_to_xyxy(boxes):
20 | """Convert [x y w h] box format to [x1 y1 x2 y2] format."""
21 | return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
22 |
23 |
24 | def xyxy_to_xywh(boxes):
25 | """Convert [x1 y1 x2 y2] box format to [x y w h] format."""
26 | return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
27 |
28 |
29 | def validate_boxes(boxes, width=0, height=0):
30 | """Check that a set of boxes are valid."""
31 | x1 = boxes[:, 0]
32 | y1 = boxes[:, 1]
33 | x2 = boxes[:, 2]
34 | y2 = boxes[:, 3]
35 | assert (x1 >= 0).all()
36 | assert (y1 >= 0).all()
37 | assert (x2 >= x1).all()
38 | assert (y2 >= y1).all()
39 | assert (x2 < width).all()
40 | assert (y2 < height).all()
41 |
42 |
43 | def filter_small_boxes(boxes, min_size):
44 | w = boxes[:, 2] - boxes[:, 0]
45 | h = boxes[:, 3] - boxes[:, 1]
46 | keep = np.where((w >= min_size) & (h > min_size))[0]
47 | return keep
48 |
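49 | # Illustrative example: unique_boxes hashes each box with [1, 1e3, 1e6, 1e9] so
50 | # that exact duplicates collide, e.g.
51 | #   unique_boxes(np.array([[0, 0, 10, 10], [0, 0, 10, 10], [1, 1, 5, 5]])) -> array([0, 2])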
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/datasets/segms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017-present, Facebook, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | ##############################################################################
15 |
16 | """Functions for interacting with segmentation masks in the COCO format.
17 |
18 | The following terms are used in this module:
19 | mask: a binary mask encoded as a 2D numpy array
20 | segm: a segmentation mask in one of the two COCO formats (polygon or RLE)
21 | polygon: COCO's polygon format
22 | RLE: COCO's run length encoding format
23 | """
24 |
25 | from __future__ import absolute_import, division, print_function, unicode_literals
26 |
27 | import numpy as np
28 | import pycocotools.mask as mask_util
29 |
30 | # Type used for storing masks in polygon format
31 | _POLY_TYPE = list
32 | # Type used for storing masks in RLE format
33 | _RLE_TYPE = dict
34 |
35 |
36 | def polys_to_boxes(polys):
37 | """Convert a list of polygons into an array of tight bounding boxes."""
38 | boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)
39 | for i in range(len(polys)):
40 | poly = polys[i]
41 | x0 = min(min(p[::2]) for p in poly)
42 | x1 = max(max(p[::2]) for p in poly)
43 | y0 = min(min(p[1::2]) for p in poly)
44 | y1 = max(max(p[1::2]) for p in poly)
45 | boxes_from_polys[i, :] = [x0, y0, x1, y1]
46 |
47 | return boxes_from_polys
48 |
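49 | # Illustrative example: one object described by a single triangle polygon
50 | # [x0, y0, x1, y1, x2, y2] (p[::2] are the x coordinates, p[1::2] the y coordinates):
51 | #   polys_to_boxes([[[0, 0, 4, 0, 4, 3]]]) -> array([[0., 0., 4., 3.]], dtype=float32)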
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/_C.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/_C.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/faster_rcnn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/faster_rcnn/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/faster_rcnn/vgg16.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Tensorflow Faster R-CNN
3 | # Licensed under The MIT License [see LICENSE for details]
4 | # Written by Xinlei Chen
5 | # --------------------------------------------------------
6 | from __future__ import absolute_import, division, print_function
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 | import torchvision.models as models
12 | from model.faster_rcnn.faster_rcnn import _fasterRCNN
13 | from torch.autograd import Variable
14 |
15 |
16 | class vgg16(_fasterRCNN):
17 | def __init__(self, classes, pretrained=False, class_agnostic=False):
18 | self.model_path = "data/pretrained_model/vgg16_caffe.pth"
19 | self.dout_base_model = 512
20 | self.pretrained = pretrained
21 | self.class_agnostic = class_agnostic
22 |
23 | _fasterRCNN.__init__(self, classes, class_agnostic)
24 |
25 | def _init_modules(self):
26 | vgg = models.vgg16()
27 | if self.pretrained:
28 | print("Loading pretrained weights from %s" % (self.model_path))
29 | state_dict = torch.load(self.model_path)
30 | vgg.load_state_dict(
31 | {k: v for k, v in state_dict.items() if k in vgg.state_dict()}
32 | )
33 |
34 | vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
35 |
36 | # not using the last maxpool layer
37 | self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])
38 |
39 | # Fix the layers before conv3:
40 | for layer in range(10):
41 | for p in self.RCNN_base[layer].parameters():
42 | p.requires_grad = False
43 |
44 | # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)
45 |
46 | self.RCNN_top = vgg.classifier
47 |
48 | # classification and bounding-box regression heads on top of fc7
49 | self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
50 |
51 | if self.class_agnostic:
52 | self.RCNN_bbox_pred = nn.Linear(4096, 4)
53 | else:
54 | self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)
55 |
56 | def _head_to_tail(self, pool5):
57 |
58 | pool5_flat = pool5.view(pool5.size(0), -1)
59 | fc7 = self.RCNN_top(pool5_flat)
60 |
61 | return fc7
62 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/nms/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/nms/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/_ext/nms/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._nms import ffi as _ffi
4 | from ._nms import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
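21 | # _import_symbols copies every symbol exported by the compiled _nms FFI
22 | # extension into this module's namespace, wrapping callables so that, e.g.,
23 | # nms_cuda can be called as a plain Python function (see nms_gpu.py).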
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | # this_file = os.path.dirname(__file__)
9 |
10 | sources = []
11 | headers = []
12 | defines = []
13 | with_cuda = False
14 |
15 | if torch.cuda.is_available():
16 | print("Including CUDA code.")
17 | sources += ["src/nms_cuda.c"]
18 | headers += ["src/nms_cuda.h"]
19 | defines += [("WITH_CUDA", None)]
20 | with_cuda = True
21 |
22 | this_file = os.path.dirname(os.path.realpath(__file__))
23 | print(this_file)
24 | extra_objects = ["src/nms_cuda_kernel.cu.o"]
25 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
26 | print(extra_objects)
27 |
28 | ffi = create_extension(
29 | "_ext.nms",
30 | headers=headers,
31 | sources=sources,
32 | define_macros=defines,
33 | relative_to=__file__,
34 | with_cuda=with_cuda,
35 | extra_objects=extra_objects,
36 | )
37 |
38 | if __name__ == "__main__":
39 | ffi.build()
40 |
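41 | # Note: src/nms_cuda_kernel.cu must already have been compiled by nvcc into
42 | # src/nms_cuda_kernel.cu.o (the accompanying make.sh scripts handle this)
43 | # before this script links it in via extra_objects.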
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/nms_cpu.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import torch
5 |
6 |
7 | def nms_cpu(dets, thresh):
8 | dets = dets.numpy()
9 | x1 = dets[:, 0]
10 | y1 = dets[:, 1]
11 | x2 = dets[:, 2]
12 | y2 = dets[:, 3]
13 | scores = dets[:, 4]
14 |
15 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
16 | order = scores.argsort()[::-1]
17 |
18 | keep = []
19 | while order.size > 0:
20 | i = order.item(0)
21 | keep.append(i)
22 | xx1 = np.maximum(x1[i], x1[order[1:]])
23 | yy1 = np.maximum(y1[i], y1[order[1:]])
24 | xx2 = np.minimum(x2[i], x2[order[1:]])  # intersection corner uses the smaller x2
25 | yy2 = np.minimum(y2[i], y2[order[1:]])  # and the smaller y2
26 |
27 | w = np.maximum(0.0, xx2 - xx1 + 1)
28 | h = np.maximum(0.0, yy2 - yy1 + 1)
29 | inter = w * h
30 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
31 |
32 | inds = np.where(ovr <= thresh)[0]
33 | order = order[inds + 1]
34 |
35 | return torch.IntTensor(keep)
36 |
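37 | # Illustrative example (each row is [x1, y1, x2, y2, score]):
38 | #   dets = torch.tensor([[0., 0., 10., 10., 0.9],
39 | #                        [1., 1., 11., 11., 0.8]])
40 | #   nms_cpu(dets, 0.5) -> IntTensor([0]); box 1 is suppressed (pairwise IoU ~0.70)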
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/nms_gpu.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from ._ext import nms
7 |
8 |
9 | def nms_gpu(dets, thresh):
10 | keep = dets.new(dets.size(0), 1).zero_().int()
11 | num_out = dets.new(1).zero_().int()
12 | nms.nms_cuda(keep, dets, num_out, thresh)
13 | keep = keep[: num_out[0]]
14 | return keep
15 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/nms/nms_wrapper.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 | import torch
8 | from model.nms.nms_cpu import nms_cpu
9 | from model.utils.config import cfg
10 |
11 | if torch.cuda.is_available():
12 | from model.nms.nms_gpu import nms_gpu
13 |
14 |
15 | def nms(dets, thresh, force_cpu=False):
16 | """Dispatch to either CPU or GPU NMS implementations."""
17 | if dets.shape[0] == 0:
18 | return []
19 | # ---numpy version---
20 | # original: return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
21 | # ---pytorch version---
22 |
23 | # fall back to the CPU implementation when forced or when CUDA is unavailable
24 | if force_cpu or not torch.cuda.is_available():
25 | return nms_cpu(dets, thresh)
26 | return nms_gpu(dets, thresh)
27 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_align/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_align/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/_ext/roi_align/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_align import ffi as _ffi
4 | from ._roi_align import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | sources = ["src/roi_align.c"]
9 | headers = ["src/roi_align.h"]
10 | extra_objects = []
11 | # sources = []
12 | # headers = []
13 | defines = []
14 | with_cuda = False
15 |
16 | this_file = os.path.dirname(os.path.realpath(__file__))
17 | print(this_file)
18 |
19 | if torch.cuda.is_available():
20 | print("Including CUDA code.")
21 | sources += ["src/roi_align_cuda.c"]
22 | headers += ["src/roi_align_cuda.h"]
23 | defines += [("WITH_CUDA", None)]
24 | with_cuda = True
25 |
26 | extra_objects = ["src/roi_align_kernel.cu.o"]
27 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
28 |
29 | ffi = create_extension(
30 | "_ext.roi_align",
31 | headers=headers,
32 | sources=sources,
33 | define_macros=defines,
34 | relative_to=__file__,
35 | with_cuda=with_cuda,
36 | extra_objects=extra_objects,
37 | )
38 |
39 | if __name__ == "__main__":
40 | ffi.build()
41 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_align/functions/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/functions/roi_align.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | from .._ext import roi_align
5 |
6 |
7 | # TODO use save_for_backward instead
8 | class RoIAlignFunction(Function):
9 | def __init__(self, aligned_height, aligned_width, spatial_scale):
10 | self.aligned_width = int(aligned_width)
11 | self.aligned_height = int(aligned_height)
12 | self.spatial_scale = float(spatial_scale)
13 | self.rois = None
14 | self.feature_size = None
15 |
16 | def forward(self, features, rois):
17 | self.rois = rois
18 | self.feature_size = features.size()
19 |
20 | batch_size, num_channels, data_height, data_width = features.size()
21 | num_rois = rois.size(0)
22 |
23 | output = features.new(
24 | num_rois, num_channels, self.aligned_height, self.aligned_width
25 | ).zero_()
26 | if features.is_cuda:
27 | roi_align.roi_align_forward_cuda(
28 | self.aligned_height,
29 | self.aligned_width,
30 | self.spatial_scale,
31 | features,
32 | rois,
33 | output,
34 | )
35 | else:
36 | roi_align.roi_align_forward(
37 | self.aligned_height,
38 | self.aligned_width,
39 | self.spatial_scale,
40 | features,
41 | rois,
42 | output,
43 | )
44 | # raise NotImplementedError
45 |
46 | return output
47 |
48 | def backward(self, grad_output):
49 | assert self.feature_size is not None and grad_output.is_cuda
50 |
51 | batch_size, num_channels, data_height, data_width = self.feature_size
52 |
53 | grad_input = self.rois.new(
54 | batch_size, num_channels, data_height, data_width
55 | ).zero_()
56 | roi_align.roi_align_backward_cuda(
57 | self.aligned_height,
58 | self.aligned_width,
59 | self.spatial_scale,
60 | grad_output,
61 | self.rois,
62 | grad_input,
63 | )
64 |
65 | # print grad_input
66 |
67 | return grad_input, None
68 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_align/modules/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_align/modules/roi_align.py:
--------------------------------------------------------------------------------
1 | from torch.nn.functional import avg_pool2d, max_pool2d
2 | from torch.nn.modules.module import Module
3 |
4 | from ..functions.roi_align import RoIAlignFunction
5 |
6 |
7 | class RoIAlign(Module):
8 | def __init__(self, aligned_height, aligned_width, spatial_scale):
9 | super(RoIAlign, self).__init__()
10 |
11 | self.aligned_width = int(aligned_width)
12 | self.aligned_height = int(aligned_height)
13 | self.spatial_scale = float(spatial_scale)
14 |
15 | def forward(self, features, rois):
16 | return RoIAlignFunction(
17 | self.aligned_height, self.aligned_width, self.spatial_scale
18 | )(features, rois)
19 |
20 |
21 | class RoIAlignAvg(Module):
22 | def __init__(self, aligned_height, aligned_width, spatial_scale):
23 | super(RoIAlignAvg, self).__init__()
24 |
25 | self.aligned_width = int(aligned_width)
26 | self.aligned_height = int(aligned_height)
27 | self.spatial_scale = float(spatial_scale)
28 |
29 | def forward(self, features, rois):
30 | x = RoIAlignFunction(
31 | self.aligned_height + 1, self.aligned_width + 1, self.spatial_scale
32 | )(features, rois)
33 | return avg_pool2d(x, kernel_size=2, stride=1)
34 |
35 |
36 | class RoIAlignMax(Module):
37 | def __init__(self, aligned_height, aligned_width, spatial_scale):
38 | super(RoIAlignMax, self).__init__()
39 |
40 | self.aligned_width = int(aligned_width)
41 | self.aligned_height = int(aligned_height)
42 | self.spatial_scale = float(spatial_scale)
43 |
44 | def forward(self, features, rois):
45 | x = RoIAlignFunction(
46 | self.aligned_height + 1, self.aligned_width + 1, self.spatial_scale
47 | )(features, rois)
48 | return max_pool2d(x, kernel_size=2, stride=1)
49 |
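50 | # RoIAlignAvg and RoIAlignMax align to an (H + 1) x (W + 1) grid and then run a
51 | # 2 x 2, stride-1 pooling window, which brings the output back down to H x W.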
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_crop/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_crop/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/_ext/crop_resize/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._crop_resize import ffi as _ffi
4 | from ._crop_resize import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | locals[symbol] = _wrap_function(fn, _ffi)
13 | __all__.append(symbol)
14 |
15 |
16 | _import_symbols(locals())
17 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/_ext/roi_crop/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_crop import ffi as _ffi
4 | from ._roi_crop import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | # this_file = os.path.dirname(__file__)
9 |
10 | sources = ["src/roi_crop.c"]
11 | headers = ["src/roi_crop.h"]
12 | defines = []
13 | with_cuda = False
14 |
15 | if torch.cuda.is_available():
16 | print("Including CUDA code.")
17 | sources += ["src/roi_crop_cuda.c"]
18 | headers += ["src/roi_crop_cuda.h"]
19 | defines += [("WITH_CUDA", None)]
20 | with_cuda = True
21 |
22 | this_file = os.path.dirname(os.path.realpath(__file__))
23 | print(this_file)
24 | extra_objects = ["src/roi_crop_cuda_kernel.cu.o"]
25 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
26 |
27 | ffi = create_extension(
28 | "_ext.roi_crop",
29 | headers=headers,
30 | sources=sources,
31 | define_macros=defines,
32 | relative_to=__file__,
33 | with_cuda=with_cuda,
34 | extra_objects=extra_objects,
35 | )
36 |
37 | if __name__ == "__main__":
38 | ffi.build()
39 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_crop/functions/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/functions/crop_resize.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import torch
3 | from cffi import FFI
4 | from torch.autograd import Function
5 |
6 | from .._ext import roi_crop
7 |
8 | ffi = FFI()
9 |
10 |
11 | class RoICropFunction(Function):
12 | def forward(self, input1, input2):
13 | self.input1 = input1
14 | self.input2 = input2
15 | self.device_c = ffi.new("int *")
16 | output = torch.zeros(
17 | input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2]
18 | )
19 | # print('device %d' % torch.cuda.current_device())
20 | if input1.is_cuda:
21 | self.device = torch.cuda.current_device()
22 | else:
23 | self.device = -1
24 | self.device_c[0] = self.device
25 | if not input1.is_cuda:
26 | roi_crop.BilinearSamplerBHWD_updateOutput(input1, input2, output)
27 | else:
28 | output = output.cuda(self.device)
29 | roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
30 | return output
31 |
32 | def backward(self, grad_output):
33 | grad_input1 = torch.zeros(self.input1.size())
34 | grad_input2 = torch.zeros(self.input2.size())
35 | # print('backward device %d' % self.device)
36 | if not grad_output.is_cuda:
37 | roi_crop.BilinearSamplerBHWD_updateGradInput(
38 | self.input1, self.input2, grad_input1, grad_input2, grad_output
39 | )
40 | else:
41 | grad_input1 = grad_input1.cuda(self.device)
42 | grad_input2 = grad_input2.cuda(self.device)
43 | roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(
44 | self.input1, self.input2, grad_input1, grad_input2, grad_output
45 | )
46 | return grad_input1, grad_input2
47 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/functions/gridgen.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import numpy as np
3 | import torch
4 | from torch.autograd import Function
5 |
6 |
7 | class AffineGridGenFunction(Function):
8 | def __init__(self, height, width, lr=1):
9 | super(AffineGridGenFunction, self).__init__()
10 | self.lr = lr
11 | self.height, self.width = height, width
12 | self.grid = np.zeros([self.height, self.width, 3], dtype=np.float32)
13 | self.grid[:, :, 0] = np.expand_dims(
14 | np.repeat(
15 | np.expand_dims(np.arange(-1, 1, 2.0 / (self.height)), 0),
16 | repeats=self.width,
17 | axis=0,
18 | ).T,
19 | 0,
20 | )
21 | self.grid[:, :, 1] = np.expand_dims(
22 | np.repeat(
23 | np.expand_dims(np.arange(-1, 1, 2.0 / (self.width)), 0),
24 | repeats=self.height,
25 | axis=0,
26 | ),
27 | 0,
28 | )
29 | # self.grid[:,:,0] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/(self.height - 1)), 0), repeats = self.width, axis = 0).T, 0)
30 | # self.grid[:,:,1] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/(self.width - 1)), 0), repeats = self.height, axis = 0), 0)
31 | self.grid[:, :, 2] = np.ones([self.height, self.width])
32 | self.grid = torch.from_numpy(self.grid.astype(np.float32))
33 | # print(self.grid)
34 |
35 | def forward(self, input1):
36 | self.input1 = input1
37 | output = input1.new(torch.Size([input1.size(0)]) + self.grid.size()).zero_()
38 | self.batchgrid = input1.new(
39 | torch.Size([input1.size(0)]) + self.grid.size()
40 | ).zero_()
41 | for i in range(input1.size(0)):
42 | self.batchgrid[i] = self.grid.type_as(self.batchgrid[i])  # torch tensors have no .astype()
43 |
44 | # if input1.is_cuda:
45 | # self.batchgrid = self.batchgrid.cuda()
46 | # output = output.cuda()
47 |
48 | for i in range(input1.size(0)):
49 | output = torch.bmm(
50 | self.batchgrid.view(-1, self.height * self.width, 3),
51 | torch.transpose(input1, 1, 2),
52 | ).view(-1, self.height, self.width, 2)
53 |
54 | return output
55 |
56 | def backward(self, grad_output):
57 |
58 | grad_input1 = self.input1.new(self.input1.size()).zero_()
59 |
60 | # if grad_output.is_cuda:
61 | # self.batchgrid = self.batchgrid.cuda()
62 | # grad_input1 = grad_input1.cuda()
63 |
64 | grad_input1 = torch.baddbmm(
65 | grad_input1,
66 | torch.transpose(grad_output.view(-1, self.height * self.width, 2), 1, 2),
67 | self.batchgrid.view(-1, self.height * self.width, 3),
68 | )
69 | return grad_input1
70 |
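71 | # For each batch element b, forward computes
72 | #   output[b] = batchgrid[b].view(H * W, 3) @ input1[b].T
73 | # i.e. it pushes homogeneous normalized coordinates (x, y, 1) through the 2x3
74 | # affine matrix input1[b], analogous to torch.nn.functional.affine_grid.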
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/functions/roi_crop.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import torch
3 | from torch.autograd import Function
4 |
5 | from .._ext import roi_crop
6 |
7 |
8 | class RoICropFunction(Function):
9 | def forward(self, input1, input2):
10 | self.input1 = input1.clone()
11 | self.input2 = input2.clone()
12 | output = input2.new(
13 | input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2]
14 | ).zero_()
15 | assert (
16 | output.get_device() == input1.get_device()
17 | ), "output and input1 must on the same device"
18 | assert (
19 | output.get_device() == input2.get_device()
20 | ), "output and input2 must on the same device"
21 | roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
22 | return output
23 |
24 | def backward(self, grad_output):
25 | grad_input1 = self.input1.new(self.input1.size()).zero_()
26 | grad_input2 = self.input2.new(self.input2.size()).zero_()
27 | roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(
28 | self.input1, self.input2, grad_input1, grad_input2, grad_output
29 | )
30 | return grad_input1, grad_input2
31 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_crop/modules/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_crop/modules/roi_crop.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 |
3 | from ..functions.roi_crop import RoICropFunction
4 |
5 |
6 | class _RoICrop(Module):
7 | def __init__(self, layout="BHWD"):
8 | super(_RoICrop, self).__init__()
9 |
10 | def forward(self, input1, input2):
11 | return RoICropFunction()(input1, input2)
12 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_layers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 |
4 | from .nms import nms
5 | from .roi_align import ROIAlign, roi_align
6 | from .roi_pool import ROIPool, roi_pool
7 |
8 | __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool"]
9 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_layers/nms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | # from ._utils import _C
3 | from model import _C
4 |
5 | nms = _C.nms
6 | # nms.__doc__ = """
7 | # This function performs non-maximum suppression"""
8 |
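The binding exposes the compiled kernel as nms(boxes, scores, iou_threshold) -> indices of the kept boxes, highest-scoring first. torchvision.ops.nms follows the same contract and makes a self-contained sanity check (torchvision here is an illustrative stand-in for the repo's _C module):

import torch
from torchvision.ops import nms

boxes = torch.tensor([[ 0.,  0., 10., 10.],
                      [ 1.,  1., 11., 11.],    # IoU ~ 0.68 with the first box
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(nms(boxes, scores, iou_threshold=0.5))   # tensor([0, 2]); box 1 is suppressed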
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_layers/roi_align.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 | from model import _C
4 | from torch import nn
5 | from torch.autograd import Function
6 | from torch.autograd.function import once_differentiable
7 | from torch.nn.modules.utils import _pair
8 |
9 |
10 | class _ROIAlign(Function):
11 | @staticmethod
12 | def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
13 | ctx.save_for_backward(roi)
14 | ctx.output_size = _pair(output_size)
15 | ctx.spatial_scale = spatial_scale
16 | ctx.sampling_ratio = sampling_ratio
17 | ctx.input_shape = input.size()
18 | output = _C.roi_align_forward(
19 | input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
20 | )
21 | return output
22 |
23 | @staticmethod
24 | @once_differentiable
25 | def backward(ctx, grad_output):
26 | (rois,) = ctx.saved_tensors
27 | output_size = ctx.output_size
28 | spatial_scale = ctx.spatial_scale
29 | sampling_ratio = ctx.sampling_ratio
30 | bs, ch, h, w = ctx.input_shape
31 | grad_input = _C.roi_align_backward(
32 | grad_output,
33 | rois,
34 | spatial_scale,
35 | output_size[0],
36 | output_size[1],
37 | bs,
38 | ch,
39 | h,
40 | w,
41 | sampling_ratio,
42 | )
43 | return grad_input, None, None, None, None
44 |
45 |
46 | roi_align = _ROIAlign.apply
47 |
48 |
49 | class ROIAlign(nn.Module):
50 | def __init__(self, output_size, spatial_scale, sampling_ratio):
51 | super(ROIAlign, self).__init__()
52 | self.output_size = output_size
53 | self.spatial_scale = spatial_scale
54 | self.sampling_ratio = sampling_ratio
55 |
56 | def forward(self, input, rois):
57 | return roi_align(
58 | input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
59 | )
60 |
61 | def __repr__(self):
62 | tmpstr = self.__class__.__name__ + "("
63 | tmpstr += "output_size=" + str(self.output_size)
64 | tmpstr += ", spatial_scale=" + str(self.spatial_scale)
65 | tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
66 | tmpstr += ")"
67 | return tmpstr
68 |
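A usage sketch for ROIAlign: rois arrive as (K, 5) rows of (batch_index, x1, y1, x2, y2) in input-image coordinates and are mapped onto the feature map by spatial_scale. torchvision.ops.roi_align shares this convention, so the sketch below (an assumed equivalent, used so the example runs without the compiled _C module) illustrates the shapes:

import torch
from torchvision.ops import roi_align

feat = torch.randn(1, 256, 50, 50)                    # stride-16 feature map of an 800x800 image
rois = torch.tensor([[0., 160., 160., 480., 480.]])   # (batch_idx, x1, y1, x2, y2), image coords
out = roi_align(feat, rois, output_size=(7, 7), spatial_scale=1.0 / 16, sampling_ratio=2)
print(out.shape)                                      # torch.Size([1, 256, 7, 7])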
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_layers/roi_pool.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 | from model import _C
4 | from torch import nn
5 | from torch.autograd import Function
6 | from torch.autograd.function import once_differentiable
7 | from torch.nn.modules.utils import _pair
8 |
9 |
10 | class _ROIPool(Function):
11 | @staticmethod
12 | def forward(ctx, input, roi, output_size, spatial_scale):
13 | ctx.output_size = _pair(output_size)
14 | ctx.spatial_scale = spatial_scale
15 | ctx.input_shape = input.size()
16 | output, argmax = _C.roi_pool_forward(
17 | input, roi, spatial_scale, output_size[0], output_size[1]
18 | )
19 | ctx.save_for_backward(input, roi, argmax)
20 | return output
21 |
22 | @staticmethod
23 | @once_differentiable
24 | def backward(ctx, grad_output):
25 | input, rois, argmax = ctx.saved_tensors
26 | output_size = ctx.output_size
27 | spatial_scale = ctx.spatial_scale
28 | bs, ch, h, w = ctx.input_shape
29 | grad_input = _C.roi_pool_backward(
30 | grad_output,
31 | input,
32 | rois,
33 | argmax,
34 | spatial_scale,
35 | output_size[0],
36 | output_size[1],
37 | bs,
38 | ch,
39 | h,
40 | w,
41 | )
42 | return grad_input, None, None, None
43 |
44 |
45 | roi_pool = _ROIPool.apply
46 |
47 |
48 | class ROIPool(nn.Module):
49 | def __init__(self, output_size, spatial_scale):
50 | super(ROIPool, self).__init__()
51 | self.output_size = output_size
52 | self.spatial_scale = spatial_scale
53 |
54 | def forward(self, input, rois):
55 | return roi_pool(input, rois, self.output_size, self.spatial_scale)
56 |
57 | def __repr__(self):
58 | tmpstr = self.__class__.__name__ + "("
59 | tmpstr += "output_size=" + str(self.output_size)
60 | tmpstr += ", spatial_scale=" + str(self.spatial_scale)
61 | tmpstr += ")"
62 | return tmpstr
63 |
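ROIPool takes rois in the same (batch_index, x1, y1, x2, y2) form; torchvision.ops.roi_pool is again an illustrative stand-in for a quick check:

import torch
from torchvision.ops import roi_pool

feat = torch.randn(1, 256, 50, 50)
rois = torch.tensor([[0., 160., 160., 480., 480.]])
print(roi_pool(feat, rois, output_size=(7, 7), spatial_scale=1.0 / 16).shape)  # (1, 256, 7, 7)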
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/_ext/roi_pooling/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_pooling import ffi as _ffi
4 | from ._roi_pooling import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | sources = ["src/roi_pooling.c"]
9 | headers = ["src/roi_pooling.h"]
10 | extra_objects = []
11 | defines = []
12 | with_cuda = False
13 |
14 | this_file = os.path.dirname(os.path.realpath(__file__))
15 | print(this_file)
16 |
17 | if torch.cuda.is_available():
18 | print("Including CUDA code.")
19 | sources += ["src/roi_pooling_cuda.c"]
20 | headers += ["src/roi_pooling_cuda.h"]
21 | defines += [("WITH_CUDA", None)]
22 | with_cuda = True
23 | extra_objects = ["src/roi_pooling.cu.o"]
24 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
25 |
26 | ffi = create_extension(
27 | "_ext.roi_pooling",
28 | headers=headers,
29 | sources=sources,
30 | define_macros=defines,
31 | relative_to=__file__,
32 | with_cuda=with_cuda,
33 | extra_objects=extra_objects,
34 | )
35 |
36 | if __name__ == "__main__":
37 | ffi.build()
38 |
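This script targets torch.utils.ffi, which was removed in PyTorch 1.0; the kernels this repo actually uses are the ones compiled into model/_C from model/csrc. Purely as a hypothetical sketch of how such an extension is declared today (the TH/THC C sources would also need porting to the ATen C++ API, so this is not a drop-in):

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

setup(
    name="roi_pooling",
    ext_modules=[
        # Hypothetical: assumes src/roi_pooling.cpp is a ported ATen version of roi_pooling.c.
        CppExtension(name="_ext.roi_pooling", sources=["src/roi_pooling.cpp"]),
    ],
    cmdclass={"build_ext": BuildExtension},
)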
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/functions/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/functions/roi_pool.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | from .._ext import roi_pooling
5 |
6 |
7 | class RoIPoolFunction(Function):
8 | def __init__(ctx, pooled_height, pooled_width, spatial_scale):
9 | ctx.pooled_width = pooled_width
10 | ctx.pooled_height = pooled_height
11 | ctx.spatial_scale = spatial_scale
12 | ctx.feature_size = None
13 |
14 | def forward(ctx, features, rois):
15 | ctx.feature_size = features.size()
16 | batch_size, num_channels, data_height, data_width = ctx.feature_size
17 | num_rois = rois.size(0)
18 | output = features.new(
19 | num_rois, num_channels, ctx.pooled_height, ctx.pooled_width
20 | ).zero_()
21 | ctx.argmax = (
22 | features.new(num_rois, num_channels, ctx.pooled_height, ctx.pooled_width)
23 | .zero_()
24 | .int()
25 | )
26 | ctx.rois = rois
27 | if not features.is_cuda:
28 | _features = features.permute(0, 2, 3, 1)
29 | roi_pooling.roi_pooling_forward(
30 | ctx.pooled_height,
31 | ctx.pooled_width,
32 | ctx.spatial_scale,
33 | _features,
34 | rois,
35 | output,
36 | )
37 | else:
38 | roi_pooling.roi_pooling_forward_cuda(
39 | ctx.pooled_height,
40 | ctx.pooled_width,
41 | ctx.spatial_scale,
42 | features,
43 | rois,
44 | output,
45 | ctx.argmax,
46 | )
47 |
48 | return output
49 |
50 | def backward(ctx, grad_output):
51 | assert ctx.feature_size is not None and grad_output.is_cuda
52 | batch_size, num_channels, data_height, data_width = ctx.feature_size
53 | grad_input = grad_output.new(
54 | batch_size, num_channels, data_height, data_width
55 | ).zero_()
56 |
57 | roi_pooling.roi_pooling_backward_cuda(
58 | ctx.pooled_height,
59 | ctx.pooled_width,
60 | ctx.spatial_scale,
61 | grad_output,
62 | ctx.rois,
63 | grad_input,
64 | ctx.argmax,
65 | )
66 |
67 | return grad_input, None
68 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/modules/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/roi_pooling/modules/roi_pool.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 |
3 | from ..functions.roi_pool import RoIPoolFunction
4 |
5 |
6 | class _RoIPooling(Module):
7 | def __init__(self, pooled_height, pooled_width, spatial_scale):
8 | super(_RoIPooling, self).__init__()
9 |
10 | self.pooled_width = int(pooled_width)
11 | self.pooled_height = int(pooled_height)
12 | self.spatial_scale = float(spatial_scale)
13 |
14 | def forward(self, features, rois):
15 | return RoIPoolFunction(
16 | self.pooled_height, self.pooled_width, self.spatial_scale
17 | )(features, rois)
18 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/rpn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/rpn/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/rpn/generate_anchors.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import numpy as np
4 |
5 | # --------------------------------------------------------
6 | # Faster R-CNN
7 | # Copyright (c) 2015 Microsoft
8 | # Licensed under The MIT License [see LICENSE for details]
9 | # Written by Ross Girshick and Sean Bell
10 | # --------------------------------------------------------
11 |
12 |
13 | # Verify that we compute the same anchors as Shaoqing's matlab implementation:
14 | #
15 | # >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
16 | # >> anchors
17 | #
18 | # anchors =
19 | #
20 | # -83 -39 100 56
21 | # -175 -87 192 104
22 | # -359 -183 376 200
23 | # -55 -55 72 72
24 | # -119 -119 136 136
25 | # -247 -247 264 264
26 | # -35 -79 52 96
27 | # -79 -167 96 184
28 | # -167 -343 184 360
29 |
30 | # array([[ -83., -39., 100., 56.],
31 | # [-175., -87., 192., 104.],
32 | # [-359., -183., 376., 200.],
33 | # [ -55., -55., 72., 72.],
34 | # [-119., -119., 136., 136.],
35 | # [-247., -247., 264., 264.],
36 | # [ -35., -79., 52., 96.],
37 | # [ -79., -167., 96., 184.],
38 | # [-167., -343., 184., 360.]])
39 |
40 | try:
41 | xrange # Python 2
42 | except NameError:
43 | xrange = range # Python 3
44 |
45 |
46 | def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6)):
47 | """
48 | Generate anchor (reference) windows by enumerating aspect ratios X
49 | scales wrt a reference (0, 0, 15, 15) window.
50 | """
51 |
52 | base_anchor = np.array([1, 1, base_size, base_size]) - 1
53 | ratio_anchors = _ratio_enum(base_anchor, ratios)
54 | anchors = np.vstack(
55 | [
56 | _scale_enum(ratio_anchors[i, :], scales)
57 | for i in xrange(ratio_anchors.shape[0])
58 | ]
59 | )
60 | return anchors
61 |
62 |
63 | def _whctrs(anchor):
64 | """
65 | Return width, height, x center, and y center for an anchor (window).
66 | """
67 |
68 | w = anchor[2] - anchor[0] + 1
69 | h = anchor[3] - anchor[1] + 1
70 | x_ctr = anchor[0] + 0.5 * (w - 1)
71 | y_ctr = anchor[1] + 0.5 * (h - 1)
72 | return w, h, x_ctr, y_ctr
73 |
74 |
75 | def _mkanchors(ws, hs, x_ctr, y_ctr):
76 | """
77 | Given a vector of widths (ws) and heights (hs) around a center
78 | (x_ctr, y_ctr), output a set of anchors (windows).
79 | """
80 |
81 | ws = ws[:, np.newaxis]
82 | hs = hs[:, np.newaxis]
83 | anchors = np.hstack(
84 | (
85 | x_ctr - 0.5 * (ws - 1),
86 | y_ctr - 0.5 * (hs - 1),
87 | x_ctr + 0.5 * (ws - 1),
88 | y_ctr + 0.5 * (hs - 1),
89 | )
90 | )
91 | return anchors
92 |
93 |
94 | def _ratio_enum(anchor, ratios):
95 | """
96 | Enumerate a set of anchors for each aspect ratio wrt an anchor.
97 | """
98 |
99 | w, h, x_ctr, y_ctr = _whctrs(anchor)
100 | size = w * h
101 | size_ratios = size / ratios
102 | ws = np.round(np.sqrt(size_ratios))
103 | hs = np.round(ws * ratios)
104 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
105 | return anchors
106 |
107 |
108 | def _scale_enum(anchor, scales):
109 | """
110 | Enumerate a set of anchors for each scale wrt an anchor.
111 | """
112 |
113 | w, h, x_ctr, y_ctr = _whctrs(anchor)
114 | ws = w * scales
115 | hs = h * scales
116 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
117 | return anchors
118 |
119 |
120 | if __name__ == "__main__":
121 | import time
122 |
123 | t = time.time()
124 | a = generate_anchors()
125 | print(time.time() - t)
126 | print(a)
127 | from IPython import embed
128 |
129 | embed()
130 |
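With the defaults above, generate_anchors enumerates 3 ratios x 3 scales = 9 windows around the (0, 0, 15, 15) reference; the RPN later shifts this set across every feature-map location. A quick check (the import path is illustrative):

import numpy as np
from generate_anchors import generate_anchors

anchors = generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6))
print(anchors.shape)   # (9, 4): one (x1, y1, x2, y2) row per ratio/scale pair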
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/lib.linux-x86_64-3.6/model/utils/__init__.py
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/utils/blob.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | """Blob helper functions."""
9 |
10 | # from scipy.misc import imread, imresize
11 | import cv2
12 | import numpy as np
13 |
14 | try:
15 | xrange # Python 2
16 | except NameError:
17 | xrange = range # Python 3
18 |
19 |
20 | def im_list_to_blob(ims):
21 | """Convert a list of images into a network input.
22 |
23 | Assumes images are already prepared (means subtracted, BGR order, ...).
24 | """
25 | max_shape = np.array([im.shape for im in ims]).max(axis=0)
26 | num_images = len(ims)
27 | blob = np.zeros((num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)
28 | for i in xrange(num_images):
29 | im = ims[i]
30 | blob[i, 0 : im.shape[0], 0 : im.shape[1], :] = im
31 |
32 | return blob
33 |
34 |
35 | def prep_im_for_blob(im, pixel_means, target_size, max_size):
36 | """Mean subtract and scale an image for use in a blob."""
37 |
38 | im = im.astype(np.float32, copy=False)
39 | im -= pixel_means
40 | # im = im[:, :, ::-1]
41 | im_shape = im.shape
42 | im_size_min = np.min(im_shape[0:2])
43 | im_size_max = np.max(im_shape[0:2])
44 | im_scale = float(target_size) / float(im_size_min)
45 | # Prevent the biggest axis from being more than MAX_SIZE
46 | # if np.round(im_scale * im_size_max) > max_size:
47 | # im_scale = float(max_size) / float(im_size_max)
48 | # im = imresize(im, im_scale)
49 | im = cv2.resize(
50 | im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
51 | )
52 |
53 | return im, im_scale
54 |
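The two helpers are meant to be chained: prep_im_for_blob normalizes and rescales each image, and im_list_to_blob pads the batch to a common size. A minimal sketch (the path is illustrative, and the means are the stock Fast R-CNN BGR values):

import cv2
import numpy as np
from model.utils.blob import im_list_to_blob, prep_im_for_blob

im = cv2.imread("example.jpg")                               # BGR, H x W x 3
pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])   # stock Fast R-CNN BGR means
im, im_scale = prep_im_for_blob(im, pixel_means, target_size=600, max_size=1000)
blob = im_list_to_blob([im])   # (1, H, W, 3) NHWC; transposed to NCHW further downstream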
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/model/utils/logger.py:
--------------------------------------------------------------------------------
1 | # Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
2 | import numpy as np
3 | import scipy.misc
4 | import tensorflow as tf
5 |
6 | try:
7 | from StringIO import StringIO # Python 2.7
8 | except ImportError:
9 | from io import BytesIO # Python 3.x
10 |
11 |
12 | class Logger(object):
13 | def __init__(self, log_dir):
14 | """Create a summary writer logging to log_dir."""
15 | self.writer = tf.summary.FileWriter(log_dir)
16 |
17 | def scalar_summary(self, tag, value, step):
18 | """Log a scalar variable."""
19 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
20 | self.writer.add_summary(summary, step)
21 |
22 | def image_summary(self, tag, images, step):
23 | """Log a list of images."""
24 |
25 | img_summaries = []
26 | for i, img in enumerate(images):
27 | # Write the image to a string
28 | try:
29 | s = StringIO()
30 |             except NameError:  # StringIO is unavailable on Python 3
31 | s = BytesIO()
32 | scipy.misc.toimage(img).save(s, format="png")
33 |
34 | # Create an Image object
35 | img_sum = tf.Summary.Image(
36 | encoded_image_string=s.getvalue(),
37 | height=img.shape[0],
38 | width=img.shape[1],
39 | )
40 | # Create a Summary value
41 | img_summaries.append(
42 | tf.Summary.Value(tag="%s/%d" % (tag, i), image=img_sum)
43 | )
44 |
45 | # Create and write Summary
46 | summary = tf.Summary(value=img_summaries)
47 | self.writer.add_summary(summary, step)
48 |
49 | def histo_summary(self, tag, values, step, bins=1000):
50 | """Log a histogram of the tensor of values."""
51 |
52 | # Create a histogram using numpy
53 | counts, bin_edges = np.histogram(values, bins=bins)
54 |
55 | # Fill the fields of the histogram proto
56 | hist = tf.HistogramProto()
57 | hist.min = float(np.min(values))
58 | hist.max = float(np.max(values))
59 | hist.num = int(np.prod(values.shape))
60 | hist.sum = float(np.sum(values))
61 | hist.sum_squares = float(np.sum(values ** 2))
62 |
63 | # Drop the start of the first bin
64 | bin_edges = bin_edges[1:]
65 |
66 | # Add bin edges and counts
67 | for edge in bin_edges:
68 | hist.bucket_limit.append(edge)
69 | for c in counts:
70 | hist.bucket.append(c)
71 |
72 | # Create and write Summary
73 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
74 | self.writer.add_summary(summary, step)
75 | self.writer.flush()
76 |
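This logger is written against the TensorFlow 1.x summary API (tf.summary.FileWriter, tf.Summary) and scipy.misc.toimage, both long removed. A hedged modernization sketch with torch.utils.tensorboard, which covers the same scalar and histogram cases without TensorFlow:

import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")                           # same role as Logger(log_dir)
writer.add_scalar("loss/total", 0.42, global_step=100)   # ~ scalar_summary
writer.add_histogram("weights/fc7", np.random.randn(1000), global_step=100)  # ~ histo_summary
writer.flush()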
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/roi_da_data_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # author : tiancity-NJU
3 | # --------------------------------------------------------
4 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/roi_data_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
--------------------------------------------------------------------------------
/lib/build/lib.linux-x86_64-3.6/roi_data_layer/minibatch.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick and Xinlei Chen
6 | # --------------------------------------------------------
7 |
8 | """Compute minibatch blobs for training a Fast R-CNN network."""
9 | from __future__ import absolute_import, division, print_function
10 |
11 | # from scipy.misc import imread
12 | import cv2
13 | import numpy as np
14 | import numpy.random as npr
15 | from model.utils.blob import im_list_to_blob, prep_im_for_blob
16 | from model.utils.config import cfg
17 |
18 |
19 | def get_minibatch(roidb, num_classes):
20 | """Given a roidb, construct a minibatch sampled from it."""
21 | num_images = len(roidb)
22 | # Sample random scales to use for each image in this batch
23 | random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
24 | assert (
25 | cfg.TRAIN.BATCH_SIZE % num_images == 0
26 | ), "num_images ({}) must divide BATCH_SIZE ({})".format(
27 | num_images, cfg.TRAIN.BATCH_SIZE
28 | )
29 |
30 | # Get the input image blob, formatted for caffe
31 | im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
32 |
33 | blobs = {"data": im_blob}
34 |
35 | assert len(im_scales) == 1, "Single batch only"
36 | assert len(roidb) == 1, "Single batch only"
37 |
38 | # gt boxes: (x1, y1, x2, y2, cls)
39 | if cfg.TRAIN.USE_ALL_GT:
40 | # Include all ground truth boxes
41 | gt_inds = np.where(roidb[0]["gt_classes"] != 0)[0]
42 | else:
43 | # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
44 | gt_inds = np.where(
45 | (roidb[0]["gt_classes"] != 0)
46 | & np.all(roidb[0]["gt_overlaps"].toarray() > -1.0, axis=1)
47 | )[0]
48 | gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
49 | gt_boxes[:, 0:4] = roidb[0]["boxes"][gt_inds, :] * im_scales[0]
50 | gt_boxes[:, 4] = roidb[0]["gt_classes"][gt_inds]
51 | blobs["gt_boxes"] = gt_boxes
52 | blobs["im_info"] = np.array(
53 | [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32
54 | )
55 |
56 | blobs["img_id"] = roidb[0]["img_id"]
57 |
58 | return blobs
59 |
60 |
61 | def _get_image_blob(roidb, scale_inds):
62 | """Builds an input blob from the images in the roidb at the specified
63 | scales.
64 | """
65 | num_images = len(roidb)
66 |
67 | processed_ims = []
68 | im_scales = []
69 | for i in range(num_images):
70 | im = cv2.imread(roidb[i]["image"])
71 | # im = imread(roidb[i]["image"])
72 |
73 | if len(im.shape) == 2:
74 | im = im[:, :, np.newaxis]
75 | im = np.concatenate((im, im, im), axis=2)
77 |         # flip the channel order (bgr -> rgb), since cv2.imread returns BGR
78 |         # while the old scipy imread path produced RGB
78 | im = im[:, :, ::-1]
79 |
80 | if roidb[i]["flipped"]:
81 | im = im[:, ::-1, :]
82 | target_size = cfg.TRAIN.SCALES[scale_inds[i]]
83 | im, im_scale = prep_im_for_blob(
84 | im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE
85 | )
86 | im_scales.append(im_scale)
87 | processed_ims.append(im)
88 |
89 | # Create a blob to hold the input images
90 | blob = im_list_to_blob(processed_ims)
91 |
92 | return blob, im_scales
93 |
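The gt_boxes blob packs the rescaled coordinates and the class label into one (num_gt, 5) array; a self-contained sketch of that assembly with illustrative numbers:

import numpy as np

boxes = np.array([[10, 20, 110, 220], [5, 5, 50, 50]], dtype=np.float32)
gt_classes = np.array([3, 7])
im_scale = 0.6
gt_boxes = np.empty((len(boxes), 5), dtype=np.float32)
gt_boxes[:, 0:4] = boxes * im_scale   # coordinates follow the image rescaling
gt_boxes[:, 4] = gt_classes           # the class label rides in the fifth column
print(gt_boxes[0])                    # [  6.  12.  66. 132.   3.]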
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/ROIAlign_cpu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/ROIAlign_cpu.o
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/nms_cpu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/nms_cpu.o
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIAlign_cuda.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIAlign_cuda.o
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIPool_cuda.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIPool_cuda.o
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/nms.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/nms.o
--------------------------------------------------------------------------------
/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/vision.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/build/temp.linux-x86_64-3.6/mnt/data2/JINSU/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/vision.o
--------------------------------------------------------------------------------
/lib/datasets.tar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets.tar
--------------------------------------------------------------------------------
/lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m:
--------------------------------------------------------------------------------
1 | function VOCopts = get_voc_opts(path)
2 |
3 | tmp = pwd;
4 | cd(path);
5 | try
6 | addpath('VOCcode');
7 | VOCinit;
8 | catch
9 | rmpath('VOCcode');
10 | cd(tmp);
11 |     error('VOCcode directory not found under %s', path);
12 | end
13 | rmpath('VOCcode');
14 | cd(tmp);
15 |
--------------------------------------------------------------------------------
/lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m:
--------------------------------------------------------------------------------
1 | function res = voc_eval(path, comp_id, test_set, output_dir)
2 |
3 | VOCopts = get_voc_opts(path);
4 | VOCopts.testset = test_set;
5 |
6 | for i = 1:length(VOCopts.classes)
7 | cls = VOCopts.classes{i};
8 | res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir);
9 | end
10 |
11 | fprintf('\n~~~~~~~~~~~~~~~~~~~~\n');
12 | fprintf('Results:\n');
13 | aps = [res(:).ap]';
14 | fprintf('%.1f\n', aps * 100);
15 | fprintf('%.1f\n', mean(aps) * 100);
16 | fprintf('~~~~~~~~~~~~~~~~~~~~\n');
17 |
18 | function res = voc_eval_cls(cls, VOCopts, comp_id, output_dir)
19 |
20 | test_set = VOCopts.testset;
21 | year = VOCopts.dataset(4:end);
22 |
23 | addpath(fullfile(VOCopts.datadir, 'VOCcode'));
24 |
25 | res_fn = sprintf(VOCopts.detrespath, comp_id, cls);
26 |
27 | recall = [];
28 | prec = [];
29 | ap = 0;
30 | ap_auc = 0;
31 |
32 | do_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test');
33 | if do_eval
34 | % Bug in VOCevaldet requires that tic has been called first
35 | tic;
36 | [recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true);
37 | ap_auc = xVOCap(recall, prec);
38 |
39 | % force plot limits
40 | ylim([0 1]);
41 | xlim([0 1]);
42 |
43 | print(gcf, '-djpeg', '-r0', ...
44 | [output_dir '/' cls '_pr.jpg']);
45 | end
46 | fprintf('!!! %s : %.4f %.4f\n', cls, ap, ap_auc);
47 |
48 | res.recall = recall;
49 | res.prec = prec;
50 | res.ap = ap;
51 | res.ap_auc = ap_auc;
52 |
53 | save([output_dir '/' cls '_pr.mat'], ...
54 | 'res', 'recall', 'prec', 'ap', 'ap_auc');
55 |
56 | rmpath(fullfile(VOCopts.datadir, 'VOCcode'));
57 |
--------------------------------------------------------------------------------
/lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m:
--------------------------------------------------------------------------------
1 | function ap = xVOCap(rec,prec)
2 | % From the PASCAL VOC 2011 devkit
3 |
4 | mrec=[0 ; rec ; 1];
5 | mpre=[0 ; prec ; 0];
6 | for i=numel(mpre)-1:-1:1
7 | mpre(i)=max(mpre(i),mpre(i+1));
8 | end
9 | i=find(mrec(2:end)~=mrec(1:end-1))+1;
10 | ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
11 |
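xVOCap is the standard area-under-the-interpolated-PR-curve VOC AP. A line-for-line Python transcription (matching the non-07-metric branch of the usual voc_eval.py):

import numpy as np

def voc_ap(rec, prec):
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    # make precision monotonically non-increasing from right to left
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # sum the areas of the rectangles where recall changes
    i = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])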
--------------------------------------------------------------------------------
/lib/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/KITTI_car.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/KITTI_car.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/bdd100k_daytime.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/bdd100k_daytime.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/bdd100k_daytime_car.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/bdd100k_daytime_car.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/bdd100k_dd.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/bdd100k_dd.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/bdd100k_night.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/bdd100k_night.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/bdd_eval.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/bdd_eval.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/cityscape.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/cityscape.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/cityscapes_car.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/cityscapes_car.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/cityscapes_ms_car.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/cityscapes_ms_car.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/clipart.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/clipart.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/coco.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/coco.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/config_dataset.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/config_dataset.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/ds_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/ds_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/factory.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/factory.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/imagenet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/imagenet.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/imdb.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/imdb.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/pascal_voc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/pascal_voc.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/pascal_voc_water.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/pascal_voc_water.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/rpc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/rpc.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/rpc_fake.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/rpc_fake.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/sim10k_coco.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/sim10k_coco.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/vg.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/vg.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/vg_eval.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/vg_eval.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/voc_eval.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/voc_eval.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/__pycache__/water.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/datasets/__pycache__/water.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/datasets/boxes.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017-present, Facebook, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | ##############################################################################
15 | #
16 | # Based on:
17 | # --------------------------------------------------------
18 | # Fast/er R-CNN
19 | # Licensed under The MIT License [see LICENSE for details]
20 | # Written by Ross Girshick
21 | # --------------------------------------------------------
22 |
23 | """Box manipulation functions. The internal Detectron box format is
24 | [x1, y1, x2, y2] where (x1, y1) specify the top-left box corner and (x2, y2)
25 | specify the bottom-right box corner. Boxes from external sources, e.g.,
26 | datasets, may be in other formats (such as [x, y, w, h]) and require conversion.
27 |
28 | This module uses a convention that may seem strange at first: the width of a box
29 | is computed as x2 - x1 + 1 (likewise for height). The "+ 1" dates back to old
30 | object detection days when the coordinates were integer pixel indices, rather
31 | than floating point coordinates in a subpixel coordinate frame. A box with x2 =
32 | x1 and y2 = y1 was taken to include a single pixel, having a width of 1, and
33 | hence requiring the "+ 1". Now, most datasets will likely provide boxes with
34 | floating point coordinates and the width should be more reasonably computed as
35 | x2 - x1.
36 |
37 | In practice, as long as a model is trained and tested with a consistent
38 | convention either decision seems to be ok (at least in our experience on COCO).
39 | Since we have a long history of training models with the "+ 1" convention, we
40 | are reluctant to change it even if our modern tastes prefer not to use it.
41 | """
42 |
43 | from __future__ import absolute_import, division, print_function, unicode_literals
44 |
45 | import numpy as np
46 |
47 |
48 | def xywh_to_xyxy(xywh):
49 | """Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format."""
50 | if isinstance(xywh, (list, tuple)):
51 | # Single box given as a list of coordinates
52 | assert len(xywh) == 4
53 | x1, y1 = xywh[0], xywh[1]
54 | x2 = x1 + np.maximum(0.0, xywh[2] - 1.0)
55 | y2 = y1 + np.maximum(0.0, xywh[3] - 1.0)
56 | return (x1, y1, x2, y2)
57 | elif isinstance(xywh, np.ndarray):
58 | # Multiple boxes given as a 2D ndarray
59 | return np.hstack((xywh[:, 0:2], xywh[:, 0:2] + np.maximum(0, xywh[:, 2:4] - 1)))
60 | else:
61 | raise TypeError("Argument xywh must be a list, tuple, or numpy array.")
62 |
63 |
64 | def xyxy_to_xywh(xyxy):
65 | """Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
66 | if isinstance(xyxy, (list, tuple)):
67 | # Single box given as a list of coordinates
68 | assert len(xyxy) == 4
69 | x1, y1 = xyxy[0], xyxy[1]
70 | w = xyxy[2] - x1 + 1
71 | h = xyxy[3] - y1 + 1
72 | return (x1, y1, w, h)
73 | elif isinstance(xyxy, np.ndarray):
74 | # Multiple boxes given as a 2D ndarray
75 | return np.hstack((xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1))
76 | else:
77 | raise TypeError("Argument xyxy must be a list, tuple, or numpy array.")
78 |
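A worked round trip of the "+ 1" convention the docstring describes: a box with x2 == x1 and y2 == y1 covers exactly one pixel, so its width and height are 1:

import numpy as np
from datasets.boxes import xywh_to_xyxy, xyxy_to_xywh

box = np.array([[10., 20., 10., 20.]])               # single-pixel box
print(xyxy_to_xywh(box))                             # [[10. 20.  1.  1.]]
print(xywh_to_xyxy(np.array([[10., 20., 1., 1.]])))  # round-trips to [[10. 20. 10. 20.]]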
--------------------------------------------------------------------------------
/lib/datasets/config_dataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import numpy as np
4 |
5 | # `pip install easydict` if you don't have it
6 | from easydict import EasyDict as edict
7 |
8 | __D = edict()
9 | # Consumers can get config by:
10 | #   from datasets.config_dataset import cfg_d
11 | cfg_d = __D
12 | #
13 | # Training options
14 | # For PASCAL, the directories under this path will be ./VOC2007 and ./VOC2012
15 | __D.PASCAL = "/data/datasets/DA_Detection/VOCdevkit"
16 | __D.PASCALCLIP = ""
17 | __D.PASCALWATER = "/data/datasets/DA_Detection/VOCdevkit"
18 | __D.PASCALRPCFAKE = "/data/GeneralDataset/DomainAdaptation/rpc/voc_format_fake4-2"
19 | __D.PASCALRPC = "/data/GeneralDataset/DomainAdaptation/rpc/voc_format_rpc-2"
20 |
21 | # For these datasets, the directories under the path will be Annotations, ImageSets, and JPEGImages.
22 | __D.CLIPART = "/data/datasets/DA_Detection/clipart"
23 | __D.WATER = "/data/datasets/DA_Detection/watercolor"
24 | __D.BDD100k = "/media/ssd1/BDD100k/"
25 | __D.cityscapes_KDA = "/media/ssd1/cityscape/"
26 | __D.KITTI = "/media/ssd1/KITTI/"
27 |
28 | def _merge_a_into_b(a, b):
29 | """Merge config dictionary a into config dictionary b, clobbering the
30 | options in b whenever they are also specified in a.
31 | """
32 | if type(a) is not edict:
33 | return
34 |
35 | for k, v in a.items():
36 | # a must specify keys that are in b
37 | if k not in b:
38 | raise KeyError("{} is not a valid config key".format(k))
39 |
40 | # the types must match, too
41 | old_type = type(b[k])
42 | if old_type is not type(v):
43 | if isinstance(b[k], np.ndarray):
44 | v = np.array(v, dtype=b[k].dtype)
45 | else:
46 | raise ValueError(
47 | ("Type mismatch ({} vs. {}) " "for config key: {}").format(
48 | type(b[k]), type(v), k
49 | )
50 | )
51 |
52 | # recursively merge dicts
53 | if type(v) is edict:
54 | try:
55 | _merge_a_into_b(a[k], b[k])
56 | except:
57 | print(("Error under config key: {}".format(k)))
58 | raise
59 | else:
60 | b[k] = v
61 |
62 |
63 | def cfg_from_file(filename):
64 | """Load a config file and merge it into the default options."""
65 | import yaml
66 |
67 | with open(filename, "r") as f:
68 |         yaml_cfg = edict(yaml.safe_load(f))  # safe_load: bare yaml.load needs a Loader on modern PyYAML
69 |
70 | _merge_a_into_b(yaml_cfg, __D)
71 |
72 |
73 | def cfg_from_list(cfg_list):
74 | """Set config keys via list (e.g., from command line)."""
75 | from ast import literal_eval
76 |
77 | assert len(cfg_list) % 2 == 0
78 | for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
79 | key_list = k.split(".")
80 | d = __D
81 | for subkey in key_list[:-1]:
82 | assert subkey in d
83 | d = d[subkey]
84 | subkey = key_list[-1]
85 | assert subkey in d
86 | try:
87 | value = literal_eval(v)
88 | except:
89 | # handle the case when v is a string literal
90 | value = v
91 | assert type(value) == type(
92 | d[subkey]
93 | ), "type {} does not match original type {}".format(
94 | type(value), type(d[subkey])
95 | )
96 | d[subkey] = value
97 |
--------------------------------------------------------------------------------
/lib/datasets/ds_utils.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast/er R-CNN
3 | # Licensed under The MIT License [see LICENSE for details]
4 | # Written by Ross Girshick
5 | # --------------------------------------------------------
6 | from __future__ import absolute_import, division, print_function
7 |
8 | import numpy as np
9 |
10 |
11 | def unique_boxes(boxes, scale=1.0):
12 | """Return indices of unique boxes."""
13 | v = np.array([1, 1e3, 1e6, 1e9])
14 | hashes = np.round(boxes * scale).dot(v)
15 | _, index = np.unique(hashes, return_index=True)
16 | return np.sort(index)
17 |
18 |
19 | def xywh_to_xyxy(boxes):
20 | """Convert [x y w h] box format to [x1 y1 x2 y2] format."""
21 | return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1))
22 |
23 |
24 | def xyxy_to_xywh(boxes):
25 | """Convert [x1 y1 x2 y2] box format to [x y w h] format."""
26 | return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1))
27 |
28 |
29 | def validate_boxes(boxes, width=0, height=0):
30 | """Check that a set of boxes are valid."""
31 | x1 = boxes[:, 0]
32 | y1 = boxes[:, 1]
33 | x2 = boxes[:, 2]
34 | y2 = boxes[:, 3]
35 | assert (x1 >= 0).all()
36 | assert (y1 >= 0).all()
37 | assert (x2 >= x1).all()
38 | assert (y2 >= y1).all()
39 | assert (x2 < width).all()
40 | assert (y2 < height).all()
41 |
42 |
43 | def filter_small_boxes(boxes, min_size):
44 | w = boxes[:, 2] - boxes[:, 0]
45 | h = boxes[:, 3] - boxes[:, 1]
46 |     keep = np.where((w >= min_size) & (h >= min_size))[0]
47 | return keep
48 |
--------------------------------------------------------------------------------
/lib/datasets/segms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017-present, Facebook, Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | ##############################################################################
15 |
16 | """Functions for interacting with segmentation masks in the COCO format.
17 |
18 | The following terms are used in this module
19 | mask: a binary mask encoded as a 2D numpy array
20 | segm: a segmentation mask in one of the two COCO formats (polygon or RLE)
21 | polygon: COCO's polygon format
22 | RLE: COCO's run length encoding format
23 | """
24 |
25 | from __future__ import absolute_import, division, print_function, unicode_literals
26 |
27 | import numpy as np
28 | import pycocotools.mask as mask_util
29 |
30 | # Type used for storing masks in polygon format
31 | _POLY_TYPE = list
32 | # Type used for storing masks in RLE format
33 | _RLE_TYPE = dict
34 |
35 |
36 | def polys_to_boxes(polys):
37 | """Convert a list of polygons into an array of tight bounding boxes."""
38 | boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)
39 | for i in range(len(polys)):
40 | poly = polys[i]
41 | x0 = min(min(p[::2]) for p in poly)
42 | x1 = max(max(p[::2]) for p in poly)
43 | y0 = min(min(p[1::2]) for p in poly)
44 | y1 = max(max(p[1::2]) for p in poly)
45 | boxes_from_polys[i, :] = [x0, y0, x1, y1]
46 |
47 | return boxes_from_polys
48 |
--------------------------------------------------------------------------------
/lib/faster_rcnn.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 1.0
2 | Name: faster-rcnn
3 | Version: 0.1
4 | Summary: object detection in pytorch
5 | Home-page: UNKNOWN
6 | Author: UNKNOWN
7 | Author-email: UNKNOWN
8 | License: UNKNOWN
9 | Description: UNKNOWN
10 | Platform: UNKNOWN
11 |
--------------------------------------------------------------------------------
/lib/faster_rcnn.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | setup.py
2 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/vision.cpp
3 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/ROIAlign_cpu.cpp
4 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cpu/nms_cpu.cpp
5 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIAlign_cuda.cu
6 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/ROIPool_cuda.cu
7 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/csrc/cuda/nms.cu
8 | datasets/__init__.py
9 | datasets/boxes.py
10 | datasets/cityscape.py
11 | datasets/cityscapes_car.py
12 | datasets/clipart.py
13 | datasets/coco.py
14 | datasets/config_dataset.py
15 | datasets/convert_cityscapes_to_caronly_coco.py
16 | datasets/convert_cityscapes_to_unlabeled_caronly_coco.py
17 | datasets/convert_sim10k_to_coco.py
18 | datasets/ds_utils.py
19 | datasets/factory.py
20 | datasets/imagenet.py
21 | datasets/imdb.py
22 | datasets/pascal_voc.py
23 | datasets/pascal_voc_rbg.py
24 | datasets/pascal_voc_water.py
25 | datasets/rpc.py
26 | datasets/rpc_fake.py
27 | datasets/segms.py
28 | datasets/sim10k_coco.py
29 | datasets/vg.py
30 | datasets/vg_eval.py
31 | datasets/voc_eval.py
32 | datasets/voc_eval_no_add_1.py
33 | datasets/water.py
34 | faster_rcnn.egg-info/PKG-INFO
35 | faster_rcnn.egg-info/SOURCES.txt
36 | faster_rcnn.egg-info/dependency_links.txt
37 | faster_rcnn.egg-info/top_level.txt
38 | model/__init__.py
39 | model/faster_rcnn/__init__.py
40 | model/faster_rcnn/faster_rcnn.py
41 | model/faster_rcnn/resnet.py
42 | model/faster_rcnn/vgg16.py
43 | model/nms/__init__.py
44 | model/nms/build.py
45 | model/nms/nms_cpu.py
46 | model/nms/nms_gpu.py
47 | model/nms/nms_wrapper.py
48 | model/nms/_ext/__init__.py
49 | model/nms/_ext/nms/__init__.py
50 | model/roi_align/__init__.py
51 | model/roi_align/build.py
52 | model/roi_align/_ext/__init__.py
53 | model/roi_align/_ext/roi_align/__init__.py
54 | model/roi_align/functions/__init__.py
55 | model/roi_align/functions/roi_align.py
56 | model/roi_align/modules/__init__.py
57 | model/roi_align/modules/roi_align.py
58 | model/roi_crop/__init__.py
59 | model/roi_crop/build.py
60 | model/roi_crop/_ext/__init__.py
61 | model/roi_crop/_ext/crop_resize/__init__.py
62 | model/roi_crop/_ext/roi_crop/__init__.py
63 | model/roi_crop/functions/__init__.py
64 | model/roi_crop/functions/crop_resize.py
65 | model/roi_crop/functions/gridgen.py
66 | model/roi_crop/functions/roi_crop.py
67 | model/roi_crop/modules/__init__.py
68 | model/roi_crop/modules/gridgen.py
69 | model/roi_crop/modules/roi_crop.py
70 | model/roi_layers/__init__.py
71 | model/roi_layers/nms.py
72 | model/roi_layers/roi_align.py
73 | model/roi_layers/roi_pool.py
74 | model/roi_pooling/__init__.py
75 | model/roi_pooling/build.py
76 | model/roi_pooling/_ext/__init__.py
77 | model/roi_pooling/_ext/roi_pooling/__init__.py
78 | model/roi_pooling/functions/__init__.py
79 | model/roi_pooling/functions/roi_pool.py
80 | model/roi_pooling/modules/__init__.py
81 | model/roi_pooling/modules/roi_pool.py
82 | model/rpn/__init__.py
83 | model/rpn/anchor_target_layer.py
84 | model/rpn/bbox_transform.py
85 | model/rpn/generate_anchors.py
86 | model/rpn/proposal_layer.py
87 | model/rpn/proposal_target_layer_cascade.py
88 | model/rpn/rpn.py
89 | model/utils/__init__.py
90 | model/utils/blob.py
91 | model/utils/config.py
92 | model/utils/logger.py
93 | model/utils/net_utils.py
94 | roi_da_data_layer/__init__.py
95 | roi_da_data_layer/minibatch.py
96 | roi_da_data_layer/roibatchLoader.py
97 | roi_da_data_layer/roidb.py
98 | roi_data_layer/__init__.py
99 | roi_data_layer/minibatch.py
100 | roi_data_layer/roibatchLoader.py
101 | roi_data_layer/roidb.py
--------------------------------------------------------------------------------
/lib/faster_rcnn.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/lib/faster_rcnn.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | datasets
2 | model
3 | roi_da_data_layer
4 | roi_data_layer
5 |
--------------------------------------------------------------------------------
/lib/model/_C.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/_C.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/lib/model/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/__init__.py
--------------------------------------------------------------------------------
/lib/model/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/csrc/ROIAlign.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #pragma once
3 |
4 | #include "cpu/vision.h"
5 |
6 | #ifdef WITH_CUDA
7 | #include "cuda/vision.h"
8 | #endif
9 |
10 | // Interface for Python
11 | at::Tensor ROIAlign_forward(const at::Tensor& input,
12 | const at::Tensor& rois,
13 | const float spatial_scale,
14 | const int pooled_height,
15 | const int pooled_width,
16 | const int sampling_ratio) {
17 | if (input.type().is_cuda()) {
18 | #ifdef WITH_CUDA
19 | return ROIAlign_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
20 | #else
21 | AT_ERROR("Not compiled with GPU support");
22 | #endif
23 | }
24 | return ROIAlign_forward_cpu(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
25 | }
26 |
27 | at::Tensor ROIAlign_backward(const at::Tensor& grad,
28 | const at::Tensor& rois,
29 | const float spatial_scale,
30 | const int pooled_height,
31 | const int pooled_width,
32 | const int batch_size,
33 | const int channels,
34 | const int height,
35 | const int width,
36 | const int sampling_ratio) {
37 | if (grad.type().is_cuda()) {
38 | #ifdef WITH_CUDA
39 | return ROIAlign_backward_cuda(grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio);
40 | #else
41 | AT_ERROR("Not compiled with GPU support");
42 | #endif
43 | }
44 | AT_ERROR("Not implemented on the CPU");
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/lib/model/csrc/ROIPool.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #pragma once
3 |
4 | #include "cpu/vision.h"
5 |
6 | #ifdef WITH_CUDA
7 | #include "cuda/vision.h"
8 | #endif
9 |
10 |
11 | std::tuple<at::Tensor, at::Tensor> ROIPool_forward(const at::Tensor& input,
12 | const at::Tensor& rois,
13 | const float spatial_scale,
14 | const int pooled_height,
15 | const int pooled_width) {
16 | if (input.type().is_cuda()) {
17 | #ifdef WITH_CUDA
18 | return ROIPool_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width);
19 | #else
20 | AT_ERROR("Not compiled with GPU support");
21 | #endif
22 | }
23 | AT_ERROR("Not implemented on the CPU");
24 | }
25 |
26 | at::Tensor ROIPool_backward(const at::Tensor& grad,
27 | const at::Tensor& input,
28 | const at::Tensor& rois,
29 | const at::Tensor& argmax,
30 | const float spatial_scale,
31 | const int pooled_height,
32 | const int pooled_width,
33 | const int batch_size,
34 | const int channels,
35 | const int height,
36 | const int width) {
37 | if (grad.type().is_cuda()) {
38 | #ifdef WITH_CUDA
39 | return ROIPool_backward_cuda(grad, input, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width);
40 | #else
41 | AT_ERROR("Not compiled with GPU support");
42 | #endif
43 | }
44 | AT_ERROR("Not implemented on the CPU");
45 | }
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/lib/model/csrc/cpu/nms_cpu.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #include "cpu/vision.h"
3 |
4 |
5 | template <typename scalar_t>
6 | at::Tensor nms_cpu_kernel(const at::Tensor& dets,
7 | const at::Tensor& scores,
8 | const float threshold) {
9 | AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
10 | AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
11 | AT_ASSERTM(dets.type() == scores.type(), "dets should have the same type as scores");
12 |
13 | if (dets.numel() == 0) {
14 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
15 | }
16 |
17 | auto x1_t = dets.select(1, 0).contiguous();
18 | auto y1_t = dets.select(1, 1).contiguous();
19 | auto x2_t = dets.select(1, 2).contiguous();
20 | auto y2_t = dets.select(1, 3).contiguous();
21 |
22 | at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1);
23 |
24 | auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
25 |
26 | auto ndets = dets.size(0);
27 | at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU));
28 |
29 | auto suppressed = suppressed_t.data<uint8_t>();
30 | auto order = order_t.data<int64_t>();
31 | auto x1 = x1_t.data<scalar_t>();
32 | auto y1 = y1_t.data<scalar_t>();
33 | auto x2 = x2_t.data<scalar_t>();
34 | auto y2 = y2_t.data<scalar_t>();
35 | auto areas = areas_t.data<scalar_t>();
36 |
37 | for (int64_t _i = 0; _i < ndets; _i++) {
38 | auto i = order[_i];
39 | if (suppressed[i] == 1)
40 | continue;
41 | auto ix1 = x1[i];
42 | auto iy1 = y1[i];
43 | auto ix2 = x2[i];
44 | auto iy2 = y2[i];
45 | auto iarea = areas[i];
46 |
47 | for (int64_t _j = _i + 1; _j < ndets; _j++) {
48 | auto j = order[_j];
49 | if (suppressed[j] == 1)
50 | continue;
51 | auto xx1 = std::max(ix1, x1[j]);
52 | auto yy1 = std::max(iy1, y1[j]);
53 | auto xx2 = std::min(ix2, x2[j]);
54 | auto yy2 = std::min(iy2, y2[j]);
55 |
56 | auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1);
57 | auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1);
58 | auto inter = w * h;
59 | auto ovr = inter / (iarea + areas[j] - inter);
60 | if (ovr >= threshold)
61 | suppressed[j] = 1;
62 | }
63 | }
64 | return at::nonzero(suppressed_t == 0).squeeze(1);
65 | }
66 |
67 | at::Tensor nms_cpu(const at::Tensor& dets,
68 | const at::Tensor& scores,
69 | const float threshold) {
70 | at::Tensor result;
71 | AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] {
72 | result = nms_cpu_kernel<scalar_t>(dets, scores, threshold);
73 | });
74 | return result;
75 | }
76 |
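77 | // Worked example of the greedy loop above (illustrative numbers, not taken
78 | // from this repo): boxes A = (0,0,10,10) with score 0.9 and B = (1,1,11,11)
79 | // with score 0.8 under threshold 0.5. A is visited first; with the +1 box
80 | // convention each area is 121 and the intersection is 10 * 10 = 100, so
81 | // ovr = 100 / (121 + 121 - 100) ~= 0.70 >= 0.5 and B is suppressed. The
82 | // returned tensor holds the surviving indices, here just {0}.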
--------------------------------------------------------------------------------
/lib/model/csrc/cpu/vision.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #pragma once
3 | #include <torch/extension.h>
4 |
5 |
6 | at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
7 | const at::Tensor& rois,
8 | const float spatial_scale,
9 | const int pooled_height,
10 | const int pooled_width,
11 | const int sampling_ratio);
12 |
13 |
14 | at::Tensor nms_cpu(const at::Tensor& dets,
15 | const at::Tensor& scores,
16 | const float threshold);
17 |
--------------------------------------------------------------------------------
/lib/model/csrc/cuda/vision.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #pragma once
3 | #include <torch/extension.h>
4 |
5 |
6 | at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
7 | const at::Tensor& rois,
8 | const float spatial_scale,
9 | const int pooled_height,
10 | const int pooled_width,
11 | const int sampling_ratio);
12 |
13 | at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
14 | const at::Tensor& rois,
15 | const float spatial_scale,
16 | const int pooled_height,
17 | const int pooled_width,
18 | const int batch_size,
19 | const int channels,
20 | const int height,
21 | const int width,
22 | const int sampling_ratio);
23 |
24 |
25 | std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input,
26 | const at::Tensor& rois,
27 | const float spatial_scale,
28 | const int pooled_height,
29 | const int pooled_width);
30 |
31 | at::Tensor ROIPool_backward_cuda(const at::Tensor& grad,
32 | const at::Tensor& input,
33 | const at::Tensor& rois,
34 | const at::Tensor& argmax,
35 | const float spatial_scale,
36 | const int pooled_height,
37 | const int pooled_width,
38 | const int batch_size,
39 | const int channels,
40 | const int height,
41 | const int width);
42 |
43 | at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);
44 |
45 |
46 | at::Tensor compute_flow_cuda(const at::Tensor& boxes,
47 | const int height,
48 | const int width);
49 |
--------------------------------------------------------------------------------
/lib/model/csrc/nms.h:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #pragma once
3 | #include "cpu/vision.h"
4 |
5 | #ifdef WITH_CUDA
6 | #include "cuda/vision.h"
7 | #endif
8 |
9 |
10 | at::Tensor nms(const at::Tensor& dets,
11 | const at::Tensor& scores,
12 | const float threshold) {
13 |
14 | if (dets.type().is_cuda()) {
15 | #ifdef WITH_CUDA
16 | // TODO raise error if not compiled with CUDA
17 | if (dets.numel() == 0)
18 | return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU));
19 | auto b = at::cat({dets, scores.unsqueeze(1)}, 1);
20 | return nms_cuda(b, threshold);
21 | #else
22 | AT_ERROR("Not compiled with GPU support");
23 | #endif
24 | }
25 |
26 | at::Tensor result = nms_cpu(dets, scores, threshold);
27 | return result;
28 | }
29 |
--------------------------------------------------------------------------------
/lib/model/csrc/vision.cpp:
--------------------------------------------------------------------------------
1 | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #include "nms.h"
3 | #include "ROIAlign.h"
4 | #include "ROIPool.h"
5 |
6 |
7 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
8 | m.def("nms", &nms, "non-maximum suppression");
9 | m.def("roi_align_forward", &ROIAlign_forward, "ROIAlign_forward");
10 | m.def("roi_align_backward", &ROIAlign_backward, "ROIAlign_backward");
11 | m.def("roi_pool_forward", &ROIPool_forward, "ROIPool_forward");
12 | m.def("roi_pool_backward", &ROIPool_backward, "ROIPool_backward");
13 | }
14 |
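15 | // The pybind11 module above is what setup.py compiles into
16 | // lib/model/_C.cpython-36m-x86_64-linux-gnu.so. A rough usage sketch from
17 | // the Python side (assuming this repo's package layout):
18 | //
19 | //   from model import _C
20 | //   keep = _C.nms(boxes, scores, 0.5)  # boxes: (N, 4) tensor, scores: (N,)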
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn/DA.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import numpy as np
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 | import torchvision.models as models
8 | from model.da_faster_rcnn.LabelResizeLayer import (
9 | ImageLabelResizeLayer,
10 | InstanceLabelResizeLayer,
11 | )
12 | from model.utils.config import cfg
13 | from torch.autograd import Function, Variable
14 |
15 |
16 | class GRLayer(Function):
17 | @staticmethod
18 | def forward(ctx, input):
19 | ctx.alpha = 0.1
20 |
21 | return input.view_as(input)
22 |
23 | @staticmethod
24 | def backward(ctx, grad_outputs):
25 | output = grad_outputs.neg() * ctx.alpha
26 | return output
27 |
28 |
29 | def grad_reverse(x):
30 | return GRLayer.apply(x)
31 |
32 |
33 | class _ImageDA(nn.Module):
34 | def __init__(self, dim):
35 | super(_ImageDA, self).__init__()
36 | self.dim = dim  # channel dim of the input feature map (512 for vgg16)
37 | self.Conv1 = nn.Conv2d(self.dim, 512, kernel_size=1, stride=1, bias=False)
38 | self.Conv2 = nn.Conv2d(512, 2, kernel_size=1, stride=1, bias=False)
39 | self.reLu = nn.ReLU(inplace=False)
40 | self.LabelResizeLayer = ImageLabelResizeLayer()
41 |
42 | def forward(self, x, need_backprop):
43 | x = grad_reverse(x)
44 | x = self.reLu(self.Conv1(x))
45 | x = self.Conv2(x)
46 | label = self.LabelResizeLayer(x, need_backprop)
47 | return x, label
48 |
49 |
50 | class _InstanceDA(nn.Module):
51 | def __init__(self):
52 | super(_InstanceDA, self).__init__()
53 | self.dc_ip1 = nn.Linear(4096, 1024)
54 | self.dc_relu1 = nn.ReLU()
55 | self.dc_drop1 = nn.Dropout(p=0.5)
56 |
57 | self.dc_ip2 = nn.Linear(1024, 1024)
58 | self.dc_relu2 = nn.ReLU()
59 | self.dc_drop2 = nn.Dropout(p=0.5)
60 |
61 | self.clssifer = nn.Linear(1024, 1)
62 | self.LabelResizeLayer = InstanceLabelResizeLayer()
63 |
64 | def forward(self, x, need_backprop):
65 | x = grad_reverse(x)
66 | x = self.dc_drop1(self.dc_relu1(self.dc_ip1(x)))
67 | x = self.dc_drop2(self.dc_relu2(self.dc_ip2(x)))
68 | x = F.sigmoid(self.clssifer(x))
69 | label = self.LabelResizeLayer(x, need_backprop)
70 | return x, label
71 |
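72 | # Minimal sketch of the gradient reversal layer above (tensor shapes are
73 | # illustrative assumptions, not taken from the training scripts):
74 | #
75 | #   feat = torch.randn(1, 512, 32, 64, requires_grad=True)
76 | #   out = grad_reverse(feat)   # identity on the forward pass
77 | #   out.sum().backward()
78 | #   # feat.grad is now -0.1 everywhere: gradients are negated and scaled by
79 | #   # ctx.alpha, so the backbone learns features that *confuse* the domain
80 | #   # classifiers stacked on top of it.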
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn/LabelResizeLayer.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import cv2
4 | import numpy as np
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torchvision.models as models
9 | from model.utils.config import cfg
10 | from torch.autograd import Function, Variable
11 |
12 |
13 | class ImageLabelResizeLayer(nn.Module):
14 | """
15 | Resize the domain label to the same spatial size as the feature samples
16 | """
17 |
18 | def __init__(self):
19 | super(ImageLabelResizeLayer, self).__init__()
20 |
21 | def forward(self, x, need_backprop):
22 |
23 | feats = x.detach().cpu().numpy()
24 | lbs = need_backprop.detach().cpu().numpy()
25 | gt_blob = np.zeros(
26 | (lbs.shape[0], feats.shape[2], feats.shape[3], 1), dtype=np.float32
27 | )
28 | for i in range(lbs.shape[0]):
29 | lb = np.array([lbs[i]])
30 | lbs_resize = cv2.resize(
31 | lb, (feats.shape[3], feats.shape[2]), interpolation=cv2.INTER_NEAREST
32 | )
33 | gt_blob[i, 0 : lbs_resize.shape[0], 0 : lbs_resize.shape[1], 0] = lbs_resize
34 |
35 | channel_swap = (0, 3, 1, 2)
36 | gt_blob = gt_blob.transpose(channel_swap)
37 | y = Variable(torch.from_numpy(gt_blob)).cuda()
38 | y = y.squeeze(1).long()
39 | return y
40 |
41 |
42 | class InstanceLabelResizeLayer(nn.Module):
43 | def __init__(self):
44 | super(InstanceLabelResizeLayer, self).__init__()
45 | self.minibatch = 256
46 |
47 | def forward(self, x, need_backprop):
48 | feats = x.data.cpu().numpy()
49 | lbs = need_backprop.data.cpu().numpy()
50 | resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)
51 | for i in range(lbs.shape[0]):
52 | resized_lbs[i * self.minibatch : (i + 1) * self.minibatch] = lbs[i]
53 |
54 | y = torch.from_numpy(resized_lbs).cuda()
55 |
56 | return y
57 |
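58 | # Shape notes (a reading aid inferred from the code above):
59 | # - ImageLabelResizeLayer expands a per-image domain flag of shape (B,) into
60 | #   a (B, H, W) long map matching the feature map, so every spatial position
61 | #   of the image-level domain classifier shares the same 0/1 target.
62 | # - InstanceLabelResizeLayer tiles each image's flag across its fixed
63 | #   minibatch of 256 ROIs, yielding one (num_rois, 1) target per box.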
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn/__pycache__/faster_rcnn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn/__pycache__/faster_rcnn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn/__pycache__/resnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn/__pycache__/resnet.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn/__pycache__/vgg16.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn/__pycache__/vgg16.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/DA.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import numpy as np
4 | import torch
5 | import torch.nn as nn
6 | import torch.nn.functional as F
7 | import torchvision.models as models
8 | from model.da_faster_rcnn_instance_da_weight.LabelResizeLayer import (
9 | ImageLabelResizeLayer,
10 | InstanceLabelResizeLayer,
11 | )
12 | from model.utils.config import cfg
13 | from torch.autograd import Function, Variable
14 |
15 |
16 | class GRLayer(Function):
17 | @staticmethod
18 | def forward(ctx, input):
19 | ctx.alpha = 0.1
20 |
21 | return input.view_as(input)
22 |
23 | @staticmethod
24 | def backward(ctx, grad_outputs):
25 | output = grad_outputs.neg() * ctx.alpha
26 | return output
27 |
28 |
29 | def grad_reverse(x):
30 | return GRLayer.apply(x)
31 |
32 |
33 | class _ImageDA(nn.Module):
34 | def __init__(self, dim):
35 | super(_ImageDA, self).__init__()
36 | self.dim = dim  # channel dim of the input feature map (512 for vgg16)
37 | self.Conv1 = nn.Conv2d(self.dim, 512, kernel_size=1, stride=1, bias=False)
38 | self.Conv2 = nn.Conv2d(512, 2, kernel_size=1, stride=1, bias=False)
39 | self.reLu = nn.ReLU(inplace=False)
40 | self.LabelResizeLayer = ImageLabelResizeLayer()
41 |
42 | def forward(self, x, need_backprop):
43 | x = grad_reverse(x)
44 | x = self.reLu(self.Conv1(x))
45 | x = self.Conv2(x)
46 | label = self.LabelResizeLayer(x, need_backprop)
47 | return x, label
48 |
49 |
50 | class _InstanceDA(nn.Module):
51 | def __init__(self, in_channle=4096):
52 | super(_InstanceDA, self).__init__()
53 | self.dc_ip1 = nn.Linear(in_channle, 1024)
54 | self.dc_relu1 = nn.ReLU()
55 | self.dc_drop1 = nn.Dropout(p=0.5)
56 |
57 | self.dc_ip2 = nn.Linear(1024, 1024)
58 | self.dc_relu2 = nn.ReLU()
59 | self.dc_drop2 = nn.Dropout(p=0.5)
60 |
61 | self.clssifer = nn.Linear(1024, 1)
62 | self.LabelResizeLayer = InstanceLabelResizeLayer()
63 |
64 | def forward(self, x, need_backprop):
65 | x = grad_reverse(x)
66 | x = self.dc_drop1(self.dc_relu1(self.dc_ip1(x)))
67 | x = self.dc_drop2(self.dc_relu2(self.dc_ip2(x)))
68 | x = F.sigmoid(self.clssifer(x))
69 | label = self.LabelResizeLayer(x, need_backprop)
70 | return x, label
71 |
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/LabelResizeLayer.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division, print_function
2 |
3 | import cv2
4 | import numpy as np
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torchvision.models as models
9 | from model.utils.config import cfg
10 | from torch.autograd import Function, Variable
11 |
12 |
13 | class ImageLabelResizeLayer(nn.Module):
14 | """
15 | Resize the domain label to the same spatial size as the feature samples
16 | """
17 |
18 | def __init__(self):
19 | super(ImageLabelResizeLayer, self).__init__()
20 |
21 | def forward(self, x, need_backprop):
22 |
23 | feats = x.detach().cpu().numpy()
24 | lbs = need_backprop.detach().cpu().numpy()
25 | gt_blob = np.zeros(
26 | (lbs.shape[0], feats.shape[2], feats.shape[3], 1), dtype=np.float32
27 | )
28 | for i in range(lbs.shape[0]):
29 | lb = np.array([lbs[i]])
30 | lbs_resize = cv2.resize(
31 | lb, (feats.shape[3], feats.shape[2]), interpolation=cv2.INTER_NEAREST
32 | )
33 | gt_blob[i, 0 : lbs_resize.shape[0], 0 : lbs_resize.shape[1], 0] = lbs_resize
34 |
35 | channel_swap = (0, 3, 1, 2)
36 | gt_blob = gt_blob.transpose(channel_swap)
37 | y = Variable(torch.from_numpy(gt_blob)).cuda()
38 | y = y.squeeze(1).long()
39 | return y
40 |
41 |
42 | class InstanceLabelResizeLayer(nn.Module):
43 | def __init__(self):
44 | super(InstanceLabelResizeLayer, self).__init__()
45 | self.minibatch = 256
46 |
47 | def forward(self, x, need_backprop):
48 | feats = x.data.cpu().numpy()
49 | lbs = need_backprop.data.cpu().numpy()
50 | resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)
51 | for i in range(lbs.shape[0]):
52 | resized_lbs[i * self.minibatch : (i + 1) * self.minibatch] = lbs[i]
53 |
54 | y = torch.from_numpy(resized_lbs).cuda()
55 |
56 | return y
57 |
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/DA.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/DA.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/LabelResizeLayer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/LabelResizeLayer.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/faster_rcnn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/faster_rcnn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/resnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/resnet.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/vgg16.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/da_faster_rcnn_instance_da_weight/__pycache__/vgg16.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/faster_rcnn/__init__.py
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/faster_rcnn/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/__pycache__/faster_rcnn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/faster_rcnn/__pycache__/faster_rcnn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/__pycache__/resnet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/faster_rcnn/__pycache__/resnet.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/__pycache__/vgg16.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/faster_rcnn/__pycache__/vgg16.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/faster_rcnn/vgg16.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Tensorflow Faster R-CNN
3 | # Licensed under The MIT License [see LICENSE for details]
4 | # Written by Xinlei Chen
5 | # --------------------------------------------------------
6 | from __future__ import absolute_import, division, print_function
7 |
8 | import torch
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 | import torchvision.models as models
12 | from model.faster_rcnn.faster_rcnn import _fasterRCNN
13 | from torch.autograd import Variable
14 |
15 |
16 | class vgg16(_fasterRCNN):
17 | def __init__(self, classes, pretrained=False, class_agnostic=False):
18 | self.model_path = "data/pretrained_model/vgg16_caffe.pth"
19 | self.dout_base_model = 512
20 | self.pretrained = pretrained
21 | self.class_agnostic = class_agnostic
22 |
23 | _fasterRCNN.__init__(self, classes, class_agnostic)
24 |
25 | def _init_modules(self):
26 | vgg = models.vgg16()
27 | if self.pretrained:
28 | print("Loading pretrained weights from %s" % (self.model_path))
29 | state_dict = torch.load(self.model_path)
30 | vgg.load_state_dict(
31 | {k: v for k, v in state_dict.items() if k in vgg.state_dict()}
32 | )
33 |
34 | vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
35 |
36 | # not using the last maxpool layer
37 | self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])
38 |
39 | # Fix the layers before conv3:
40 | for layer in range(10):
41 | for p in self.RCNN_base[layer].parameters():
42 | p.requires_grad = False
43 |
44 | # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)
45 |
46 | self.RCNN_top = vgg.classifier
47 |
48 | # classification and box-regression heads on the 4096-d fc7 feature
49 | self.RCNN_cls_score = nn.Linear(4096, self.n_classes)
50 |
51 | if self.class_agnostic:
52 | self.RCNN_bbox_pred = nn.Linear(4096, 4)
53 | else:
54 | self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)
55 |
56 | def _head_to_tail(self, pool5):
57 |
58 | pool5_flat = pool5.view(pool5.size(0), -1)
59 | fc7 = self.RCNN_top(pool5_flat)
60 |
61 | return fc7
62 |
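63 | # Construction sketch (hedged: the class tuple is a placeholder, and
64 | # create_architecture() is provided by the _fasterRCNN base class):
65 | #
66 | #   classes = ("__background__", "car", "person")
67 | #   net = vgg16(classes, pretrained=True, class_agnostic=False)
68 | #   net.create_architecture()   # invokes _init_modules() defined above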
--------------------------------------------------------------------------------
/lib/model/nms/.gitignore:
--------------------------------------------------------------------------------
1 | *.c
2 | *.cpp
3 | *.so
4 |
--------------------------------------------------------------------------------
/lib/model/nms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/nms/__init__.py
--------------------------------------------------------------------------------
/lib/model/nms/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/nms/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/model/nms/_ext/nms/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._nms import ffi as _ffi
4 | from ._nms import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/model/nms/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | # this_file = os.path.dirname(__file__)
9 |
10 | sources = []
11 | headers = []
12 | defines = []
13 | with_cuda = False
14 |
15 | if torch.cuda.is_available():
16 | print("Including CUDA code.")
17 | sources += ["src/nms_cuda.c"]
18 | headers += ["src/nms_cuda.h"]
19 | defines += [("WITH_CUDA", None)]
20 | with_cuda = True
21 |
22 | this_file = os.path.dirname(os.path.realpath(__file__))
23 | print(this_file)
24 | extra_objects = ["src/nms_cuda_kernel.cu.o"]
25 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
26 | print(extra_objects)
27 |
28 | ffi = create_extension(
29 | "_ext.nms",
30 | headers=headers,
31 | sources=sources,
32 | define_macros=defines,
33 | relative_to=__file__,
34 | with_cuda=with_cuda,
35 | extra_objects=extra_objects,
36 | )
37 |
38 | if __name__ == "__main__":
39 | ffi.build()
40 |
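41 | # Note: torch.utils.ffi no longer works on PyTorch >= 1.0, so this legacy
42 | # build script only runs on PyTorch <= 0.4; the cpp-extension ops compiled
43 | # from lib/model/csrc via setup.py are the replacement path in this repo.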
--------------------------------------------------------------------------------
/lib/model/nms/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # CUDA_PATH=/usr/local/cuda/
4 |
5 | cd src
6 | echo "Compiling stnm kernels by nvcc..."
7 | nvcc -c -o nms_cuda_kernel.cu.o nms_cuda_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52
8 |
9 | cd ../
10 | python build.py
11 |
--------------------------------------------------------------------------------
/lib/model/nms/nms_cpu.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import torch
5 |
6 |
7 | def nms_cpu(dets, thresh):
8 | dets = dets.numpy()
9 | x1 = dets[:, 0]
10 | y1 = dets[:, 1]
11 | x2 = dets[:, 2]
12 | y2 = dets[:, 3]
13 | scores = dets[:, 4]
14 |
15 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
16 | order = scores.argsort()[::-1]
17 |
18 | keep = []
19 | while order.size > 0:
20 | i = order.item(0)
21 | keep.append(i)
22 | xx1 = np.maximum(x1[i], x1[order[1:]])
23 | yy1 = np.maximum(y1[i], y1[order[1:]])
24 | xx2 = np.minimum(x2[i], x2[order[1:]])
25 | yy2 = np.minimum(y2[i], y2[order[1:]])
26 |
27 | w = np.maximum(0.0, xx2 - xx1 + 1)
28 | h = np.maximum(0.0, yy2 - yy1 + 1)
29 | inter = w * h
30 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
31 |
32 | inds = np.where(ovr <= thresh)[0]
33 | order = order[inds + 1]
34 |
35 | return torch.IntTensor(keep)
36 |
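37 | # Usage sketch (illustrative values): dets is an (N, 5) tensor of
38 | # [x1, y1, x2, y2, score] rows and the result holds the surviving row
39 | # indices, e.g.
40 | #
41 | #   dets = torch.tensor([[0., 0., 10., 10., 0.9],
42 | #                        [1., 1., 11., 11., 0.8]])
43 | #   keep = nms_cpu(dets, 0.5)   # -> tensor([0], dtype=torch.int32)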
--------------------------------------------------------------------------------
/lib/model/nms/nms_gpu.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | import numpy as np
4 | import torch
5 |
6 | from ._ext import nms
7 |
8 |
9 | def nms_gpu(dets, thresh):
10 | keep = dets.new(dets.size(0), 1).zero_().int()
11 | num_out = dets.new(1).zero_().int()
12 | nms.nms_cuda(keep, dets, num_out, thresh)
13 | keep = keep[: num_out[0]]
14 | return keep
15 |
--------------------------------------------------------------------------------
/lib/model/nms/nms_wrapper.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 | import torch
8 | from model.nms.nms_cpu import nms_cpu
9 | from model.utils.config import cfg
10 |
11 | if torch.cuda.is_available():
12 | from model.nms.nms_gpu import nms_gpu
13 |
14 |
15 | def nms(dets, thresh, force_cpu=False):
16 | """Dispatch to either CPU or GPU NMS implementations."""
17 | if dets.shape[0] == 0:
18 | return []
19 | # ---numpy version---
20 | # original: return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
21 | # ---pytorch version---
22 |
23 | return nms_cpu(dets, thresh) if force_cpu else nms_gpu(dets, thresh)
24 |
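25 | # Caveat: nms_gpu is only imported when torch.cuda.is_available(), so calling
26 | # nms(dets, thresh) with force_cpu=False on a CPU-only build raises a
27 | # NameError; pass force_cpu=True there to fall back to the numpy version.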
--------------------------------------------------------------------------------
/lib/model/nms/src/nms_cuda.h:
--------------------------------------------------------------------------------
1 | // int nms_cuda(THCudaTensor *keep_out, THCudaTensor *num_out,
2 | // THCudaTensor *boxes_host, THCudaTensor *nms_overlap_thresh);
3 |
4 | int nms_cuda(THCudaIntTensor *keep_out, THCudaTensor *boxes_host,
5 | THCudaIntTensor *num_out, float nms_overlap_thresh);
6 |
--------------------------------------------------------------------------------
/lib/model/nms/src/nms_cuda_kernel.h:
--------------------------------------------------------------------------------
1 | #ifdef __cplusplus
2 | extern "C" {
3 | #endif
4 |
5 | void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num,
6 | int boxes_dim, float nms_overlap_thresh);
7 |
8 | #ifdef __cplusplus
9 | }
10 | #endif
11 |
--------------------------------------------------------------------------------
/lib/model/roi_align/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_align/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_align/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_align/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_align/_ext/roi_align/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_align import ffi as _ffi
4 | from ._roi_align import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/model/roi_align/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | sources = ["src/roi_align.c"]
9 | headers = ["src/roi_align.h"]
10 | extra_objects = []
11 | # sources = []
12 | # headers = []
13 | defines = []
14 | with_cuda = False
15 |
16 | this_file = os.path.dirname(os.path.realpath(__file__))
17 | print(this_file)
18 |
19 | if torch.cuda.is_available():
20 | print("Including CUDA code.")
21 | sources += ["src/roi_align_cuda.c"]
22 | headers += ["src/roi_align_cuda.h"]
23 | defines += [("WITH_CUDA", None)]
24 | with_cuda = True
25 |
26 | extra_objects = ["src/roi_align_kernel.cu.o"]
27 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
28 |
29 | ffi = create_extension(
30 | "_ext.roi_align",
31 | headers=headers,
32 | sources=sources,
33 | define_macros=defines,
34 | relative_to=__file__,
35 | with_cuda=with_cuda,
36 | extra_objects=extra_objects,
37 | )
38 |
39 | if __name__ == "__main__":
40 | ffi.build()
41 |
--------------------------------------------------------------------------------
/lib/model/roi_align/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_align/functions/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_align/functions/roi_align.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | from .._ext import roi_align
5 |
6 |
7 | # TODO use save_for_backward instead
8 | class RoIAlignFunction(Function):
9 | def __init__(self, aligned_height, aligned_width, spatial_scale):
10 | self.aligned_width = int(aligned_width)
11 | self.aligned_height = int(aligned_height)
12 | self.spatial_scale = float(spatial_scale)
13 | self.rois = None
14 | self.feature_size = None
15 |
16 | def forward(self, features, rois):
17 | self.rois = rois
18 | self.feature_size = features.size()
19 |
20 | batch_size, num_channels, data_height, data_width = features.size()
21 | num_rois = rois.size(0)
22 |
23 | output = features.new(
24 | num_rois, num_channels, self.aligned_height, self.aligned_width
25 | ).zero_()
26 | if features.is_cuda:
27 | roi_align.roi_align_forward_cuda(
28 | self.aligned_height,
29 | self.aligned_width,
30 | self.spatial_scale,
31 | features,
32 | rois,
33 | output,
34 | )
35 | else:
36 | roi_align.roi_align_forward(
37 | self.aligned_height,
38 | self.aligned_width,
39 | self.spatial_scale,
40 | features,
41 | rois,
42 | output,
43 | )
44 | # raise NotImplementedError
45 |
46 | return output
47 |
48 | def backward(self, grad_output):
49 | assert self.feature_size is not None and grad_output.is_cuda
50 |
51 | batch_size, num_channels, data_height, data_width = self.feature_size
52 |
53 | grad_input = self.rois.new(
54 | batch_size, num_channels, data_height, data_width
55 | ).zero_()
56 | roi_align.roi_align_backward_cuda(
57 | self.aligned_height,
58 | self.aligned_width,
59 | self.spatial_scale,
60 | grad_output,
61 | self.rois,
62 | grad_input,
63 | )
64 |
65 | # print grad_input
66 |
67 | return grad_input, None
68 |
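69 | # Note: this Function uses the legacy pre-0.4 autograd style (__init__ plus
70 | # instance state) rather than @staticmethod forward/backward with ctx, so it
71 | # only runs on old PyTorch; the compiled op under lib/model/csrc, exposed via
72 | # model.roi_layers, appears to be the maintained path in this repo.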
--------------------------------------------------------------------------------
/lib/model/roi_align/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | CUDA_PATH=/usr/local/cuda/
4 |
5 | cd src
6 | echo "Compiling my_lib kernels by nvcc..."
7 | nvcc -c -o roi_align_kernel.cu.o roi_align_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52
8 |
9 | cd ../
10 | python build.py
11 |
--------------------------------------------------------------------------------
/lib/model/roi_align/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_align/modules/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_align/modules/roi_align.py:
--------------------------------------------------------------------------------
1 | from torch.nn.functional import avg_pool2d, max_pool2d
2 | from torch.nn.modules.module import Module
3 |
4 | from ..functions.roi_align import RoIAlignFunction
5 |
6 |
7 | class RoIAlign(Module):
8 | def __init__(self, aligned_height, aligned_width, spatial_scale):
9 | super(RoIAlign, self).__init__()
10 |
11 | self.aligned_width = int(aligned_width)
12 | self.aligned_height = int(aligned_height)
13 | self.spatial_scale = float(spatial_scale)
14 |
15 | def forward(self, features, rois):
16 | return RoIAlignFunction(
17 | self.aligned_height, self.aligned_width, self.spatial_scale
18 | )(features, rois)
19 |
20 |
21 | class RoIAlignAvg(Module):
22 | def __init__(self, aligned_height, aligned_width, spatial_scale):
23 | super(RoIAlignAvg, self).__init__()
24 |
25 | self.aligned_width = int(aligned_width)
26 | self.aligned_height = int(aligned_height)
27 | self.spatial_scale = float(spatial_scale)
28 |
29 | def forward(self, features, rois):
30 | x = RoIAlignFunction(
31 | self.aligned_height + 1, self.aligned_width + 1, self.spatial_scale
32 | )(features, rois)
33 | return avg_pool2d(x, kernel_size=2, stride=1)
34 |
35 |
36 | class RoIAlignMax(Module):
37 | def __init__(self, aligned_height, aligned_width, spatial_scale):
38 | super(RoIAlignMax, self).__init__()
39 |
40 | self.aligned_width = int(aligned_width)
41 | self.aligned_height = int(aligned_height)
42 | self.spatial_scale = float(spatial_scale)
43 |
44 | def forward(self, features, rois):
45 | x = RoIAlignFunction(
46 | self.aligned_height + 1, self.aligned_width + 1, self.spatial_scale
47 | )(features, rois)
48 | return max_pool2d(x, kernel_size=2, stride=1)
49 |
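50 | # Design note: RoIAlignAvg and RoIAlignMax request an (H+1) x (W+1) aligned
51 | # grid and then pool with kernel_size=2, stride=1. The 2x2 window slides over
52 | # the extra row and column, producing exactly an H x W output whose cells
53 | # average (or take the max of) four neighbouring sample points.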
--------------------------------------------------------------------------------
/lib/model/roi_align/src/roi_align.h:
--------------------------------------------------------------------------------
1 | int roi_align_forward(int aligned_height, int aligned_width, float spatial_scale,
2 | THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output);
3 |
4 | int roi_align_backward(int aligned_height, int aligned_width, float spatial_scale,
5 | THFloatTensor * top_grad, THFloatTensor * rois, THFloatTensor * bottom_grad);
6 |
--------------------------------------------------------------------------------
/lib/model/roi_align/src/roi_align_cuda.c:
--------------------------------------------------------------------------------
1 | #include <THC/THC.h>
2 | #include <math.h>
3 | #include "roi_align_kernel.h"
4 |
5 | extern THCState *state;
6 |
7 | int roi_align_forward_cuda(int aligned_height, int aligned_width, float spatial_scale,
8 | THCudaTensor * features, THCudaTensor * rois, THCudaTensor * output)
9 | {
10 | // Grab the input tensor
11 | float * data_flat = THCudaTensor_data(state, features);
12 | float * rois_flat = THCudaTensor_data(state, rois);
13 |
14 | float * output_flat = THCudaTensor_data(state, output);
15 |
16 | // Number of ROIs
17 | int num_rois = THCudaTensor_size(state, rois, 0);
18 | int size_rois = THCudaTensor_size(state, rois, 1);
19 | if (size_rois != 5)
20 | {
21 | return 0;
22 | }
23 |
24 | // data height
25 | int data_height = THCudaTensor_size(state, features, 2);
26 | // data width
27 | int data_width = THCudaTensor_size(state, features, 3);
28 | // Number of channels
29 | int num_channels = THCudaTensor_size(state, features, 1);
30 |
31 | cudaStream_t stream = THCState_getCurrentStream(state);
32 |
33 | ROIAlignForwardLaucher(
34 | data_flat, spatial_scale, num_rois, data_height,
35 | data_width, num_channels, aligned_height,
36 | aligned_width, rois_flat,
37 | output_flat, stream);
38 |
39 | return 1;
40 | }
41 |
42 | int roi_align_backward_cuda(int aligned_height, int aligned_width, float spatial_scale,
43 | THCudaTensor * top_grad, THCudaTensor * rois, THCudaTensor * bottom_grad)
44 | {
45 | // Grab the input tensor
46 | float * top_grad_flat = THCudaTensor_data(state, top_grad);
47 | float * rois_flat = THCudaTensor_data(state, rois);
48 |
49 | float * bottom_grad_flat = THCudaTensor_data(state, bottom_grad);
50 |
51 | // Number of ROIs
52 | int num_rois = THCudaTensor_size(state, rois, 0);
53 | int size_rois = THCudaTensor_size(state, rois, 1);
54 | if (size_rois != 5)
55 | {
56 | return 0;
57 | }
58 |
59 | // batch size
60 | int batch_size = THCudaTensor_size(state, bottom_grad, 0);
61 | // data height
62 | int data_height = THCudaTensor_size(state, bottom_grad, 2);
63 | // data width
64 | int data_width = THCudaTensor_size(state, bottom_grad, 3);
65 | // Number of channels
66 | int num_channels = THCudaTensor_size(state, bottom_grad, 1);
67 |
68 | cudaStream_t stream = THCState_getCurrentStream(state);
69 | ROIAlignBackwardLaucher(
70 | top_grad_flat, spatial_scale, batch_size, num_rois, data_height,
71 | data_width, num_channels, aligned_height,
72 | aligned_width, rois_flat,
73 | bottom_grad_flat, stream);
74 |
75 | return 1;
76 | }
77 |
--------------------------------------------------------------------------------
/lib/model/roi_align/src/roi_align_cuda.h:
--------------------------------------------------------------------------------
1 | int roi_align_forward_cuda(int aligned_height, int aligned_width, float spatial_scale,
2 | THCudaTensor * features, THCudaTensor * rois, THCudaTensor * output);
3 |
4 | int roi_align_backward_cuda(int aligned_height, int aligned_width, float spatial_scale,
5 | THCudaTensor * top_grad, THCudaTensor * rois, THCudaTensor * bottom_grad);
6 |
--------------------------------------------------------------------------------
/lib/model/roi_align/src/roi_align_kernel.h:
--------------------------------------------------------------------------------
1 | #ifndef _ROI_ALIGN_KERNEL
2 | #define _ROI_ALIGN_KERNEL
3 |
4 | #ifdef __cplusplus
5 | extern "C" {
6 | #endif
7 |
8 | __global__ void ROIAlignForward(const int nthreads, const float* bottom_data,
9 | const float spatial_scale, const int height, const int width,
10 | const int channels, const int aligned_height, const int aligned_width,
11 | const float* bottom_rois, float* top_data);
12 |
13 | int ROIAlignForwardLaucher(
14 | const float* bottom_data, const float spatial_scale, const int num_rois, const int height,
15 | const int width, const int channels, const int aligned_height,
16 | const int aligned_width, const float* bottom_rois,
17 | float* top_data, cudaStream_t stream);
18 |
19 | __global__ void ROIAlignBackward(const int nthreads, const float* top_diff,
20 | const float spatial_scale, const int height, const int width,
21 | const int channels, const int aligned_height, const int aligned_width,
22 | float* bottom_diff, const float* bottom_rois);
23 |
24 | int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois,
25 | const int height, const int width, const int channels, const int aligned_height,
26 | const int aligned_width, const float* bottom_rois,
27 | float* bottom_diff, cudaStream_t stream);
28 |
29 | #ifdef __cplusplus
30 | }
31 | #endif
32 |
33 | #endif
34 |
35 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_crop/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_crop/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_crop/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_crop/_ext/crop_resize/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._crop_resize import ffi as _ffi
4 | from ._crop_resize import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | locals[symbol] = _wrap_function(fn, _ffi)
13 | __all__.append(symbol)
14 |
15 |
16 | _import_symbols(locals())
17 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/_ext/roi_crop/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_crop import ffi as _ffi
4 | from ._roi_crop import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | # this_file = os.path.dirname(__file__)
9 |
10 | sources = ["src/roi_crop.c"]
11 | headers = ["src/roi_crop.h"]
12 | defines = []
13 | with_cuda = False
14 |
15 | if torch.cuda.is_available():
16 | print("Including CUDA code.")
17 | sources += ["src/roi_crop_cuda.c"]
18 | headers += ["src/roi_crop_cuda.h"]
19 | defines += [("WITH_CUDA", None)]
20 | with_cuda = True
21 |
22 | this_file = os.path.dirname(os.path.realpath(__file__))
23 | print(this_file)
24 | extra_objects = ["src/roi_crop_cuda_kernel.cu.o"]
25 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
26 |
27 | ffi = create_extension(
28 | "_ext.roi_crop",
29 | headers=headers,
30 | sources=sources,
31 | define_macros=defines,
32 | relative_to=__file__,
33 | with_cuda=with_cuda,
34 | extra_objects=extra_objects,
35 | )
36 |
37 | if __name__ == "__main__":
38 | ffi.build()
39 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_crop/functions/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_crop/functions/crop_resize.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import torch
3 | from cffi import FFI
4 | from torch.autograd import Function
5 |
6 | from .._ext import roi_crop
7 |
8 | ffi = FFI()
9 |
10 |
11 | class RoICropFunction(Function):
12 | def forward(self, input1, input2):
13 | self.input1 = input1
14 | self.input2 = input2
15 | self.device_c = ffi.new("int *")
16 | output = torch.zeros(
17 | input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2]
18 | )
19 | # print('device %d' % torch.cuda.current_device())
20 | if input1.is_cuda:
21 | self.device = torch.cuda.current_device()
22 | else:
23 | self.device = -1
24 | self.device_c[0] = self.device
25 | if not input1.is_cuda:
26 | roi_crop.BilinearSamplerBHWD_updateOutput(input1, input2, output)
27 | else:
28 | output = output.cuda(self.device)
29 | roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
30 | return output
31 |
32 | def backward(self, grad_output):
33 | grad_input1 = torch.zeros(self.input1.size())
34 | grad_input2 = torch.zeros(self.input2.size())
35 | # print('backward device %d' % self.device)
36 | if not grad_output.is_cuda:
37 | roi_crop.BilinearSamplerBHWD_updateGradInput(
38 | self.input1, self.input2, grad_input1, grad_input2, grad_output
39 | )
40 | else:
41 | grad_input1 = grad_input1.cuda(self.device)
42 | grad_input2 = grad_input2.cuda(self.device)
43 | roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(
44 | self.input1, self.input2, grad_input1, grad_input2, grad_output
45 | )
46 | return grad_input1, grad_input2
47 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/functions/gridgen.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import numpy as np
3 | import torch
4 | from torch.autograd import Function
5 |
6 |
7 | class AffineGridGenFunction(Function):
8 | def __init__(self, height, width, lr=1):
9 | super(AffineGridGenFunction, self).__init__()
10 | self.lr = lr
11 | self.height, self.width = height, width
12 | self.grid = np.zeros([self.height, self.width, 3], dtype=np.float32)
13 | self.grid[:, :, 0] = np.expand_dims(
14 | np.repeat(
15 | np.expand_dims(np.arange(-1, 1, 2.0 / (self.height)), 0),
16 | repeats=self.width,
17 | axis=0,
18 | ).T,
19 | 0,
20 | )
21 | self.grid[:, :, 1] = np.expand_dims(
22 | np.repeat(
23 | np.expand_dims(np.arange(-1, 1, 2.0 / (self.width)), 0),
24 | repeats=self.height,
25 | axis=0,
26 | ),
27 | 0,
28 | )
29 | # self.grid[:,:,0] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/(self.height - 1)), 0), repeats = self.width, axis = 0).T, 0)
30 | # self.grid[:,:,1] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/(self.width - 1)), 0), repeats = self.height, axis = 0), 0)
31 | self.grid[:, :, 2] = np.ones([self.height, width])
32 | self.grid = torch.from_numpy(self.grid.astype(np.float32))
33 | # print(self.grid)
34 |
35 | def forward(self, input1):
36 | self.input1 = input1
37 | output = input1.new(torch.Size([input1.size(0)]) + self.grid.size()).zero_()
38 | self.batchgrid = input1.new(
39 | torch.Size([input1.size(0)]) + self.grid.size()
40 | ).zero_()
41 | for i in range(input1.size(0)):
42 | self.batchgrid[i] = self.grid.type_as(self.batchgrid[i])
43 |
44 | # if input1.is_cuda:
45 | # self.batchgrid = self.batchgrid.cuda()
46 | # output = output.cuda()
47 |
48 | for i in range(input1.size(0)):
49 | output = torch.bmm(
50 | self.batchgrid.view(-1, self.height * self.width, 3),
51 | torch.transpose(input1, 1, 2),
52 | ).view(-1, self.height, self.width, 2)
53 |
54 | return output
55 |
56 | def backward(self, grad_output):
57 |
58 | grad_input1 = self.input1.new(self.input1.size()).zero_()
59 |
60 | # if grad_output.is_cuda:
61 | # self.batchgrid = self.batchgrid.cuda()
62 | # grad_input1 = grad_input1.cuda()
63 |
64 | grad_input1 = torch.baddbmm(
65 | grad_input1,
66 | torch.transpose(grad_output.view(-1, self.height * self.width, 2), 1, 2),
67 | self.batchgrid.view(-1, self.height * self.width, 3),
68 | )
69 | return grad_input1
70 |
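71 | # Reading aid: self.grid stores homogeneous [row, col, 1] coordinates in
72 | # [-1, 1), and forward() multiplies them by the transpose of the (B, 2, 3)
73 | # affine matrices in input1. This is the same sampling-grid computation as
74 | # torch.nn.functional.affine_grid, up to the endpoint convention shown in
75 | # the commented-out alternative above.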
--------------------------------------------------------------------------------
/lib/model/roi_crop/functions/roi_crop.py:
--------------------------------------------------------------------------------
1 | # functions/add.py
2 | import torch
3 | from torch.autograd import Function
4 |
5 | from .._ext import roi_crop
6 |
7 |
8 | class RoICropFunction(Function):
9 | def forward(self, input1, input2):
10 | self.input1 = input1.clone()
11 | self.input2 = input2.clone()
12 | output = input2.new(
13 | input2.size()[0], input1.size()[1], input2.size()[1], input2.size()[2]
14 | ).zero_()
15 | assert (
16 | output.get_device() == input1.get_device()
17 | ), "output and input1 must be on the same device"
18 | assert (
19 | output.get_device() == input2.get_device()
20 | ), "output and input2 must be on the same device"
21 | roi_crop.BilinearSamplerBHWD_updateOutput_cuda(input1, input2, output)
22 | return output
23 |
24 | def backward(self, grad_output):
25 | grad_input1 = self.input1.new(self.input1.size()).zero_()
26 | grad_input2 = self.input2.new(self.input2.size()).zero_()
27 | roi_crop.BilinearSamplerBHWD_updateGradInput_cuda(
28 | self.input1, self.input2, grad_input1, grad_input2, grad_output
29 | )
30 | return grad_input1, grad_input2
31 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | CUDA_PATH=/usr/local/cuda/
4 |
5 | cd src
6 | echo "Compiling my_lib kernels by nvcc..."
7 | nvcc -c -o roi_crop_cuda_kernel.cu.o roi_crop_cuda_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52
8 |
9 | cd ../
10 | python build.py
11 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_crop/modules/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_crop/modules/roi_crop.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 |
3 | from ..functions.roi_crop import RoICropFunction
4 |
5 |
6 | class _RoICrop(Module):
7 | def __init__(self, layout="BHWD"):
8 | super(_RoICrop, self).__init__()
9 |
10 | def forward(self, input1, input2):
11 | return RoICropFunction()(input1, input2)
12 |
--------------------------------------------------------------------------------
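How the grid generator and _RoICrop combine (a hedged sketch, not a repository file: it needs the compiled _ext.roi_crop CUDA extension, and the output shape below follows the Python wrapper's allocation, an assumption on my part):

import torch
from model.roi_crop.modules.roi_crop import _RoICrop

features = torch.randn(2, 512, 50, 50).cuda()   # input1
grid = torch.rand(2, 7, 7, 2).cuda() * 2 - 1    # input2: normalized coords in [-1, 1]
crop = _RoICrop()
out = crop(features, grid)                      # (2, 512, 7, 7) per the output allocation
--------------------------------------------------------------------------------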
/lib/model/roi_crop/src/roi_crop.h:
--------------------------------------------------------------------------------
1 | int BilinearSamplerBHWD_updateOutput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output);
2 |
3 | int BilinearSamplerBHWD_updateGradInput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
4 | THFloatTensor *gradGrids, THFloatTensor *gradOutput);
5 |
6 |
7 |
8 | int BilinearSamplerBCHW_updateOutput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *output);
9 |
10 | int BilinearSamplerBCHW_updateGradInput(THFloatTensor *inputImages, THFloatTensor *grids, THFloatTensor *gradInputImages,
11 | THFloatTensor *gradGrids, THFloatTensor *gradOutput);
12 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/src/roi_crop_cuda.h:
--------------------------------------------------------------------------------
1 | // Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
2 | // we assume BHWD format in inputImages
3 | // we assume BHW(YX) format on grids
4 |
5 | int BilinearSamplerBHWD_updateOutput_cuda(THCudaTensor *inputImages, THCudaTensor *grids, THCudaTensor *output);
6 |
7 | int BilinearSamplerBHWD_updateGradInput_cuda(THCudaTensor *inputImages, THCudaTensor *grids, THCudaTensor *gradInputImages,
8 | THCudaTensor *gradGrids, THCudaTensor *gradOutput);
9 |
--------------------------------------------------------------------------------
/lib/model/roi_crop/src/roi_crop_cuda_kernel.h:
--------------------------------------------------------------------------------
1 | #ifdef __cplusplus
2 | extern "C" {
3 | #endif
4 |
5 |
6 | int BilinearSamplerBHWD_updateOutput_cuda_kernel(/*output->size[3]*/int oc,
7 | /*output->size[2]*/int ow,
8 | /*output->size[1]*/int oh,
9 | /*output->size[0]*/int ob,
10 | /*THCudaTensor_size(state, inputImages, 3)*/int ic,
11 | /*THCudaTensor_size(state, inputImages, 1)*/int ih,
12 | /*THCudaTensor_size(state, inputImages, 2)*/int iw,
13 | /*THCudaTensor_size(state, inputImages, 0)*/int ib,
14 | /*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int ish, int isw,
15 | /*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsh, int gsw,
16 | /*THCudaTensor *output*/float *output, int osb, int osc, int osh, int osw,
17 | /*THCState_getCurrentStream(state)*/cudaStream_t stream);
18 |
19 | int BilinearSamplerBHWD_updateGradInput_cuda_kernel(/*gradOutput->size[3]*/int goc,
20 | /*gradOutput->size[2]*/int gow,
21 | /*gradOutput->size[1]*/int goh,
22 | /*gradOutput->size[0]*/int gob,
23 | /*THCudaTensor_size(state, inputImages, 3)*/int ic,
24 | /*THCudaTensor_size(state, inputImages, 1)*/int ih,
25 | /*THCudaTensor_size(state, inputImages, 2)*/int iw,
26 | /*THCudaTensor_size(state, inputImages, 0)*/int ib,
27 | /*THCudaTensor *inputImages*/float *inputImages, int isb, int isc, int ish, int isw,
28 | /*THCudaTensor *grids*/float *grids, int gsb, int gsc, int gsh, int gsw,
29 | /*THCudaTensor *gradInputImages*/float *gradInputImages, int gisb, int gisc, int gish, int gisw,
30 | /*THCudaTensor *gradGrids*/float *gradGrids, int ggsb, int ggsc, int ggsh, int ggsw,
31 | /*THCudaTensor *gradOutput*/float *gradOutput, int gosb, int gosc, int gosh, int gosw,
32 | /*THCState_getCurrentStream(state)*/cudaStream_t stream);
33 |
34 |
35 | #ifdef __cplusplus
36 | }
37 | #endif
38 |
--------------------------------------------------------------------------------
/lib/model/roi_layers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 |
4 | from .nms import nms
5 | from .roi_align import ROIAlign, roi_align
6 | from .roi_pool import ROIPool, roi_pool
7 |
8 | __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool"]
9 |
--------------------------------------------------------------------------------
/lib/model/roi_layers/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_layers/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/roi_layers/__pycache__/nms.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_layers/__pycache__/nms.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/roi_layers/__pycache__/roi_align.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_layers/__pycache__/roi_align.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/roi_layers/__pycache__/roi_pool.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_layers/__pycache__/roi_pool.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/roi_layers/nms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | # from ._utils import _C
3 | from model import _C
4 |
5 | nms = _C.nms
6 | # nms.__doc__ = """
7 | # This function performs Non-maximum suppression"""
8 |
--------------------------------------------------------------------------------
/lib/model/roi_layers/roi_align.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 | from model import _C
4 | from torch import nn
5 | from torch.autograd import Function
6 | from torch.autograd.function import once_differentiable
7 | from torch.nn.modules.utils import _pair
8 |
9 |
10 | class _ROIAlign(Function):
11 | @staticmethod
12 | def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
13 | ctx.save_for_backward(roi)
14 | ctx.output_size = _pair(output_size)
15 | ctx.spatial_scale = spatial_scale
16 | ctx.sampling_ratio = sampling_ratio
17 | ctx.input_shape = input.size()
18 | output = _C.roi_align_forward(
19 | input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
20 | )
21 | return output
22 |
23 | @staticmethod
24 | @once_differentiable
25 | def backward(ctx, grad_output):
26 | (rois,) = ctx.saved_tensors
27 | output_size = ctx.output_size
28 | spatial_scale = ctx.spatial_scale
29 | sampling_ratio = ctx.sampling_ratio
30 | bs, ch, h, w = ctx.input_shape
31 | grad_input = _C.roi_align_backward(
32 | grad_output,
33 | rois,
34 | spatial_scale,
35 | output_size[0],
36 | output_size[1],
37 | bs,
38 | ch,
39 | h,
40 | w,
41 | sampling_ratio,
42 | )
43 | return grad_input, None, None, None, None
44 |
45 |
46 | roi_align = _ROIAlign.apply
47 |
48 |
49 | class ROIAlign(nn.Module):
50 | def __init__(self, output_size, spatial_scale, sampling_ratio):
51 | super(ROIAlign, self).__init__()
52 | self.output_size = output_size
53 | self.spatial_scale = spatial_scale
54 | self.sampling_ratio = sampling_ratio
55 |
56 | def forward(self, input, rois):
57 | return roi_align(
58 | input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
59 | )
60 |
61 | def __repr__(self):
62 | tmpstr = self.__class__.__name__ + "("
63 | tmpstr += "output_size=" + str(self.output_size)
64 | tmpstr += ", spatial_scale=" + str(self.spatial_scale)
65 | tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
66 | tmpstr += ")"
67 | return tmpstr
68 |
--------------------------------------------------------------------------------
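Usage sketch for ROIAlign (editorial example, not a repository file; the (K, 5) RoI layout [batch_index, x1, y1, x2, y2] in input-image coordinates follows the csrc convention this wrapper targets, and the compiled model._C extension must be built first):

import torch
from model.roi_layers import ROIAlign

feat = torch.randn(1, 256, 50, 50)                    # 1/16-scale feature map
rois = torch.tensor([[0.0, 0.0, 0.0, 160.0, 160.0]])  # one RoI in image #0
align = ROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16.0, sampling_ratio=2)
pooled = align(feat, rois)                            # -> (1, 256, 7, 7)
--------------------------------------------------------------------------------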
/lib/model/roi_layers/roi_pool.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | import torch
3 | from model import _C
4 | from torch import nn
5 | from torch.autograd import Function
6 | from torch.autograd.function import once_differentiable
7 | from torch.nn.modules.utils import _pair
8 |
9 |
10 | class _ROIPool(Function):
11 | @staticmethod
12 | def forward(ctx, input, roi, output_size, spatial_scale):
13 | ctx.output_size = _pair(output_size)
14 | ctx.spatial_scale = spatial_scale
15 | ctx.input_shape = input.size()
16 | output, argmax = _C.roi_pool_forward(
17 | input, roi, spatial_scale, output_size[0], output_size[1]
18 | )
19 | ctx.save_for_backward(input, roi, argmax)
20 | return output
21 |
22 | @staticmethod
23 | @once_differentiable
24 | def backward(ctx, grad_output):
25 | input, rois, argmax = ctx.saved_tensors
26 | output_size = ctx.output_size
27 | spatial_scale = ctx.spatial_scale
28 | bs, ch, h, w = ctx.input_shape
29 | grad_input = _C.roi_pool_backward(
30 | grad_output,
31 | input,
32 | rois,
33 | argmax,
34 | spatial_scale,
35 | output_size[0],
36 | output_size[1],
37 | bs,
38 | ch,
39 | h,
40 | w,
41 | )
42 | return grad_input, None, None, None
43 |
44 |
45 | roi_pool = _ROIPool.apply
46 |
47 |
48 | class ROIPool(nn.Module):
49 | def __init__(self, output_size, spatial_scale):
50 | super(ROIPool, self).__init__()
51 | self.output_size = output_size
52 | self.spatial_scale = spatial_scale
53 |
54 | def forward(self, input, rois):
55 | return roi_pool(input, rois, self.output_size, self.spatial_scale)
56 |
57 | def __repr__(self):
58 | tmpstr = self.__class__.__name__ + "("
59 | tmpstr += "output_size=" + str(self.output_size)
60 | tmpstr += ", spatial_scale=" + str(self.spatial_scale)
61 | tmpstr += ")"
62 | return tmpstr
63 |
--------------------------------------------------------------------------------
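The ROIPool counterpart (hedged sketch: unlike ROIAlign's bilinear sampling it max-pools each bin, which is why forward saves argmax for backward; in this csrc family the ROIPool kernel is CUDA-only, so CUDA tensors are assumed):

import torch
from model.roi_layers import ROIPool

feat = torch.randn(1, 256, 50, 50).cuda()
rois = torch.tensor([[0.0, 0.0, 0.0, 160.0, 160.0]]).cuda()
pool = ROIPool(output_size=(7, 7), spatial_scale=1.0 / 16.0)
pooled = pool(feat, rois)    # -> (1, 256, 7, 7)
--------------------------------------------------------------------------------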
/lib/model/roi_pooling/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_pooling/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_pooling/_ext/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_pooling/_ext/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_pooling/_ext/roi_pooling/__init__.py:
--------------------------------------------------------------------------------
1 | from torch.utils.ffi import _wrap_function
2 |
3 | from ._roi_pooling import ffi as _ffi
4 | from ._roi_pooling import lib as _lib
5 |
6 | __all__ = []
7 |
8 |
9 | def _import_symbols(locals):
10 | for symbol in dir(_lib):
11 | fn = getattr(_lib, symbol)
12 | if callable(fn):
13 | locals[symbol] = _wrap_function(fn, _ffi)
14 | else:
15 | locals[symbol] = fn
16 | __all__.append(symbol)
17 |
18 |
19 | _import_symbols(locals())
20 |
--------------------------------------------------------------------------------
/lib/model/roi_pooling/build.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import os
4 |
5 | import torch
6 | from torch.utils.ffi import create_extension
7 |
8 | sources = ["src/roi_pooling.c"]
9 | headers = ["src/roi_pooling.h"]
10 | extra_objects = []
11 | defines = []
12 | with_cuda = False
13 |
14 | this_file = os.path.dirname(os.path.realpath(__file__))
15 | print(this_file)
16 |
17 | if torch.cuda.is_available():
18 | print("Including CUDA code.")
19 | sources += ["src/roi_pooling_cuda.c"]
20 | headers += ["src/roi_pooling_cuda.h"]
21 | defines += [("WITH_CUDA", None)]
22 | with_cuda = True
23 | extra_objects = ["src/roi_pooling.cu.o"]
24 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
25 |
26 | ffi = create_extension(
27 | "_ext.roi_pooling",
28 | headers=headers,
29 | sources=sources,
30 | define_macros=defines,
31 | relative_to=__file__,
32 | with_cuda=with_cuda,
33 | extra_objects=extra_objects,
34 | )
35 |
36 | if __name__ == "__main__":
37 | ffi.build()
38 |
--------------------------------------------------------------------------------
/lib/model/roi_pooling/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_pooling/functions/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_pooling/functions/roi_pool.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.autograd import Function
3 |
4 | from .._ext import roi_pooling
5 |
6 |
7 | class RoIPoolFunction(Function):
8 | def __init__(ctx, pooled_height, pooled_width, spatial_scale):
9 | ctx.pooled_width = pooled_width
10 | ctx.pooled_height = pooled_height
11 | ctx.spatial_scale = spatial_scale
12 | ctx.feature_size = None
13 |
14 | def forward(ctx, features, rois):
15 | ctx.feature_size = features.size()
16 | batch_size, num_channels, data_height, data_width = ctx.feature_size
17 | num_rois = rois.size(0)
18 | output = features.new(
19 | num_rois, num_channels, ctx.pooled_height, ctx.pooled_width
20 | ).zero_()
21 | ctx.argmax = (
22 | features.new(num_rois, num_channels, ctx.pooled_height, ctx.pooled_width)
23 | .zero_()
24 | .int()
25 | )
26 | ctx.rois = rois
27 | if not features.is_cuda:
28 | _features = features.permute(0, 2, 3, 1)
29 | roi_pooling.roi_pooling_forward(
30 | ctx.pooled_height,
31 | ctx.pooled_width,
32 | ctx.spatial_scale,
33 | _features,
34 | rois,
35 | output,
36 | )
37 | else:
38 | roi_pooling.roi_pooling_forward_cuda(
39 | ctx.pooled_height,
40 | ctx.pooled_width,
41 | ctx.spatial_scale,
42 | features,
43 | rois,
44 | output,
45 | ctx.argmax,
46 | )
47 |
48 | return output
49 |
50 | def backward(ctx, grad_output):
51 | assert ctx.feature_size is not None and grad_output.is_cuda
52 | batch_size, num_channels, data_height, data_width = ctx.feature_size
53 | grad_input = grad_output.new(
54 | batch_size, num_channels, data_height, data_width
55 | ).zero_()
56 |
57 | roi_pooling.roi_pooling_backward_cuda(
58 | ctx.pooled_height,
59 | ctx.pooled_width,
60 | ctx.spatial_scale,
61 | grad_output,
62 | ctx.rois,
63 | grad_input,
64 | ctx.argmax,
65 | )
66 |
67 | return grad_input, None
68 |
--------------------------------------------------------------------------------
/lib/model/roi_pooling/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/roi_pooling/modules/__init__.py
--------------------------------------------------------------------------------
/lib/model/roi_pooling/modules/roi_pool.py:
--------------------------------------------------------------------------------
1 | from torch.nn.modules.module import Module
2 |
3 | from ..functions.roi_pool import RoIPoolFunction
4 |
5 |
6 | class _RoIPooling(Module):
7 | def __init__(self, pooled_height, pooled_width, spatial_scale):
8 | super(_RoIPooling, self).__init__()
9 |
10 | self.pooled_width = int(pooled_width)
11 | self.pooled_height = int(pooled_height)
12 | self.spatial_scale = float(spatial_scale)
13 |
14 | def forward(self, features, rois):
15 | return RoIPoolFunction(
16 | self.pooled_height, self.pooled_width, self.spatial_scale
17 | )(features, rois)
18 |
--------------------------------------------------------------------------------
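The legacy FFI path differs from roi_layers only in call style: the pooling parameters live on a per-call Function object rather than in ctx. A sketch, assuming the _ext.roi_pooling build from build.py above succeeded (which requires torch <= 0.4):

from model.roi_pooling.modules.roi_pool import _RoIPooling

pool = _RoIPooling(pooled_height=7, pooled_width=7, spatial_scale=1.0 / 16.0)
# output = pool(features, rois)   # features: (B, C, H, W) CUDA tensor, rois: (K, 5)
--------------------------------------------------------------------------------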
/lib/model/roi_pooling/src/roi_pooling.h:
--------------------------------------------------------------------------------
1 | int roi_pooling_forward(int pooled_height, int pooled_width, float spatial_scale,
2 | THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output);
--------------------------------------------------------------------------------
/lib/model/roi_pooling/src/roi_pooling_cuda.c:
--------------------------------------------------------------------------------
1 | #include <THC/THC.h>
2 | #include <math.h>
3 | #include "roi_pooling_kernel.h"
4 |
5 | extern THCState *state;
6 |
7 | int roi_pooling_forward_cuda(int pooled_height, int pooled_width, float spatial_scale,
8 | THCudaTensor * features, THCudaTensor * rois, THCudaTensor * output, THCudaIntTensor * argmax)
9 | {
10 | // Grab the input tensor
11 | float * data_flat = THCudaTensor_data(state, features);
12 | float * rois_flat = THCudaTensor_data(state, rois);
13 |
14 | float * output_flat = THCudaTensor_data(state, output);
15 | int * argmax_flat = THCudaIntTensor_data(state, argmax);
16 |
17 | // Number of ROIs
18 | int num_rois = THCudaTensor_size(state, rois, 0);
19 | int size_rois = THCudaTensor_size(state, rois, 1);
20 | if (size_rois != 5)
21 | {
22 | return 0;
23 | }
24 |
25 | // batch size
26 | // int batch_size = THCudaTensor_size(state, features, 0);
27 | // if (batch_size != 1)
28 | // {
29 | // return 0;
30 | // }
31 | // data height
32 | int data_height = THCudaTensor_size(state, features, 2);
33 | // data width
34 | int data_width = THCudaTensor_size(state, features, 3);
35 | // Number of channels
36 | int num_channels = THCudaTensor_size(state, features, 1);
37 |
38 | cudaStream_t stream = THCState_getCurrentStream(state);
39 |
40 | ROIPoolForwardLaucher(
41 | data_flat, spatial_scale, num_rois, data_height,
42 | data_width, num_channels, pooled_height,
43 | pooled_width, rois_flat,
44 | output_flat, argmax_flat, stream);
45 |
46 | return 1;
47 | }
48 |
49 | int roi_pooling_backward_cuda(int pooled_height, int pooled_width, float spatial_scale,
50 | THCudaTensor * top_grad, THCudaTensor * rois, THCudaTensor * bottom_grad, THCudaIntTensor * argmax)
51 | {
52 | // Grab the input tensor
53 | float * top_grad_flat = THCudaTensor_data(state, top_grad);
54 | float * rois_flat = THCudaTensor_data(state, rois);
55 |
56 | float * bottom_grad_flat = THCudaTensor_data(state, bottom_grad);
57 | int * argmax_flat = THCudaIntTensor_data(state, argmax);
58 |
59 | // Number of ROIs
60 | int num_rois = THCudaTensor_size(state, rois, 0);
61 | int size_rois = THCudaTensor_size(state, rois, 1);
62 | if (size_rois != 5)
63 | {
64 | return 0;
65 | }
66 |
67 | // batch size
68 | int batch_size = THCudaTensor_size(state, bottom_grad, 0);
69 | // if (batch_size != 1)
70 | // {
71 | // return 0;
72 | // }
73 | // data height
74 | int data_height = THCudaTensor_size(state, bottom_grad, 2);
75 | // data width
76 | int data_width = THCudaTensor_size(state, bottom_grad, 3);
77 | // Number of channels
78 | int num_channels = THCudaTensor_size(state, bottom_grad, 1);
79 |
80 | cudaStream_t stream = THCState_getCurrentStream(state);
81 | ROIPoolBackwardLaucher(
82 | top_grad_flat, spatial_scale, batch_size, num_rois, data_height,
83 | data_width, num_channels, pooled_height,
84 | pooled_width, rois_flat,
85 | bottom_grad_flat, argmax_flat, stream);
86 |
87 | return 1;
88 | }
89 |
--------------------------------------------------------------------------------
/lib/model/roi_pooling/src/roi_pooling_cuda.h:
--------------------------------------------------------------------------------
1 | int roi_pooling_forward_cuda(int pooled_height, int pooled_width, float spatial_scale,
2 | THCudaTensor * features, THCudaTensor * rois, THCudaTensor * output, THCudaIntTensor * argmax);
3 |
4 | int roi_pooling_backward_cuda(int pooled_height, int pooled_width, float spatial_scale,
5 | THCudaTensor * top_grad, THCudaTensor * rois, THCudaTensor * bottom_grad, THCudaIntTensor * argmax);
--------------------------------------------------------------------------------
/lib/model/roi_pooling/src/roi_pooling_kernel.h:
--------------------------------------------------------------------------------
1 | #ifndef _ROI_POOLING_KERNEL
2 | #define _ROI_POOLING_KERNEL
3 |
4 | #ifdef __cplusplus
5 | extern "C" {
6 | #endif
7 |
8 | int ROIPoolForwardLaucher(
9 | const float* bottom_data, const float spatial_scale, const int num_rois, const int height,
10 | const int width, const int channels, const int pooled_height,
11 | const int pooled_width, const float* bottom_rois,
12 | float* top_data, int* argmax_data, cudaStream_t stream);
13 |
14 |
15 | int ROIPoolBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois,
16 | const int height, const int width, const int channels, const int pooled_height,
17 | const int pooled_width, const float* bottom_rois,
18 | float* bottom_diff, const int* argmax_data, cudaStream_t stream);
19 |
20 | #ifdef __cplusplus
21 | }
22 | #endif
23 |
24 | #endif
25 |
26 |
--------------------------------------------------------------------------------
/lib/model/rpn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__init__.py
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/anchor_target_layer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/anchor_target_layer.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/bbox_transform.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/bbox_transform.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/generate_anchors.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/generate_anchors.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/proposal_layer.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/proposal_layer.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/proposal_target_layer_cascade.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/proposal_target_layer_cascade.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/__pycache__/rpn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/rpn/__pycache__/rpn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/rpn/generate_anchors.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import numpy as np
4 |
5 | # --------------------------------------------------------
6 | # Faster R-CNN
7 | # Copyright (c) 2015 Microsoft
8 | # Licensed under The MIT License [see LICENSE for details]
9 | # Written by Ross Girshick and Sean Bell
10 | # --------------------------------------------------------
11 |
12 |
13 | # Verify that we compute the same anchors as Shaoqing's matlab implementation:
14 | #
15 | # >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
16 | # >> anchors
17 | #
18 | # anchors =
19 | #
20 | # -83 -39 100 56
21 | # -175 -87 192 104
22 | # -359 -183 376 200
23 | # -55 -55 72 72
24 | # -119 -119 136 136
25 | # -247 -247 264 264
26 | # -35 -79 52 96
27 | # -79 -167 96 184
28 | # -167 -343 184 360
29 |
30 | # array([[ -83., -39., 100., 56.],
31 | # [-175., -87., 192., 104.],
32 | # [-359., -183., 376., 200.],
33 | # [ -55., -55., 72., 72.],
34 | # [-119., -119., 136., 136.],
35 | # [-247., -247., 264., 264.],
36 | # [ -35., -79., 52., 96.],
37 | # [ -79., -167., 96., 184.],
38 | # [-167., -343., 184., 360.]])
39 |
40 | try:
41 | xrange # Python 2
42 | except NameError:
43 | xrange = range # Python 3
44 |
45 |
46 | def generate_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6)):
47 | """
48 | Generate anchor (reference) windows by enumerating aspect ratios X
49 | scales wrt a reference (0, 0, 15, 15) window.
50 | """
51 |
52 | base_anchor = np.array([1, 1, base_size, base_size]) - 1
53 | ratio_anchors = _ratio_enum(base_anchor, ratios)
54 | anchors = np.vstack(
55 | [
56 | _scale_enum(ratio_anchors[i, :], scales)
57 | for i in xrange(ratio_anchors.shape[0])
58 | ]
59 | )
60 | return anchors
61 |
62 |
63 | def _whctrs(anchor):
64 | """
65 | Return width, height, x center, and y center for an anchor (window).
66 | """
67 |
68 | w = anchor[2] - anchor[0] + 1
69 | h = anchor[3] - anchor[1] + 1
70 | x_ctr = anchor[0] + 0.5 * (w - 1)
71 | y_ctr = anchor[1] + 0.5 * (h - 1)
72 | return w, h, x_ctr, y_ctr
73 |
74 |
75 | def _mkanchors(ws, hs, x_ctr, y_ctr):
76 | """
77 | Given a vector of widths (ws) and heights (hs) around a center
78 | (x_ctr, y_ctr), output a set of anchors (windows).
79 | """
80 |
81 | ws = ws[:, np.newaxis]
82 | hs = hs[:, np.newaxis]
83 | anchors = np.hstack(
84 | (
85 | x_ctr - 0.5 * (ws - 1),
86 | y_ctr - 0.5 * (hs - 1),
87 | x_ctr + 0.5 * (ws - 1),
88 | y_ctr + 0.5 * (hs - 1),
89 | )
90 | )
91 | return anchors
92 |
93 |
94 | def _ratio_enum(anchor, ratios):
95 | """
96 | Enumerate a set of anchors for each aspect ratio wrt an anchor.
97 | """
98 |
99 | w, h, x_ctr, y_ctr = _whctrs(anchor)
100 | size = w * h
101 | size_ratios = size / ratios
102 | ws = np.round(np.sqrt(size_ratios))
103 | hs = np.round(ws * ratios)
104 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
105 | return anchors
106 |
107 |
108 | def _scale_enum(anchor, scales):
109 | """
110 | Enumerate a set of anchors for each scale wrt an anchor.
111 | """
112 |
113 | w, h, x_ctr, y_ctr = _whctrs(anchor)
114 | ws = w * scales
115 | hs = h * scales
116 | anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
117 | return anchors
118 |
119 |
120 | if __name__ == "__main__":
121 | import time
122 |
123 | t = time.time()
124 | a = generate_anchors()
125 | print(time.time() - t)
126 | print(a)
127 | from IPython import embed
128 |
129 | embed()
130 |
--------------------------------------------------------------------------------
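The 9 base anchors above are then tiled across the feature map by proposal_layer.py; a worked sketch of that shift step (the 16-pixel stride is the usual FEAT_STRIDE for VGG16, an assumption here):

import numpy as np
from model.rpn.generate_anchors import generate_anchors

anchors = generate_anchors()                       # (9, 4) base anchors
feat_h, feat_w, stride = 4, 4, 16
shift_x, shift_y = np.meshgrid(np.arange(feat_w) * stride, np.arange(feat_h) * stride)
shifts = np.stack([shift_x.ravel(), shift_y.ravel(),
                   shift_x.ravel(), shift_y.ravel()], axis=1)             # (16, 4)
all_anchors = (anchors[None, :, :] + shifts[:, None, :]).reshape(-1, 4)   # (144, 4)
--------------------------------------------------------------------------------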
/lib/model/utils/.gitignore:
--------------------------------------------------------------------------------
1 | *.c
2 | *.cpp
3 | *.so
4 |
--------------------------------------------------------------------------------
/lib/model/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/utils/__init__.py
--------------------------------------------------------------------------------
/lib/model/utils/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/utils/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/utils/__pycache__/blob.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/utils/__pycache__/blob.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/utils/__pycache__/config.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/utils/__pycache__/config.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/utils/__pycache__/net_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/model/utils/__pycache__/net_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/model/utils/bbox.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Sergey Karayev
6 | # --------------------------------------------------------
7 |
8 | cimport cython
9 | import numpy as np
10 | cimport numpy as np
11 |
12 | DTYPE = np.float
13 | ctypedef np.float_t DTYPE_t
14 |
15 | def bbox_overlaps(np.ndarray[DTYPE_t, ndim=2] boxes,
16 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
17 | return bbox_overlaps_c(boxes, query_boxes)
18 |
19 | cdef np.ndarray[DTYPE_t, ndim=2] bbox_overlaps_c(
20 | np.ndarray[DTYPE_t, ndim=2] boxes,
21 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
22 | """
23 | Parameters
24 | ----------
25 | boxes: (N, 4) ndarray of float
26 | query_boxes: (K, 4) ndarray of float
27 | Returns
28 | -------
29 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes
30 | """
31 | cdef unsigned int N = boxes.shape[0]
32 | cdef unsigned int K = query_boxes.shape[0]
33 | cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
34 | cdef DTYPE_t iw, ih, box_area
35 | cdef DTYPE_t ua
36 | cdef unsigned int k, n
37 | for k in range(K):
38 | box_area = (
39 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
40 | (query_boxes[k, 3] - query_boxes[k, 1] + 1)
41 | )
42 | for n in range(N):
43 | iw = (
44 | min(boxes[n, 2], query_boxes[k, 2]) -
45 | max(boxes[n, 0], query_boxes[k, 0]) + 1
46 | )
47 | if iw > 0:
48 | ih = (
49 | min(boxes[n, 3], query_boxes[k, 3]) -
50 | max(boxes[n, 1], query_boxes[k, 1]) + 1
51 | )
52 | if ih > 0:
53 | ua = float(
54 | (boxes[n, 2] - boxes[n, 0] + 1) *
55 | (boxes[n, 3] - boxes[n, 1] + 1) +
56 | box_area - iw * ih
57 | )
58 | overlaps[n, k] = iw * ih / ua
59 | return overlaps
60 |
61 |
62 | def bbox_intersections(
63 | np.ndarray[DTYPE_t, ndim=2] boxes,
64 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
65 | return bbox_intersections_c(boxes, query_boxes)
66 |
67 |
68 | cdef np.ndarray[DTYPE_t, ndim=2] bbox_intersections_c(
69 | np.ndarray[DTYPE_t, ndim=2] boxes,
70 | np.ndarray[DTYPE_t, ndim=2] query_boxes):
71 | """
72 | For each query box compute the intersection ratio covered by boxes
73 |
74 | Parameters
75 | ----------
76 | boxes: (N, 4) ndarray of float
77 | query_boxes: (K, 4) ndarray of float
78 | Returns
79 | -------
80 | intersec: (N, K) ndarray of intersection ratios between boxes and query_boxes
81 | """
82 | cdef unsigned int N = boxes.shape[0]
83 | cdef unsigned int K = query_boxes.shape[0]
84 | cdef np.ndarray[DTYPE_t, ndim=2] intersec = np.zeros((N, K), dtype=DTYPE)
85 | cdef DTYPE_t iw, ih, box_area
86 | cdef DTYPE_t ua
87 | cdef unsigned int k, n
88 | for k in range(K):
89 | box_area = (
90 | (query_boxes[k, 2] - query_boxes[k, 0] + 1) *
91 | (query_boxes[k, 3] - query_boxes[k, 1] + 1)
92 | )
93 | for n in range(N):
94 | iw = (
95 | min(boxes[n, 2], query_boxes[k, 2]) -
96 | max(boxes[n, 0], query_boxes[k, 0]) + 1
97 | )
98 | if iw > 0:
99 | ih = (
100 | min(boxes[n, 3], query_boxes[k, 3]) -
101 | max(boxes[n, 1], query_boxes[k, 1]) + 1
102 | )
103 | if ih > 0:
104 | intersec[n, k] = iw * ih / box_area
105 | return intersec
--------------------------------------------------------------------------------
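A worked example of the inclusive +1 pixel convention these functions use (plain NumPy; the compiled extension call is shown only as a comment, since the generated .c/.so files are gitignored per the .gitignore above):

import numpy as np

# two 10x10 boxes overlapping in a 5x5 corner:
# intersection = 5 * 5 = 25, union = 100 + 100 - 25 = 175, IoU = 25 / 175 ~= 0.1429
boxes = np.array([[0.0, 0.0, 9.0, 9.0]])
query = np.array([[5.0, 5.0, 14.0, 14.0]])
# bbox_overlaps(boxes, query) -> [[0.1428...]] once the extension is built
--------------------------------------------------------------------------------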
/lib/model/utils/blob.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
8 | """Blob helper functions."""
9 |
10 | # from scipy.misc import imread, imresize
11 | import cv2
12 | import numpy as np
13 |
14 | try:
15 | xrange # Python 2
16 | except NameError:
17 | xrange = range # Python 3
18 |
19 |
20 | def im_list_to_blob(ims):
21 | """Convert a list of images into a network input.
22 |
23 | Assumes images are already prepared (means subtracted, BGR order, ...).
24 | """
25 | max_shape = np.array([im.shape for im in ims]).max(axis=0)
26 | num_images = len(ims)
27 | blob = np.zeros((num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)
28 | for i in xrange(num_images):
29 | im = ims[i]
30 | blob[i, 0 : im.shape[0], 0 : im.shape[1], :] = im
31 |
32 | return blob
33 |
34 |
35 | def prep_im_for_blob(im, pixel_means, target_size, max_size):
36 | """Mean subtract and scale an image for use in a blob."""
37 |
38 | im = im.astype(np.float32, copy=False)
39 | im -= pixel_means
40 | # im = im[:, :, ::-1]
41 | im_shape = im.shape
42 | im_size_min = np.min(im_shape[0:2])
43 | im_size_max = np.max(im_shape[0:2])
44 | im_scale = float(target_size) / float(im_size_min)
45 | # Prevent the biggest axis from being more than MAX_SIZE
46 | # if np.round(im_scale * im_size_max) > max_size:
47 | # im_scale = float(max_size) / float(im_size_max)
48 | # im = imresize(im, im_scale)
49 | im = cv2.resize(
50 | im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
51 | )
52 |
53 | return im, im_scale
54 |
--------------------------------------------------------------------------------
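A sketch of the prep pipeline these helpers implement (editorial example; the pixel means are the usual BGR defaults of this codebase family, an assumption here, and note that the MAX_SIZE clamp is commented out in this copy, so max_size is effectively unused):

import numpy as np
from model.utils.blob import im_list_to_blob, prep_im_for_blob

im = np.zeros((480, 640, 3), dtype=np.float32)      # stand-in BGR image
pixel_means = np.array([[[102.9801, 115.9465, 122.7717]]])
im, im_scale = prep_im_for_blob(im, pixel_means, target_size=600, max_size=1000)
blob = im_list_to_blob([im])                        # (1, H, W, 3), zero-padded
--------------------------------------------------------------------------------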
/lib/model/utils/logger.py:
--------------------------------------------------------------------------------
1 | # Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
2 | import numpy as np
3 | import scipy.misc
4 | import tensorflow as tf
5 |
6 | try:
7 | from StringIO import StringIO # Python 2.7
8 | except ImportError:
9 | from io import BytesIO # Python 3.x
10 |
11 |
12 | class Logger(object):
13 | def __init__(self, log_dir):
14 | """Create a summary writer logging to log_dir."""
15 | self.writer = tf.summary.FileWriter(log_dir)
16 |
17 | def scalar_summary(self, tag, value, step):
18 | """Log a scalar variable."""
19 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
20 | self.writer.add_summary(summary, step)
21 |
22 | def image_summary(self, tag, images, step):
23 | """Log a list of images."""
24 |
25 | img_summaries = []
26 | for i, img in enumerate(images):
27 | # Write the image to a string
28 | try:
29 | s = StringIO()
30 | except NameError:  # Python 3: StringIO was never imported
31 | s = BytesIO()
32 | scipy.misc.toimage(img).save(s, format="png")
33 |
34 | # Create an Image object
35 | img_sum = tf.Summary.Image(
36 | encoded_image_string=s.getvalue(),
37 | height=img.shape[0],
38 | width=img.shape[1],
39 | )
40 | # Create a Summary value
41 | img_summaries.append(
42 | tf.Summary.Value(tag="%s/%d" % (tag, i), image=img_sum)
43 | )
44 |
45 | # Create and write Summary
46 | summary = tf.Summary(value=img_summaries)
47 | self.writer.add_summary(summary, step)
48 |
49 | def histo_summary(self, tag, values, step, bins=1000):
50 | """Log a histogram of the tensor of values."""
51 |
52 | # Create a histogram using numpy
53 | counts, bin_edges = np.histogram(values, bins=bins)
54 |
55 | # Fill the fields of the histogram proto
56 | hist = tf.HistogramProto()
57 | hist.min = float(np.min(values))
58 | hist.max = float(np.max(values))
59 | hist.num = int(np.prod(values.shape))
60 | hist.sum = float(np.sum(values))
61 | hist.sum_squares = float(np.sum(values ** 2))
62 |
63 | # Drop the start of the first bin
64 | bin_edges = bin_edges[1:]
65 |
66 | # Add bin edges and counts
67 | for edge in bin_edges:
68 | hist.bucket_limit.append(edge)
69 | for c in counts:
70 | hist.bucket.append(c)
71 |
72 | # Create and write Summary
73 | summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
74 | self.writer.add_summary(summary, step)
75 | self.writer.flush()
76 |
--------------------------------------------------------------------------------
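Usage sketch for the Logger (it assumes TensorFlow 1.x, since tf.summary.FileWriter and tf.Summary were removed in TF2, and scipy.misc.toimage, which is gone from modern SciPy):

from model.utils.logger import Logger

logger = Logger("./logs")
for step in range(100):
    logger.scalar_summary("train/loss", 1.0 / (step + 1), step)
--------------------------------------------------------------------------------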
/lib/msda/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__init__.py
--------------------------------------------------------------------------------
/lib/msda/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/msda/__pycache__/faster_rcnn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__pycache__/faster_rcnn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/msda/__pycache__/rpn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__pycache__/rpn.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/msda/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/msda/__pycache__/vgg16.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/msda/__pycache__/vgg16.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/msda/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def get_max_iou(pred_bboxes, gt_bbox):
4 | '''
5 | :param pred_bboxes: (N, 5) tensor; box coordinates [x1, y1, x2, y2] occupy columns 1-4 (column 0 is ignored here)
6 | :param gt_bbox: 5-element tensor with the same layout, coordinates at indices 1-4
7 | :return: (ious, max_iou, max_index) of the predicted boxes against gt_bbox
8 | '''
9 | ixmin = torch.max(pred_bboxes[:, 1], gt_bbox[1])
10 | iymin = torch.max(pred_bboxes[:, 2], gt_bbox[2])
11 | ixmax = torch.min(pred_bboxes[:, 3], gt_bbox[3])
12 | iymax = torch.min(pred_bboxes[:, 4], gt_bbox[4])
13 |
14 | iws = torch.max(ixmax - ixmin + 1.0, torch.tensor(0.).cuda())
15 | ihs = torch.max(iymax - iymin + 1.0, torch.tensor(0.).cuda())
16 |
17 | inters = iws * ihs
18 |
19 | unis = (pred_bboxes[:, 3] - pred_bboxes[:, 1] + 1.0) * (pred_bboxes[:, 4] - pred_bboxes[:, 2] + 1.0) + (
20 | gt_bbox[3] - gt_bbox[1] + 1.0) * (gt_bbox[4] - gt_bbox[2] + 1.0) - inters
21 |
22 | ious = inters / unis
23 | max_iou = torch.max(ious)
24 | max_index = torch.argmax(ious)
25 |
26 | return ious, max_iou, max_index
27 |
28 | def consist_loss(pred, pred_ema):
29 | loss = 0
30 | pred = pred[0, :256, :]
31 | pred_ema = pred_ema[0, :256, :]
32 |
33 | for idx, gt in enumerate(pred_ema): # pop best?
34 | ious, max_iou, max_index = get_max_iou(pred,gt)
35 | loss += abs(max_index - idx) * max_iou
36 | loss /= len(pred_ema)
37 | return loss
38 |
39 |
40 |
41 | class WeightEMA(object):
42 | """
43 | Exponential moving average weight optimizer for the mean-teacher model.
44 | """
45 | def __init__(self, params, src_params, alpha=0.999):
46 | self.params = list(params)
47 | self.src_params = list(src_params)
48 | self.alpha = alpha
49 |
50 | for p, src_p in zip(self.params, self.src_params):
51 | p.data[:] = src_p.data[:]
52 |
53 | # the decorator belongs on step(): applied to the class, torch.no_grad()
54 | # only wrapped construction and left the update itself unguarded
55 | @torch.no_grad()
56 | def step(self):
57 | one_minus_alpha = 1.0 - self.alpha
58 | for p, src_p in zip(self.params, self.src_params):
59 | p.data.mul_(self.alpha)
60 | p.data.add_(src_p.data * one_minus_alpha)
61 |
--------------------------------------------------------------------------------
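A mean-teacher sketch of how WeightEMA is meant to be driven (editorial example; `student`, `teacher`, `loss`, and `optimizer` are placeholders, and the two models are assumed architecturally identical):

from msda.utils import WeightEMA

teacher_opt = WeightEMA(teacher.parameters(), student.parameters(), alpha=0.999)

loss.backward()
optimizer.step()     # ordinary gradient step on the student
teacher_opt.step()   # then teacher <- 0.999 * teacher + 0.001 * student
--------------------------------------------------------------------------------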
/lib/roi_da_data_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # author : tiancity-NJU
3 | # --------------------------------------------------------
4 |
--------------------------------------------------------------------------------
/lib/roi_da_data_layer/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_da_data_layer/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_da_data_layer/__pycache__/minibatch.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_da_data_layer/__pycache__/minibatch.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_da_data_layer/__pycache__/roibatchLoader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_da_data_layer/__pycache__/roibatchLoader.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_da_data_layer/__pycache__/roidb.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_da_data_layer/__pycache__/roidb.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_data_layer/__init__.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick
6 | # --------------------------------------------------------
7 |
--------------------------------------------------------------------------------
/lib/roi_data_layer/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_data_layer/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_data_layer/__pycache__/minibatch.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_data_layer/__pycache__/minibatch.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_data_layer/__pycache__/roibatchLoader.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_data_layer/__pycache__/roibatchLoader.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_data_layer/__pycache__/roidb.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/lib/roi_data_layer/__pycache__/roidb.cpython-36.pyc
--------------------------------------------------------------------------------
/lib/roi_data_layer/minibatch.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Ross Girshick and Xinlei Chen
6 | # --------------------------------------------------------
7 |
8 | """Compute minibatch blobs for training a Fast R-CNN network."""
9 | from __future__ import absolute_import, division, print_function
10 |
11 | # from scipy.misc import imread
12 | import cv2
13 | import numpy as np
14 | import numpy.random as npr
15 | from model.utils.blob import im_list_to_blob, prep_im_for_blob
16 | from model.utils.config import cfg
17 |
18 |
19 | def get_minibatch(roidb, num_classes):
20 | """Given a roidb, construct a minibatch sampled from it."""
21 | num_images = len(roidb)
22 | # Sample random scales to use for each image in this batch
23 | random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images)
24 | assert (
25 | cfg.TRAIN.BATCH_SIZE % num_images == 0
26 | ), "num_images ({}) must divide BATCH_SIZE ({})".format(
27 | num_images, cfg.TRAIN.BATCH_SIZE
28 | )
29 |
30 | # Get the input image blob, formatted for caffe
31 | im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
32 |
33 | blobs = {"data": im_blob}
34 |
35 | assert len(im_scales) == 1, "Single batch only"
36 | assert len(roidb) == 1, "Single batch only"
37 |
38 | # gt boxes: (x1, y1, x2, y2, cls)
39 | if cfg.TRAIN.USE_ALL_GT:
40 | # Include all ground truth boxes
41 | gt_inds = np.where(roidb[0]["gt_classes"] != 0)[0]
42 | else:
43 | # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''
44 | gt_inds = np.where(
45 | (roidb[0]["gt_classes"] != 0)
46 | & np.all(roidb[0]["gt_overlaps"].toarray() > -1.0, axis=1)
47 | )[0]
48 | gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
49 | gt_boxes[:, 0:4] = roidb[0]["boxes"][gt_inds, :] * im_scales[0]
50 | gt_boxes[:, 4] = roidb[0]["gt_classes"][gt_inds]
51 | blobs["gt_boxes"] = gt_boxes
52 | blobs["im_info"] = np.array(
53 | [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32
54 | )
55 |
56 | blobs["img_id"] = roidb[0]["img_id"]
57 |
58 | return blobs
59 |
60 |
61 | def _get_image_blob(roidb, scale_inds):
62 | """Builds an input blob from the images in the roidb at the specified
63 | scales.
64 | """
65 | num_images = len(roidb)
66 |
67 | processed_ims = []
68 | im_scales = []
69 | for i in range(num_images):
70 | im = cv2.imread(roidb[i]["image"])
71 | # im = imread(roidb[i]["image"])
72 |
73 | if len(im.shape) == 2:
74 | im = im[:, :, np.newaxis]
75 | im = np.concatenate((im, im, im), axis=2)
76 | # flip the channel order, since this copy loads images with cv2
77 | # bgr -> rgb
78 | im = im[:, :, ::-1]
79 |
80 | if roidb[i]["flipped"]:
81 | im = im[:, ::-1, :]
82 | target_size = cfg.TRAIN.SCALES[scale_inds[i]]
83 | im, im_scale = prep_im_for_blob(
84 | im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE
85 | )
86 | im_scales.append(im_scale)
87 | processed_ims.append(im)
88 |
89 | # Create a blob to hold the input images
90 | blob = im_list_to_blob(processed_ims)
91 |
92 | return blob, im_scales
93 |
--------------------------------------------------------------------------------
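What get_minibatch returns for a single roidb entry (hedged sketch; `roidb_entry` is a placeholder for one cached roidb dict):

from roi_data_layer.minibatch import get_minibatch

blobs = get_minibatch([roidb_entry], num_classes=9)
blobs["data"]       # (1, H, W, 3) image blob, mean-subtracted and scaled
blobs["gt_boxes"]   # (G, 5): [x1, y1, x2, y2, cls], already multiplied by im_scale
blobs["im_info"]    # [[H, W, im_scale]]
blobs["img_id"]     # passed through from the roidb
--------------------------------------------------------------------------------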
/lib/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 | #!/usr/bin/env python
3 |
4 | import glob
5 | import os
6 |
7 | from setuptools import find_packages, setup
8 |
9 | import torch
10 | from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
11 |
12 | requirements = ["torch", "torchvision"]
13 |
14 |
15 | def get_extensions():
16 | this_dir = os.path.dirname(os.path.abspath(__file__))
17 | extensions_dir = os.path.join(this_dir, "model", "csrc")
18 |
19 | main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
20 | source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
21 | source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
22 |
23 | sources = main_file + source_cpu
24 | extension = CppExtension
25 |
26 | extra_compile_args = {"cxx": []}
27 | define_macros = []
28 |
29 | if torch.cuda.is_available() and CUDA_HOME is not None:
30 | extension = CUDAExtension
31 | sources += source_cuda
32 | define_macros += [("WITH_CUDA", None)]
33 | extra_compile_args["nvcc"] = [
34 | "-DCUDA_HAS_FP16=1",
35 | "-D__CUDA_NO_HALF_OPERATORS__",
36 | "-D__CUDA_NO_HALF_CONVERSIONS__",
37 | "-D__CUDA_NO_HALF2_OPERATORS__",
38 | ]
39 |
40 | sources = [os.path.join(extensions_dir, s) for s in sources]
41 |
42 | include_dirs = [extensions_dir]
43 |
44 | ext_modules = [
45 | extension(
46 | "model._C",
47 | sources,
48 | include_dirs=include_dirs,
49 | define_macros=define_macros,
50 | extra_compile_args=extra_compile_args,
51 | )
52 | ]
53 |
54 | return ext_modules
55 |
56 |
57 | setup(
58 | name="faster_rcnn",
59 | version="0.1",
60 | description="object detection in pytorch",
61 | packages=find_packages(exclude=("configs", "tests")),
62 | # install_requires=requirements,
63 | ext_modules=get_extensions(),
64 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
65 | )
66 |
--------------------------------------------------------------------------------
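Once the extension is built (commonly by running `python setup.py build develop` inside lib/, though the exact invocation is repo-specific), the compiled module is what roi_layers/ imports above:

from model import _C

# exposes _C.nms, _C.roi_align_forward / _C.roi_align_backward and
# _C.roi_pool_forward / _C.roi_pool_backward
--------------------------------------------------------------------------------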
/nohup.out:
--------------------------------------------------------------------------------
1 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/utils/config.py:377: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
2 | yaml_cfg = edict(yaml.load(f))
3 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/utils/net_utils.py:376: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
4 | P = F.softmax(inputs)
5 | Traceback (most recent call last):
6 | File "train_pseudo.py", line 629, in
7 | pseudo_label, pseudo_num = make_pseudo(fasterRCNN, data_t, im_data, im_info, gt_boxes, num_boxes, confidence_score = args.cf_score)
8 | NameError: name 'make_pseudo' is not defined
9 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/utils/config.py:377: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
10 | yaml_cfg = edict(yaml.load(f))
11 | /media/hdd1/CR-DA-DET/SW_Faster_ICR_CCR/lib/model/utils/net_utils.py:376: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.
12 | P = F.softmax(inputs)
13 | Traceback (most recent call last):
14 | File "train_pseudo.py", line 629, in
15 | pseudo_label, pseudo_num = make_pseudo(fasterRCNN, data_t, im_data, im_info, gt_boxes, num_boxes, confidence_score = args.cf_score)
16 | NameError: name 'make_pseudo' is not defined
17 |
--------------------------------------------------------------------------------
/predict_all_boxes.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/predict_all_boxes.pkl
--------------------------------------------------------------------------------
/result/bicycle_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/bicycle_pr.pkl
--------------------------------------------------------------------------------
/result/bus_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/bus_pr.pkl
--------------------------------------------------------------------------------
/result/car_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/car_pr.pkl
--------------------------------------------------------------------------------
/result/eval_result.txt:
--------------------------------------------------------------------------------
1 | 6
2 | 7
3 | 8
4 | 6
5 | 7
6 | 6
7 | 8
8 | 7
9 | 9
10 | 8
11 | 10
12 | 9
13 | 11
14 | 12
15 | 10
16 | 11
17 | 12
18 | 6
19 | 6
20 | 7
21 | 7
22 | 8
23 | 8
24 | 9
25 | 9
26 | 10
27 | 10
28 | 11
29 | 11
30 | 12
31 | 12
32 | 6
33 | 7
34 | 6
35 | 6
36 | 7
37 | 7
38 | 8
39 | 8
40 | 9
41 | 9
42 | 10
43 | 10
44 | 11
45 | 11
46 | 12
47 | 12
48 | 6
49 | 7
50 | 8
51 | 9
52 | 10
53 | 11
54 | 12
55 | 6
56 | 7
57 | 8
58 | 7
59 | 8
60 | 9
61 |
--------------------------------------------------------------------------------
/result/motorcycle_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/motorcycle_pr.pkl
--------------------------------------------------------------------------------
/result/person_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/person_pr.pkl
--------------------------------------------------------------------------------
/result/rider_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/rider_pr.pkl
--------------------------------------------------------------------------------
/result/train_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/train_pr.pkl
--------------------------------------------------------------------------------
/result/truck_pr.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jh-Han777/Multi_Source_Domain_Adaptation_for_Object_Detection/3828089d7361cd51ef58bfa8a95a48e917db4c98/result/truck_pr.pkl
--------------------------------------------------------------------------------
/test_msda.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | net = "vgg16"
4 | part = "test_t"
5 | start_epoch = 7
6 | max_epochs = 12
7 | output_dir = "result/"
8 | dataset = "mskda_bdd"
9 |
10 | def parse_args():
11 |     """
12 |     Parse input arguments
13 |     """
14 |     parser = argparse.ArgumentParser(description="Test a Fast R-CNN network")
15 |     parser.add_argument(
16 |         "--model_prefix",
17 |         dest="model_prefix",
18 |         help="path prefix of the trained model checkpoints",
19 |         default=" ",
20 |         type=str,
21 |     )
22 |     parser.add_argument(
23 |         "--net", dest="net", help="vgg16, res101", default="vgg16", type=str
24 |     )
25 | 
26 |     parser.add_argument(
27 |         "--gpus", dest="gpus", help="gpu number", default="3", type=str
28 |     )
29 | 
30 |     args = parser.parse_args()
31 |     return args
32 | 
33 | args = parse_args()
34 | net = args.net
35 | os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
36 | 
37 | for i in range(start_epoch, max_epochs + 1):
38 |     model_dir = args.model_prefix + "mskda_bdd_{}.pth".format(
39 |         i
40 |     )
41 |     command = "python eval/test_msda.py --cuda --gc --lc --vis --part {} --net {} --dataset {} --model_dir {} --output_dir {} --num_epoch {}".format(
42 |         part, net, dataset, model_dir, output_dir, i
43 |     )
44 |     os.system(command)
45 |
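
os.system() returns a status code that the loop above never checks, so a failing evaluation run is silently skipped. A sketch of the same sweep using subprocess.run instead (an alternative, not what the script ships), reusing the variables the script already defines (args, part, net, dataset, output_dir, start_epoch, max_epochs):

    import subprocess

    for i in range(start_epoch, max_epochs + 1):
        model_dir = args.model_prefix + "mskda_bdd_{}.pth".format(i)
        # pass an argument list, not a shell string, so paths with spaces
        # or shell metacharacters cannot break the command
        subprocess.run(
            [
                "python", "eval/test_msda.py", "--cuda", "--gc", "--lc",
                "--vis", "--part", part, "--net", net, "--dataset", dataset,
                "--model_dir", model_dir, "--output_dir", output_dir,
                "--num_epoch", str(i),
            ],
            check=True,  # raise CalledProcessError on the first bad epoch
        )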
--------------------------------------------------------------------------------
/train_msda.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | save_dir="/media/hdd1/etri/save_model/train_adap_sw"
3 | dataset="mskda_bdd"
4 | net="vgg16"
5 | pretrained_path="/media/hdd1/paper/CR-DA-DET/SW_Faster_ICR_CCR/pre_trained_model/vgg16_caffe.pth"
6 | max_epoch=20
7 | burn_in=10
8 |
9 | CUDA_VISIBLE_DEVICES=2 python train_msda.py --cuda --dataset ${dataset} \
10 |     --net ${net} --save_dir ${save_dir} --pretrained_path ${pretrained_path} --max_epoch ${max_epoch} --burn_in ${burn_in}
11 | #>result_msda.txt 2>&1
12 |
13 | #13849
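
The same launch can also be driven from Python, mirroring the style of test_msda.py above. A sketch under the assumption that train_msda.py accepts exactly the flags this shell script passes:

    import os
    import subprocess

    env = dict(os.environ, CUDA_VISIBLE_DEVICES="2")  # same GPU pinning
    subprocess.run(
        [
            "python", "train_msda.py", "--cuda",
            "--dataset", "mskda_bdd", "--net", "vgg16",
            "--save_dir", "/media/hdd1/etri/save_model/train_adap_sw",
            "--pretrained_path",
            "/media/hdd1/paper/CR-DA-DET/SW_Faster_ICR_CCR/pre_trained_model/vgg16_caffe.pth",
            "--max_epoch", "20", "--burn_in", "10",
        ],
        env=env,
        check=True,  # surface a non-zero exit instead of ignoring it
    )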
--------------------------------------------------------------------------------