├── R2CNN_HEAD_FPN_Tensorflow
├── __init__.py
├── data
│ ├── __init__.py
│ ├── io
│ │ ├── __init__.py
│ │ ├── __init__.pyc
│ │ ├── read_tfrecord.pyc
│ │ ├── image_preprocess.pyc
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── read_tfrecord.cpython-35.pyc
│ │ │ ├── image_preprocess.cpython-35.pyc
│ │ │ └── image_preprocess.cpython-36.pyc
│ │ ├── tif2jpg.py
│ │ ├── divide_data.py
│ │ ├── read_tfrecord.py
│ │ ├── convert_data_to_tfrecord.py
│ │ └── image_preprocess.py
│ ├── __init__.pyc
│ ├── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ └── __init__.cpython-36.pyc
│ ├── pretrained_weights
│ │ └── README.md
│ └── tfrecords
│ │ └── README.md
├── libs
│ ├── __init__.py
│ ├── rpn
│ │ ├── __init__.py
│ │ ├── __init__.pyc
│ │ ├── build_rpn.pyc
│ │ └── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── build_rpn.cpython-35.pyc
│ │ │ └── build_rpn.cpython-36.pyc
│ ├── box_utils
│ │ ├── __init__.py
│ │ ├── 1.jpg
│ │ ├── iou.pyc
│ │ ├── nms.pyc
│ │ ├── __init__.pyc
│ │ ├── iou_cpu.so
│ │ ├── iou_rotate.pyc
│ │ ├── nms_rotate.pyc
│ │ ├── boxes_utils.pyc
│ │ ├── make_anchor.pyc
│ │ ├── rbbox_overlaps.hpp
│ │ ├── rbbox_overlaps.so
│ │ ├── coordinate_convert.pyc
│ │ ├── encode_and_decode.pyc
│ │ ├── rotate_polygon_nms.so
│ │ ├── show_box_in_tensor.pyc
│ │ ├── anchor_utils_pyfunc.pyc
│ │ ├── __pycache__
│ │ │ ├── iou.cpython-35.pyc
│ │ │ ├── iou.cpython-36.pyc
│ │ │ ├── nms.cpython-35.pyc
│ │ │ ├── nms.cpython-36.pyc
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── nms_rotate.cpython-35.pyc
│ │ │ ├── nms_rotate.cpython-36.pyc
│ │ │ ├── boxes_utils.cpython-35.pyc
│ │ │ ├── boxes_utils.cpython-36.pyc
│ │ │ ├── make_anchor.cpython-35.pyc
│ │ │ ├── make_anchor.cpython-36.pyc
│ │ │ ├── visualization.cpython-36.pyc
│ │ │ ├── encode_and_decode.cpython-35.pyc
│ │ │ ├── encode_and_decode.cpython-36.pyc
│ │ │ ├── anchor_utils_pyfunc.cpython-35.pyc
│ │ │ ├── anchor_utils_pyfunc.cpython-36.pyc
│ │ │ ├── coordinate_convert.cpython-35.pyc
│ │ │ ├── coordinate_convert.cpython-36.pyc
│ │ │ ├── show_box_in_tensor.cpython-35.pyc
│ │ │ └── show_box_in_tensor.cpython-36.pyc
│ │ ├── build
│ │ │ └── temp.linux-x86_64-3.6
│ │ │ │ ├── iou_cpu.o
│ │ │ │ ├── rbbox_overlaps.o
│ │ │ │ ├── rotate_polygon_nms.o
│ │ │ │ ├── rbbox_overlaps_kernel.o
│ │ │ │ └── rotate_polygon_nms_kernel.o
│ │ ├── iou_cpu.cpython-36m-x86_64-linux-gnu.so
│ │ ├── rotate_gpu_nms.hpp
│ │ ├── rotate_polygon_nms.hpp
│ │ ├── rbbox_overlaps.cpython-36m-x86_64-linux-gnu.so
│ │ ├── rotate_polygon_nms.cpython-36m-x86_64-linux-gnu.so
│ │ ├── nms.py
│ │ ├── rbbox_overlaps.pyx
│ │ ├── rotate_polygon_nms.pyx
│ │ ├── iou.py
│ │ ├── visualization.py
│ │ ├── iou_cpu.pyx
│ │ ├── iou_rotate.py
│ │ ├── coordinate_convert.py
│ │ ├── make_anchor.py
│ │ ├── anchor_utils_pyfunc.py
│ │ ├── nms_rotate.py
│ │ └── boxes_utils.py
│ ├── configs
│ │ ├── __init__.py
│ │ ├── cfgs.pyc
│ │ ├── __init__.pyc
│ │ ├── __pycache__
│ │ │ ├── cfgs.cpython-35.pyc
│ │ │ ├── cfgs.cpython-36.pyc
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ └── __init__.cpython-36.pyc
│ │ ├── v1_ship_head.py
│ │ ├── v1_UAV_rotate.py
│ │ └── cfgs.py
│ ├── fast_rcnn
│ │ ├── __init__.py
│ │ ├── __init__.pyc
│ │ └── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── build_fast_rcnn.cpython-35.pyc
│ │ │ └── build_fast_rcnn.cpython-36.pyc
│ ├── losses
│ │ ├── __init__.py
│ │ ├── losses.pyc
│ │ ├── __init__.pyc
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── losses.cpython-35.pyc
│ │ │ └── losses.cpython-36.pyc
│ │ └── losses.py
│ ├── networks
│ │ ├── __init__.py
│ │ ├── nets
│ │ │ └── __init__.py
│ │ ├── slim_nets
│ │ │ ├── __init__.py
│ │ │ ├── vgg.pyc
│ │ │ ├── __init__.pyc
│ │ │ ├── resnet_v1.pyc
│ │ │ ├── mobilenet_v1.png
│ │ │ ├── mobilenet_v1.pyc
│ │ │ ├── resnet_utils.pyc
│ │ │ ├── inception_resnet_v2.pyc
│ │ │ ├── __pycache__
│ │ │ │ ├── vgg.cpython-35.pyc
│ │ │ │ ├── vgg.cpython-36.pyc
│ │ │ │ ├── __init__.cpython-35.pyc
│ │ │ │ ├── __init__.cpython-36.pyc
│ │ │ │ ├── resnet_v1.cpython-35.pyc
│ │ │ │ ├── resnet_v1.cpython-36.pyc
│ │ │ │ ├── mobilenet_v1.cpython-35.pyc
│ │ │ │ ├── mobilenet_v1.cpython-36.pyc
│ │ │ │ ├── resnet_utils.cpython-35.pyc
│ │ │ │ ├── resnet_utils.cpython-36.pyc
│ │ │ │ ├── inception_resnet_v2.cpython-35.pyc
│ │ │ │ └── inception_resnet_v2.cpython-36.pyc
│ │ │ ├── inception.py
│ │ │ ├── nets_factory_test.py
│ │ │ ├── inception_utils.py
│ │ │ ├── lenet.py
│ │ │ ├── mobilenet_v1.md
│ │ │ ├── cifarnet.py
│ │ │ ├── nets_factory.py
│ │ │ ├── overfeat.py
│ │ │ ├── alexnet.py
│ │ │ ├── overfeat_test.py
│ │ │ └── alexnet_test.py
│ │ ├── __init__.pyc
│ │ ├── network_factory.pyc
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── network_factory.cpython-35.pyc
│ │ │ └── network_factory.cpython-36.pyc
│ │ └── network_factory.py
│ ├── label_name_dict
│ │ ├── __init__.py
│ │ ├── __init__.pyc
│ │ ├── __pycache__
│ │ │ ├── __init__.cpython-35.pyc
│ │ │ ├── __init__.cpython-36.pyc
│ │ │ ├── label_dict.cpython-35.pyc
│ │ │ └── label_dict.cpython-36.pyc
│ │ └── label_dict.py
│ ├── __init__.pyc
│ └── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ └── __init__.cpython-36.pyc
├── tools
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ └── restore_model.cpython-35.pyc
│ ├── restore_model.py
│ └── test_rotate.py
├── configs
│ ├── __init__.py
│ ├── __init__.pyc
│ ├── config_vgg16.pyc
│ ├── config_res101.pyc
│ ├── config_resnet_50.pyc
│ ├── config_inception_resnet.pyc
│ ├── __pycache__
│ │ ├── __init__.cpython-35.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── config_res101.cpython-35.pyc
│ │ ├── config_res101.cpython-36.pyc
│ │ └── config_resnet_50.cpython-35.pyc
│ ├── config_resnet_50.py
│ ├── config_res101.py
│ ├── config_vgg16.py
│ └── config_inception_resnet.py
├── help_utils
│ ├── __init__.py
│ ├── tools.pyc
│ ├── __init__.pyc
│ ├── help_utils.pyc
│ ├── __pycache__
│ │ ├── tools.cpython-35.pyc
│ │ ├── tools.cpython-36.pyc
│ │ ├── __init__.cpython-35.pyc
│ │ ├── __init__.cpython-36.pyc
│ │ ├── help_utils.cpython-35.pyc
│ │ └── help_utils.cpython-36.pyc
│ └── tools.py
├── HEAD
├── config
├── description
├── .directory
├── graph.png
├── sample.tif
├── output
│ └── res101_summary
│ │ ├── rpn_loss.bmp
│ │ ├── total_loss.bmp
│ │ ├── fast_rcnn_loss.bmp
│ │ └── README.md
├── .idea
│ ├── vcs.xml
│ ├── misc.xml
│ ├── modules.xml
│ ├── deployment.xml
│ ├── FPN_v1.iml
│ └── webServers.xml
├── hooks
│ ├── post-update.sample
│ ├── pre-applypatch.sample
│ ├── applypatch-msg.sample
│ ├── commit-msg.sample
│ ├── prepare-commit-msg.sample
│ ├── pre-push.sample
│ ├── pre-commit.sample
│ ├── update.sample
│ └── pre-rebase.sample
├── info
│ └── exclude
├── sh.exe.stackdump
├── sample.xml
└── README.md
└── README.md
/R2CNN_HEAD_FPN_Tensorflow/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/tools/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/HEAD:
--------------------------------------------------------------------------------
1 | ref: refs/heads/master
2 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/nets/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/config:
--------------------------------------------------------------------------------
1 | [core]
2 | repositoryformatversion = 0
3 | filemode = true
4 | bare = true
5 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/description:
--------------------------------------------------------------------------------
1 | Unnamed repository; edit this file 'description' to name the repository.
2 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.directory:
--------------------------------------------------------------------------------
1 | [Dolphin]
2 | PreviewsShown=true
3 | Timestamp=2019,4,11,12,19,21
4 | Version=3
5 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/graph.png
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/sample.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/sample.tif
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/tools.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/tools.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/1.jpg
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/cfgs.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/cfgs.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/losses.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/losses.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/build_rpn.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/build_rpn.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_vgg16.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/config_vgg16.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_res101.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/config_res101.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/read_tfrecord.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/read_tfrecord.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/help_utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/help_utils.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_cpu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_cpu.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_resnet_50.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/config_resnet_50.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/image_preprocess.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/image_preprocess.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_rotate.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_rotate.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms_rotate.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms_rotate.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/boxes_utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/boxes_utils.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/make_anchor.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/make_anchor.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.hpp:
--------------------------------------------------------------------------------
1 | void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id);
2 |
3 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/vgg.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/vgg.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/network_factory.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/network_factory.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/rpn_loss.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/rpn_loss.bmp
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_inception_resnet.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/config_inception_resnet.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/coordinate_convert.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/coordinate_convert.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/encode_and_decode.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/encode_and_decode.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/show_box_in_tensor.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/show_box_in_tensor.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__init__.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/resnet_v1.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/resnet_v1.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/total_loss.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/total_loss.bmp
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/anchor_utils_pyfunc.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/anchor_utils_pyfunc.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/mobilenet_v1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/mobilenet_v1.png
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/mobilenet_v1.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/mobilenet_v1.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/resnet_utils.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/resnet_utils.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/fast_rcnn_loss.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/fast_rcnn_loss.bmp
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/tools/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/tools/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/tools.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/tools.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/tools.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/tools.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/cfgs.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/cfgs.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/cfgs.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/cfgs.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/iou.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/iou.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/iou.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/iou.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/losses.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/losses.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/losses.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/losses/__pycache__/losses.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/inception_resnet_v2.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/inception_resnet_v2.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/build_rpn.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/build_rpn.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/build_rpn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/rpn/__pycache__/build_rpn.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/tools/__pycache__/restore_model.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/tools/__pycache__/restore_model.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_res101.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_res101.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_res101.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_res101.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/read_tfrecord.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/read_tfrecord.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/help_utils.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/help_utils.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/help_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/help_utils/__pycache__/help_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/configs/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_resnet_50.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/configs/__pycache__/config_resnet_50.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/image_preprocess.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/image_preprocess.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/image_preprocess.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/data/io/__pycache__/image_preprocess.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms_rotate.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms_rotate.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms_rotate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/nms_rotate.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/iou_cpu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/iou_cpu.o
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/boxes_utils.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/boxes_utils.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/boxes_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/boxes_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/make_anchor.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/make_anchor.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/make_anchor.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/make_anchor.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_cpu.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_cpu.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_gpu_nms.hpp:
--------------------------------------------------------------------------------
1 | void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
2 | int boxes_dim, float nms_overlap_thresh, int device_id);
3 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/vgg.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/vgg.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/vgg.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/vgg.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/visualization.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/visualization.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.hpp:
--------------------------------------------------------------------------------
1 | void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
2 | int boxes_dim, float nms_overlap_thresh, int device_id);
3 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/build_fast_rcnn.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/build_fast_rcnn.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/build_fast_rcnn.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/fast_rcnn/__pycache__/build_fast_rcnn.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/network_factory.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/network_factory.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/network_factory.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/__pycache__/network_factory.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/encode_and_decode.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/encode_and_decode.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/encode_and_decode.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/encode_and_decode.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rbbox_overlaps.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rbbox_overlaps.o
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/label_dict.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/label_dict.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/label_dict.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/__pycache__/label_dict.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/output/res101_summary/README.md:
--------------------------------------------------------------------------------
1 | # Here is summary.
2 | tensorboard --logdir=$R2CNN_HEAD_ROOT/output/res101_summary/
3 | 
4 | 
5 | 
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/anchor_utils_pyfunc.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/anchor_utils_pyfunc.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/anchor_utils_pyfunc.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/anchor_utils_pyfunc.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/coordinate_convert.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/coordinate_convert.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/coordinate_convert.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/coordinate_convert.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/show_box_in_tensor.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/show_box_in_tensor.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/show_box_in_tensor.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/__pycache__/show_box_in_tensor.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_v1.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_v1.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_v1.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_v1.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rotate_polygon_nms.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rotate_polygon_nms.o
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/mobilenet_v1.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/mobilenet_v1.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/mobilenet_v1.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/mobilenet_v1.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_utils.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_utils.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/resnet_utils.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rbbox_overlaps_kernel.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rbbox_overlaps_kernel.o
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.cpython-36m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.cpython-36m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rotate_polygon_nms_kernel.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/build/temp.linux-x86_64-3.6/rotate_polygon_nms_kernel.o
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/inception_resnet_v2.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/inception_resnet_v2.cpython-35.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/inception_resnet_v2.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MonsterZhZh/Ship-Detection/HEAD/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/__pycache__/inception_resnet_v2.cpython-36.pyc
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/post-update.sample:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # An example hook script to prepare a packed repository for use over
4 | # dumb transports.
5 | #
6 | # To enable this hook, rename this file to "post-update".
7 |
8 | exec git update-server-info
9 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/info/exclude:
--------------------------------------------------------------------------------
1 | # git ls-files --others --exclude-from=.git/info/exclude
2 | # Lines that start with '#' are comments.
3 | # For a project mostly in C, the following would be a good set of
4 | # exclude patterns (uncomment them if you want to use them):
5 | # *.[oa]
6 | # *~
7 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/pretrained_weights/README.md:
--------------------------------------------------------------------------------
1 | # Pretrained weights (ResNet101 or ResNet50) are stored here
2 |
3 | download pretrain weight([resnet_v1_101_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz) or [resnet_v1_50_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz)) from [here](https://github.com/yangxue0827/models/tree/master/slim), then extract to here.
4 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/tfrecords/README.md:
--------------------------------------------------------------------------------
1 | # Store tfrecords here
2 | The data is in VOC format; see [sample.xml](sample.xml) for a reference annotation.
3 | data path format
4 | VOCdevkit
5 | >VOCdevkit_train
6 | >>Annotation
7 | >>JPEGImages
8 |
9 | >VOCdevkit_test
10 | >>Annotation
11 | >>JPEGImages
12 |
13 | python ./data/io/convert_data_to_tfrecord.py --VOC_dir='***/VOCdevkit/VOCdevkit_train/' --save_name='train' --img_format='.tif' --dataset='ship'
14 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/pre-applypatch.sample:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # An example hook script to verify what is about to be committed
4 | # by applypatch from an e-mail message.
5 | #
6 | # The hook should exit with non-zero status after issuing an
7 | # appropriate message if it wants to stop the commit.
8 | #
9 | # To enable this hook, rename this file to "pre-applypatch".
10 |
11 | . git-sh-setup
12 | test -x "$GIT_DIR/hooks/pre-commit" &&
13 | exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"}
14 | :
15 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/tif2jpg.py:
--------------------------------------------------------------------------------
# -*-encoding:utf-8-*-
"""Batch-convert the .tif images in input_path to .jpg in output_path."""
import os
import cv2

input_path = '/yangxue/data_ship_clean/VOCdevkit/TIFImages/'
output_path = '/yangxue/data_ship_clean/VOCdevkit/JPEGImages/'

images = os.listdir(input_path)

for count, name in enumerate(images):
    # BUG FIX: only convert .tif entries. The original fed every directory
    # entry to cv2.imread; for non-images imread returns None and the
    # subsequent cv2.imwrite(None) raises, aborting the whole conversion.
    if not name.endswith('.tif'):
        continue
    img = cv2.imread(os.path.join(input_path, name))
    if img is None:
        print('skip unreadable image: {}'.format(name))
        continue
    cv2.imwrite(os.path.join(output_path, name.replace('.tif', '.jpg')), img)
    if count % 1000 == 0:
        print(count)
15 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/deployment.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/applypatch-msg.sample:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # An example hook script to check the commit log message taken by
4 | # applypatch from an e-mail message.
5 | #
6 | # The hook should exit with non-zero status after issuing an
7 | # appropriate message if it wants to stop the commit. The hook is
8 | # allowed to edit the commit message file.
9 | #
10 | # To enable this hook, rename this file to "applypatch-msg".
11 |
12 | . git-sh-setup
13 | test -x "$GIT_DIR/hooks/commit-msg" &&
14 | exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"}
15 | :
16 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/help_utils/tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import division, print_function, absolute_import
3 | import math
4 | import sys
5 | import os
6 |
7 |
def view_bar(message, num, total):
    """Draw an in-place 40-character console progress bar.

    :param message: label printed before the bar
    :param num: number of items processed so far
    :param total: total number of items (must be non-zero)
    """
    progress = num / total
    filled = int(progress * 40)           # bar cells to fill (out of 40)
    percent = math.ceil(progress * 100)   # rounded-up percentage for display
    # '\r' rewinds to the start of the line so the bar updates in place.
    sys.stdout.write('\r%s:[%s%s]%d%%\t%d/%d' % (message,
                                                 ">" * filled,
                                                 " " * (40 - filled),
                                                 percent, num, total))
    sys.stdout.flush()
15 |
16 |
def mkdir(path):
    """Create *path* (including missing parents) if it does not exist.

    BUG FIX: the original `if not exists: makedirs` pattern races (TOCTOU) —
    a concurrent creator between the check and the call made makedirs raise.
    Attempt the creation and only re-raise when the path still isn't a
    directory afterwards.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Already created (possibly by another process) — that's fine.
        if not os.path.isdir(path):
            raise
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/FPN_v1.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/sh.exe.stackdump:
--------------------------------------------------------------------------------
1 | MSYS-1.0.12 Build:2012-07-05 14:56
2 | Exception: STATUS_ACCESS_VIOLATION at eip=02A37968
3 | eax=00000001 ebx=0069FA7C ecx=02AB4C5C edx=026F0000 esi=0069FA80 edi=00000000
4 | ebp=0069FA54 esp=0069F8DC program=C:\Users\yangxue\AppData\Local\GitHub\PortableGit_c2ba306e536fdf878271f7fe636a147ff37326ad\bin\sh.exe
5 | cs=0023 ds=002B es=002B fs=0053 gs=002B ss=002B
6 | Stack trace:
7 | Frame Function Args
8 | 0069FA54 02A37968 (0055005C, 00650073, 00730072, 0079005C)
9 | 72238 [main] sh 8740 handle_exceptions: Error while dumping state (probably corrupted stack)
10 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 |
def non_maximal_suppression(boxes, scores, iou_threshold, max_output_size, name='non_maximal_suppression'):
    """Thin wrapper around ``tf.image.non_max_suppression``.

    :param boxes: [N, 4] box tensor
    :param scores: [N] score tensor
    :param iou_threshold: overlap threshold above which boxes are suppressed
    :param max_output_size: maximum number of indices to return
    :param name: variable scope / op name
    :return: 1-D tensor of indices of the boxes kept, in decreasing score order
    """
    with tf.variable_scope(name):
        keep_indices = tf.image.non_max_suppression(boxes=boxes,
                                                    scores=scores,
                                                    max_output_size=max_output_size,
                                                    iou_threshold=iou_threshold,
                                                    name=name)
    return keep_indices
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/.idea/webServers.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
14 |
15 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rbbox_overlaps.pyx:
--------------------------------------------------------------------------------
import numpy as np
cimport numpy as np

# External C entry point declared in rbbox_overlaps.hpp; fills a
# pre-allocated N*K float32 buffer with pairwise rotated-box overlaps.
# (The device_id argument suggests a GPU implementation — confirm in the .hpp.)
cdef extern from "rbbox_overlaps.hpp":
    void _overlaps(np.float32_t*, np.float32_t*, np.float32_t*, int, int, int)

def rbbx_overlaps (np.ndarray[np.float32_t, ndim=2] boxes, np.ndarray[np.float32_t, ndim=2] query_boxes, np.int32_t device_id=0):
    # boxes: [x, y, w, h, theta]
    # query_boxes: same layout; device_id selects which device runs the kernel.
    # Returns an (N, K) float32 overlap matrix.
    cdef int N = boxes.shape[0]
    cdef int K = query_boxes.shape[0]
    cdef np.ndarray[np.float32_t, ndim=2] overlaps = np.zeros((N, K), dtype = np.float32)
    _overlaps(&overlaps[0, 0], &boxes[0, 0], &query_boxes[0, 0], N, K, device_id)
    return overlaps
14 |
15 |
16 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/commit-msg.sample:
--------------------------------------------------------------------------------
#!/bin/sh
#
# An example hook script to check the commit log message.
# Called by "git commit" with one argument, the name of the file
# that has the commit message.  The hook should exit with non-zero
# status after issuing an appropriate message if it wants to stop the
# commit.  The hook is allowed to edit the commit message file.
#
# To enable this hook, rename this file to "commit-msg".

# Uncomment the below to add a Signed-off-by line to the message.
# Doing this in a hook is a bad idea in general, but the prepare-commit-msg
# hook is more suited to it.
#
# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"

# This example catches duplicate Signed-off-by lines.

# uniq -c prefixes each distinct line with its count; sed deletes the
# count==1 entries, so any surviving output means a duplicated sign-off.
test "" = "$(grep '^Signed-off-by: ' "$1" |
	sort | uniq -c | sed -e '/^[ 	]*1[ 	]/d')" || {
	echo >&2 Duplicate Signed-off-by lines.
	exit 1
}
25 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/rotate_polygon_nms.pyx:
--------------------------------------------------------------------------------
import numpy as np
cimport numpy as np

assert sizeof(int) == sizeof(np.int32_t)

# GPU rotated-NMS entry point declared in rotate_gpu_nms.hpp.
cdef extern from "rotate_gpu_nms.hpp":
    void _rotate_nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)

def rotate_gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float_t thresh, np.int32_t device_id=0):
    # dets: (N, 6) float32; column 5 holds the score (columns 0-4 are
    # presumably [x, y, w, h, theta] -- confirm against the CUDA kernel).
    # Returns indices into the ORIGINAL dets of the boxes kept, because the
    # kernel works on score-sorted rows and `order` maps back.
    cdef int boxes_num = dets.shape[0]
    cdef int boxes_dim = dets.shape[1]
    cdef int num_out
    cdef np.ndarray[np.int32_t, ndim=1] \
        keep = np.zeros(boxes_num, dtype=np.int32)
    cdef np.ndarray[np.float32_t, ndim=1] \
        scores = dets[:, 5]
    cdef np.ndarray[np.int_t, ndim=1] \
        order = scores.argsort()[::-1]
    cdef np.ndarray[np.float32_t, ndim=2] \
        sorted_dets = dets[order, :]
    thresh = thresh  # NOTE(review): no-op self-assignment; kept as-is
    _rotate_nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id)
    keep = keep[:num_out]
    return order[keep]
25 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 |
8 | import tensorflow as tf
9 |
10 |
def iou_calculate(boxes_1, boxes_2):
    """Pairwise IoU between two sets of axis-aligned boxes.

    :param boxes_1: [N, 4] tensor, rows are [ymin, xmin, ymax, xmax]
    :param boxes_2: [M, 4] tensor, rows are [ymin, xmin, ymax, xmax]
    :return: [N, M] IoU tensor (shapes broadcast: [N, 1] against [M])
    """
    with tf.name_scope('iou_caculate'):
        # boxes_1 splits into [N, 1] columns, boxes_2 unstacks into [M]
        # vectors, so every comparison below broadcasts to an [N, M] grid.
        ymin_1, xmin_1, ymax_1, xmax_1 = tf.split(boxes_1, 4, axis=1)
        ymin_2, xmin_2, ymax_2, xmax_2 = tf.unstack(boxes_2, axis=1)

        # Intersection rectangle, clamped at 0 so disjoint boxes contribute
        # zero area instead of a negative one.
        inter_w = tf.maximum(0., tf.minimum(xmax_1, xmax_2) - tf.maximum(xmin_1, xmin_2))
        inter_h = tf.maximum(0., tf.minimum(ymax_1, ymax_2) - tf.maximum(ymin_1, ymin_2))
        intersection = inter_h * inter_w

        area_1 = (xmax_1 - xmin_1) * (ymax_1 - ymin_1)  # [N, 1]
        area_2 = (xmax_2 - xmin_2) * (ymax_2 - ymin_2)  # [M]

        return intersection / (area_1 + area_2 - intersection)
41 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/prepare-commit-msg.sample:
--------------------------------------------------------------------------------
#!/bin/sh
#
# An example hook script to prepare the commit log message.
# Called by "git commit" with the name of the file that has the
# commit message, followed by the description of the commit
# message's source.  The hook's purpose is to edit the commit
# message file.  If the hook fails with a non-zero status,
# the commit is aborted.
#
# To enable this hook, rename this file to "prepare-commit-msg".

# This hook includes three examples. The first comments out the
# "Conflicts:" part of a merge commit.
#
# The second includes the output of "git diff --name-status -r"
# into the message, just before the "git status" output.  It is
# commented because it doesn't cope with --amend or with squashed
# commits.
#
# The third example adds a Signed-off-by line to the message, that can
# still be edited.  This is rarely a good idea.

# $2 is the commit message source ("merge", "template", ...); $3 is the
# commit object name, when there is one.
case "$2,$3" in
  merge,)
    /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;;

# ,|template,)
#   /usr/bin/perl -i.bak -pe '
#      print "\n" . `git diff --cached --name-status -r`
#	 if /^#/ && $first++ == 0' "$1" ;;

  *) ;;
esac

# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
37 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/pre-push.sample:
--------------------------------------------------------------------------------
#!/bin/sh

# An example hook script to verify what is about to be pushed.  Called by "git
# push" after it has checked the remote status, but before anything has been
# pushed.  If this script exits with a non-zero status nothing will be pushed.
#
# This hook is called with the following parameters:
#
# $1 -- Name of the remote to which the push is being done
# $2 -- URL to which the push is being done
#
# If pushing without using a named remote those arguments will be equal.
#
# Information about the commits which are being pushed is supplied as lines to
# the standard input in the form:
#
#   <local ref> <local sha1> <remote ref> <remote sha1>
#
# This sample shows how to prevent push of commits where the log message starts
# with "WIP" (work in progress).

remote="$1"
url="$2"

# The all-zero sha1 marks a ref being created or deleted.
z40=0000000000000000000000000000000000000000

IFS=' '
while read local_ref local_sha remote_ref remote_sha
do
	if [ "$local_sha" = $z40 ]
	then
		# Handle delete
		# BUG FIX: a "then" branch must contain at least one command; a
		# comment alone is a shell syntax error.  ":" is the no-op command.
		:
	else
		if [ "$remote_sha" = $z40 ]
		then
			# New branch, examine all commits
			range="$local_sha"
		else
			# Update to existing branch, examine new commits
			range="$remote_sha..$local_sha"
		fi

		# Check for WIP commit
		commit=`git rev-list -n 1 --grep '^WIP' "$range"`
		if [ -n "$commit" ]
		then
			echo "Found WIP commit in $local_ref, not pushing"
			exit 1
		fi
	fi
done

exit 0
54 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/visualization.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 |
def roi_visualize(img, img_h, img_w, roi_box, rois):
    """Add TensorBoard image summaries comparing each ROI crop of the input
    image ('target') with its channel-averaged ROI feature map ('rois').

    :param img: [1, H, W, 3] mean-subtracted image tensor (BGR order,
        judging by the mean constant below -- confirm against preprocessing)
    :param img_h: image height, used to normalize box coordinates
    :param img_w: image width, used to normalize box coordinates
    :param roi_box: [N, 4] pixel boxes as [ymin, xmin, ymax, xmax]
    :param rois: [N, h, w, C] ROI feature maps
    """
    with tf.variable_scope('roi_visualize'):
        ymin, xmin, ymax, xmax = tf.unstack(roi_box, axis=1)

        # tf.image.crop_and_resize expects coordinates normalized to [0, 1].
        normalize_ymin = ymin / img_h
        normalize_xmin = xmin / img_w
        normalize_ymax = ymax / img_h
        normalize_xmax = xmax / img_w

        # Add back the per-channel means (presumably ImageNet BGR means) that
        # preprocessing subtracted, then rescale to uint8 for display.
        # NOTE(review): '225' looks like a typo for 255; kept as-is since it
        # only affects summary brightness.
        tmp_img = tf.squeeze(img) + tf.constant([103.939, 116.779, 123.68], dtype=tf.float32)
        tmp_img = tf.cast(tmp_img * 225 / tf.reduce_max(tmp_img), dtype=tf.uint8)
        tmp_img = tf.expand_dims(tmp_img, axis=0)
        # Crop every ROI out of the (single) batch image at 56x56.
        target = tf.image.crop_and_resize(tmp_img,
                                          boxes=tf.transpose(tf.stack([normalize_ymin, normalize_xmin,
                                                                       normalize_ymax, normalize_xmax])),
                                          box_ind=tf.zeros(shape=[tf.shape(roi_box)[0], ],
                                                           dtype=tf.int32),
                                          crop_size=[56, 56],
                                          name='crop_img_object'
                                          )

        # Average over channels and resize so the feature map matches the
        # 56x56 image crops.
        rois = tf.image.resize_bilinear(rois, size=[56, 56])
        rois_mean = tf.reduce_mean(rois, axis=3)
        # ::-1 reverses the channel axis (presumably BGR -> RGB for display).
        tf.summary.image('target', target[:, :, :, ::-1])
        tf.summary.image('rois', tf.expand_dims(rois_mean, axis=3))
35 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/divide_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import division, print_function, absolute_import
3 | import sys
4 | sys.path.append('../../')
5 | import shutil
6 | import os
7 | import random
8 | import math
9 |
10 |
11 | def mkdir(path):
12 | if not os.path.exists(path):
13 | os.makedirs(path)
14 |
15 |
16 | divide_rate = 0.9
17 |
18 | root_path = '/yangxue/shapan'
19 |
20 | image_path = root_path + '/VOCdevkit/JPEGImages'
21 | xml_path = root_path + '/VOCdevkit/Annotations'
22 |
23 | image_list = os.listdir(image_path)
24 |
25 | image_name = [n.split('.')[0] for n in image_list]
26 |
27 | random.shuffle(image_name)
28 |
29 | train_image = image_name[:int(math.ceil(len(image_name)) * divide_rate)]
30 | test_image = image_name[int(math.ceil(len(image_name)) * divide_rate):]
31 |
32 | image_output_train = os.path.join(root_path, 'VOCdevkit_train/JPEGImages')
33 | mkdir(image_output_train)
34 | image_output_test = os.path.join(root_path, 'VOCdevkit_test/JPEGImages')
35 | mkdir(image_output_test)
36 |
37 | xml_train = os.path.join(root_path, 'VOCdevkit_train/Annotations')
38 | mkdir(xml_train)
39 | xml_test = os.path.join(root_path, 'VOCdevkit_test/Annotations')
40 | mkdir(xml_test)
41 |
42 |
43 | count = 0
44 | for i in train_image:
45 | shutil.copy(os.path.join(image_path, i + '.jpg'), image_output_train)
46 | shutil.copy(os.path.join(xml_path, i + '.xml'), xml_train)
47 | if count % 1000 == 0:
48 | print("process step {}".format(count))
49 | count += 1
50 |
51 | for i in test_image:
52 | shutil.copy(os.path.join(image_path, i + '.jpg'), image_output_test)
53 | shutil.copy(os.path.join(xml_path, i + '.xml'), xml_test)
54 | if count % 1000 == 0:
55 | print("process step {}".format(count))
56 | count += 1
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/inception.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Brings all inception models under one namespace."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | # pylint: disable=unused-import
22 | from nets.inception_resnet_v2 import inception_resnet_v2
23 | from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
24 | from nets.inception_resnet_v2 import inception_resnet_v2_base
25 | from nets.inception_v1 import inception_v1
26 | from nets.inception_v1 import inception_v1_arg_scope
27 | from nets.inception_v1 import inception_v1_base
28 | from nets.inception_v2 import inception_v2
29 | from nets.inception_v2 import inception_v2_arg_scope
30 | from nets.inception_v2 import inception_v2_base
31 | from nets.inception_v3 import inception_v3
32 | from nets.inception_v3 import inception_v3_arg_scope
33 | from nets.inception_v3 import inception_v3_base
34 | from nets.inception_v4 import inception_v4
35 | from nets.inception_v4 import inception_v4_arg_scope
36 | from nets.inception_v4 import inception_v4_base
37 | # pylint: enable=unused-import
38 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/tools/restore_model.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import print_function
5 | from __future__ import division
6 |
7 | import tensorflow as tf
8 | import tensorflow.contrib.slim as slim
9 | import os
10 |
11 | from libs.configs import cfgs
12 | from libs.networks.network_factory import get_flags_byname
13 |
14 |
15 | RESTORE_FROM_RPN = False
16 | FLAGS = get_flags_byname(cfgs.NET_NAME)
17 | os.environ["CUDA_VISIBLE_DEVICES"] = "6"
18 |
19 |
def get_restorer():
    """Build a tf.train.Saver for restoring model weights.

    Prefers the latest checkpoint saved for cfgs.VERSION; when none exists
    yet, falls back to the pretrained backbone weights, restoring only the
    backbone variables (minus its logits layer, whose shape depends on the
    original classification task).

    :return: (restorer, checkpoint_path) tuple.
    """
    checkpoint_path = tf.train.latest_checkpoint(os.path.join(FLAGS.trained_checkpoint, cfgs.VERSION))

    # FIX: compare against None with "is not None" rather than "!= None";
    # "!=" can invoke arbitrary __ne__ overloads (PEP 8).
    if checkpoint_path is not None:
        if RESTORE_FROM_RPN:
            print('___restore from rpn___')
            model_variables = slim.get_model_variables()
            # Everything except the Fast-RCNN head, plus the global step.
            restore_variables = [var for var in model_variables
                                 if not var.name.startswith('Fast_Rcnn')] + [slim.get_or_create_global_step()]
            for var in restore_variables:
                print(var.name)
            restorer = tf.train.Saver(restore_variables)
        else:
            restorer = tf.train.Saver()
        print("model restore from :", checkpoint_path)
    else:
        checkpoint_path = FLAGS.pretrained_model_path
        print("model restore from pretrained mode, path is :", checkpoint_path)

        model_variables = slim.get_model_variables()

        # Only backbone variables, skipping the pretrained logits layer.
        restore_variables = [var for var in model_variables
                             if (var.name.startswith(cfgs.NET_NAME)
                                 and not var.name.startswith('{}/logits'.format(cfgs.NET_NAME)))]
        for var in restore_variables:
            print(var.name)
        restorer = tf.train.Saver(restore_variables)
    return restorer, checkpoint_path
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/v1_ship_head.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Configuration constants for the 'v1_ship_head' experiment
(ResNet-101 backbone + FPN, single 'ship' class)."""
from __future__ import division, print_function, absolute_import
import os

# root path
ROO_PATH = os.path.abspath('/yangxue/FPN_v21')

# pretrain weights path
MODEL_PATH = ROO_PATH + '/output/model'
SUMMARY_PATH = ROO_PATH + '/output/summary'

TEST_SAVE_PATH = ROO_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROO_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROO_PATH + '/tools/inference_result'

# network / dataset
NET_NAME = 'resnet_v1_101'
VERSION = 'v1_ship_head'
CLASS_NUM = 1  # number of object classes
LEVEL = ['P2', 'P3', 'P4', 'P5', 'P6']  # FPN pyramid levels
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]  # one base anchor size per level
STRIDE = [4, 8, 16, 32, 64]  # feature-map stride per level
ANCHOR_SCALES = [1.]
ANCHOR_RATIOS = [1 / 2., 1 / 3., 1., 3., 2.]
SCALE_FACTORS = [10., 10., 5., 5., 10.]  # presumably box-encoding scale factors
OUTPUT_STRIDE = 16
SHORT_SIDE_LEN = 600  # images resized so the short side equals this
DATASET_NAME = 'ship'

# training
BATCH_SIZE = 1
WEIGHT_DECAY = {'vggnet16': 0.0005, 'resnet_v1_50': 0.0001, 'resnet_v1_101': 0.0001}
EPSILON = 1e-5
MOMENTUM = 0.9
MAX_ITERATION = 40000
GPU_GROUP = "1"  # value used for CUDA_VISIBLE_DEVICES

# rpn
SHARE_HEAD = False
RPN_NMS_IOU_THRESHOLD = 0.7
MAX_PROPOSAL_NUM = 300
RPN_IOU_POSITIVE_THRESHOLD = 0.6
RPN_IOU_NEGATIVE_THRESHOLD = 0.25
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
IS_FILTER_OUTSIDE_BOXES = True
RPN_TOP_K_NMS = 3000
FEATURE_PYRAMID_MODE = 0  # {0: 'feature_pyramid', 1: 'dense_feature_pyramid'}

# fast rcnn
ROTATE_NMS_USE_GPU = True
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 0.5
FAST_RCNN_NMS_IOU_THRESHOLD = 0.15
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FINAL_SCORE_THRESHOLD = 0.8
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_MINIBATCH_SIZE = 128
FAST_RCNN_POSITIVE_RATE = 0.5
60 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/v1_UAV_rotate.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Configuration constants for the 'v1_UAV_rotate' experiment
(ResNet-101 backbone + FPN, 3-class UAV dataset, rotated boxes)."""
from __future__ import division, print_function, absolute_import
import os

# root path
ROO_PATH = os.path.abspath('/yangxue/FPN_v21')

# pretrain weights path
MODEL_PATH = ROO_PATH + '/output/model'
SUMMARY_PATH = ROO_PATH + '/output/summary'

TEST_SAVE_PATH = ROO_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROO_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROO_PATH + '/tools/inference_result'

# network / dataset
NET_NAME = 'resnet_v1_101'
VERSION = 'v1_UAV_rotate'
CLASS_NUM = 3  # number of object classes
LEVEL = ['P2', 'P3', 'P4', 'P5', 'P6']  # FPN pyramid levels
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]  # one base anchor size per level
STRIDE = [4, 8, 16, 32, 64]  # feature-map stride per level
ANCHOR_SCALES = [1.]
ANCHOR_RATIOS = [1, 0.5, 2, 1 / 3., 3., 1.5, 1 / 1.5]
SCALE_FACTORS = [10., 10., 5., 5., 5.]  # presumably box-encoding scale factors
OUTPUT_STRIDE = 16
SHORT_SIDE_LEN = 600  # images resized so the short side equals this
DATASET_NAME = 'UAV'

# training
BATCH_SIZE = 1
WEIGHT_DECAY = {'vggnet16': 0.0005, 'resnet_v1_50': 0.0001, 'resnet_v1_101': 0.0001}
EPSILON = 1e-5
MOMENTUM = 0.9
MAX_ITERATION = 50000
GPU_GROUP = "1"  # value used for CUDA_VISIBLE_DEVICES

# rpn
SHARE_HEAD = False
RPN_NMS_IOU_THRESHOLD = 0.6
MAX_PROPOSAL_NUM = 300
RPN_IOU_POSITIVE_THRESHOLD = 0.5
RPN_IOU_NEGATIVE_THRESHOLD = 0.2
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
IS_FILTER_OUTSIDE_BOXES = True
RPN_TOP_K_NMS = 3000
FEATURE_PYRAMID_MODE = 0  # {0: 'feature_pyramid', 1: 'dense_feature_pyramid'}

# fast rcnn
FAST_RCNN_MODE = 'build_fast_rcnn1'
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 0.5
FAST_RCNN_NMS_IOU_THRESHOLD = 0.2
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FINAL_SCORE_THRESHOLD = 0.7
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.45
FAST_RCNN_MINIBATCH_SIZE = 512
FAST_RCNN_POSITIVE_RATE = 0.25
60 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/pre-commit.sample:
--------------------------------------------------------------------------------
#!/bin/sh
#
# An example hook script to verify what is about to be committed.
# Called by "git commit" with no arguments.  The hook should
# exit with non-zero status after issuing an appropriate message if
# it wants to stop the commit.
#
# To enable this hook, rename this file to "pre-commit".

if git rev-parse --verify HEAD >/dev/null 2>&1
then
	against=HEAD
else
	# Initial commit: diff against an empty tree object
	# (4b825d... is the well-known id of the empty tree, valid in any repo).
	against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
fi

# If you want to allow non-ascii filenames set this variable to true.
allownonascii=$(git config hooks.allownonascii)

# Redirect output to stderr.
exec 1>&2

# Cross platform projects tend to avoid non-ascii filenames; prevent
# them from being added to the repository. We exploit the fact that the
# printable range starts at the space character and ends with tilde.
if [ "$allownonascii" != "true" ] &&
	# Note that the use of brackets around a tr range is ok here, (it's
	# even required, for portability to Solaris 10's /usr/bin/tr), since
	# the square bracket bytes happen to fall in the designated range.
	test $(git diff --cached --name-only --diff-filter=A -z $against |
	  LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
then
	echo "Error: Attempt to add a non-ascii file name."
	echo
	echo "This can cause problems if you want to work"
	echo "with people on other platforms."
	echo
	echo "To be portable it is advisable to rename the file ..."
	echo
	echo "If you know what you are doing you can disable this"
	echo "check using:"
	echo
	echo "  git config hooks.allownonascii true"
	echo
	exit 1
fi

# If there are whitespace errors, print the offending file names and fail.
exec git diff-index --check --cached $against --
51 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_resnet_50.py:
--------------------------------------------------------------------------------
"""tf.app.flags configuration for the ResNet-50 training pipeline.

Importing this module registers the flags; read them via the module-level
``FLAGS`` object at the bottom.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# Input data location and resize policy.
tf.app.flags.DEFINE_string(
    'dataset_tfrecord',
    '../data/tfrecords',
    'tfrecord of fruits dataset'
)
tf.app.flags.DEFINE_integer(
    'shortside_size',
    600,
    'the value of new height and new width, new_height = new_width'
)

###########################
#  data batch
##########################
tf.app.flags.DEFINE_integer(
    'num_classes',
    20,
    'num of classes'
)
tf.app.flags.DEFINE_integer(
    'batch_size',
    1,  # 64
    'num of imgs in a batch'
)

###############################
# optimizer-- MomentumOptimizer
###############################
tf.app.flags.DEFINE_float(
    'momentum',
    0.9,
    'accumulation = momentum * accumulation + gradient'
)

############################
#  train
########################
tf.app.flags.DEFINE_integer(
    'max_steps',
    900000,
    'max iterate steps'
)

tf.app.flags.DEFINE_string(
    'pretrained_model_path',
    '../data/pretrained_weights/resnet_50.ckpt',
    'the path of pretrained weights'
)
tf.app.flags.DEFINE_float(
    'weight_decay',
    0.0001,
    'weight_decay in regulation'
)
################################
# summary and save_weights_checkpoint
##################################
tf.app.flags.DEFINE_string(
    'summary_path',
    '../output/resnet_summary',
    'the path of summary write to '
)
tf.app.flags.DEFINE_string(
    'trained_checkpoint',
    '../output/resnet_trained_weights',
    'the path to save trained_weights'
)
# Parsed flag values; accessed as config_resnet_50.FLAGS elsewhere.
FLAGS = tf.app.flags.FLAGS
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/configs/cfgs.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Active configuration (cfgs) — ResNet-101 + FPN on HRSC2016 (22 classes)."""
from __future__ import division, print_function, absolute_import
import os

# root path
ROO_PATH = os.path.abspath('/home/jzchen/WorkingSpace/R2CNN_HEAD_FPN_Tensorflow')

# pretrain weights path
MODEL_PATH = ROO_PATH + '/output/model'
SUMMARY_PATH = ROO_PATH + '/output/summary'

TEST_SAVE_PATH = ROO_PATH + '/tools/test_result'
# INFERENCE_IMAGE_PATH = ROO_PATH + '/tools/inference_image'
INFERENCE_IMAGE_PATH = '/home/jzchen/data/RemoteSensing/ships/HRSC2016/HRSC2016/Train/AllImages'
INFERENCE_SAVE_PATH = ROO_PATH + '/tools/inference_result'

# Evaluation save path
EVALUATE_R_DIR = ROO_PATH + '/tools/evaluation'

# network / dataset
NET_NAME = 'resnet_v1_101'
VERSION = 'v1_ship_head'
# CLASS_NUM = 1
CLASS_NUM = 22  # number of object classes (HRSC2016)
LEVEL = ['P2', 'P3', 'P4', 'P5', 'P6']  # FPN pyramid levels
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]  # one base anchor size per level
STRIDE = [4, 8, 16, 32, 64]  # feature-map stride per level
ANCHOR_SCALES = [1.]
ANCHOR_RATIOS = [1 / 2., 1 / 3., 1., 3., 2.]
SCALE_FACTORS = [10., 10., 5., 5., 10.]  # presumably box-encoding scale factors
OUTPUT_STRIDE = 16
SHORT_SIDE_LEN = 600  # images resized so the short side equals this
DATASET_NAME = 'HRSC2016'

# training
BATCH_SIZE = 1
WEIGHT_DECAY = {'vggnet16': 0.0005, 'resnet_v1_50': 0.0001, 'resnet_v1_101': 0.0001}
EPSILON = 1e-5
MOMENTUM = 0.9
MAX_ITERATION = 80000
GPU_GROUP = "0"  # value used for CUDA_VISIBLE_DEVICES

# rpn
SHARE_HEAD = False
RPN_NMS_IOU_THRESHOLD = 0.7
MAX_PROPOSAL_NUM = 300
RPN_IOU_POSITIVE_THRESHOLD = 0.6
RPN_IOU_NEGATIVE_THRESHOLD = 0.25
RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
IS_FILTER_OUTSIDE_BOXES = True
RPN_TOP_K_NMS = 3000
FEATURE_PYRAMID_MODE = 0  # {0: 'feature_pyramid', 1: 'dense_feature_pyramid'}

# fast rcnn
ROTATE_NMS_USE_GPU = True
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 0.5
FAST_RCNN_NMS_IOU_THRESHOLD = 0.15
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
# FINAL_SCORE_THRESHOLD = 0.8
FINAL_SCORE_THRESHOLD = 0.4  # detection score cut-off at inference time
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_MINIBATCH_SIZE = 128
FAST_RCNN_POSITIVE_RATE = 0.5
66 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_cpu.pyx:
--------------------------------------------------------------------------------
1 | # written by yjr
2 |
3 | cimport cython
4 | import numpy as np
5 | cimport numpy as np
6 | import cv2
7 | import time
8 |
9 | DTYPE = np.float32
10 | ctypedef np.float32_t DTYPE_t
11 | ctypedef bint BOOL
12 |
cdef DTYPE_t two_boxes_iou(np.ndarray[DTYPE_t, ndim=1] rectangle_1,
                           np.ndarray[DTYPE_t, ndim=1] rectangle_2):
    """
    Compute the IoU of two rotated rectangles.

    :param rectangle_1: [x, y, w, h, theta]. shape: (5, )
    :param rectangle_2: [x, y, w, h, theta]. shape: (5, )
    :return: IoU as a float; 0.0 when the rectangles do not intersect or
             when the union area is degenerate (zero).
    """
    cdef DTYPE_t area1 = rectangle_1[2] * rectangle_1[3]
    cdef DTYPE_t area2 = rectangle_2[2] * rectangle_2[3]

    # OpenCV rotated rects are ((cx, cy), (dim_a, dim_b), angle); the h/w
    # swap here mirrors the [x, y, w, h, theta] layout used by the callers.
    rect_1 = ((rectangle_1[0], rectangle_1[1]), (rectangle_1[3], rectangle_1[2]), rectangle_1[-1])
    rect_2 = ((rectangle_2[0], rectangle_2[1]), (rectangle_2[3], rectangle_2[2]), rectangle_2[-1])

    inter_points = cv2.rotatedRectangleIntersection(rect_1, rect_2)[1]

    cdef np.ndarray[DTYPE_t, ndim=3] order_points
    cdef float inter_area, union_area
    if inter_points is None:
        return 0.0

    order_points = cv2.convexHull(inter_points, returnPoints=True)
    inter_area = cv2.contourArea(order_points)

    union_area = area1 + area2 - inter_area
    # Originally this case only printed debug output and then divided by
    # zero; treat a degenerate union (zero-area boxes) as no overlap.
    if union_area <= 0:
        return 0.0
    return inter_area * 1.0 / union_area
43 |
cpdef np.ndarray[DTYPE_t, ndim=2] get_iou_matrix(
        np.ndarray[DTYPE_t, ndim=2] boxes1,   # (N, 5)
        np.ndarray[DTYPE_t, ndim=2] boxes2):  # (M, 5)
    """Return the (N, M) matrix of pairwise rotated-box IoUs."""

    cdef unsigned int num_of_boxes1 = boxes1.shape[0]
    cdef unsigned int num_of_boxes2 = boxes2.shape[0]
    cdef np.ndarray[DTYPE_t, ndim=2] iou_matrix = np.zeros(
        (num_of_boxes1, num_of_boxes2), dtype=DTYPE)
    cdef unsigned int n, m

    # O(N*M) pairwise loop; each cell delegates to the OpenCV-backed helper.
    for n in range(num_of_boxes1):
        for m in range(num_of_boxes2):
            iou_matrix[n, m] = two_boxes_iou(boxes1[n], boxes2[m])

    return iou_matrix
61 |
62 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/nets_factory_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 Google Inc. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for slim.inception."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import tensorflow as tf
23 |
24 | from nets import nets_factory
25 |
26 | slim = tf.contrib.slim
27 |
28 |
class NetworksTest(tf.test.TestCase):
    """Sanity checks for nets_factory.get_network_fn."""

    def testGetNetworkFn(self):
        batch_size = 5
        num_classes = 1000
        for net_name in nets_factory.networks_map:
            with self.test_session():
                net_fn = nets_factory.get_network_fn(net_name, num_classes)
                # Most networks declare 224 as their default_image_size.
                size = getattr(net_fn, 'default_image_size', 224)
                images = tf.random_uniform((batch_size, size, size, 3))
                logits, end_points = net_fn(images)
                self.assertTrue(isinstance(logits, tf.Tensor))
                self.assertTrue(isinstance(end_points, dict))
                self.assertEqual(logits.get_shape().as_list()[0], batch_size)
                self.assertEqual(logits.get_shape().as_list()[-1], num_classes)

    def testGetNetworkFnArgScope(self):
        batch_size = 5
        num_classes = 10
        net = 'cifarnet'
        with self.test_session(use_gpu=True):
            net_fn = nets_factory.get_network_fn(net, num_classes)
            size = getattr(net_fn, 'default_image_size', 224)
            # Pin all model variables to the CPU and verify placement sticks.
            with slim.arg_scope([slim.model_variable, slim.variable],
                                device='/CPU:0'):
                net_fn(tf.random_uniform((batch_size, size, size, 3)))
            weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        'CifarNet/conv1')[0]
            self.assertDeviceEqual('/CPU:0', weights.device)
59 |
if __name__ == '__main__':
    # Run every test case defined in this module.
    tf.test.main()
62 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_res101.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# ---------------------------------------------------------------------------
# data set
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('dataset_tfrecord', '../data/tfrecords',
                           'tfrecord of fruits dataset')
tf.app.flags.DEFINE_integer('new_img_size', 224,
                            'the value of new height and new width, new_height = new_width')

# ---------------------------------------------------------------------------
# data batch
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('num_classes', 134, 'num of classes')
tf.app.flags.DEFINE_integer('batch_size', 64,  # 64
                            'num of imgs in a batch')
tf.app.flags.DEFINE_integer('val_batch_size', 32, 'val or test batch')

# ---------------------------------------------------------------------------
# learning rate
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('lr_begin', 0.001,  # 0.01 # 0.001 for without prepocess
                          'the value of learning rate start with')
tf.app.flags.DEFINE_integer('decay_steps', 20000,  # 5000
                            "after 'decay_steps' steps, learning rate begin decay")
tf.app.flags.DEFINE_float('decay_rate', 0.1, 'decay rate')

# ---------------------------------------------------------------------------
# optimizer-- MomentumOptimizer
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('momentum', 0.9,
                          'accumulation = momentum * accumulation + gradient')

# ---------------------------------------------------------------------------
# train
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('max_steps', 4003, 'max iterate steps')
tf.app.flags.DEFINE_string('pretrained_model_path',
                           '../data/pretrained_weights/resnet_v1_101.ckpt',
                           'the path of pretrained weights')
tf.app.flags.DEFINE_float('weight_decay', 0.0001, 'weight_decay in regulation')

# ---------------------------------------------------------------------------
# summary and save_weights_checkpoint
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('summary_path', '../output/res101_summary',
                           'the path of summary write to ')
tf.app.flags.DEFINE_string('trained_checkpoint', '../output/res101_trained_weights',
                           'the path to save trained_weights')

# Single FLAGS object shared by the training scripts.
FLAGS = tf.app.flags.FLAGS
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_vgg16.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# ---------------------------------------------------------------------------
# data set
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('dataset_tfrecord', '../data/tfrecords',
                           'tfrecord of dog dataset')
tf.app.flags.DEFINE_integer('new_img_size', 224,
                            'the value of new height and new width, new_height = new_width')

# ---------------------------------------------------------------------------
# data batch
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('num_classes', 134, 'num of classes')
tf.app.flags.DEFINE_integer('batch_size', 32,  # 64
                            'num of imgs in a batch')
tf.app.flags.DEFINE_integer('val_batch_size', 8, 'val or test batch')

# ---------------------------------------------------------------------------
# learning rate
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('lr_begin', 0.001,  # 0.01 # 0.001 for without prepocess # 0.01 for inception
                          'the value of learning rate start with')
tf.app.flags.DEFINE_integer('decay_steps', 3000,  # 5000
                            "after 'decay_steps' steps, learning rate begin decay")
tf.app.flags.DEFINE_float('decay_rate', 0.1, 'decay rate')

# ---------------------------------------------------------------------------
# optimizer-- MomentumOptimizer
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('momentum', 0.9,
                          'accumulation = momentum * accumulation + gradient')

# ---------------------------------------------------------------------------
# train
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('max_steps', 20010, 'max iterate steps')
tf.app.flags.DEFINE_string('pretrained_model_path',
                           '../data/pretrained_weights/vgg_16.ckpt',
                           'the path of pretrained weights')
tf.app.flags.DEFINE_float('weight_decay', 0.0005, 'weight_decay in regulation')

# ---------------------------------------------------------------------------
# summary and save_weights_checkpoint
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('summary_path', '../output/vgg16_summary',
                           'the path of summary write to ')
tf.app.flags.DEFINE_string('trained_checkpoint', '../output/vgg16_trainedweights',
                           'the path to save trained_weights')

# Single FLAGS object shared by the training scripts.
FLAGS = tf.app.flags.FLAGS
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/configs/config_inception_resnet.py:
--------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# ---------------------------------------------------------------------------
# data set
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('dataset_tfrecord', '../data/tfrecords',
                           'tfrecord of fruits dataset')
tf.app.flags.DEFINE_integer('new_img_size', 224,
                            'the value of new height and new width, new_height = new_width')

# ---------------------------------------------------------------------------
# data batch
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('num_classes', 100, 'num of classes')
tf.app.flags.DEFINE_integer('batch_size', 32,  # 64
                            'num of imgs in a batch')
tf.app.flags.DEFINE_integer('val_batch_size', 16, 'val or test batch')

# ---------------------------------------------------------------------------
# learning rate
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('lr_begin', 0.0001,  # 0.01 # 0.001 for without prepocess
                          'the value of learning rate start with')
tf.app.flags.DEFINE_integer('decay_steps', 2000,  # 5000
                            "after 'decay_steps' steps, learning rate begin decay")
tf.app.flags.DEFINE_float('decay_rate', 0.1, 'decay rate')

# ---------------------------------------------------------------------------
# optimizer-- MomentumOptimizer
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_float('momentum', 0.9,
                          'accumulation = momentum * accumulation + gradient')

# ---------------------------------------------------------------------------
# train
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_integer('max_steps', 30050, 'max iterate steps')
tf.app.flags.DEFINE_string('pretrained_model_path',
                           '../data/pretrained_weights/inception_resnet_v2_2016_08_30.ckpt',
                           'the path of pretrained weights')
tf.app.flags.DEFINE_float('weight_decay', 0.00004, 'weight_decay in regulation')

# ---------------------------------------------------------------------------
# summary and save_weights_checkpoint
# ---------------------------------------------------------------------------
tf.app.flags.DEFINE_string('summary_path', '../output/inception_res_summary',
                           'the path of summary write to ')
tf.app.flags.DEFINE_string('trained_checkpoint', '../output/inception_res_trainedweights',
                           'the path to save trained_weights')

# Single FLAGS object shared by the training scripts.
FLAGS = tf.app.flags.FLAGS
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/inception_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains common code shared by all inception models.
16 |
17 | Usage of arg scope:
18 | with slim.arg_scope(inception_arg_scope()):
19 | logits, end_points = inception.inception_v3(images, num_classes,
20 | is_training=is_training)
21 |
22 | """
23 | from __future__ import absolute_import
24 | from __future__ import division
25 | from __future__ import print_function
26 |
27 | import tensorflow as tf
28 |
29 | slim = tf.contrib.slim
30 |
31 |
def inception_arg_scope(weight_decay=0.00004,
                        use_batch_norm=True,
                        batch_norm_decay=0.9997,
                        batch_norm_epsilon=0.001):
    """Defines the default arg scope for inception models.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      use_batch_norm: If `True`, batch_norm is applied after each convolution.
      batch_norm_decay: Decay for batch norm moving average.
      batch_norm_epsilon: Small float added to variance to avoid dividing by
        zero in batch norm.

    Returns:
      An `arg_scope` to use for the inception models.
    """
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        normalizer_params = {
            # Decay for the moving averages.
            'decay': batch_norm_decay,
            # epsilon to prevent 0s in variance.
            'epsilon': batch_norm_epsilon,
            # collection containing update_ops.
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
        }
    else:
        normalizer_fn = None
        normalizer_params = {}

    # L2-regularize conv and FC weights, then attach the (optional) batch
    # norm after every convolution.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
                [slim.conv2d],
                weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=tf.nn.relu,
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params) as sc:
            return sc
72 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/iou_rotate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | import cv2
9 | import time
10 | from libs.box_utils.rbbox_overlaps import rbbx_overlaps
11 | from libs.box_utils.iou_cpu import get_iou_matrix
12 | import tensorflow as tf
13 | from libs.box_utils.coordinate_convert import *
14 |
15 |
def iou_rotate_calculate(boxes1, boxes2, use_gpu=True, gpu_id=0):
    '''
    Build a TF op computing pairwise rotated-box IoUs.

    :param boxes_list1:[N, 8] tensor
    :param boxes_list2: [M, 8] tensor
    :return: [N, M] float32 IoU matrix tensor
    '''
    boxes1 = tf.cast(boxes1, tf.float32)
    boxes2 = tf.cast(boxes2, tf.float32)

    if use_gpu:
        # CUDA kernel for rotated-box overlaps.
        iou_matrix = tf.py_func(rbbx_overlaps,
                                inp=[boxes1, boxes2, gpu_id],
                                Tout=tf.float32)
    else:
        # Cython/OpenCV CPU fallback.
        iou_matrix = tf.py_func(get_iou_matrix,
                                inp=[boxes1, boxes2],
                                Tout=tf.float32)

    # py_func drops static shape info; restore the [N, M] layout.
    iou_matrix = tf.reshape(iou_matrix, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])

    return iou_matrix
38 |
39 |
def iou_rotate_calculate1(boxes1, boxes2, use_gpu=True, gpu_id=0):
    """
    Pairwise IoU of rotated boxes, computed eagerly in numpy.

    :param boxes1: [N, 5] array, each row [x_c, y_c, w, h, theta]
    :param boxes2: [M, 5] array in the same format
    :param use_gpu: route to the CUDA kernel instead of the OpenCV fallback
    :param gpu_id: device id used by the CUDA kernel
    :return: [N, M] np.float32 IoU matrix
    """
    if use_gpu:
        return np.array(rbbx_overlaps(boxes1, boxes2, gpu_id), dtype=np.float32)

    area1 = boxes1[:, 2] * boxes1[:, 3]
    area2 = boxes2[:, 2] * boxes2[:, 3]
    ious = []
    for i, box1 in enumerate(boxes1):
        temp_ious = []
        r1 = ((box1[0], box1[1]), (box1[2], box1[3]), box1[4])
        for j, box2 in enumerate(boxes2):
            r2 = ((box2[0], box2[1]), (box2[2], box2[3]), box2[4])

            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is None:
                temp_ious.append(0.0)
                continue

            order_pts = cv2.convexHull(int_pts, returnPoints=True)
            int_area = cv2.contourArea(order_pts)
            union = area1[i] + area2[j] - int_area
            # Degenerate (zero-area) boxes would make the union 0 and the
            # original code divide by zero; report no overlap instead.
            temp_ious.append(int_area * 1.0 / union if union > 0 else 0.0)
        ious.append(temp_ious)

    return np.array(ious, dtype=np.float32)
70 |
71 |
if __name__ == '__main__':
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = '13'

    # Two pairs of rotated boxes: one overlapping pair, one disjoint pair.
    boxes1 = np.array([[50, 50, 100, 300, 0],
                       [60, 60, 100, 200, 0]], np.float32)
    boxes2 = np.array([[50, 50, 100, 300, -45.],
                       [200, 200, 100, 200, 0.]], np.float32)

    start = time.time()
    with tf.Session() as sess:
        # CPU path; the result is already a numpy array, sess.run is a no-op
        # pass-through here.
        ious = iou_rotate_calculate1(boxes1, boxes2, use_gpu=False)
        print(sess.run(ious))
    print('{}s'.format(time.time() - start))
94 |
95 |
96 |
97 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ship-Detection
2 | Ship Detection based on FPN, horizontal bounding box, rotated bounding box, and head prediction.
3 |
4 | This work is based on Yang xue, etc ([Github](https://github.com/yangxue0827/R2CNN_HEAD_FPN_Tensorflow)). Thanks to their great work. Based on their work, I have made following modifications:
5 |
6 | ## Focal loss
7 |
8 | You need to lower the "FINAL_SCORE_THRESHOLD" value (in the cfgs.py) to see more prediction results.
9 |
10 | ## Random rotation for data augmentation
11 |
12 | Each time we train an image (batch size = 1), we randomly rotate it by an angle within [0, 90]. The experiments reveal that this is an efficient form of regularization.
13 |
14 | Attention: the loss of ship head regression is multiplied by 10 and added into the total loss.
15 |
16 | ## Correction of eval.py (eval_deprecated.py) to EVAL_TEST.py for evaluation
17 |
18 | Since their original evaluation code (eval.py) is not recommended, I have upgraded this as EVAL_TEST.py, in which it includes following concerns:
19 |
20 | ### Inconsistent Coordinates
21 |
22 | 1. Format of coordinates of predicted bounding boxes and ground truth bounding boxes:
23 | fast_rcnn_decode_boxes/gtbox_minAreaRectangle: [ymin, xmin, ymax, xmax]
24 | fast_rcnn_decode_boxes_rotate/gtbox: [y_c, x_c, h, w, theta]
25 | 2. However, when calculating IoUs on horizontal and rotated bounding boxes (EVAL_test.py and iou_rotate.py), it requires the following format:
26 | Rotated: [x_c, y_c, w, h, theta]
27 | Horizontal: [xmin, ymin, xmax, ymax]
28 |
29 | ### Calculating the interpolation performed in all points
30 |
31 | In some images there are more than one detection overlapping a ground truth. For those cases the detection with the highest IOU is taken, discarding the other detections. This rule is applied by the PASCAL VOC 2012 metric. However, in my upgraded implementation ([Reference](https://github.com/DetectionTeamUCAS/R2CNN_Faster-RCNN_Tensorflow)), the detection with the highest confidence is taken. I assume it is a tradeoff between classification and regression.
32 |
33 | ### When evaluating, you need to set the number of images needed to be evaluated manually.
34 |
35 | ### Needs further verification
36 |
37 | ## Change to class-agnostic regression
38 |
39 | Originally, regression and NMS are performed for each class. Due to the limited size of the training data, I change the multi-class regression to class-agnostic regression. In particular, the scores used for NMS are selected according to the highest confidence over all classes corresponding to each instance (each row). However, the values of fast_rcnn_loc_loss and fast_rcnn_loc_rotate_loss are too small, and it may cause abnormal gradient behavior. Therefore, you need to lower the learning rate to continue the training process and further analyze such phenomena according to related works and the Tensorflow debugger.
40 |
41 | ## Dataset
42 |
43 | [HRSC 2016](https://sites.google.com/site/hrsc2016/home)
44 |
45 | ## Reference
46 |
47 | ### [Metrics for object detection](https://github.com/MonsterZhZh/Object-Detection-Metrics)
48 |
49 | ### [Oriented Evaluation Server provided by DOTA](https://github.com/CAPTAIN-WHU/DOTA_devkit)
50 |
51 | ### [Tensorflow debugger(from tensorflow.python import debug as tf_debug)](https://blog.csdn.net/qq_22291287/article/details/82712050)
52 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/label_name_dict/label_dict.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import division, print_function, absolute_import
3 |
4 | from libs.configs import cfgs
5 |
if cfgs.DATASET_NAME == 'HRSC2016':
    # Binary (ship / background) alternative:
    # NAME_LABEL_MAP = {'back_ground': 0, 'ship': 1}
    #
    # Class_ID instance counts on the HRSC_2016 Train set
    # (1748 instances, 26 classes present):
    # 100000001:386  100000002:1    100000003:44   100000004:1
    # 100000005:50   100000006:20   100000007:266  100000008:66
    # 100000009:132  100000010:30   100000011:160  (no 100000012)
    # 100000013:8    (no 100000014) 100000015:51   100000016:99
    # (no 100000017) 100000018:34   100000019:47   100000020:15
    # 100000022:55   100000024:8    100000025:167  100000026:6
    # 100000027:44   100000028:5    100000029:15   100000030:25
    # (no 100000031) 100000032:13   (no 100000033)
    NAME_LABEL_MAP = {
        'back_ground': 0,
        '100000005': 1,
        '100000006': 2,
        '100000007': 3,
        '100000008': 4,
        '100000009': 5,
        '100000010': 6,
        '100000011': 7,
        '100000013': 8,    # 'Kitty'
        '100000015': 9,    # 'Abukuma'
        '100000016': 10,
        '100000018': 11,   # 'USS'
        '100000019': 12,
        '100000020': 13,
        '100000022': 14,
        '100000024': 15,
        '100000025': 16,
        '100000026': 17,
        '100000027': 18,
        '100000028': 19,
        '100000029': 20,
        '100000030': 21,
        '100000032': 22,   # 'Ford_class' / 'Invincible_class' absent
    }
elif cfgs.DATASET_NAME == 'UAV':
    NAME_LABEL_MAP = {
        'back_ground': 0,
        'M41': 1,
        'M603A': 2,
        'M48H': 3,
    }
elif cfgs.DATASET_NAME == 'airplane':
    NAME_LABEL_MAP = {
        'back_ground': 0,
        'airplane': 1
    }
elif cfgs.DATASET_NAME == 'pascal':
    NAME_LABEL_MAP = {
        'back_ground': 0,
        'aeroplane': 1,
        'bicycle': 2,
        'bird': 3,
        'boat': 4,
        'bottle': 5,
        'bus': 6,
        'car': 7,
        'cat': 8,
        'chair': 9,
        'cow': 10,
        'diningtable': 11,
        'dog': 12,
        'horse': 13,
        'motorbike': 14,
        'person': 15,
        'pottedplant': 16,
        'sheep': 17,
        'sofa': 18,
        'train': 19,
        'tvmonitor': 20
    }
else:
    # The original `assert 'please set label dict!'` was a no-op (a non-empty
    # string is always truthy), letting an unknown dataset name fall through
    # to a NameError later. Fail loudly at import time instead.
    raise ValueError(
        'please set label dict for DATASET_NAME: {}'.format(cfgs.DATASET_NAME))
115 |
116 |
def get_label_name_map():
    """Invert NAME_LABEL_MAP: integer label -> class-name string."""
    return {label: name for name, label in NAME_LABEL_MAP.items()}

# Name kept as-is (typo included) for backward compatibility with importers.
LABEl_NAME_MAP = get_label_name_map()
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/coordinate_convert.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import cv2
8 | import numpy as np
9 |
10 |
def forward_convert(coordinate, with_label=True):
    """
    Convert centre-form rotated boxes to 4-corner form.

    :param coordinate: rows of [y_c, x_c, h, w, theta, (label)]
    :param with_label: when True, each row carries a trailing label that is
        copied through to the output.
    :return: np.float32 array of [y1, x1, y2, x2, y3, x3, y4, x4, (label)]
    """
    boxes = []
    # The two original branches duplicated the whole loop body and differed
    # only in the trailing label; handle the label with a single append.
    for rect in coordinate:
        # cv2.boxPoints takes ((cx, cy), (dim_a, dim_b), angle); this project
        # stores boxes as [y, x, h, w, theta], hence the swapped indices.
        box = cv2.boxPoints(((rect[1], rect[0]), (rect[3], rect[2]), rect[4]))
        box = np.reshape(box, [-1, ])
        # Swap each (x, y) corner back to (y, x) order.
        converted = [box[1], box[0], box[3], box[2], box[5], box[4], box[7], box[6]]
        if with_label:
            converted.append(rect[5])
        boxes.append(converted)

    return np.array(boxes, dtype=np.float32)
29 |
30 |
def back_forward_convert(coordinate, with_label=True):
    """
    Convert 4-corner boxes to centre form via cv2.minAreaRect.

    :param coordinate: rows of [x1, y1, x2, y2, x3, y3, x4, y4, (label)]
    :param with_label: default True; carries the trailing label through.
    :return: np.float32 array of [y_c, x_c, h, w, theta, (label)]
    """
    boxes = []
    # The two original branches duplicated the whole loop body and differed
    # only in slicing off / re-appending the label; unify them.
    for rect in coordinate:
        pts = np.int0(rect[:-1] if with_label else rect).reshape([4, 2])
        rect1 = cv2.minAreaRect(pts)

        x, y, w, h, theta = rect1[0][0], rect1[0][1], rect1[1][0], rect1[1][1], rect1[2]
        # Output follows the project-wide [y, x, h, w, theta] convention.
        box = [y, x, h, w, theta]
        if with_label:
            box.append(rect[-1])
        boxes.append(box)

    return np.array(boxes, dtype=np.float32)
58 |
59 |
def get_head_quadrant(head, gtbox):
    """
    Assign each ship-head point to a quadrant relative to its box centre.

    :param head: [[head_x, head_y], ...], one point per ground-truth box
    :param gtbox: rows of [y_c, x_c, h, w, theta, label]
    :return: np.float32 array of quadrant ids in {0, 1, 2, 3}
    """
    head_quadrant = []
    for i, box in enumerate(gtbox):
        # Offset of the head point from the box centre (note the y/x layout
        # of gtbox: box[1] is x_c, box[0] is y_c).
        dx = head[i][0] - box[1]
        dy = head[i][1] - box[0]
        if dx >= 0:
            # Right half-plane: 0 above/on the centre line, 1 below.
            head_quadrant.append(0 if dy >= 0 else 1)
        elif dy <= 0:
            head_quadrant.append(2)
        else:
            head_quadrant.append(3)
    return np.array(head_quadrant, np.float32)
79 |
if __name__ == '__main__':
    # Four labelled boxes sharing one centre with varying h/w and angle.
    coord = np.array([[150, 150, 50, 100, -90, 1],
                      [150, 150, 100, 50, -90, 1],
                      [150, 150, 50, 100, -45, 1],
                      [150, 150, 100, 50, -45, 1]])

    # Unlabelled variants (kept for manual experimentation).
    coord1 = np.array([[150, 150, 100, 50, 0],
                       [150, 150, 100, 50, -90],
                       [150, 150, 100, 50, 45],
                       [150, 150, 100, 50, -45]])

    coord2 = forward_convert(coord)
    print(coord2)

    # One head point per box, placed in each of the four quadrants.
    head_quadrant = get_head_quadrant([[0, 200],
                                       [200, 200],
                                       [200, 100],
                                       [100, 100]], coord)
    print(head_quadrant)
100 |
101 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/sample.xml:
--------------------------------------------------------------------------------
1 |
2 | xxx
3 | 17%4800%7000.tif
4 |
5 | xxxx
6 | IECAS
7 | IECAS
8 | 0
9 |
10 |
11 | IECAS
12 | IECAS
13 |
14 |
15 | 1000
16 | 600
17 | 3
18 |
19 | 0
20 |
38 |
56 |
74 |
92 |
110 |
128 |
129 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/tools/test_rotate.py:
--------------------------------------------------------------------------------
1 | # -*- coding:utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import print_function
5 | from __future__ import division
6 | import sys
7 | sys.path.append('../')
8 |
9 | import tensorflow.contrib.slim as slim
10 | import os
11 | import time
12 | from data.io.read_tfrecord import next_batch
13 | from libs.configs import cfgs
14 | from help_utils import tools
15 | from libs.box_utils.show_box_in_tensor import *
16 | from libs.box_utils.coordinate_convert import back_forward_convert, get_head_quadrant
17 | from libs.box_utils.boxes_utils import get_horizen_minAreaRectangle, get_head
18 |
19 | import numpy as np
20 |
21 |
22 | os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
23 |
24 |
def test_rotate():
    """Draw ground-truth horizontal and rotated boxes (with head quadrant)
    on training images and write the visualisations to disk."""
    with tf.Graph().as_default():
        with tf.name_scope('get_batch'):
            img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
                next_batch(dataset_name=cfgs.DATASET_NAME,
                           batch_size=cfgs.BATCH_SIZE,
                           shortside_len=cfgs.SHORT_SIDE_LEN,
                           is_training=True)

            # Split the head point off the ground-truth tensor, then convert
            # the polygons to [y_c, x_c, h, w, theta, label] form.
            gtboxes_and_label, head = get_head(tf.squeeze(gtboxes_and_label_batch, 0))
            gtboxes_and_label = tf.py_func(back_forward_convert,
                                           inp=[gtboxes_and_label],
                                           Tout=tf.float32)
            gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])

            # Quadrant (0-3) of the head point relative to each box centre.
            head_quadrant = tf.py_func(get_head_quadrant,
                                       inp=[head, gtboxes_and_label],
                                       Tout=tf.float32)
            head_quadrant = tf.reshape(head_quadrant, [-1, 1])

            gtboxes_and_label_minAreaRectangle = get_horizen_minAreaRectangle(gtboxes_and_label)
            gtboxes_and_label_minAreaRectangle = tf.reshape(gtboxes_and_label_minAreaRectangle, [-1, 5])

        with tf.name_scope('draw_gtboxes'):
            gtboxes_in_img = draw_box_with_color(
                img_batch,
                tf.reshape(gtboxes_and_label_minAreaRectangle, [-1, 5])[:, :-1],
                text=tf.shape(gtboxes_and_label_minAreaRectangle)[0])
            gtboxes_rotate_in_img = draw_box_with_color_rotate(
                img_batch,
                tf.reshape(gtboxes_and_label, [-1, 6])[:, :-1],
                text=tf.shape(gtboxes_and_label)[0],
                head=head_quadrant)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            for i in range(650):
                img_gtboxes, img_gtboxes_rotate, img_name = sess.run(
                    [gtboxes_in_img, gtboxes_rotate_in_img, img_name_batch])
                # Drop the leading batch dimension before writing to disk.
                img_gtboxes = np.squeeze(img_gtboxes, axis=0)
                img_gtboxes_rotate = np.squeeze(img_gtboxes_rotate, axis=0)

                print(i)
                cv2.imwrite(cfgs.INFERENCE_SAVE_PATH + '/{}_horizontal_fpn.jpg'.format(str(img_name[0])), img_gtboxes)
                cv2.imwrite(cfgs.INFERENCE_SAVE_PATH + '/{}_rotate_fpn.jpg'.format(str(img_name[0])), img_gtboxes_rotate)

            coord.request_stop()
            coord.join(threads)
79 |
80 |
81 | if __name__ == '__main__':
82 |
83 | test_rotate()
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/lenet.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains a variant of the LeNet model definition."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import tensorflow as tf
22 |
23 | slim = tf.contrib.slim
24 |
25 |
def lenet(images, num_classes=10, is_training=False,
          dropout_keep_prob=0.5,
          prediction_fn=slim.softmax,
          scope='LeNet'):
  """Builds a LeNet-style convolutional classifier.

  The returned logits are unnormalized scores in (-inf, inf); convert to
  probabilities with tf.nn.softmax, or read end_points['Predictions'].

  Args:
    images: a batch of `Tensors`, [batch_size, height, width, channels].
    num_classes: number of classes in the dataset.
    is_training: whether the model is being trained; controls dropout.
    dropout_keep_prob: fraction of activations kept by dropout.
    prediction_fn: function mapping logits to predictions.
    scope: optional variable_scope name.

  Returns:
    logits: pre-softmax activations, [batch_size, num_classes].
    end_points: dict mapping network components to their activations.
  """
  end_points = {}

  with tf.variable_scope(scope, 'LeNet', [images, num_classes]):
    x = slim.conv2d(images, 32, [5, 5], scope='conv1')
    x = slim.max_pool2d(x, [2, 2], 2, scope='pool1')
    x = slim.conv2d(x, 64, [5, 5], scope='conv2')
    x = slim.max_pool2d(x, [2, 2], 2, scope='pool2')
    x = slim.flatten(x)
    end_points['Flatten'] = x

    x = slim.fully_connected(x, 1024, scope='fc3')
    x = slim.dropout(x, dropout_keep_prob, is_training=is_training,
                     scope='dropout3')
    logits = slim.fully_connected(x, num_classes, activation_fn=None,
                                  scope='fc4')

  end_points['Logits'] = logits
  end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

  return logits, end_points
lenet.default_image_size = 28
77 |
78 |
def lenet_arg_scope(weight_decay=0.0):
  """Returns the default arg scope for LeNet layers.

  Args:
    weight_decay: L2 regularization strength applied to conv/fc weights.

  Returns:
    An `arg_scope` that applies truncated-normal initialization, ReLU
    activation and L2 weight regularization to conv2d and
    fully_connected layers.
  """
  regularizer = slim.l2_regularizer(weight_decay)
  initializer = tf.truncated_normal_initializer(stddev=0.1)
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=regularizer,
                      weights_initializer=initializer,
                      activation_fn=tf.nn.relu) as sc:
    return sc
94 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/make_anchor.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 |
def enum_scales(base_anchor, anchor_scales, name='enum_scales'):
    '''
    Multiply one base anchor by every scale.

    :param base_anchor: [y_center, x_center, h, w]
    :param anchor_scales: different scales, like [0.5, 1., 2.0]
    :return: base anchors in different scales, one row per scale,
        e.g. [[0, 0, 128, 128], [0, 0, 256, 256], [0, 0, 512, 512]]
    '''
    with tf.variable_scope(name):
        # column vector of scales broadcasts against the 4-vector anchor
        scale_column = tf.reshape(anchor_scales, [-1, 1])
        return base_anchor * scale_column
22 |
23 |
def enum_ratios(anchors, anchor_ratios, name='enum_ratios'):
    '''
    Stretch each scaled base anchor to every aspect ratio (ratio = h / w).

    :param anchors: base anchors in different scales, rows [y, x, h, w]
    :param anchor_ratios: desired h/w ratios
    :return: rows [0, 0, w, h], one per (scale, ratio) combination
    '''
    with tf.variable_scope(name):
        # base anchors are square, so h == w here
        _, _, heights, widths = tf.unstack(anchors, axis=1)
        ratio_root = tf.expand_dims(tf.sqrt(anchor_ratios), axis=1)
        # dividing w and multiplying h by sqrt(ratio) keeps the area constant
        widths = tf.reshape(widths / ratio_root, [-1])
        heights = tf.reshape(heights * ratio_root, [-1])

        count = tf.shape(widths)[0]
        zeros = tf.zeros([count, ])
        return tf.transpose(tf.stack([zeros, zeros, widths, heights]))
45 |
46 |
def make_anchors(base_anchor_size, anchor_scales, anchor_ratios, featuremaps_height,
                 featuremaps_width, stride, name='make_anchors'):

    '''
    Generate every anchor for one feature-map level.

    :param base_anchor_size: base anchor size in different scales
    :param anchor_scales: anchor scales
    :param anchor_ratios: anchor ratios
    :param featuremaps_width: width of featuremaps
    :param featuremaps_height: height of featuremaps
    :param stride: feature-map stride relative to the input image
    :return: anchors of shape [w * h * len(anchor_scales) * len(anchor_ratios), 4];
        rows are [ymin, xmin, ymax, xmax] in input-image coordinates
    '''

    with tf.variable_scope(name):
        # [y_center, x_center, h, w]
        base_anchor = tf.constant([0, 0, base_anchor_size, base_anchor_size], dtype=tf.float32)
        base_anchors = enum_ratios(enum_scales(base_anchor, anchor_scales), anchor_ratios)

        # enum_ratios returns rows of the form [0, 0, w, h]
        _, _, ws, hs = tf.unstack(base_anchors, axis=1)

        # anchor centres lie on the stride grid of the input image
        x_centers = tf.range(tf.cast(featuremaps_width, tf.float32), dtype=tf.float32) * stride
        y_centers = tf.range(tf.cast(featuremaps_height, tf.float32), dtype=tf.float32) * stride

        x_centers, y_centers = tf.meshgrid(x_centers, y_centers)

        # pair every (w, h) with every grid centre (cartesian product)
        ws, x_centers = tf.meshgrid(ws, x_centers)
        hs, y_centers = tf.meshgrid(hs, y_centers)

        box_centers = tf.stack([y_centers, x_centers], axis=2)
        box_centers = tf.reshape(box_centers, [-1, 2])

        box_sizes = tf.stack([hs, ws], axis=2)
        box_sizes = tf.reshape(box_sizes, [-1, 2])
        # [yc - h/2, xc - w/2, yc + h/2, xc + w/2]
        final_anchors = tf.concat([box_centers - 0.5*box_sizes, box_centers+0.5*box_sizes], axis=1)
        return final_anchors
81 |
if __name__ == '__main__':
    # Smoke test: build anchors for a 38x50 feature map and print them.
    scales = tf.constant([1.0], dtype=tf.float32)
    ratios = tf.constant([0.5, 1.0, 2.0], dtype=tf.float32)
    # print(enum_scales(tf.constant([256], dtype=tf.float32), scales))
    anchor_tensor = make_anchors(256, scales, ratios,
                                 featuremaps_height=38,
                                 featuremaps_width=50, stride=16)

    sess = tf.Session()
    print(sess.run(anchor_tensor))
96 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/mobilenet_v1.md:
--------------------------------------------------------------------------------
1 | # MobileNet_v1
2 |
3 | [MobileNets](https://arxiv.org/abs/1704.04861) are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as Inception, are used. MobileNets can be run efficiently on mobile devices with [TensorFlow Mobile](https://www.tensorflow.org/mobile/).
4 |
5 | MobileNets trade off between latency, size and accuracy while comparing favorably with popular models from the literature.
6 |
7 | 
8 |
9 | # Pre-trained Models
10 |
11 | Choose the right MobileNet model to fit your latency and size budget. The size of the network in memory and on disk is proportional to the number of parameters. The latency and power usage of the network scales with the number of Multiply-Accumulates (MACs) which measures the number of fused Multiplication and Addition operations. These MobileNet models have been trained on the
12 | [ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/)
13 | image classification dataset. Accuracies were computed by evaluating using a single image crop.
14 |
15 | Model Checkpoint | Million MACs | Million Parameters | Top-1 Accuracy| Top-5 Accuracy |
16 | :----:|:------------:|:----------:|:-------:|:-------:|
17 | [MobileNet_v1_1.0_224](http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz)|569|4.24|70.7|89.5|
18 | [MobileNet_v1_1.0_192](http://download.tensorflow.org/models/mobilenet_v1_1.0_192_2017_06_14.tar.gz)|418|4.24|69.3|88.9|
19 | [MobileNet_v1_1.0_160](http://download.tensorflow.org/models/mobilenet_v1_1.0_160_2017_06_14.tar.gz)|291|4.24|67.2|87.5|
20 | [MobileNet_v1_1.0_128](http://download.tensorflow.org/models/mobilenet_v1_1.0_128_2017_06_14.tar.gz)|186|4.24|64.1|85.3|
21 | [MobileNet_v1_0.75_224](http://download.tensorflow.org/models/mobilenet_v1_0.75_224_2017_06_14.tar.gz)|317|2.59|68.4|88.2|
22 | [MobileNet_v1_0.75_192](http://download.tensorflow.org/models/mobilenet_v1_0.75_192_2017_06_14.tar.gz)|233|2.59|67.4|87.3|
23 | [MobileNet_v1_0.75_160](http://download.tensorflow.org/models/mobilenet_v1_0.75_160_2017_06_14.tar.gz)|162|2.59|65.2|86.1|
24 | [MobileNet_v1_0.75_128](http://download.tensorflow.org/models/mobilenet_v1_0.75_128_2017_06_14.tar.gz)|104|2.59|61.8|83.6|
25 | [MobileNet_v1_0.50_224](http://download.tensorflow.org/models/mobilenet_v1_0.50_224_2017_06_14.tar.gz)|150|1.34|64.0|85.4|
26 | [MobileNet_v1_0.50_192](http://download.tensorflow.org/models/mobilenet_v1_0.50_192_2017_06_14.tar.gz)|110|1.34|62.1|84.0|
27 | [MobileNet_v1_0.50_160](http://download.tensorflow.org/models/mobilenet_v1_0.50_160_2017_06_14.tar.gz)|77|1.34|59.9|82.5|
28 | [MobileNet_v1_0.50_128](http://download.tensorflow.org/models/mobilenet_v1_0.50_128_2017_06_14.tar.gz)|49|1.34|56.2|79.6|
29 | [MobileNet_v1_0.25_224](http://download.tensorflow.org/models/mobilenet_v1_0.25_224_2017_06_14.tar.gz)|41|0.47|50.6|75.0|
30 | [MobileNet_v1_0.25_192](http://download.tensorflow.org/models/mobilenet_v1_0.25_192_2017_06_14.tar.gz)|34|0.47|49.0|73.6|
31 | [MobileNet_v1_0.25_160](http://download.tensorflow.org/models/mobilenet_v1_0.25_160_2017_06_14.tar.gz)|21|0.47|46.0|70.7|
32 | [MobileNet_v1_0.25_128](http://download.tensorflow.org/models/mobilenet_v1_0.25_128_2017_06_14.tar.gz)|14|0.47|41.3|66.2|
33 |
34 |
35 | Here is an example of how to download the MobileNet_v1_1.0_224 checkpoint:
36 |
37 | ```shell
38 | $ CHECKPOINT_DIR=/tmp/checkpoints
39 | $ mkdir ${CHECKPOINT_DIR}
40 | $ wget http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz
41 | $ tar -xvf mobilenet_v1_1.0_224_2017_06_14.tar.gz
42 | $ mv mobilenet_v1_1.0_224.ckpt.* ${CHECKPOINT_DIR}
43 | $ rm mobilenet_v1_1.0_224_2017_06_14.tar.gz
44 | ```
45 | More information on integrating MobileNets into your project can be found at the [TF-Slim Image Classification Library](https://github.com/tensorflow/models/blob/master/slim/README.md).
46 |
47 | To get started running models on-device go to [TensorFlow Mobile](https://www.tensorflow.org/mobile/).
48 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/read_tfrecord.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import print_function
5 | from __future__ import division
6 |
7 | import tensorflow as tf
8 | import os
9 | from data.io import image_preprocess
10 |
11 |
def read_single_example_and_decode(filename_queue):
    """Deserialize one tf.Example from the filename queue.

    Returns (img_name, img, gtboxes_and_label, num_objects); `img` is a
    uint8 tensor [height, width, 3] and `gtboxes_and_label` is int32
    [-1, 11] — 11 values per object (presumably 8 polygon coords plus
    head point and label; confirm against the tfrecord writer).
    """
    # tfrecord_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    # reader = tf.TFRecordReader(options=tfrecord_options)
    reader = tf.TFRecordReader()

    _, serialized = reader.read(filename_queue)

    feature_spec = {
        'img_name': tf.FixedLenFeature([], tf.string),
        'img_height': tf.FixedLenFeature([], tf.int64),
        'img_width': tf.FixedLenFeature([], tf.int64),
        'img': tf.FixedLenFeature([], tf.string),
        'gtboxes_and_label': tf.FixedLenFeature([], tf.string),
        'num_objects': tf.FixedLenFeature([], tf.int64)
    }
    parsed = tf.parse_single_example(serialized=serialized, features=feature_spec)

    img_name = parsed['img_name']
    height = tf.cast(parsed['img_height'], tf.int32)
    width = tf.cast(parsed['img_width'], tf.int32)

    img = tf.decode_raw(parsed['img'], tf.uint8)
    img = tf.reshape(img, shape=[height, width, 3])

    gtboxes_and_label = tf.decode_raw(parsed['gtboxes_and_label'], tf.int32)
    gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 11])

    num_objects = tf.cast(parsed['num_objects'], tf.int32)
    return img_name, img, gtboxes_and_label, num_objects
43 |
44 |
def read_and_prepocess_single_img(filename_queue, shortside_len, is_training):
    """Decode one example and apply mean subtraction plus resizing.

    Training additionally applies a random rotation (before the resize)
    and a random horizontal flip; evaluation only resizes the short side.
    """
    img_name, img, gtboxes_and_label, num_objects = read_single_example_and_decode(filename_queue)
    # img = tf.image.per_image_standardization(img)
    img = tf.cast(img, tf.float32)
    # per-channel mean subtraction — presumably ImageNet BGR means
    # matching the pretrained backbone; confirm
    img -= tf.constant([103.939, 116.779, 123.68])

    if is_training:
        img, gtboxes_and_label = image_preprocess.rotate_point(
            img_tensor=img, gtboxes_and_label=gtboxes_and_label)
        img, gtboxes_and_label = image_preprocess.short_side_resize(
            img_tensor=img, gtboxes_and_label=gtboxes_and_label,
            target_shortside_len=shortside_len)
        img, gtboxes_and_label = image_preprocess.random_flip_left_right(
            img_tensor=img, gtboxes_and_label=gtboxes_and_label)
    else:
        img, gtboxes_and_label = image_preprocess.short_side_resize(
            img_tensor=img, gtboxes_and_label=gtboxes_and_label,
            target_shortside_len=shortside_len)

    return img_name, img, gtboxes_and_label, num_objects
62 |
63 |
def next_batch(dataset_name, batch_size, shortside_len, is_training):
    """Build the input pipeline and return one batch of decoded examples.

    :param dataset_name: which dataset's tfrecords to read; must be one
        of the names listed below.
    :param batch_size: number of examples per batch.
    :param shortside_len: target length of the image's shorter side.
    :param is_training: True reads the '*_train*' records (with
        augmentation), False reads '*_test*'.
    :return: (img_name_batch, img_batch, gtboxes_and_label_batch,
        num_obs_batch) as produced by tf.train.batch (dynamic padding on).
    :raises ValueError: if `dataset_name` is unknown.
    """
    allowed = ['UAV', 'shapan', 'airplane', 'SHIP', 'HRSC2016', 'spacenet', 'pascal', 'coco']
    if dataset_name not in allowed:
        # Fix: the old message claimed only pascal/coco were valid,
        # although eight datasets are accepted — report the real list.
        raise ValueError('dataset name must be one of {}, got {!r}'.format(allowed, dataset_name))

    split_glob = '_train*' if is_training else '_test*'
    pattern = os.path.join('../data/tfrecords', dataset_name + split_glob)

    print('tfrecord path is -->', os.path.abspath(pattern))
    filename_tensorlist = tf.train.match_filenames_once(pattern)

    filename_queue = tf.train.string_input_producer(filename_tensorlist)

    img_name, img, gtboxes_and_label, num_obs = read_and_prepocess_single_img(
        filename_queue, shortside_len, is_training=is_training)
    img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch = \
        tf.train.batch(
            [img_name, img, gtboxes_and_label, num_obs],
            batch_size=batch_size,
            capacity=100,
            num_threads=16,
            dynamic_pad=True)
    return img_name_batch, img_batch, gtboxes_and_label_batch, num_obs_batch
88 |
89 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/update.sample:
--------------------------------------------------------------------------------
#!/bin/sh
#
# An example hook script that blocks unannotated tags from entering.
# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
#
# To enable this hook, rename this file to "update".
#
# Config
# ------
# hooks.allowunannotated
#   This boolean sets whether unannotated tags will be allowed into the
#   repository. By default they won't be.
# hooks.allowdeletetag
#   This boolean sets whether deleting tags will be allowed in the
#   repository. By default they won't be.
# hooks.allowmodifytag
#   This boolean sets whether a tag may be modified after creation. By default
#   it won't be.
# hooks.allowdeletebranch
#   This boolean sets whether deleting branches will be allowed in the
#   repository. By default they won't be.
# hooks.denycreatebranch
#   This boolean sets whether remotely creating branches will be denied
#   in the repository. By default this is allowed.
#
# NOTE(review): the usage strings below appear garbled — angle-bracket
# placeholders (e.g. <ref> <oldrev> <newrev>) seem to have been stripped
# at some point. Left byte-identical here; compare against git's stock
# update.sample to restore them.

# --- Command line
refname="$1"
oldrev="$2"
newrev="$3"

# --- Safety check: this hook must be invoked by git, which sets GIT_DIR.
if [ -z "$GIT_DIR" ]; then
	echo "Don't run this script from the command line." >&2
	echo " (if you want, you could supply GIT_DIR then run" >&2
	echo "  $0 [ )" >&2
	exit 1
fi

# All three positional arguments are required.
if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
	echo "usage: $0 ][ " >&2
	exit 1
fi

# --- Config
allowunannotated=$(git config --bool hooks.allowunannotated)
allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
denycreatebranch=$(git config --bool hooks.denycreatebranch)
allowdeletetag=$(git config --bool hooks.allowdeletetag)
allowmodifytag=$(git config --bool hooks.allowmodifytag)

# check for no description
# Reject pushes while the repository description is still the default.
projectdesc=$(sed -e '1q' "$GIT_DIR/description")
case "$projectdesc" in
"Unnamed repository"* | "")
	echo "*** Project description file hasn't been set" >&2
	exit 1
	;;
esac

# --- Check types
# if $newrev is 0000...0000, it's a commit to delete a ref.
zero="0000000000000000000000000000000000000000"
if [ "$newrev" = "$zero" ]; then
	newrev_type=delete
else
	# otherwise classify the new object (commit, tag, ...)
	newrev_type=$(git cat-file -t $newrev)
fi

# Dispatch on (ref namespace, object type) to apply the policy above.
case "$refname","$newrev_type" in
	refs/tags/*,commit)
		# un-annotated tag
		short_refname=${refname##refs/tags/}
		if [ "$allowunannotated" != "true" ]; then
			echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
			echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
			exit 1
		fi
		;;
	refs/tags/*,delete)
		# delete tag
		if [ "$allowdeletetag" != "true" ]; then
			echo "*** Deleting a tag is not allowed in this repository" >&2
			exit 1
		fi
		;;
	refs/tags/*,tag)
		# annotated tag
		if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
		then
			echo "*** Tag '$refname' already exists." >&2
			echo "*** Modifying a tag is not allowed in this repository." >&2
			exit 1
		fi
		;;
	refs/heads/*,commit)
		# branch
		if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then
			echo "*** Creating a branch is not allowed in this repository" >&2
			exit 1
		fi
		;;
	refs/heads/*,delete)
		# delete branch
		if [ "$allowdeletebranch" != "true" ]; then
			echo "*** Deleting a branch is not allowed in this repository" >&2
			exit 1
		fi
		;;
	refs/remotes/*,commit)
		# tracking branch
		;;
	refs/remotes/*,delete)
		# delete tracking branch
		if [ "$allowdeletebranch" != "true" ]; then
			echo "*** Deleting a tracking branch is not allowed in this repository" >&2
			exit 1
		fi
		;;
	*)
		# Anything else (is there anything else?)
		echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2
		exit 1
		;;
esac

# --- Finished
exit 0
129 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/network_factory.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from __future__ import absolute_import
4 |
5 | import tensorflow as tf
6 | import tensorflow.contrib.slim as slim
7 |
8 | import os,sys
9 | sys.path.insert(0, '../../')
10 |
11 | from libs.networks.slim_nets import resnet_v1
12 | from libs.networks.slim_nets import mobilenet_v1
13 | from libs.networks.slim_nets import inception_resnet_v2
14 | from libs.networks.slim_nets import vgg
15 |
16 | # FLAGS = get_flags_byname()
17 |
18 |
def get_flags_byname(net_name):
    """Return the FLAGS config object for the given backbone name.

    The matching config module is imported lazily so that only the
    selected backbone's configuration is loaded.

    :param net_name: one of 'resnet_v1_50', 'mobilenet_224',
        'inception_resnet', 'vgg16', 'resnet_v1_101'.
    :return: the config module's FLAGS object.
    :raises ValueError: if `net_name` is not a supported backbone.
    """
    supported = ('resnet_v1_50', 'mobilenet_224', 'inception_resnet', 'vgg16', 'resnet_v1_101')
    if net_name not in supported:
        # Fix: the old message advertised 'resnet_50', which was never an
        # accepted value — report the names that are actually allowed.
        raise ValueError("not include network: {}, we allow {}".format(
            net_name, ', '.join(supported)))

    if net_name == 'resnet_v1_50':
        from configs import config_resnet_50
        return config_resnet_50.FLAGS
    elif net_name == 'mobilenet_224':
        from configs import config_mobilenet_224
        return config_mobilenet_224.FLAGS
    elif net_name == 'inception_resnet':
        from configs import config_inception_resnet
        return config_inception_resnet.FLAGS
    elif net_name == 'vgg16':
        from configs import config_vgg16
        return config_vgg16.FLAGS
    else:  # 'resnet_v1_101' — the only remaining supported name
        from configs import config_res101
        return config_res101.FLAGS
40 |
41 |
def get_network_byname(net_name,
                       inputs,
                       num_classes=None,
                       is_training=True,
                       global_pool=True,
                       output_stride=None,
                       spatial_squeeze=True):
    """Construct the chosen backbone and return (logits, end_points).

    Only the two ResNet-v1 variants are currently wired up; the
    inception_resnet and vgg16 paths were disabled upstream. For any
    other name the function returns None (matching the previous
    fall-through behaviour).
    """
    resnet_builders = {
        'resnet_v1_50': resnet_v1.resnet_v1_50,
        'resnet_v1_101': resnet_v1.resnet_v1_101,
    }
    builder = resnet_builders.get(net_name)
    if builder is None:
        return None

    FLAGS = get_flags_byname(net_name)
    with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=FLAGS.weight_decay)):
        logits, end_points = builder(inputs=inputs,
                                     num_classes=num_classes,
                                     is_training=is_training,
                                     global_pool=global_pool,
                                     output_stride=output_stride,
                                     spatial_squeeze=spatial_squeeze)
    return logits, end_points
91 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/anchor_utils_pyfunc.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | import cv2
9 | from help_utils.help_utils import show_boxes_in_img
10 |
11 |
def make_anchors(base_anchor_size, anchor_scales, anchor_ratios, featuremaps_height,
                 featuremaps_width, stride
                 ):
    '''
    Generate all anchors for one feature map as a float32 array.

    :param base_anchor_size: side length of the square base anchor
    :param anchor_scales: multiplicative scales applied to the base anchor
    :param anchor_ratios: h/w aspect ratios
    :param featuremaps_width: feature-map width
    :param featuremaps_height: feature-map height
    :param stride: feature-map stride relative to the input image
    :return: anchors of shape [w*h*k, 4] (k = len(scales)*len(ratios)),
        rows [ymin, xmin, ymax, xmax] in input-image coordinates
    '''
    base_anchor = [0, 0, base_anchor_size, base_anchor_size]  # [y_c, x_c, h, w]
    per_location = enum_ratios(enum_scales(base_anchor, anchor_scales),
                               anchor_ratios)

    # per-location rows are [0, 0, w, h]
    widths, heights = per_location[:, 2], per_location[:, 3]

    grid_x = np.arange(featuremaps_width) * stride
    grid_y = np.arange(featuremaps_height) * stride
    grid_x, grid_y = np.meshgrid(grid_x, grid_y)

    # pair every (w, h) with every grid centre (cartesian product)
    widths, grid_x = np.meshgrid(widths, grid_x)
    heights, grid_y = np.meshgrid(heights, grid_y)

    centers = np.stack([grid_y, grid_x], axis=2).reshape(-1, 2)
    sizes = np.stack([heights, widths], axis=2).reshape(-1, 2)

    # [yc - h/2, xc - w/2, yc + h/2, xc + w/2]
    corners = np.concatenate([centers - 0.5 * sizes, centers + 0.5 * sizes], axis=1)
    return corners.astype(dtype=np.float32)


def enum_scales(base_anchor, anchor_scales):
    '''
    Multiply the base anchor (centre at the origin) by each scale.

    :param base_anchor: [y_center, x_center, h, w] --> e.g. [0, 0, 256, 256]
    :param anchor_scales: e.g. [0.5, 1., 2.0]
    :return: one scaled anchor row per scale, shape [len(scales), 4]
    '''
    scale_column = np.asarray(anchor_scales).reshape(-1, 1)
    return np.array(base_anchor) * scale_column


def enum_ratios(anchors, anchor_ratios):
    '''
    Stretch square anchors to each aspect ratio (h / w = ratio).

    :param anchors: scaled base anchors, rows [y, x, h, w]
    :param anchor_ratios: desired h/w ratios
    :return: rows [0, 0, w, h], one per (scale, ratio) combination
    '''
    widths = anchors[:, 3]  # for base anchor, w == h
    heights = anchors[:, 2]
    ratio_root = np.sqrt(np.array(anchor_ratios))[:, np.newaxis]
    # w / sqrt(r) and h * sqrt(r) keep the anchor area constant
    widths = np.reshape(widths / ratio_root, [-1])
    heights = np.reshape(heights * ratio_root, [-1])
    assert widths.shape == heights.shape, 'h shape is not equal w shape'

    count = widths.shape[0]
    zeros = np.zeros((count, 1))
    return np.hstack([zeros, zeros,
                      widths[:, np.newaxis],
                      heights[:, np.newaxis]])
83 |
84 |
def filter_outside_boxes(anchors, img_h, img_w):
    '''
    Select anchors lying strictly inside the image.

    :param anchors: [-1, 4] ... [ymin, xmin, ymax, xmax]
    :param img_h: image height
    :param img_w: image width
    :return: 1-D array of row indices whose box is fully inside
        the open interval (0, img_h) x (0, img_w)
    '''
    ymin, xmin, ymax, xmax = (anchors[:, col] for col in range(4))
    inside = ((ymin > 0) & (ymin < img_h)
              & (xmin > 0) & (xmin < img_w)
              & (ymax > 0) & (ymax < img_h)
              & (xmax > 0) & (xmax < img_w))
    return np.where(inside)[0]
102 |
103 |
def show_anchors_in_img(anchors):
    """Draw `anchors` on the fixed sample image '1.jpg' and display it.

    Blocks until a key is pressed in the OpenCV window.
    """
    canvas = cv2.imread('1.jpg')
    canvas = cv2.resize(canvas, (800, 600), interpolation=cv2.INTER_AREA)
    canvas = show_boxes_in_img(canvas, anchors)

    cv2.imshow('resize_img', canvas)
    cv2.waitKey(0)
111 |
if __name__ == '__main__':
    # Smoke test: print scaled base anchors, then visualise the anchors
    # that fall fully inside a 600x800 image.
    print(enum_scales([0, 0, 256, 256], [0.5, 1.0, 2.0]))
    print("_______________")
    all_anchors = make_anchors(256,
                               [1.0],
                               [0.5, 1.0, 2.0],
                               featuremaps_height=38,
                               featuremaps_width=50,
                               stride=16)
    keep = filter_outside_boxes(all_anchors, img_h=600, img_w=800)
    inside = all_anchors[keep]
    # append a dummy score column so show_boxes_in_img gets [-1, 5] rows
    show_anchors_in_img(np.column_stack([inside, np.ones(shape=(inside.shape[0], 1))]))
123 |
124 |
125 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/losses/losses.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 | from tensorflow.python.ops import array_ops
9 |
10 |
def l1_smooth_losses(predict_boxes, gtboxes, object_weights, classes_weights=None):
    '''
    Smooth-L1 (Huber-style) box regression loss, averaged over the minibatch.

    :param predict_boxes: [minibatch_size, -1]
    :param gtboxes: [minibatch_size, -1]
    :param object_weights: [minibatch_size, ]. 1.0 represent object, 0.0 represent others(ignored or background)
    :param classes_weights: optional [minibatch_size, 4*num_classes]
        per-coordinate mask used in the fast-rcnn stage; None in the RPN stage
    :return: scalar mean loss
    '''

    diff = predict_boxes - gtboxes
    # cast after abs so integer inputs (as in test_smoothl1) are accepted
    abs_diff = tf.cast(tf.abs(diff), tf.float32)

    if classes_weights is None:
        '''
        first_stage:
        predict_boxes :[minibatch_size, 4]
        gtboxes: [minibatchs_size, 4]
        '''
        # 0.5*x^2 for |x| < 1, |x| - 0.5 otherwise; sum over coordinates,
        # then zero out non-object rows via object_weights
        anchorwise_smooth_l1norm = tf.reduce_sum(
            tf.where(tf.less(abs_diff, 1), 0.5 * tf.square(abs_diff), abs_diff - 0.5),
            axis=1) * object_weights
    else:
        '''
        fast_rcnn:
        predict_boxes: [minibatch_size, 4*num_classes]
        gtboxes: [minibatch_size, 4*num_classes]
        classes_weights : [minibatch_size, 4*num_classes]
        '''
        # same Huber form, but each coordinate is additionally masked by
        # classes_weights before summing
        anchorwise_smooth_l1norm = tf.reduce_sum(
            tf.where(tf.less(abs_diff, 1), 0.5*tf.square(abs_diff)*classes_weights,
                     (abs_diff - 0.5)*classes_weights),
            axis=1)*object_weights
    return tf.reduce_mean(anchorwise_smooth_l1norm, axis=0)  # reduce mean
44 |
45 |
def weighted_softmax_cross_entropy_loss(predictions, labels, weights):
    '''
    Softmax cross-entropy averaged over the rows selected by `weights`.

    :param predictions: logits
    :param labels: targets, same leading shape as `predictions`
    :param weights: [N, ] 1 -> should be sampled , 0 -> not should be sampled
    :return: scalar mean loss over the weighted rows.
        NOTE(review): divides by sum(weights) — produces NaN/Inf if all
        weights are zero; callers presumably guarantee at least one
        sampled row. Confirm before reuse.
    '''
    row_losses = tf.nn.softmax_cross_entropy_with_logits(logits=predictions,
                                                         labels=labels)
    weighted_total = tf.reduce_sum(row_losses * weights)
    return weighted_total / tf.reduce_sum(weights)
57 | weighted_cross_ent = tf.reduce_sum(per_row_cross_ent * weights)
58 | return weighted_cross_ent / tf.reduce_sum(weights)
59 |
def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
    """Compute focal loss for predictions.
    Multi-labels Focal loss formula:
        FL = -alpha * (z-p)^gamma * log(p) -(1-alpha) * p^gamma * log(1-p)
    ,which alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: A float tensor of shape [batch_size, num_anchors].
        NOTE(review): this parameter is never used by the implementation.
      alpha: A scalar tensor for focal loss alpha hyper-parameter
      gamma: A scalar tensor for focal loss gamma hyper-parameter
    Returns:
      loss: A (scalar) tensor representing the value of the loss function
        (summed, not averaged, over all entries)
    """
    sigmoid_p = tf.nn.sigmoid(prediction_tensor)
    zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)

    # For positive prediction, only need consider front part loss, back part is 0;
    # target_tensor > zeros <=> z=1, so positive coefficient = z - p.
    pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)

    # For negative prediction, only need consider back part loss, front part is 0;
    # target_tensor > zeros <=> z=1, so negative coefficient = 0.
    neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
    # probabilities are clipped to [1e-8, 1] to keep log() finite
    per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
    return tf.reduce_sum(per_entry_cross_ent)
89 |
def test_smoothl1():
    """Smoke test: run l1_smooth_losses on a tiny hand-made batch and print it."""
    preds = tf.constant([[1, 1, 2, 2],
                         [2, 2, 2, 2],
                         [3, 3, 3, 3]])
    gts = tf.constant([[1, 1, 1, 1],
                       [2, 1, 1, 1],
                       [3, 3, 2, 1]])

    loss = l1_smooth_losses(preds, gts, [1, 1, 1])

    with tf.Session() as sess:
        print(sess.run(loss))


if __name__ == '__main__':
    test_smoothl1()
106 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/nms_rotate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | import cv2
9 | from libs.configs import cfgs
10 | import tensorflow as tf
11 | if cfgs.ROTATE_NMS_USE_GPU:
12 | from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms
13 |
14 |
def nms_rotate(decode_boxes, scores, iou_threshold, max_output_size,
               use_angle_condition=False, angle_threshold=0, use_gpu=True, gpu_id=0):
    """
    Non-maximum suppression over rotated boxes; dispatches to a GPU or CPU backend.

    :param decode_boxes: rows are indexed as [y_c, x_c, h, w, theta] -- both
        backends swap columns 0/1 and 2/3 before use (see nms_rotate_gpu's
        unstack and nms_rotate_cpu's indexing), so the original docstring's
        [x_c, y_c, w, h, theta] described the post-swap order, not the input.
    :param scores: scores of boxes
    :param iou_threshold: iou threshold (0.7 or 0.5)
    :param max_output_size: max number of output
    :param use_angle_condition: forwarded to the GPU backend
    :param angle_threshold: forwarded to the GPU backend as angle_gap_threshold
    :param use_gpu: True -> CUDA kernel via nms_rotate_gpu, else CPU py_func
    :param gpu_id: CUDA device id for the GPU backend
    :return: the remaining index of boxes (int64 tensor)
    """

    if use_gpu:
        keep = nms_rotate_gpu(boxes_list=decode_boxes,
                              scores=scores,
                              iou_threshold=iou_threshold,
                              angle_gap_threshold=angle_threshold,
                              use_angle_condition=use_angle_condition,
                              device_id=gpu_id)

        # The GPU kernel does not cap its output, so truncate here.
        keep = tf.cond(
            tf.greater(tf.shape(keep)[0], max_output_size),
            true_fn=lambda: tf.slice(keep, [0], [max_output_size]),
            false_fn=lambda: keep)

    else:
        # CPU path enforces max_output_size inside nms_rotate_cpu itself.
        keep = tf.py_func(nms_rotate_cpu,
                          inp=[decode_boxes, scores, iou_threshold, max_output_size],
                          Tout=tf.int64)
    return keep
43 |
44 |
def nms_rotate_cpu(boxes, scores, iou_threshold, max_output_size):
    """
    Greedy NMS over rotated rectangles on the CPU (numpy + OpenCV).

    :param boxes: [N, 5] array; rows indexed here as [y_c, x_c, h, w, theta]
        (columns 0/1 and 2/3 are swapped below into OpenCV's
        ((x, y), (w, h), angle) rotated-rect format)
    :param scores: [N, ] confidence per box
    :param iou_threshold: suppress a candidate whose IoU with a kept box
        reaches this value
    :param max_output_size: stop once this many boxes have been kept
    :return: int64 array of kept indices, in descending-score order
    """
    keep = []

    order = scores.argsort()[::-1]   # indices by descending score
    num = boxes.shape[0]

    # np.int was removed in NumPy >= 1.24; use an explicit dtype.
    suppressed = np.zeros((num), dtype=np.int32)

    for _i in range(num):
        if len(keep) >= max_output_size:
            break

        i = order[_i]
        if suppressed[i] == 1:
            continue
        keep.append(i)
        r1 = ((boxes[i, 1], boxes[i, 0]), (boxes[i, 3], boxes[i, 2]), boxes[i, 4])
        area_r1 = boxes[i, 2] * boxes[i, 3]
        for _j in range(_i + 1, num):
            j = order[_j]
            # BUG FIX: the original tested suppressed[i] (the box just kept,
            # always 0) instead of the candidate suppressed[j], so already
            # suppressed candidates were pointlessly re-intersected each round.
            if suppressed[j] == 1:
                continue
            r2 = ((boxes[j, 1], boxes[j, 0]), (boxes[j, 3], boxes[j, 2]), boxes[j, 4])
            area_r2 = boxes[j, 2] * boxes[j, 3]
            inter = 0.0

            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is not None:
                order_pts = cv2.convexHull(int_pts, returnPoints=True)

                int_area = cv2.contourArea(order_pts)

                # IoU with an epsilon guard against zero-area unions.
                inter = int_area * 1.0 / (area_r1 + area_r2 - int_area + cfgs.EPSILON)

            if inter >= iou_threshold:
                suppressed[j] = 1

    return np.array(keep, np.int64)
84 |
85 |
def nms_rotate_gpu(boxes_list, scores, iou_threshold, use_angle_condition=False, angle_gap_threshold=0, device_id=0):
    """Rotated NMS on the GPU via the rotate_gpu_nms CUDA kernel.

    Rows of boxes_list are [y_c, x_c, h, w, theta]; they are reordered to
    [x_c, y_c, w, h, theta] and concatenated with scores before being handed
    to the kernel. NOTE(review): angle_gap_threshold is never used -- both
    branches run the same kernel and differ only in the output reshape.
    """
    y_c, x_c, h, w, theta = tf.unstack(boxes_list, axis=1)
    reordered = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))
    det_tensor = tf.concat([reordered, tf.expand_dims(scores, axis=1)], axis=1)

    keep = tf.py_func(rotate_gpu_nms,
                      inp=[det_tensor, iou_threshold, device_id],
                      Tout=tf.int64)

    if use_angle_condition:
        return keep
    return tf.reshape(keep, [-1])
104 |
105 |
if __name__ == '__main__':
    # Smoke test: four boxes, the first two heavily overlapping, one rotated
    # 45 degrees, one far away -- run NMS and print the surviving indices.
    boxes = np.array([[50, 50, 100, 100, 0],
                      [60, 60, 100, 100, 0],
                      [50, 50, 100, 100, -45.],
                      [200, 200, 100, 100, 0.]])

    scores = np.array([0.99, 0.88, 0.66, 0.77])

    keep = nms_rotate(tf.convert_to_tensor(boxes, dtype=tf.float32), tf.convert_to_tensor(scores, dtype=tf.float32),
                      0.7, 5)

    import os
    # Pin the test to GPU 0 (the default path uses the CUDA kernel).
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    with tf.Session() as sess:
        print(sess.run(keep))
121 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/cifarnet.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains a variant of the CIFAR-10 model definition."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import tensorflow as tf
22 |
23 | slim = tf.contrib.slim
24 |
25 | trunc_normal = lambda stddev: tf.truncated_normal_initializer(stddev=stddev)
26 |
27 |
def cifarnet(images, num_classes=10, is_training=False,
             dropout_keep_prob=0.5,
             prediction_fn=slim.softmax,
             scope='CifarNet'):
  """Creates a variant of the CifarNet model.

  Note that since the output is a set of 'logits', the values fall in the
  interval of (-infinity, infinity). Consequently, to convert the outputs to a
  probability distribution over the characters, one will need to convert them
  using the softmax function:

        logits = cifarnet.cifarnet(images, is_training=False)
        probabilities = tf.nn.softmax(logits)
        predictions = tf.argmax(logits, 1)

  Args:
    images: A batch of `Tensors` of size [batch_size, height, width, channels].
    num_classes: the number of classes in the dataset.
    is_training: specifies whether or not we're currently training the model.
      This variable will determine the behaviour of the dropout layer.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, `num_classes`]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  end_points = {}

  with tf.variable_scope(scope, 'CifarNet', [images, num_classes]):
    # Conv/pool stem; each stage is followed by local response normalization.
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    end_points['conv1'] = net
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    end_points['pool1'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    end_points['conv2'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    end_points['pool2'] = net
    net = slim.flatten(net)
    end_points['Flatten'] = net
    # Two fully-connected layers with dropout only between them.
    net = slim.fully_connected(net, 384, scope='fc3')
    end_points['fc3'] = net
    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                       scope='dropout3')
    net = slim.fully_connected(net, 192, scope='fc4')
    end_points['fc4'] = net
    # Final linear layer: no activation and no regularizer, so it emits
    # raw logits.
    logits = slim.fully_connected(net, num_classes,
                                  biases_initializer=tf.zeros_initializer(),
                                  weights_initializer=trunc_normal(1/192.0),
                                  weights_regularizer=None,
                                  activation_fn=None,
                                  scope='logits')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

  return logits, end_points
# Default input resolution (CIFAR images are 32x32).
cifarnet.default_image_size = 32
91 |
92 |
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model. (The original docstring
    said "inception v3 model" -- a copy-paste slip; this scope configures
    cifarnet's conv and fully-connected layers.)
  """
  # Conv layers: truncated-normal init + ReLU; only the fully-connected
  # layers carry the l2 weight regularizer.
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
113 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/nets_factory.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains a factory for building various models."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 | import functools
21 |
22 | import tensorflow as tf
23 |
24 | from nets import alexnet
25 | from nets import cifarnet
26 | from nets import inception
27 | from nets import lenet
28 | from nets import mobilenet_v1
29 | from nets import overfeat
30 | from nets import resnet_v1
31 | from nets import resnet_v2
32 | from nets import vgg
33 |
34 | slim = tf.contrib.slim
35 |
# Registry: network name -> builder function with the common signature
# logits, end_points = fn(images, num_classes, is_training=...).
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
                'cifarnet': cifarnet.cifarnet,
                'overfeat': overfeat.overfeat,
                'vgg_a': vgg.vgg_a,
                'vgg_16': vgg.vgg_16,
                'vgg_19': vgg.vgg_19,
                'inception_v1': inception.inception_v1,
                'inception_v2': inception.inception_v2,
                'inception_v3': inception.inception_v3,
                'inception_v4': inception.inception_v4,
                'inception_resnet_v2': inception.inception_resnet_v2,
                'lenet': lenet.lenet,
                'resnet_v1_50': resnet_v1.resnet_v1_50,
                'resnet_v1_101': resnet_v1.resnet_v1_101,
                'resnet_v1_152': resnet_v1.resnet_v1_152,
                'resnet_v1_200': resnet_v1.resnet_v1_200,
                'resnet_v2_50': resnet_v2.resnet_v2_50,
                'resnet_v2_101': resnet_v2.resnet_v2_101,
                'resnet_v2_152': resnet_v2.resnet_v2_152,
                'resnet_v2_200': resnet_v2.resnet_v2_200,
                'mobilenet_v1': mobilenet_v1.mobilenet_v1,
                }

# Registry: the same names -> their default slim arg_scope factories, each
# taking weight_decay=... . NOTE(review): inception_v1/v2 map to
# inception_v3_arg_scope -- presumably intentional reuse of the v3 defaults
# (no dedicated v1/v2 scopes are imported); confirm against the slim release.
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
                  'cifarnet': cifarnet.cifarnet_arg_scope,
                  'overfeat': overfeat.overfeat_arg_scope,
                  'vgg_a': vgg.vgg_arg_scope,
                  'vgg_16': vgg.vgg_arg_scope,
                  'vgg_19': vgg.vgg_arg_scope,
                  'inception_v1': inception.inception_v3_arg_scope,
                  'inception_v2': inception.inception_v3_arg_scope,
                  'inception_v3': inception.inception_v3_arg_scope,
                  'inception_v4': inception.inception_v4_arg_scope,
                  'inception_resnet_v2':
                  inception.inception_resnet_v2_arg_scope,
                  'lenet': lenet.lenet_arg_scope,
                  'resnet_v1_50': resnet_v1.resnet_arg_scope,
                  'resnet_v1_101': resnet_v1.resnet_arg_scope,
                  'resnet_v1_152': resnet_v1.resnet_arg_scope,
                  'resnet_v1_200': resnet_v1.resnet_arg_scope,
                  'resnet_v2_50': resnet_v2.resnet_arg_scope,
                  'resnet_v2_101': resnet_v2.resnet_arg_scope,
                  'resnet_v2_152': resnet_v2.resnet_arg_scope,
                  'resnet_v2_200': resnet_v2.resnet_arg_scope,
                  'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
                  }
82 |
83 |
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
  """Returns a network_fn such as `logits, end_points = network_fn(images)`.

  Args:
    name: The name of the network.
    num_classes: The number of classes to use for classification.
    weight_decay: The l2 coefficient for the model weights.
    is_training: `True` if the model is being used for training and `False`
      otherwise.

  Returns:
    network_fn: A function that applies the model to a batch of images. It has
      the following signature:
        logits, end_points = network_fn(images)
  Raises:
    ValueError: If network `name` is not recognized.
  """
  # EAFP lookup: an unknown name surfaces as the documented ValueError.
  try:
    build_fn = networks_map[name]
  except KeyError:
    raise ValueError('Name of network unknown %s' % name)
  arg_scope = arg_scopes_map[name](weight_decay=weight_decay)

  @functools.wraps(build_fn)
  def network_fn(images):
    # Run the builder inside its network-specific default arg_scope.
    with slim.arg_scope(arg_scope):
      return build_fn(images, num_classes, is_training=is_training)

  # Propagate the builder's preferred input resolution, if it declares one.
  if hasattr(build_fn, 'default_image_size'):
    network_fn.default_image_size = build_fn.default_image_size

  return network_fn
113 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/convert_data_to_tfrecord.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import division, print_function, absolute_import
3 | import sys
4 | sys.path.append('../../')
5 |
6 | import xml.etree.cElementTree as ET
7 | from libs.configs import cfgs
8 | import numpy as np
9 | import tensorflow as tf
10 | import glob
11 | import cv2
12 | from libs.label_name_dict.label_dict import *
13 | from help_utils.tools import *
14 |
15 | tf.app.flags.DEFINE_string('VOC_dir', '/home/jzchen/data/RemoteSensing/ships/HRSC2016/HRSC2016/Train/', 'Voc dir')
16 | tf.app.flags.DEFINE_string('xml_dir', 'Annotations', 'xml dir')
17 | tf.app.flags.DEFINE_string('image_dir', 'AllImages', 'image dir')
18 | tf.app.flags.DEFINE_string('save_name', 'train', 'save name')
19 | tf.app.flags.DEFINE_string('save_dir', cfgs.ROO_PATH + '/data/tfrecords/', 'save name')
20 | tf.app.flags.DEFINE_string('img_format', '.bmp', 'format of image')
21 | tf.app.flags.DEFINE_string('dataset', 'ship', 'dataset')
22 | FLAGS = tf.app.flags.FLAGS
23 |
24 |
def _int64_feature(value):
    """Wrap a scalar int into a tf.train.Feature (int64_list of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
27 |
28 |
def _bytes_feature(value):
    """Wrap a str/bytes value into a tf.train.Feature (bytes_list of length 1)."""
    value = tf.compat.as_bytes(value)
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
32 |
33 |
def read_xml_gtbox_and_label(xml_path):
    """
    Parse one VOC-style xml annotation file.

    :param xml_path: the path of voc xml
    :return: (img_height, img_width, gtbox_label) where gtbox_label is an
        int32 array with one row per object: the bndbox child values in
        document order followed by the numeric label (intended shape
        [num_of_gtboxes, 9] for [x1, y1, x2, y2, x3, y3, x4, y4, label] --
        the row length actually depends on how many children each bndbox
        has; TODO confirm the annotations always carry 8 coordinates).
        NOTE(review): with no objects, np.array([]) gives shape (0,), not
        (0, 9) -- downstream reshape(-1, 9) still works on 0 elements.
    """

    tree = ET.parse(xml_path)
    root = tree.getroot()
    img_width = None
    img_height = None
    box_list = []
    for child_of_root in root:
        # if child_of_root.tag == 'filename':
        #     assert child_of_root.text == xml_path.split('/')[-1].split('.')[0] \
        #         + FLAGS.img_format, 'xml_name and img_name cannot match'

        if child_of_root.tag == 'size':
            for child_item in child_of_root:
                if child_item.tag == 'width':
                    img_width = int(child_item.text)
                if child_item.tag == 'height':
                    img_height = int(child_item.text)

        if child_of_root.tag == 'object':
            label = None
            for child_item in child_of_root:
                if child_item.tag == 'name':
                    label = NAME_LABEL_MAP[child_item.text]
                if child_item.tag == 'bndbox':
                    tmp_box = []
                    for node in child_item:
                        tmp_box.append(int(node.text))
                    # Assumes the 'name' tag precedes 'bndbox' inside each
                    # object; otherwise label is still None here.
                    assert label is not None, 'label is none, error'
                    tmp_box.append(label)
                    box_list.append(tmp_box)

    gtbox_label = np.array(box_list, dtype=np.int32)

    return img_height, img_width, gtbox_label
74 |
75 |
def convert_pascal_to_tfrecord():
    """Convert a VOC-style annotation/image tree into one .tfrecord file.

    For every xml under the annotation dir, loads the matching image with
    OpenCV and writes a tf.train.Example holding the raw image bytes plus the
    serialized [N, 9] gtboxes/label array.
    """
    # NOTE(review): xml_path is hard-coded to a local machine path instead of
    # FLAGS.VOC_dir + FLAGS.xml_dir (unlike image_path below) -- looks like a
    # development hack; confirm before running elsewhere.
    xml_path = '/home/jzchen/WorkingSpace/R2CNN_HEAD_FPN_Tensorflow/' + FLAGS.xml_dir
    image_path = FLAGS.VOC_dir + FLAGS.image_dir
    save_path = FLAGS.save_dir + FLAGS.dataset + '_' + FLAGS.save_name + '.tfrecord'
    mkdir(FLAGS.save_dir)

    # writer_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
    # writer = tf.python_io.TFRecordWriter(path=save_path, options=writer_options)
    writer = tf.python_io.TFRecordWriter(path=save_path)

    # Glob the directory ONCE: the original re-globbed it on every iteration
    # just to display progress, i.e. O(n^2) directory scans.
    xml_files = glob.glob(xml_path + '/*.xml')

    try:
        for count, xml in enumerate(xml_files):
            # to avoid path error in different development platform
            xml = xml.replace('\\', '/')

            img_name = xml.split('/')[-1].split('.')[0] + FLAGS.img_format
            img_path = image_path + '/' + img_name

            if not os.path.exists(img_path):
                print('{} is not exist!'.format(img_path))
                continue

            img_height, img_width, gtbox_label = read_xml_gtbox_and_label(xml)

            # img = np.array(Image.open(img_path))
            img = cv2.imread(img_path)

            feature = tf.train.Features(feature={
                # do not need encode() in linux
                # 'img_name': _bytes_feature(img_name.encode()),
                'img_name': _bytes_feature(img_name),
                'img_height': _int64_feature(img_height),
                'img_width': _int64_feature(img_width),
                'img': _bytes_feature(img.tostring()),
                'gtboxes_and_label': _bytes_feature(gtbox_label.tostring()),
                'num_objects': _int64_feature(gtbox_label.shape[0])
            })

            example = tf.train.Example(features=feature)

            writer.write(example.SerializeToString())

            view_bar('Conversion progress', count + 1, len(xml_files))
    finally:
        # BUG FIX: the original never closed the writer, so trailing records
        # could remain unflushed if the process exited early.
        writer.close()

    print('\nConversion is complete!')
120 |
121 |
if __name__ == '__main__':
    # xml_path = '../data/dataset/VOCdevkit/VOC2007/Annotations/000005.xml'
    # read_xml_gtbox_and_label(xml_path)

    # Entry point: convert the configured VOC tree into a single tfrecord.
    convert_pascal_to_tfrecord()
127 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/README.md:
--------------------------------------------------------------------------------
1 | # R2CNN_HEAD (The paper is under review.): Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multiscale Rotation Region Convolutional Neural Network
2 |
3 | ## Recommend improved code: https://github.com/DetectionTeamUCAS
4 |
5 | A Tensorflow implementation of FPN or R2CNN detection framework based on [FPN](https://github.com/yangxue0827/FPN_Tensorflow).
6 | You can refer to the papers [R2CNN Rotational Region CNN for Orientation Robust Scene Text Detection](https://arxiv.org/abs/1706.09579) or [Feature Pyramid Networks for Object Detection](https://arxiv.org/abs/1612.03144)
7 | Other rotation detection method reference [R-DFPN](https://github.com/yangxue0827/R-DFPN_FPN_Tensorflow), [RRPN](https://github.com/yangJirui/RRPN_FPN_Tensorflow) and [R2CNN](https://github.com/yangxue0827/R2CNN_FPN_Tensorflow)
8 |
9 | If useful to you, please star to support my work. Thanks.
10 |
11 |
12 | ## Citation
13 | Some relevant achievements based on this code.
14 |
15 | @article{[yang2018position](https://ieeexplore.ieee.org/document/8464244),
16 | title={Position Detection and Direction Prediction for Arbitrary-Oriented Ships via Multitask Rotation Region Convolutional Neural Network},
17 | author={Yang, Xue and Sun, Hao and Sun, Xian and Yan, Menglong and Guo, Zhi and Fu, Kun},
18 | journal={IEEE Access},
19 | volume={6},
20 | pages={50839-50849},
21 | year={2018},
22 | publisher={IEEE}
23 | }
24 |
25 | @article{[yang2018r-dfpn](http://www.mdpi.com/2072-4292/10/1/132),
26 | title={Automatic ship detection in remote sensing images from google earth of complex scenes based on multiscale rotation dense feature pyramid networks},
27 | author={Yang, Xue and Sun, Hao and Fu, Kun and Yang, Jirui and Sun, Xian and Yan, Menglong and Guo, Zhi},
28 | journal={Remote Sensing},
29 | volume={10},
30 | number={1},
31 | pages={132},
32 | year={2018},
33 | publisher={Multidisciplinary Digital Publishing Institute}
34 | }
35 |
36 | ## Configuration Environment
Ubuntu (encoding problems may occur on Windows) + python2 + tensorflow1.2 + cv2 + cuda8.0 + GeForce GTX 1080
If you want to run on CPU, set use_gpu = False in the NMS and IOU function parameters in cfgs.py
39 | You can also use docker environment, command: docker pull yangxue2docker/tensorflow3_gpu_cv2_sshd:v1.0
40 |
41 | ## Installation
42 | Clone the repository
43 | ```Shell
44 | git clone https://github.com/yangxue0827/R2CNN_HEAD_FPN_Tensorflow.git
45 | ```
46 |
47 | ## Make tfrecord
The data is in VOC format; see [here](sample.xml) for an example annotation.
49 | data path format ($R2CNN_HEAD_ROOT/data/io/divide_data.py)
50 | ```
51 | ├── VOCdevkit
52 | │ ├── VOCdevkit_train
53 | │ ├── Annotation
54 | │ ├── JPEGImages
55 | │ ├── VOCdevkit_test
56 | │ ├── Annotation
57 | │ ├── JPEGImages
58 | ```
59 |
Convert the data to tfrecord format
61 | ```Shell
62 | cd $R2CNN_HEAD_ROOT/data/io/
63 | python convert_data_to_tfrecord.py --VOC_dir='***/VOCdevkit/VOCdevkit_train/' --save_name='train' --img_format='.jpg' --dataset='ship'
64 |
65 | ```
66 |
67 | ## Compile
68 | ```
69 | cd $PATH_ROOT/libs/box_utils/
70 | python setup.py build_ext --inplace
71 | ```
72 |
73 | ## Demo
74 | 1、Unzip the weight $R2CNN_HEAD_ROOT/output/res101_trained_weights/*.rar
75 | 2、put images in $R2CNN_HEAD_ROOT/tools/inference_image
76 | 3、Configure parameters in $R2CNN_HEAD_ROOT/libs/configs/cfgs.py and modify the project's root directory
77 | 4、
78 | ```Shell
79 | cd $R2CNN_HEAD_ROOT/tools
80 | ```
81 | 5、image slice
82 | ```Shell
83 | python inference.py
84 | ```
85 |
86 | 6、big image
87 | ```Shell
88 | cd $FPN_ROOT/tools
89 | python demo.py --src_folder=.\demo_src --des_folder=.\demo_des
90 | ```
91 |
92 | ## Train
93 | 1、Modify $R2CNN_HEAD_ROOT/libs/lable_name_dict/***_dict.py, corresponding to the number of categories in the configuration file
94 | 2、download pretrain weight([resnet_v1_101_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz) or [resnet_v1_50_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz)) from [here](https://github.com/yangxue0827/models/tree/master/slim), then extract to folder $R2CNN_HEAD_ROOT/data/pretrained_weights
95 | 3、
96 | ```Shell
97 | cd $R2CNN_HEAD_ROOT/tools
98 | python train.py
99 | ```
100 |
101 | ## Test tfrecord
102 | ```Shell
103 | cd $R2CNN_HEAD_ROOT/tools
104 | python test.py
105 | ```
106 |
107 | ## eval(Not recommended, Please refer [here](https://github.com/DetectionTeamUCAS))
108 | ```Shell
109 | cd $R2CNN_HEAD_ROOT/tools
110 | python eval.py
111 | ```
112 |
113 | ## Summary
114 | ```Shell
115 | tensorboard --logdir=$R2CNN_HEAD_ROOT/output/res101_summary/
116 | ```
117 | 
118 | 
119 | 
120 |
121 | ## Graph
122 | 
123 |
124 | ## Test results
125 | 
126 | 
127 |
128 | 
129 | 
130 |
131 | 
132 | 
133 |
134 | 
135 | 
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/overfeat.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains the model definition for the OverFeat network.
16 |
17 | The definition for the network was obtained from:
18 | OverFeat: Integrated Recognition, Localization and Detection using
19 | Convolutional Networks
20 | Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
21 | Yann LeCun, 2014
22 | http://arxiv.org/abs/1312.6229
23 |
24 | Usage:
25 | with slim.arg_scope(overfeat.overfeat_arg_scope()):
26 | outputs, end_points = overfeat.overfeat(inputs)
27 |
28 | @@overfeat
29 | """
30 | from __future__ import absolute_import
31 | from __future__ import division
32 | from __future__ import print_function
33 |
34 | import tensorflow as tf
35 |
36 | slim = tf.contrib.slim
37 | trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
38 |
39 |
def overfeat_arg_scope(weight_decay=0.0005):
  """Default arg scope for OverFeat.

  Conv and fully-connected layers get ReLU, l2 weight decay, and zero-init
  biases; conv layers default to SAME padding while max-pool uses VALID.

  Args:
    weight_decay: l2 regularization coefficient for the layer weights.

  Returns:
    An `arg_scope` to use for the overfeat model.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME'):
      with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
48 |
49 |
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.

  """
  with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
                        scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.conv2d(net, 512, [3, 3], scope='conv3')
      net = slim.conv2d(net, 1024, [3, 3], scope='conv4')
      net = slim.conv2d(net, 1024, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      with slim.arg_scope([slim.conv2d],
                          weights_initializer=trunc_normal(0.005),
                          biases_initializer=tf.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout6')
        net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='dropout7')
        # Classifier head: raw logits (no activation, no normalizer).
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          biases_initializer=tf.zeros_initializer(),
                          scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Drop the 1x1 spatial dims so classification outputs are [batch, C].
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
# Classification-mode input resolution (see docstring note above).
overfeat.default_image_size = 231
119 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/hooks/pre-rebase.sample:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Copyright (c) 2006, 2008 Junio C Hamano
4 | #
5 | # The "pre-rebase" hook is run just before "git rebase" starts doing
6 | # its job, and can prevent the command from running by exiting with
7 | # non-zero status.
8 | #
9 | # The hook is called with the following parameters:
10 | #
11 | # $1 -- the upstream the series was forked from.
12 | # $2 -- the branch being rebased (or empty when rebasing the current branch).
13 | #
14 | # This sample shows how to prevent topic branches that are already
15 | # merged to 'next' branch from getting rebased, because allowing it
16 | # would result in rebasing already published history.
17 |
18 | publish=next
19 | basebranch="$1"
20 | if test "$#" = 2
21 | then
22 | topic="refs/heads/$2"
23 | else
24 | topic=`git symbolic-ref HEAD` ||
25 | exit 0 ;# we do not interrupt rebasing detached HEAD
26 | fi
27 |
28 | case "$topic" in
29 | refs/heads/??/*)
30 | ;;
31 | *)
32 | exit 0 ;# we do not interrupt others.
33 | ;;
34 | esac
35 |
36 | # Now we are dealing with a topic branch being rebased
37 | # on top of master. Is it OK to rebase it?
38 |
39 | # Does the topic really exist?
40 | git show-ref -q "$topic" || {
41 | echo >&2 "No such branch $topic"
42 | exit 1
43 | }
44 |
45 | # Is topic fully merged to master?
46 | not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
47 | if test -z "$not_in_master"
48 | then
49 | echo >&2 "$topic is fully merged to master; better remove it."
50 | exit 1 ;# we could allow it, but there is no point.
51 | fi
52 |
53 | # Is topic ever merged to next? If so you should not be rebasing it.
54 | only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
55 | only_next_2=`git rev-list ^master ${publish} | sort`
56 | if test "$only_next_1" = "$only_next_2"
57 | then
58 | not_in_topic=`git rev-list "^$topic" master`
59 | if test -z "$not_in_topic"
60 | then
61 | echo >&2 "$topic is already up-to-date with master"
62 | exit 1 ;# we could allow it, but there is no point.
63 | else
64 | exit 0
65 | fi
66 | else
67 | not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
68 | /usr/bin/perl -e '
69 | my $topic = $ARGV[0];
70 | my $msg = "* $topic has commits already merged to public branch:\n";
71 | my (%not_in_next) = map {
72 | /^([0-9a-f]+) /;
73 | ($1 => 1);
74 | } split(/\n/, $ARGV[1]);
75 | for my $elem (map {
76 | /^([0-9a-f]+) (.*)$/;
77 | [$1 => $2];
78 | } split(/\n/, $ARGV[2])) {
79 | if (!exists $not_in_next{$elem->[0]}) {
80 | if ($msg) {
81 | print STDERR $msg;
82 | undef $msg;
83 | }
84 | print STDERR " $elem->[1]\n";
85 | }
86 | }
87 | ' "$topic" "$not_in_next" "$not_in_master"
88 | exit 1
89 | fi
90 |
91 | exit 0
92 |
93 | ################################################################
94 |
95 | This sample hook safeguards topic branches that have been
96 | published from being rewound.
97 |
98 | The workflow assumed here is:
99 |
100 | * Once a topic branch forks from "master", "master" is never
101 | merged into it again (either directly or indirectly).
102 |
103 | * Once a topic branch is fully cooked and merged into "master",
104 | it is deleted. If you need to build on top of it to correct
105 | earlier mistakes, a new topic branch is created by forking at
106 | the tip of the "master". This is not strictly necessary, but
107 | it makes it easier to keep your history simple.
108 |
109 | * Whenever you need to test or publish your changes to topic
110 | branches, merge them into "next" branch.
111 |
112 | The script, being an example, hardcodes the publish branch name
113 | to be "next", but it is trivial to make it configurable via
114 | $GIT_DIR/config mechanism.
115 |
116 | With this workflow, you would want to know:
117 |
118 | (1) ... if a topic branch has ever been merged to "next". Young
119 | topic branches can have stupid mistakes you would rather
120 | clean up before publishing, and things that have not been
121 | merged into other branches can be easily rebased without
122 | affecting other people. But once it is published, you would
123 | not want to rewind it.
124 |
125 | (2) ... if a topic branch has been fully merged to "master".
126 | Then you can delete it. More importantly, you should not
127 | build on top of it -- other people may already want to
128 | change things related to the topic as patches against your
129 | "master", so if you need further changes, it is better to
130 | fork the topic (perhaps with the same name) afresh from the
131 | tip of "master".
132 |
133 | Let's look at this example:
134 |
135 | o---o---o---o---o---o---o---o---o---o "next"
136 | / / / /
137 | / a---a---b A / /
138 | / / / /
139 | / / c---c---c---c B /
140 | / / / \ /
141 | / / / b---b C \ /
142 | / / / / \ /
143 | ---o---o---o---o---o---o---o---o---o---o---o "master"
144 |
145 |
146 | A, B and C are topic branches.
147 |
148 | * A has one fix since it was merged up to "next".
149 |
150 | * B has finished. It has been fully merged up to "master" and "next",
151 | and is ready to be deleted.
152 |
153 | * C has not merged to "next" at all.
154 |
155 | We would want to allow C to be rebased, refuse A, and encourage
156 | B to be deleted.
157 |
158 | To compute (1):
159 |
160 | git rev-list ^master ^topic next
161 | git rev-list ^master next
162 |
163 | if these match, topic has not merged in next at all.
164 |
165 | To compute (2):
166 |
167 | git rev-list master..topic
168 |
169 | if this is empty, it is fully merged to "master".
170 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/alexnet.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Contains a model definition for AlexNet.
16 |
17 | This work was first described in:
18 | ImageNet Classification with Deep Convolutional Neural Networks
19 | Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton
20 |
21 | and later refined in:
22 | One weird trick for parallelizing convolutional neural networks
23 | Alex Krizhevsky, 2014
24 |
25 | Here we provide the implementation proposed in "One weird trick" and not
26 | "ImageNet Classification", as per the paper, the LRN layers have been removed.
27 |
28 | Usage:
29 | with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
30 | outputs, end_points = alexnet.alexnet_v2(inputs)
31 |
32 | @@alexnet_v2
33 | """
34 |
35 | from __future__ import absolute_import
36 | from __future__ import division
37 | from __future__ import print_function
38 |
39 | import tensorflow as tf
40 |
41 | slim = tf.contrib.slim
42 | trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
43 |
44 |
45 | def alexnet_v2_arg_scope(weight_decay=0.0005):
46 | with slim.arg_scope([slim.conv2d, slim.fully_connected],
47 | activation_fn=tf.nn.relu,
48 | biases_initializer=tf.constant_initializer(0.1),
49 | weights_regularizer=slim.l2_regularizer(weight_decay)):
50 | with slim.arg_scope([slim.conv2d], padding='SAME'):
51 | with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
52 | return arg_sc
53 |
54 |
55 | def alexnet_v2(inputs,
56 | num_classes=1000,
57 | is_training=True,
58 | dropout_keep_prob=0.5,
59 | spatial_squeeze=True,
60 | scope='alexnet_v2'):
61 | """AlexNet version 2.
62 |
63 | Described in: http://arxiv.org/pdf/1404.5997v2.pdf
64 | Parameters from:
65 | github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
66 | layers-imagenet-1gpu.cfg
67 |
68 | Note: All the fully_connected layers have been transformed to conv2d layers.
69 | To use in classification mode, resize input to 224x224. To use in fully
70 | convolutional mode, set spatial_squeeze to false.
71 | The LRN layers have been removed and change the initializers from
72 | random_normal_initializer to xavier_initializer.
73 |
74 | Args:
75 | inputs: a tensor of size [batch_size, height, width, channels].
76 | num_classes: number of predicted classes.
77 | is_training: whether or not the model is being trained.
78 | dropout_keep_prob: the probability that activations are kept in the dropout
79 | layers during training.
80 | spatial_squeeze: whether or not should squeeze the spatial dimensions of the
81 | outputs. Useful to remove unnecessary dimensions for classification.
82 | scope: Optional scope for the variables.
83 |
84 | Returns:
85 | the last op containing the log predictions and end_points dict.
86 | """
87 | with tf.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
88 | end_points_collection = sc.name + '_end_points'
89 | # Collect outputs for conv2d, fully_connected and max_pool2d.
90 | with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
91 | outputs_collections=[end_points_collection]):
92 | net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
93 | scope='conv1')
94 | net = slim.max_pool2d(net, [3, 3], 2, scope='pool1')
95 | net = slim.conv2d(net, 192, [5, 5], scope='conv2')
96 | net = slim.max_pool2d(net, [3, 3], 2, scope='pool2')
97 | net = slim.conv2d(net, 384, [3, 3], scope='conv3')
98 | net = slim.conv2d(net, 384, [3, 3], scope='conv4')
99 | net = slim.conv2d(net, 256, [3, 3], scope='conv5')
100 | net = slim.max_pool2d(net, [3, 3], 2, scope='pool5')
101 |
102 | # Use conv2d instead of fully_connected layers.
103 | with slim.arg_scope([slim.conv2d],
104 | weights_initializer=trunc_normal(0.005),
105 | biases_initializer=tf.constant_initializer(0.1)):
106 | net = slim.conv2d(net, 4096, [5, 5], padding='VALID',
107 | scope='fc6')
108 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
109 | scope='dropout6')
110 | net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
111 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
112 | scope='dropout7')
113 | net = slim.conv2d(net, num_classes, [1, 1],
114 | activation_fn=None,
115 | normalizer_fn=None,
116 | biases_initializer=tf.zeros_initializer(),
117 | scope='fc8')
118 |
119 | # Convert end_points_collection into a end_point dict.
120 | end_points = slim.utils.convert_collection_to_dict(end_points_collection)
121 | if spatial_squeeze:
122 | net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
123 | end_points[sc.name + '/fc8'] = net
124 | return net, end_points
125 | alexnet_v2.default_image_size = 224
126 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/box_utils/boxes_utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 | from libs.box_utils.coordinate_convert import forward_convert
9 |
10 |
11 | def clip_boxes_to_img_boundaries(decode_boxes, img_shape):
12 | '''
13 |
14 | :param decode_boxes:
15 | :return: decode boxes, and already clip to boundaries
16 | '''
17 |
18 | with tf.name_scope('clip_boxes_to_img_boundaries'):
19 |
20 | ymin, xmin, ymax, xmax = tf.unstack(decode_boxes, axis=1)
21 | img_h, img_w = img_shape[1], img_shape[2]
22 |
23 | xmin = tf.maximum(xmin, 0.0)
24 | xmin = tf.minimum(xmin, tf.cast(img_w, tf.float32))
25 |
26 | ymin = tf.maximum(ymin, 0.0)
27 | ymin = tf.minimum(ymin, tf.cast(img_h, tf.float32)) # avoid xmin > img_w, ymin > img_h
28 |
29 | xmax = tf.minimum(xmax, tf.cast(img_w, tf.float32))
30 | ymax = tf.minimum(ymax, tf.cast(img_h, tf.float32))
31 |
32 | return tf.transpose(tf.stack([ymin, xmin, ymax, xmax]))
33 |
34 |
35 | def filter_outside_boxes(boxes, img_w, img_h):
36 | '''
37 | :param anchors:boxes with format [xmin, ymin, xmax, ymax]
38 | :param img_h: height of image
39 | :param img_w: width of image
40 | :return: indices of anchors that not outside the image boundary
41 | '''
42 |
43 | with tf.name_scope('filter_outside_boxes'):
44 |
45 | ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
46 | xmin_index = tf.greater_equal(xmin, 0)
47 | ymin_index = tf.greater_equal(ymin, 0)
48 | xmax_index = tf.less_equal(xmax, img_w)
49 | ymax_index = tf.less_equal(ymax, img_h)
50 |
51 | indices = tf.transpose(tf.stack([ymin_index, xmin_index, ymax_index, xmax_index]))
52 | indices = tf.cast(indices, dtype=tf.int32)
53 | indices = tf.reduce_sum(indices, axis=1)
54 | indices = tf.where(tf.equal(indices, tf.shape(boxes)[1]))
55 |
56 | return tf.reshape(indices, [-1, ])
57 |
58 |
59 | def nms_boxes(decode_boxes, scores, iou_threshold, max_output_size, name):
60 | '''
61 | 1) NMS
62 | 2) get maximum num of proposals
63 | :return: valid_indices
64 | '''
65 |
66 | valid_index = tf.image.non_max_suppression(
67 | boxes=decode_boxes,
68 | scores=scores,
69 | max_output_size=max_output_size,
70 | iou_threshold=iou_threshold,
71 | name=name
72 | )
73 |
74 | return valid_index
75 |
76 |
77 | def padd_boxes_with_zeros(boxes, scores, max_num_of_boxes):
78 |
79 | '''
80 | num of boxes less than max num of boxes, so it need to pad with zeros[0, 0, 0, 0]
81 | :param boxes:
82 | :param scores: [-1]
83 | :param max_num_of_boxes:
84 | :return:
85 | '''
86 |
87 | pad_num = tf.cast(max_num_of_boxes, tf.int32) - tf.shape(boxes)[0]
88 |
89 | zero_boxes = tf.zeros(shape=[pad_num, 4], dtype=boxes.dtype)
90 | zero_scores = tf.zeros(shape=[pad_num], dtype=scores.dtype)
91 |
92 | final_boxes = tf.concat([boxes, zero_boxes], axis=0)
93 |
94 | final_scores = tf.concat([scores, zero_scores], axis=0)
95 |
96 | return final_boxes, final_scores
97 |
98 |
99 | def get_horizen_minAreaRectangle(boxs, with_label=True):
100 |
101 | rpn_proposals_boxes_convert = tf.py_func(forward_convert,
102 | inp=[boxs, with_label],
103 | Tout=tf.float32)
104 | if with_label:
105 | rpn_proposals_boxes_convert = tf.reshape(rpn_proposals_boxes_convert, [-1, 9])
106 |
107 | boxes_shape = tf.shape(rpn_proposals_boxes_convert)
108 | y_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1] - 1],
109 | strides=[1, 2])
110 | x_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1] - 1],
111 | strides=[1, 2])
112 |
113 | label = tf.unstack(rpn_proposals_boxes_convert, axis=1)[-1]
114 |
115 | y_max = tf.reduce_max(y_list, axis=1)
116 | y_min = tf.reduce_min(y_list, axis=1)
117 | x_max = tf.reduce_max(x_list, axis=1)
118 | x_min = tf.reduce_min(x_list, axis=1)
119 | return tf.transpose(tf.stack([y_min, x_min, y_max, x_max, label], axis=0))
120 | else:
121 | rpn_proposals_boxes_convert = tf.reshape(rpn_proposals_boxes_convert, [-1, 8])
122 |
123 | boxes_shape = tf.shape(rpn_proposals_boxes_convert)
124 | y_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1]],
125 | strides=[1, 2])
126 | x_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1]],
127 | strides=[1, 2])
128 |
129 | y_max = tf.reduce_max(y_list, axis=1)
130 | y_min = tf.reduce_min(y_list, axis=1)
131 | x_max = tf.reduce_max(x_list, axis=1)
132 | x_min = tf.reduce_min(x_list, axis=1)
133 |
134 | return tf.transpose(tf.stack([y_min, x_min, y_max, x_max], axis=0))
135 |
136 |
137 | def get_head(gtboxes_and_label_batch):
138 | """
139 | :param gtboxes_and_label_batch: [x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label]
140 | :return: [x1, y1, x2, y2, x3, y3, x4, y4, label], [head_x, head_y]
141 | """
142 | x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label = tf.unstack(gtboxes_and_label_batch, axis=1)
143 | coords_label = tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, label]))
144 | head = tf.transpose(tf.stack([head_x, head_y]))
145 |
146 | return coords_label, head
147 |
148 | def get_angle(gtboxes_and_label_batch):
149 |
150 | x1, y1, x2, y2, x3, y3, x4, y4, angle, label = tf.unstack(gtboxes_and_label_batch, axis=1)
151 | coords_label = tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, label]))
152 | angle = tf.transpose(tf.stack([angle]))
153 |
154 | return coords_label, angle
155 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/data/io/image_preprocess.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from __future__ import absolute_import
4 | from __future__ import print_function
5 | from __future__ import division
6 |
7 | import tensorflow as tf
8 |
9 | import numpy as np
10 | import cv2
11 | import math
12 | import random
13 |
14 |
15 | def short_side_resize(img_tensor, gtboxes_and_label, target_shortside_len):
16 | '''
17 |
18 | :param img_tensor:[h, w, c], gtboxes_and_label:[-1, 9]
19 | :param target_shortside_len:
20 | :return:
21 | '''
22 |
23 | h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
24 |
25 | new_h, new_w = tf.cond(tf.less(h, w),
26 | true_fn=lambda: (target_shortside_len, target_shortside_len * w//h),
27 | false_fn=lambda: (target_shortside_len * h//w, target_shortside_len))
28 |
29 | img_tensor = tf.expand_dims(img_tensor, axis=0)
30 | img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
31 |
32 | x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label = tf.unstack(gtboxes_and_label, axis=1)
33 |
34 | x1, x2, x3, x4, head_x = x1 * new_w//w, x2 * new_w//w, x3 * new_w//w, x4 * new_w//w, head_x * new_w//w
35 | y1, y2, y3, y4, head_y = y1 * new_h//h, y2 * new_h//h, y3 * new_h//h, y4 * new_h//h, head_y * new_h//h
36 |
37 | img_tensor = tf.squeeze(img_tensor, axis=0) # ensure image tensor rank is 3
38 | return img_tensor, tf.transpose(tf.stack([x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label], axis=0))
39 |
40 |
41 | def short_side_resize_for_inference_data(img_tensor, target_shortside_len, is_resize=True):
42 | h, w, = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
43 |
44 | img_tensor = tf.expand_dims(img_tensor, axis=0)
45 |
46 | if is_resize:
47 | new_h, new_w = tf.cond(tf.less(h, w),
48 | true_fn=lambda: (target_shortside_len, target_shortside_len * w // h),
49 | false_fn=lambda: (target_shortside_len * h // w, target_shortside_len))
50 | img_tensor = tf.image.resize_bilinear(img_tensor, [new_h, new_w])
51 |
52 | return img_tensor # [1, h, w, c]
53 |
54 |
55 | def flip_left_right(img_tensor, gtboxes_and_label):
56 | h, w = tf.shape(img_tensor)[0], tf.shape(img_tensor)[1]
57 | img_tensor = tf.image.flip_left_right(img_tensor)
58 |
59 | x1, y1, x2, y2, x3, y3, x4, y4, head_x, head_y, label = tf.unstack(gtboxes_and_label, axis=1)
60 | new_x1 = w - x1
61 | new_x2 = w - x2
62 | new_x3 = w - x3
63 | new_x4 = w - x4
64 | new_head_x = w - head_x
65 | return img_tensor, tf.transpose(tf.stack([new_x1, y1, new_x2, y2, new_x3, y3, new_x4, y4, new_head_x, head_y, label], axis=0))
66 |
67 |
68 | def random_flip_left_right(img_tensor, gtboxes_and_label):
69 |
70 | img_tensor, gtboxes_and_label = tf.cond(tf.less(tf.random_uniform(shape=[], minval=0, maxval=1), 0.5),
71 | lambda: flip_left_right(img_tensor, gtboxes_and_label),
72 | lambda: (img_tensor, gtboxes_and_label))
73 |
74 | return img_tensor, gtboxes_and_label
75 |
76 |
77 | # ******************************************Rotate img & bounding box by a random angle**********************************************
78 |
79 | def rotate_point(img_tensor, gtboxes_and_label):
80 |
81 | def rotate_point_cv(img, point_label=None, keep_size = False):
82 | '''
83 | input: 1.img type:numpy_array(np.float32) 2.angle type:int 3.point_label type:numpy_array(np.int32) shape(None, 11), last three represent head coordinates and label
84 | output: 1.rotated_img 2.rotated_point shape(None, 10)
85 | '''
86 | angle = random.randint(0,90)
87 |
88 | if keep_size:
89 | cols = img.shape[1]
90 | rows = img.shape[0]
91 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
92 | img = cv2.warpAffine(img, M, (cols, rows))
93 | a = M[:, :2] ##a.shape (2,2)
94 | b = M[:, 2:] ###b.shape(2,1)
95 | b = np.reshape(b,newshape=(1,2))
96 | a = np.transpose(a)
97 | point = np.reshape(point_label[:,:-1],newshape=(len(point_label)*5,2))
98 | point = np.dot(point,a)+b
99 | point = np.reshape(point,newshape=(np.int(len(point)/5),10))
100 | label = point_label[:,-1]
101 | label = label[:, np.newaxis]
102 | point = np.concatenate((point, label), axis=1)
103 | return img, np.int32(point)
104 | else:
105 | cols = img.shape[1]
106 | rows = img.shape[0]
107 | M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
108 |
109 | heightNew = int(cols * math.fabs(math.sin(math.radians(angle))) + rows* math.fabs(math.cos(math.radians(angle))))
110 | widthNew = int(rows * math.fabs(math.sin(math.radians(angle))) + cols* math.fabs(math.cos(math.radians(angle))))
111 | M[0, 2] += (widthNew - cols) / 2
112 | M[1, 2] += (heightNew - rows) / 2
113 |
114 | img = cv2.warpAffine(img, M, (widthNew, heightNew))
115 | a = M[:, :2] ##a.shape (2,2)
116 | b = M[:, 2:] ###b.shape(2,1)
117 | b = np.reshape(b, newshape=(1, 2))
118 | a = np.transpose(a)
119 | point = np.reshape(point_label[:,:-1], newshape=(len(point_label) * 5, 2))
120 | point = np.dot(point, a) + b
121 | point = np.reshape(point, newshape=(np.int(len(point) / 5), 10))
122 | label = point_label[:,-1]
123 | label = label[:, np.newaxis]
124 | point = np.concatenate((point, label), axis=1)
125 | return img, np.int32(point)
126 |
127 | rotated_img_tensor, rotated_gtboxes_and_label = tf.py_func(rotate_point_cv, inp=[img_tensor, gtboxes_and_label], Tout=[tf.float32, tf.int32])
128 | rotated_gtboxes_and_label.set_shape(gtboxes_and_label.get_shape())
129 | # rotated_img_tensor = tf.reshape(rotated_img_tensor, [-1,-1,3]) # We only consider RGB three channels now.
130 | rotated_img_tensor.set_shape(tf.TensorShape([None,None,3]))
131 | return rotated_img_tensor, rotated_gtboxes_and_label
132 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/overfeat_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for slim.slim_nets.overfeat."""
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import tensorflow as tf
21 |
22 | from nets import overfeat
23 |
24 | slim = tf.contrib.slim
25 |
26 |
27 | class OverFeatTest(tf.test.TestCase):
28 |
29 | def testBuild(self):
30 | batch_size = 5
31 | height, width = 231, 231
32 | num_classes = 1000
33 | with self.test_session():
34 | inputs = tf.random_uniform((batch_size, height, width, 3))
35 | logits, _ = overfeat.overfeat(inputs, num_classes)
36 | self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
37 | self.assertListEqual(logits.get_shape().as_list(),
38 | [batch_size, num_classes])
39 |
40 | def testFullyConvolutional(self):
41 | batch_size = 1
42 | height, width = 281, 281
43 | num_classes = 1000
44 | with self.test_session():
45 | inputs = tf.random_uniform((batch_size, height, width, 3))
46 | logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
47 | self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
48 | self.assertListEqual(logits.get_shape().as_list(),
49 | [batch_size, 2, 2, num_classes])
50 |
51 | def testEndPoints(self):
52 | batch_size = 5
53 | height, width = 231, 231
54 | num_classes = 1000
55 | with self.test_session():
56 | inputs = tf.random_uniform((batch_size, height, width, 3))
57 | _, end_points = overfeat.overfeat(inputs, num_classes)
58 | expected_names = ['overfeat/conv1',
59 | 'overfeat/pool1',
60 | 'overfeat/conv2',
61 | 'overfeat/pool2',
62 | 'overfeat/conv3',
63 | 'overfeat/conv4',
64 | 'overfeat/conv5',
65 | 'overfeat/pool5',
66 | 'overfeat/fc6',
67 | 'overfeat/fc7',
68 | 'overfeat/fc8'
69 | ]
70 | self.assertSetEqual(set(end_points.keys()), set(expected_names))
71 |
72 | def testModelVariables(self):
73 | batch_size = 5
74 | height, width = 231, 231
75 | num_classes = 1000
76 | with self.test_session():
77 | inputs = tf.random_uniform((batch_size, height, width, 3))
78 | overfeat.overfeat(inputs, num_classes)
79 | expected_names = ['overfeat/conv1/weights',
80 | 'overfeat/conv1/biases',
81 | 'overfeat/conv2/weights',
82 | 'overfeat/conv2/biases',
83 | 'overfeat/conv3/weights',
84 | 'overfeat/conv3/biases',
85 | 'overfeat/conv4/weights',
86 | 'overfeat/conv4/biases',
87 | 'overfeat/conv5/weights',
88 | 'overfeat/conv5/biases',
89 | 'overfeat/fc6/weights',
90 | 'overfeat/fc6/biases',
91 | 'overfeat/fc7/weights',
92 | 'overfeat/fc7/biases',
93 | 'overfeat/fc8/weights',
94 | 'overfeat/fc8/biases',
95 | ]
96 | model_variables = [v.op.name for v in slim.get_model_variables()]
97 | self.assertSetEqual(set(model_variables), set(expected_names))
98 |
99 | def testEvaluation(self):
100 | batch_size = 2
101 | height, width = 231, 231
102 | num_classes = 1000
103 | with self.test_session():
104 | eval_inputs = tf.random_uniform((batch_size, height, width, 3))
105 | logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
106 | self.assertListEqual(logits.get_shape().as_list(),
107 | [batch_size, num_classes])
108 | predictions = tf.argmax(logits, 1)
109 | self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
110 |
111 | def testTrainEvalWithReuse(self):
112 | train_batch_size = 2
113 | eval_batch_size = 1
114 | train_height, train_width = 231, 231
115 | eval_height, eval_width = 281, 281
116 | num_classes = 1000
117 | with self.test_session():
118 | train_inputs = tf.random_uniform(
119 | (train_batch_size, train_height, train_width, 3))
120 | logits, _ = overfeat.overfeat(train_inputs)
121 | self.assertListEqual(logits.get_shape().as_list(),
122 | [train_batch_size, num_classes])
123 | tf.get_variable_scope().reuse_variables()
124 | eval_inputs = tf.random_uniform(
125 | (eval_batch_size, eval_height, eval_width, 3))
126 | logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
127 | spatial_squeeze=False)
128 | self.assertListEqual(logits.get_shape().as_list(),
129 | [eval_batch_size, 2, 2, num_classes])
130 | logits = tf.reduce_mean(logits, [1, 2])
131 | predictions = tf.argmax(logits, 1)
132 | self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
133 |
134 | def testForward(self):
135 | batch_size = 1
136 | height, width = 231, 231
137 | with self.test_session() as sess:
138 | inputs = tf.random_uniform((batch_size, height, width, 3))
139 | logits, _ = overfeat.overfeat(inputs)
140 | sess.run(tf.global_variables_initializer())
141 | output = sess.run(logits)
142 | self.assertTrue(output.any())
143 |
144 | if __name__ == '__main__':
145 | tf.test.main()
146 |
--------------------------------------------------------------------------------
/R2CNN_HEAD_FPN_Tensorflow/libs/networks/slim_nets/alexnet_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for slim.slim_nets.alexnet."""
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import tensorflow as tf
21 |
22 | from nets import alexnet
23 |
24 | slim = tf.contrib.slim
25 |
26 |
27 | class AlexnetV2Test(tf.test.TestCase):
28 |
29 | def testBuild(self):
30 | batch_size = 5
31 | height, width = 224, 224
32 | num_classes = 1000
33 | with self.test_session():
34 | inputs = tf.random_uniform((batch_size, height, width, 3))
35 | logits, _ = alexnet.alexnet_v2(inputs, num_classes)
36 | self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
37 | self.assertListEqual(logits.get_shape().as_list(),
38 | [batch_size, num_classes])
39 |
40 | def testFullyConvolutional(self):
41 | batch_size = 1
42 | height, width = 300, 400
43 | num_classes = 1000
44 | with self.test_session():
45 | inputs = tf.random_uniform((batch_size, height, width, 3))
46 | logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
47 | self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
48 | self.assertListEqual(logits.get_shape().as_list(),
49 | [batch_size, 4, 7, num_classes])
50 |
51 | def testEndPoints(self):
52 | batch_size = 5
53 | height, width = 224, 224
54 | num_classes = 1000
55 | with self.test_session():
56 | inputs = tf.random_uniform((batch_size, height, width, 3))
57 | _, end_points = alexnet.alexnet_v2(inputs, num_classes)
58 | expected_names = ['alexnet_v2/conv1',
59 | 'alexnet_v2/pool1',
60 | 'alexnet_v2/conv2',
61 | 'alexnet_v2/pool2',
62 | 'alexnet_v2/conv3',
63 | 'alexnet_v2/conv4',
64 | 'alexnet_v2/conv5',
65 | 'alexnet_v2/pool5',
66 | 'alexnet_v2/fc6',
67 | 'alexnet_v2/fc7',
68 | 'alexnet_v2/fc8'
69 | ]
70 | self.assertSetEqual(set(end_points.keys()), set(expected_names))
71 |
72 | def testModelVariables(self):
73 | batch_size = 5
74 | height, width = 224, 224
75 | num_classes = 1000
76 | with self.test_session():
77 | inputs = tf.random_uniform((batch_size, height, width, 3))
78 | alexnet.alexnet_v2(inputs, num_classes)
79 | expected_names = ['alexnet_v2/conv1/weights',
80 | 'alexnet_v2/conv1/biases',
81 | 'alexnet_v2/conv2/weights',
82 | 'alexnet_v2/conv2/biases',
83 | 'alexnet_v2/conv3/weights',
84 | 'alexnet_v2/conv3/biases',
85 | 'alexnet_v2/conv4/weights',
86 | 'alexnet_v2/conv4/biases',
87 | 'alexnet_v2/conv5/weights',
88 | 'alexnet_v2/conv5/biases',
89 | 'alexnet_v2/fc6/weights',
90 | 'alexnet_v2/fc6/biases',
91 | 'alexnet_v2/fc7/weights',
92 | 'alexnet_v2/fc7/biases',
93 | 'alexnet_v2/fc8/weights',
94 | 'alexnet_v2/fc8/biases',
95 | ]
96 | model_variables = [v.op.name for v in slim.get_model_variables()]
97 | self.assertSetEqual(set(model_variables), set(expected_names))
98 |
99 | def testEvaluation(self):
100 | batch_size = 2
101 | height, width = 224, 224
102 | num_classes = 1000
103 | with self.test_session():
104 | eval_inputs = tf.random_uniform((batch_size, height, width, 3))
105 | logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
106 | self.assertListEqual(logits.get_shape().as_list(),
107 | [batch_size, num_classes])
108 | predictions = tf.argmax(logits, 1)
109 | self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
110 |
111 | def testTrainEvalWithReuse(self):
112 | train_batch_size = 2
113 | eval_batch_size = 1
114 | train_height, train_width = 224, 224
115 | eval_height, eval_width = 300, 400
116 | num_classes = 1000
117 | with self.test_session():
118 | train_inputs = tf.random_uniform(
119 | (train_batch_size, train_height, train_width, 3))
120 | logits, _ = alexnet.alexnet_v2(train_inputs)
121 | self.assertListEqual(logits.get_shape().as_list(),
122 | [train_batch_size, num_classes])
123 | tf.get_variable_scope().reuse_variables()
124 | eval_inputs = tf.random_uniform(
125 | (eval_batch_size, eval_height, eval_width, 3))
126 | logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
127 | spatial_squeeze=False)
128 | self.assertListEqual(logits.get_shape().as_list(),
129 | [eval_batch_size, 4, 7, num_classes])
130 | logits = tf.reduce_mean(logits, [1, 2])
131 | predictions = tf.argmax(logits, 1)
132 | self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
133 |
134 | def testForward(self):
135 | batch_size = 1
136 | height, width = 224, 224
137 | with self.test_session() as sess:
138 | inputs = tf.random_uniform((batch_size, height, width, 3))
139 | logits, _ = alexnet.alexnet_v2(inputs)
140 | sess.run(tf.global_variables_initializer())
141 | output = sess.run(logits)
142 | self.assertTrue(output.any())
143 |
144 | if __name__ == '__main__':
145 | tf.test.main()
146 |
--------------------------------------------------------------------------------
]