├── README.md
├── main.py
├── modelConfig.py
├── requirements.txt
├── static
│ └── js
│ │ └── main.js
├── templates
│ └── index.html
└── tf
    ├── bin
    │ └── protoc
├── object_detection
├── CONTRIBUTING.md
├── README.md
├── __init__.py
├── anchor_generators
│ ├── __init__.py
│ ├── flexible_grid_anchor_generator.py
│ ├── flexible_grid_anchor_generator_test.py
│ ├── grid_anchor_generator.py
│ ├── grid_anchor_generator_test.py
│ ├── multiple_grid_anchor_generator.py
│ ├── multiple_grid_anchor_generator_test.py
│ ├── multiscale_grid_anchor_generator.py
│ └── multiscale_grid_anchor_generator_test.py
├── box_coders
│ ├── __init__.py
│ ├── faster_rcnn_box_coder.py
│ ├── faster_rcnn_box_coder_test.py
│ ├── keypoint_box_coder.py
│ ├── keypoint_box_coder_test.py
│ ├── mean_stddev_box_coder.py
│ ├── mean_stddev_box_coder_test.py
│ ├── square_box_coder.py
│ └── square_box_coder_test.py
├── builders
│ ├── __init__.py
│ ├── anchor_generator_builder.py
│ ├── anchor_generator_builder_test.py
│ ├── box_coder_builder.py
│ ├── box_coder_builder_test.py
│ ├── box_predictor_builder.py
│ ├── box_predictor_builder_test.py
│ ├── calibration_builder.py
│ ├── calibration_builder_test.py
│ ├── dataset_builder.py
│ ├── dataset_builder_test.py
│ ├── decoder_builder.py
│ ├── decoder_builder_test.py
│ ├── graph_rewriter_builder.py
│ ├── graph_rewriter_builder_tf1_test.py
│ ├── hyperparams_builder.py
│ ├── hyperparams_builder_test.py
│ ├── image_resizer_builder.py
│ ├── image_resizer_builder_test.py
│ ├── input_reader_builder.py
│ ├── input_reader_builder_tf1_test.py
│ ├── losses_builder.py
│ ├── losses_builder_test.py
│ ├── matcher_builder.py
│ ├── matcher_builder_test.py
│ ├── model_builder.py
│ ├── model_builder_test.py
│ ├── model_builder_tf1_test.py
│ ├── model_builder_tf2_test.py
│ ├── optimizer_builder.py
│ ├── optimizer_builder_tf1_test.py
│ ├── optimizer_builder_tf2_test.py
│ ├── post_processing_builder.py
│ ├── post_processing_builder_test.py
│ ├── preprocessor_builder.py
│ ├── preprocessor_builder_test.py
│ ├── region_similarity_calculator_builder.py
│ ├── region_similarity_calculator_builder_test.py
│ ├── target_assigner_builder.py
│ └── target_assigner_builder_test.py
├── colab_tutorials
│ ├── centernet_on_device.ipynb
│ ├── context_rcnn_tutorial.ipynb
│ ├── convert_odt_model_to_TFLite.ipynb
│ ├── deepmac_colab.ipynb
│ ├── eager_few_shot_od_training_tf2_colab.ipynb
│ ├── eager_few_shot_od_training_tflite.ipynb
│ ├── inference_from_saved_model_tf2_colab.ipynb
│ ├── inference_tf2_colab.ipynb
│ └── object_detection_tutorial.ipynb
├── configs
│ └── tf2
│ │ ├── center_net_deepmac_1024x1024_coco_tpu-128.config
│ │ ├── center_net_deepmac_1024x1024_non_voc_only_tpu-128.config
│ │ ├── center_net_deepmac_1024x1024_voc_only_tpu-128.config
│ │ ├── center_net_deepmac_512x512_voc_only_tpu-32.config
│ │ ├── centernet_hourglass104_1024x1024_coco17_tpu-32.config
│ │ ├── centernet_hourglass104_1024x1024_kpts_coco17_tpu-32.config
│ │ ├── centernet_hourglass104_512x512_coco17_tpu-8.config
│ │ ├── centernet_hourglass104_512x512_kpts_coco17_tpu-32.config
│ │ ├── centernet_resnet101_v1_fpn_512x512_coco17_tpu-8.config
│ │ ├── centernet_resnet50_v1_fpn_512x512_kpts_coco17_tpu-8.config
│ │ ├── centernet_resnet50_v2_512x512_kpts_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet101_v1_1024x1024_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet101_v1_800x1333_coco17_gpu-8.config
│ │ ├── faster_rcnn_resnet152_v1_1024x1024_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet152_v1_640x640_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet152_v1_800x1333_coco17_gpu-8.config
│ │ ├── faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet50_v1_640x640_coco17_tpu-8.config
│ │ ├── faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8.config
│ │ ├── faster_rcnn_resnet50_v1_fpn_640x640_coco17_tpu-8.config
│ │ ├── mask_rcnn_inception_resnet_v2_1024x1024_coco17_gpu-8.config
│ │ ├── ssd_efficientdet_d0_512x512_coco17_tpu-8.config
│ │ ├── ssd_efficientdet_d1_640x640_coco17_tpu-8.config
│ │ ├── ssd_efficientdet_d2_768x768_coco17_tpu-8.config
│ │ ├── ssd_efficientdet_d3_896x896_coco17_tpu-32.config
│ │ ├── ssd_efficientdet_d4_1024x1024_coco17_tpu-32.config
│ │ ├── ssd_efficientdet_d5_1280x1280_coco17_tpu-32.config
│ │ ├── ssd_efficientdet_d6_1408x1408_coco17_tpu-32.config
│ │ ├── ssd_efficientdet_d7_1536x1536_coco17_tpu-32.config
│ │ ├── ssd_mobilenet_v1_fpn_640x640_coco17_tpu-8.config
│ │ ├── ssd_mobilenet_v2_320x320_coco17_tpu-8.config
│ │ ├── ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.config
│ │ ├── ssd_mobilenet_v2_fpnlite_640x640_coco17_tpu-8.config
│ │ ├── ssd_resnet101_v1_fpn_1024x1024_coco17_tpu-8.config
│ │ ├── ssd_resnet101_v1_fpn_640x640_coco17_tpu-8.config
│ │ ├── ssd_resnet152_v1_fpn_1024x1024_coco17_tpu-8.config
│ │ ├── ssd_resnet152_v1_fpn_640x640_coco17_tpu-8.config
│ │ ├── ssd_resnet50_v1_fpn_1024x1024_coco17_tpu-8.config
│ │ └── ssd_resnet50_v1_fpn_640x640_coco17_tpu-8.config
├── core
│ ├── __init__.py
│ ├── anchor_generator.py
│ ├── balanced_positive_negative_sampler.py
│ ├── balanced_positive_negative_sampler_test.py
│ ├── batch_multiclass_nms_test.py
│ ├── batcher.py
│ ├── batcher_tf1_test.py
│ ├── box_coder.py
│ ├── box_coder_test.py
│ ├── box_list.py
│ ├── box_list_ops.py
│ ├── box_list_ops_test.py
│ ├── box_list_test.py
│ ├── box_predictor.py
│ ├── class_agnostic_nms_test.py
│ ├── data_decoder.py
│ ├── data_parser.py
│ ├── densepose_ops.py
│ ├── densepose_ops_test.py
│ ├── freezable_batch_norm.py
│ ├── freezable_batch_norm_tf2_test.py
│ ├── freezable_sync_batch_norm.py
│ ├── keypoint_ops.py
│ ├── keypoint_ops_test.py
│ ├── losses.py
│ ├── losses_test.py
│ ├── matcher.py
│ ├── matcher_test.py
│ ├── minibatch_sampler.py
│ ├── minibatch_sampler_test.py
│ ├── model.py
│ ├── model_test.py
│ ├── multiclass_nms_test.py
│ ├── post_processing.py
│ ├── prefetcher.py
│ ├── prefetcher_tf1_test.py
│ ├── preprocessor.py
│ ├── preprocessor_cache.py
│ ├── preprocessor_test.py
│ ├── region_similarity_calculator.py
│ ├── region_similarity_calculator_test.py
│ ├── standard_fields.py
│ ├── target_assigner.py
│ └── target_assigner_test.py
├── data
│ ├── ava_label_map_v2.1.pbtxt
│ ├── face_label_map.pbtxt
│ ├── face_person_with_keypoints_label_map.pbtxt
│ ├── fgvc_2854_classes_label_map.pbtxt
│ ├── kitti_label_map.pbtxt
│ ├── mscoco_complete_label_map.pbtxt
│ ├── mscoco_label_map.pbtxt
│ ├── mscoco_minival_ids.txt
│ ├── oid_bbox_trainable_label_map.pbtxt
│ ├── oid_object_detection_challenge_500_label_map.pbtxt
│ ├── oid_v4_label_map.pbtxt
│ ├── pascal_label_map.pbtxt
│ ├── pet_label_map.pbtxt
│ └── snapshot_serengeti_label_map.pbtxt
├── data_decoders
│ ├── __init__.py
│ ├── tf_example_decoder.py
│ ├── tf_example_decoder_test.py
│ ├── tf_sequence_example_decoder.py
│ └── tf_sequence_example_decoder_test.py
├── dataset_tools
│ ├── __init__.py
│ ├── context_rcnn
│ │ ├── __init__.py
│ │ ├── add_context_to_examples.py
│ │ ├── add_context_to_examples_tf2_test.py
│ │ ├── create_cococameratraps_tfexample_main.py
│ │ ├── create_cococameratraps_tfexample_tf2_test.py
│ │ ├── generate_detection_data.py
│ │ ├── generate_detection_data_tf2_test.py
│ │ ├── generate_embedding_data.py
│ │ └── generate_embedding_data_tf2_test.py
│ ├── create_ava_actions_tf_record.py
│ ├── create_coco_tf_record.py
│ ├── create_coco_tf_record_test.py
│ ├── create_kitti_tf_record.py
│ ├── create_kitti_tf_record_test.py
│ ├── create_oid_tf_record.py
│ ├── create_pascal_tf_record.py
│ ├── create_pascal_tf_record_test.py
│ ├── create_pet_tf_record.py
│ ├── create_pycocotools_package.sh
│ ├── densepose
│ │ └── UV_symmetry_transforms.mat
│ ├── download_and_preprocess_ava.sh
│ ├── download_and_preprocess_mscoco.sh
│ ├── oid_hierarchical_labels_expansion.py
│ ├── oid_hierarchical_labels_expansion_test.py
│ ├── oid_tfrecord_creation.py
│ ├── oid_tfrecord_creation_test.py
│ ├── seq_example_util.py
│ ├── seq_example_util_test.py
│ ├── tf_record_creation_util.py
│ └── tf_record_creation_util_test.py
├── dockerfiles
│ ├── android
│ │ ├── Dockerfile
│ │ └── README.md
│ ├── tf1
│ │ ├── Dockerfile
│ │ └── README.md
│ └── tf2
│ │ ├── Dockerfile
│ │ └── README.md
├── eval_util.py
├── eval_util_test.py
├── export_inference_graph.py
├── export_tflite_graph_lib_tf2.py
├── export_tflite_graph_lib_tf2_test.py
├── export_tflite_graph_tf2.py
├── export_tflite_ssd_graph.py
├── export_tflite_ssd_graph_lib.py
├── export_tflite_ssd_graph_lib_tf1_test.py
├── exporter.py
├── exporter_lib_tf2_test.py
├── exporter_lib_v2.py
├── exporter_main_v2.py
├── exporter_tf1_test.py
├── g3doc
│ ├── challenge_evaluation.md
│ ├── configuring_jobs.md
│ ├── context_rcnn.md
│ ├── deepmac.md
│ ├── defining_your_own_model.md
│ ├── evaluation_protocols.md
│ ├── exporting_models.md
│ ├── faq.md
│ ├── img
│ │ ├── dogs_detections_output.jpg
│ │ ├── example_cat.jpg
│ │ ├── groupof_case_eval.png
│ │ ├── kites_detections_output.jpg
│ │ ├── kites_with_segment_overlay.png
│ │ ├── mask_improvement.png
│ │ ├── nongroupof_case_eval.png
│ │ ├── oid_bus_72e19c28aac34ed8.jpg
│ │ ├── oid_monkey_3b4168c89cecbc5b.jpg
│ │ ├── oxford_pet.png
│ │ ├── tensorboard.png
│ │ ├── tensorboard2.png
│ │ └── tf-od-api-logo.png
│ ├── instance_segmentation.md
│ ├── oid_inference_and_evaluation.md
│ ├── preparing_inputs.md
│ ├── release_notes.md
│ ├── running_notebook.md
│ ├── running_on_mobile_tensorflowlite.md
│ ├── running_on_mobile_tf2.md
│ ├── running_pets.md
│ ├── tf1.md
│ ├── tf1_detection_zoo.md
│ ├── tf1_training_and_evaluation.md
│ ├── tf2.md
│ ├── tf2_classification_zoo.md
│ ├── tf2_detection_zoo.md
│ ├── tf2_training_and_evaluation.md
│ ├── tpu_compatibility.md
│ ├── tpu_exporters.md
│ └── using_your_own_dataset.md
├── inference
│ ├── __init__.py
│ ├── detection_inference.py
│ ├── detection_inference_tf1_test.py
│ └── infer_detections.py
├── inputs.py
├── inputs_test.py
├── legacy
│ ├── __init__.py
│ ├── eval.py
│ ├── evaluator.py
│ ├── train.py
│ ├── trainer.py
│ └── trainer_tf1_test.py
├── matchers
│ ├── __init__.py
│ ├── argmax_matcher.py
│ ├── argmax_matcher_test.py
│ ├── bipartite_matcher.py
│ ├── bipartite_matcher_tf1_test.py
│ ├── hungarian_matcher.py
│ └── hungarian_matcher_tf2_test.py
├── meta_architectures
│ ├── __init__.py
│ ├── center_net_meta_arch.py
│ ├── center_net_meta_arch_tf2_test.py
│ ├── context_rcnn_lib.py
│ ├── context_rcnn_lib_tf1_test.py
│ ├── context_rcnn_lib_tf2.py
│ ├── context_rcnn_lib_tf2_test.py
│ ├── context_rcnn_meta_arch.py
│ ├── context_rcnn_meta_arch_test.py
│ ├── deepmac_meta_arch.py
│ ├── deepmac_meta_arch_test.py
│ ├── faster_rcnn_meta_arch.py
│ ├── faster_rcnn_meta_arch_test.py
│ ├── faster_rcnn_meta_arch_test_lib.py
│ ├── rfcn_meta_arch.py
│ ├── rfcn_meta_arch_test.py
│ ├── ssd_meta_arch.py
│ ├── ssd_meta_arch_test.py
│ └── ssd_meta_arch_test_lib.py
├── metrics
│ ├── __init__.py
│ ├── calibration_evaluation.py
│ ├── calibration_evaluation_tf1_test.py
│ ├── calibration_metrics.py
│ ├── calibration_metrics_tf1_test.py
│ ├── coco_evaluation.py
│ ├── coco_evaluation_test.py
│ ├── coco_tools.py
│ ├── coco_tools_test.py
│ ├── io_utils.py
│ ├── lvis_evaluation.py
│ ├── lvis_evaluation_test.py
│ ├── lvis_tools.py
│ ├── lvis_tools_test.py
│ ├── offline_eval_map_corloc.py
│ ├── offline_eval_map_corloc_test.py
│ ├── oid_challenge_evaluation.py
│ ├── oid_challenge_evaluation_utils.py
│ ├── oid_challenge_evaluation_utils_test.py
│ ├── oid_vrd_challenge_evaluation.py
│ ├── oid_vrd_challenge_evaluation_utils.py
│ ├── oid_vrd_challenge_evaluation_utils_test.py
│ ├── tf_example_parser.py
│ └── tf_example_parser_test.py
├── model_hparams.py
├── model_lib.py
├── model_lib_tf1_test.py
├── model_lib_tf2_test.py
├── model_lib_v2.py
├── model_main.py
├── model_main_tf2.py
├── model_tpu_main.py
├── models
│ ├── __init__.py
│ ├── bidirectional_feature_pyramid_generators.py
│ ├── bidirectional_feature_pyramid_generators_tf2_test.py
│ ├── center_net_hourglass_feature_extractor.py
│ ├── center_net_hourglass_feature_extractor_tf2_test.py
│ ├── center_net_mobilenet_v2_feature_extractor.py
│ ├── center_net_mobilenet_v2_feature_extractor_tf2_test.py
│ ├── center_net_mobilenet_v2_fpn_feature_extractor.py
│ ├── center_net_mobilenet_v2_fpn_feature_extractor_tf2_test.py
│ ├── center_net_resnet_feature_extractor.py
│ ├── center_net_resnet_feature_extractor_tf2_test.py
│ ├── center_net_resnet_v1_fpn_feature_extractor.py
│ ├── center_net_resnet_v1_fpn_feature_extractor_tf2_test.py
│ ├── embedded_ssd_mobilenet_v1_feature_extractor.py
│ ├── embedded_ssd_mobilenet_v1_feature_extractor_tf1_test.py
│ ├── faster_rcnn_inception_resnet_v2_feature_extractor.py
│ ├── faster_rcnn_inception_resnet_v2_feature_extractor_tf1_test.py
│ ├── faster_rcnn_inception_resnet_v2_keras_feature_extractor.py
│ ├── faster_rcnn_inception_resnet_v2_keras_feature_extractor_tf2_test.py
│ ├── faster_rcnn_inception_v2_feature_extractor.py
│ ├── faster_rcnn_inception_v2_feature_extractor_tf1_test.py
│ ├── faster_rcnn_mobilenet_v1_feature_extractor.py
│ ├── faster_rcnn_mobilenet_v1_feature_extractor_tf1_test.py
│ ├── faster_rcnn_nas_feature_extractor.py
│ ├── faster_rcnn_nas_feature_extractor_tf1_test.py
│ ├── faster_rcnn_pnas_feature_extractor.py
│ ├── faster_rcnn_pnas_feature_extractor_tf1_test.py
│ ├── faster_rcnn_resnet_keras_feature_extractor.py
│ ├── faster_rcnn_resnet_keras_feature_extractor_tf2_test.py
│ ├── faster_rcnn_resnet_v1_feature_extractor.py
│ ├── faster_rcnn_resnet_v1_feature_extractor_tf1_test.py
│ ├── faster_rcnn_resnet_v1_fpn_keras_feature_extractor.py
│ ├── faster_rcnn_resnet_v1_fpn_keras_feature_extractor_tf2_test.py
│ ├── feature_map_generators.py
│ ├── feature_map_generators_test.py
│ ├── keras_models
│ │ ├── __init__.py
│ │ ├── base_models
│ │ │ └── original_mobilenet_v2.py
│ │ ├── convert_keras_models.py
│ │ ├── hourglass_network.py
│ │ ├── hourglass_network_tf2_test.py
│ │ ├── inception_resnet_v2.py
│ │ ├── inception_resnet_v2_tf2_test.py
│ │ ├── mobilenet_v1.py
│ │ ├── mobilenet_v1_tf2_test.py
│ │ ├── mobilenet_v2.py
│ │ ├── mobilenet_v2_tf2_test.py
│ │ ├── model_utils.py
│ │ ├── resnet_v1.py
│ │ ├── resnet_v1_tf2_test.py
│ │ └── test_utils.py
│ ├── ssd_efficientnet_bifpn_feature_extractor.py
│ ├── ssd_efficientnet_bifpn_feature_extractor_tf2_test.py
│ ├── ssd_feature_extractor_test.py
│ ├── ssd_inception_v2_feature_extractor.py
│ ├── ssd_inception_v2_feature_extractor_tf1_test.py
│ ├── ssd_inception_v3_feature_extractor.py
│ ├── ssd_inception_v3_feature_extractor_tf1_test.py
│ ├── ssd_mobiledet_feature_extractor.py
│ ├── ssd_mobiledet_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_edgetpu_feature_extractor.py
│ ├── ssd_mobilenet_edgetpu_feature_extractor_testbase.py
│ ├── ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v1_feature_extractor.py
│ ├── ssd_mobilenet_v1_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v1_feature_extractor_tf2_test.py
│ ├── ssd_mobilenet_v1_fpn_feature_extractor.py
│ ├── ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v1_fpn_feature_extractor_tf2_test.py
│ ├── ssd_mobilenet_v1_fpn_keras_feature_extractor.py
│ ├── ssd_mobilenet_v1_keras_feature_extractor.py
│ ├── ssd_mobilenet_v1_ppn_feature_extractor.py
│ ├── ssd_mobilenet_v1_ppn_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v2_feature_extractor.py
│ ├── ssd_mobilenet_v2_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v2_feature_extractor_tf2_test.py
│ ├── ssd_mobilenet_v2_fpn_feature_extractor.py
│ ├── ssd_mobilenet_v2_fpn_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v2_fpn_feature_extractor_tf2_test.py
│ ├── ssd_mobilenet_v2_fpn_keras_feature_extractor.py
│ ├── ssd_mobilenet_v2_keras_feature_extractor.py
│ ├── ssd_mobilenet_v2_mnasfpn_feature_extractor.py
│ ├── ssd_mobilenet_v2_mnasfpn_feature_extractor_tf1_test.py
│ ├── ssd_mobilenet_v3_feature_extractor.py
│ ├── ssd_mobilenet_v3_feature_extractor_testbase.py
│ ├── ssd_mobilenet_v3_feature_extractor_tf1_test.py
│ ├── ssd_pnasnet_feature_extractor.py
│ ├── ssd_pnasnet_feature_extractor_tf1_test.py
│ ├── ssd_resnet_v1_fpn_feature_extractor.py
│ ├── ssd_resnet_v1_fpn_feature_extractor_testbase.py
│ ├── ssd_resnet_v1_fpn_feature_extractor_tf1_test.py
│ ├── ssd_resnet_v1_fpn_feature_extractor_tf2_test.py
│ ├── ssd_resnet_v1_fpn_keras_feature_extractor.py
│ ├── ssd_resnet_v1_ppn_feature_extractor.py
│ ├── ssd_resnet_v1_ppn_feature_extractor_testbase.py
│ └── ssd_resnet_v1_ppn_feature_extractor_tf1_test.py
├── packages
│ ├── tf1
│ │ └── setup.py
│ └── tf2
│ │ └── setup.py
├── predictors
│ ├── __init__.py
│ ├── convolutional_box_predictor.py
│ ├── convolutional_box_predictor_tf1_test.py
│ ├── convolutional_keras_box_predictor.py
│ ├── convolutional_keras_box_predictor_tf2_test.py
│ ├── heads
│ │ ├── __init__.py
│ │ ├── box_head.py
│ │ ├── box_head_tf1_test.py
│ │ ├── class_head.py
│ │ ├── class_head_tf1_test.py
│ │ ├── head.py
│ │ ├── keras_box_head.py
│ │ ├── keras_box_head_tf2_test.py
│ │ ├── keras_class_head.py
│ │ ├── keras_class_head_tf2_test.py
│ │ ├── keras_mask_head.py
│ │ ├── keras_mask_head_tf2_test.py
│ │ ├── keypoint_head.py
│ │ ├── keypoint_head_tf1_test.py
│ │ ├── mask_head.py
│ │ └── mask_head_tf1_test.py
│ ├── mask_rcnn_box_predictor.py
│ ├── mask_rcnn_box_predictor_tf1_test.py
│ ├── mask_rcnn_keras_box_predictor.py
│ ├── mask_rcnn_keras_box_predictor_tf2_test.py
│ ├── rfcn_box_predictor.py
│ ├── rfcn_box_predictor_tf1_test.py
│ ├── rfcn_keras_box_predictor.py
│ └── rfcn_keras_box_predictor_tf2_test.py
├── protos
│ ├── __init__.py
│ ├── anchor_generator.proto
│ ├── anchor_generator_pb2.py
│ ├── argmax_matcher.proto
│ ├── argmax_matcher_pb2.py
│ ├── bipartite_matcher.proto
│ ├── bipartite_matcher_pb2.py
│ ├── box_coder.proto
│ ├── box_coder_pb2.py
│ ├── box_predictor.proto
│ ├── box_predictor_pb2.py
│ ├── calibration.proto
│ ├── calibration_pb2.py
│ ├── center_net.proto
│ ├── center_net_pb2.py
│ ├── eval.proto
│ ├── eval_pb2.py
│ ├── faster_rcnn.proto
│ ├── faster_rcnn_box_coder.proto
│ ├── faster_rcnn_box_coder_pb2.py
│ ├── faster_rcnn_pb2.py
│ ├── flexible_grid_anchor_generator.proto
│ ├── flexible_grid_anchor_generator_pb2.py
│ ├── fpn.proto
│ ├── fpn_pb2.py
│ ├── graph_rewriter.proto
│ ├── graph_rewriter_pb2.py
│ ├── grid_anchor_generator.proto
│ ├── grid_anchor_generator_pb2.py
│ ├── hyperparams.proto
│ ├── hyperparams_pb2.py
│ ├── image_resizer.proto
│ ├── image_resizer_pb2.py
│ ├── input_reader.proto
│ ├── input_reader_pb2.py
│ ├── keypoint_box_coder.proto
│ ├── keypoint_box_coder_pb2.py
│ ├── losses.proto
│ ├── losses_pb2.py
│ ├── matcher.proto
│ ├── matcher_pb2.py
│ ├── mean_stddev_box_coder.proto
│ ├── mean_stddev_box_coder_pb2.py
│ ├── model.proto
│ ├── model_pb2.py
│ ├── multiscale_anchor_generator.proto
│ ├── multiscale_anchor_generator_pb2.py
│ ├── optimizer.proto
│ ├── optimizer_pb2.py
│ ├── pipeline.proto
│ ├── pipeline_pb2.py
│ ├── post_processing.proto
│ ├── post_processing_pb2.py
│ ├── preprocessor.proto
│ ├── preprocessor_pb2.py
│ ├── region_similarity_calculator.proto
│ ├── region_similarity_calculator_pb2.py
│ ├── square_box_coder.proto
│ ├── square_box_coder_pb2.py
│ ├── ssd.proto
│ ├── ssd_anchor_generator.proto
│ ├── ssd_anchor_generator_pb2.py
│ ├── ssd_pb2.py
│ ├── string_int_label_map.proto
│ ├── string_int_label_map_pb2.py
│ ├── target_assigner.proto
│ ├── target_assigner_pb2.py
│ ├── train.proto
│ └── train_pb2.py
├── samples
│ ├── cloud
│ │ └── cloud.yml
│ └── configs
│ │ ├── context_rcnn_resnet101_snapshot_serengeti.config
│ │ ├── context_rcnn_resnet101_snapshot_serengeti_sync.config
│ │ ├── embedded_ssd_mobilenet_v1_coco.config
│ │ ├── facessd_mobilenet_v2_quantized_320x320_open_image_v4.config
│ │ ├── faster_rcnn_inception_resnet_v2_atrous_coco.config
│ │ ├── faster_rcnn_inception_resnet_v2_atrous_cosine_lr_coco.config
│ │ ├── faster_rcnn_inception_resnet_v2_atrous_oid.config
│ │ ├── faster_rcnn_inception_resnet_v2_atrous_oid_v4.config
│ │ ├── faster_rcnn_inception_resnet_v2_atrous_pets.config
│ │ ├── faster_rcnn_inception_v2_coco.config
│ │ ├── faster_rcnn_inception_v2_pets.config
│ │ ├── faster_rcnn_nas_coco.config
│ │ ├── faster_rcnn_resnet101_atrous_coco.config
│ │ ├── faster_rcnn_resnet101_ava_v2.1.config
│ │ ├── faster_rcnn_resnet101_coco.config
│ │ ├── faster_rcnn_resnet101_fgvc.config
│ │ ├── faster_rcnn_resnet101_kitti.config
│ │ ├── faster_rcnn_resnet101_pets.config
│ │ ├── faster_rcnn_resnet101_voc07.config
│ │ ├── faster_rcnn_resnet152_coco.config
│ │ ├── faster_rcnn_resnet152_pets.config
│ │ ├── faster_rcnn_resnet50_coco.config
│ │ ├── faster_rcnn_resnet50_fgvc.config
│ │ ├── faster_rcnn_resnet50_pets.config
│ │ ├── mask_rcnn_inception_resnet_v2_atrous_coco.config
│ │ ├── mask_rcnn_inception_v2_coco.config
│ │ ├── mask_rcnn_resnet101_atrous_coco.config
│ │ ├── mask_rcnn_resnet101_pets.config
│ │ ├── mask_rcnn_resnet50_atrous_coco.config
│ │ ├── rfcn_resnet101_coco.config
│ │ ├── rfcn_resnet101_pets.config
│ │ ├── ssd_inception_v2_coco.config
│ │ ├── ssd_inception_v2_pets.config
│ │ ├── ssd_inception_v3_pets.config
│ │ ├── ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync.config
│ │ ├── ssd_mobilenet_v1_0.75_depth_quantized_300x300_coco14_sync.config
│ │ ├── ssd_mobilenet_v1_0.75_depth_quantized_300x300_pets_sync.config
│ │ ├── ssd_mobilenet_v1_300x300_coco14_sync.config
│ │ ├── ssd_mobilenet_v1_coco.config
│ │ ├── ssd_mobilenet_v1_focal_loss_pets.config
│ │ ├── ssd_mobilenet_v1_focal_loss_pets_inference.config
│ │ ├── ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync.config
│ │ ├── ssd_mobilenet_v1_pets.config
│ │ ├── ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync.config
│ │ ├── ssd_mobilenet_v1_quantized_300x300_coco14_sync.config
│ │ ├── ssd_mobilenet_v2_coco.config
│ │ ├── ssd_mobilenet_v2_fpnlite_quantized_shared_box_predictor_256x256_depthmultiplier_75_coco14_sync.config
│ │ ├── ssd_mobilenet_v2_fullyconv_coco.config
│ │ ├── ssd_mobilenet_v2_mnasfpn_shared_box_predictor_320x320_coco_sync.config
│ │ ├── ssd_mobilenet_v2_oid_v4.config
│ │ ├── ssd_mobilenet_v2_pets_keras.config
│ │ ├── ssd_mobilenet_v2_quantized_300x300_coco.config
│ │ ├── ssd_resnet101_v1_fpn_shared_box_predictor_oid_512x512_sync.config
│ │ ├── ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync.config
│ │ ├── ssdlite_mobiledet_cpu_320x320_coco_sync_4x4.config
│ │ ├── ssdlite_mobiledet_dsp_320x320_coco_sync_4x4.config
│ │ ├── ssdlite_mobiledet_edgetpu_320x320_coco_sync_4x4.config
│ │ ├── ssdlite_mobiledet_gpu_320x320_coco_sync_4x4.config
│ │ ├── ssdlite_mobilenet_edgetpu_320x320_coco.config
│ │ ├── ssdlite_mobilenet_edgetpu_320x320_coco_quant.config
│ │ ├── ssdlite_mobilenet_v1_coco.config
│ │ ├── ssdlite_mobilenet_v2_coco.config
│ │ ├── ssdlite_mobilenet_v3_large_320x320_coco.config
│ │ └── ssdlite_mobilenet_v3_small_320x320_coco.config
├── test_data
│ ├── context_rcnn_camera_trap.config
│ ├── pets_examples.record
│ ├── snapshot_serengeti_sequence_examples.record
│ └── ssd_mobilenet_v1_fpp.config
├── test_images
│ ├── ducky
│ │ ├── test
│ │ │ ├── out1.jpg
│ │ │ ├── out10.jpg
│ │ │ ├── out11.jpg
│ │ │ ├── out12.jpg
│ │ │ ├── out13.jpg
│ │ │ ├── out14.jpg
│ │ │ ├── out15.jpg
│ │ │ ├── out16.jpg
│ │ │ ├── out17.jpg
│ │ │ ├── out18.jpg
│ │ │ ├── out19.jpg
│ │ │ ├── out2.jpg
│ │ │ ├── out20.jpg
│ │ │ ├── out21.jpg
│ │ │ ├── out22.jpg
│ │ │ ├── out23.jpg
│ │ │ ├── out24.jpg
│ │ │ ├── out25.jpg
│ │ │ ├── out26.jpg
│ │ │ ├── out27.jpg
│ │ │ ├── out28.jpg
│ │ │ ├── out29.jpg
│ │ │ ├── out3.jpg
│ │ │ ├── out30.jpg
│ │ │ ├── out31.jpg
│ │ │ ├── out32.jpg
│ │ │ ├── out33.jpg
│ │ │ ├── out34.jpg
│ │ │ ├── out35.jpg
│ │ │ ├── out36.jpg
│ │ │ ├── out37.jpg
│ │ │ ├── out38.jpg
│ │ │ ├── out39.jpg
│ │ │ ├── out4.jpg
│ │ │ ├── out40.jpg
│ │ │ ├── out41.jpg
│ │ │ ├── out42.jpg
│ │ │ ├── out43.jpg
│ │ │ ├── out44.jpg
│ │ │ ├── out45.jpg
│ │ │ ├── out46.jpg
│ │ │ ├── out47.jpg
│ │ │ ├── out48.jpg
│ │ │ ├── out49.jpg
│ │ │ ├── out5.jpg
│ │ │ ├── out6.jpg
│ │ │ ├── out7.jpg
│ │ │ ├── out8.jpg
│ │ │ └── out9.jpg
│ │ └── train
│ │ │ ├── robertducky1.jpg
│ │ │ ├── robertducky2.jpg
│ │ │ ├── robertducky3.jpg
│ │ │ ├── robertducky4.jpg
│ │ │ └── robertducky5.jpg
│ ├── image1.jpg
│ ├── image2.jpg
│ ├── image3.jpg
│ ├── image_info.txt
│ └── snapshot_serengeti
│ │ ├── README.md
│ │ ├── S1_E03_R3_PICT0038.jpeg
│ │ ├── S1_E03_R3_PICT0039.jpeg
│ │ ├── S1_E03_R3_PICT0040.jpeg
│ │ ├── S1_E03_R3_PICT0041.jpeg
│ │ └── context_rcnn_demo_metadata.json
├── tpu_exporters
│ ├── __init__.py
│ ├── export_saved_model_tpu.py
│ ├── export_saved_model_tpu_lib.py
│ ├── export_saved_model_tpu_lib_tf1_test.py
│ ├── faster_rcnn.py
│ ├── ssd.py
│ ├── testdata
│ │ ├── __init__.py
│ │ ├── faster_rcnn
│ │ │ └── faster_rcnn_resnet101_atrous_coco.config
│ │ └── ssd
│ │ │ └── ssd_pipeline.config
│ ├── utils.py
│ └── utils_test.py
└── utils
│ ├── __init__.py
│ ├── autoaugment_utils.py
│ ├── bifpn_utils.py
│ ├── category_util.py
│ ├── category_util_test.py
│ ├── colab_utils.py
│ ├── config_util.py
│ ├── config_util_test.py
│ ├── context_manager.py
│ ├── context_manager_test.py
│ ├── dataset_util.py
│ ├── dataset_util_test.py
│ ├── json_utils.py
│ ├── json_utils_test.py
│ ├── label_map_util.py
│ ├── label_map_util_test.py
│ ├── learning_schedules.py
│ ├── learning_schedules_test.py
│ ├── metrics.py
│ ├── metrics_test.py
│ ├── model_util.py
│ ├── model_util_tf2_test.py
│ ├── np_box_list.py
│ ├── np_box_list_ops.py
│ ├── np_box_list_ops_test.py
│ ├── np_box_list_test.py
│ ├── np_box_mask_list.py
│ ├── np_box_mask_list_ops.py
│ ├── np_box_mask_list_ops_test.py
│ ├── np_box_mask_list_test.py
│ ├── np_box_ops.py
│ ├── np_box_ops_test.py
│ ├── np_mask_ops.py
│ ├── np_mask_ops_test.py
│ ├── object_detection_evaluation.py
│ ├── object_detection_evaluation_test.py
│ ├── ops.py
│ ├── ops_test.py
│ ├── patch_ops.py
│ ├── patch_ops_test.py
│ ├── per_image_evaluation.py
│ ├── per_image_evaluation_test.py
│ ├── per_image_vrd_evaluation.py
│ ├── per_image_vrd_evaluation_test.py
│ ├── shape_utils.py
│ ├── shape_utils_test.py
│ ├── spatial_transform_ops.py
│ ├── spatial_transform_ops_test.py
│ ├── static_shape.py
│ ├── static_shape_test.py
│ ├── target_assigner_utils.py
│ ├── target_assigner_utils_test.py
│ ├── test_case.py
│ ├── test_case_test.py
│ ├── test_utils.py
│ ├── test_utils_test.py
│ ├── tf_version.py
│ ├── variables_helper.py
│ ├── variables_helper_tf1_test.py
│ ├── visualization_utils.py
│ ├── visualization_utils_test.py
│ ├── vrd_evaluation.py
│ └── vrd_evaluation_test.py
├── slim
├── BUILD
├── README.md
├── WORKSPACE
├── __init__.py
├── datasets
│ ├── __init__.py
│ ├── build_imagenet_data.py
│ ├── cifar10.py
│ ├── dataset_factory.py
│ ├── dataset_utils.py
│ ├── download_and_convert_cifar10.py
│ ├── download_and_convert_flowers.py
│ ├── download_and_convert_imagenet.sh
│ ├── download_and_convert_mnist.py
│ ├── download_and_convert_visualwakewords.py
│ ├── download_and_convert_visualwakewords_lib.py
│ ├── download_imagenet.sh
│ ├── flowers.py
│ ├── imagenet.py
│ ├── imagenet_2012_validation_synset_labels.txt
│ ├── imagenet_lsvrc_2015_synsets.txt
│ ├── imagenet_metadata.txt
│ ├── mnist.py
│ ├── preprocess_imagenet_validation_data.py
│ ├── process_bounding_boxes.py
│ └── visualwakewords.py
├── deployment
│ ├── __init__.py
│ ├── model_deploy.py
│ └── model_deploy_test.py
├── download_and_convert_data.py
├── eval_image_classifier.py
├── export_inference_graph.py
├── export_inference_graph_test.py
├── nets
│ ├── __init__.py
│ ├── alexnet.py
│ ├── alexnet_test.py
│ ├── cifarnet.py
│ ├── cyclegan.py
│ ├── cyclegan_test.py
│ ├── dcgan.py
│ ├── dcgan_test.py
│ ├── i3d.py
│ ├── i3d_test.py
│ ├── i3d_utils.py
│ ├── inception.py
│ ├── inception_resnet_v2.py
│ ├── inception_resnet_v2_test.py
│ ├── inception_utils.py
│ ├── inception_v1.py
│ ├── inception_v1_test.py
│ ├── inception_v2.py
│ ├── inception_v2_test.py
│ ├── inception_v3.py
│ ├── inception_v3_test.py
│ ├── inception_v4.py
│ ├── inception_v4_test.py
│ ├── lenet.py
│ ├── mobilenet
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── conv_blocks.py
│ │ ├── g3doc
│ │ │ ├── edgetpu_latency.png
│ │ │ ├── latency_pixel1.png
│ │ │ └── madds_top1_accuracy.png
│ │ ├── mnet_v1_vs_v2_pixel1_latency.png
│ │ ├── mobilenet.py
│ │ ├── mobilenet_example.ipynb
│ │ ├── mobilenet_v2.py
│ │ ├── mobilenet_v2_test.py
│ │ ├── mobilenet_v3.py
│ │ └── mobilenet_v3_test.py
│ ├── mobilenet_v1.md
│ ├── mobilenet_v1.png
│ ├── mobilenet_v1.py
│ ├── mobilenet_v1_eval.py
│ ├── mobilenet_v1_test.py
│ ├── mobilenet_v1_train.py
│ ├── nasnet
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── nasnet.py
│ │ ├── nasnet_test.py
│ │ ├── nasnet_utils.py
│ │ ├── nasnet_utils_test.py
│ │ ├── pnasnet.py
│ │ └── pnasnet_test.py
│ ├── nets_factory.py
│ ├── nets_factory_test.py
│ ├── overfeat.py
│ ├── overfeat_test.py
│ ├── pix2pix.py
│ ├── pix2pix_test.py
│ ├── post_training_quantization.py
│ ├── resnet_utils.py
│ ├── resnet_v1.py
│ ├── resnet_v1_test.py
│ ├── resnet_v2.py
│ ├── resnet_v2_test.py
│ ├── s3dg.py
│ ├── s3dg_test.py
│ ├── vgg.py
│ └── vgg_test.py
├── preprocessing
│ ├── __init__.py
│ ├── cifarnet_preprocessing.py
│ ├── inception_preprocessing.py
│ ├── lenet_preprocessing.py
│ ├── preprocessing_factory.py
│ └── vgg_preprocessing.py
├── scripts
│ ├── export_mobilenet.sh
│ ├── finetune_inception_resnet_v2_on_flowers.sh
│ ├── finetune_inception_v1_on_flowers.sh
│ ├── finetune_inception_v3_on_flowers.sh
│ ├── finetune_resnet_v1_50_on_flowers.sh
│ ├── train_cifarnet_on_cifar10.sh
│ └── train_lenet_on_mnist.sh
├── setup.py
├── slim_walkthrough.ipynb
└── train_image_classifier.py
└── ssd_mobilenet_v2_320x320_coco17_tpu-8
├── checkpoint
    │ ├── checkpoint
    │ ├── ckpt-0.data-00000-of-00001
    │ └── ckpt-0.index
├── mscoco_label_map.pbtxt
├── pipeline.config
└── saved_model
├── saved_model.pb
└── variables
├── variables.data-00000-of-00001
└── variables.index
/main.py:
--------------------------------------------------------------------------------
1 | from sys import stdout
2 | import logging
3 | from flask import Flask, render_template
4 | from flask_socketio import SocketIO, emit
5 | import base64
6 | import numpy as np
7 | import cv2
8 | import os
9 |
10 | # modelConfig.main runs the detection model on a numpy image and returns the annotated frame.
11 | from modelConfig import main
12 |
13 | app = Flask(__name__)
14 | app.logger.addHandler(logging.StreamHandler(stdout))
15 | app.config['SECRET_KEY'] = os.urandom(16)
16 | app.config['DEBUG'] = True
17 | socketio = SocketIO(app)
18 |
19 |
20 | @socketio.on('input image', namespace='/test')
21 | def test_message(message):
22 |     # Strip the "data:image/jpeg;base64," prefix from the data URL.
23 |     image_data = message.split(",")[1]
24 |
25 | img_data = base64.b64decode(image_data)
26 |     nparr = np.frombuffer(img_data, np.uint8)
27 | img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
28 |
29 |     # Run the detector; main() draws boxes and labels onto the frame.
30 | img_np = main(img_np)
31 |
32 | retval, buffer = cv2.imencode('.jpg', img_np)
33 | pic_str = base64.b64encode(buffer)
34 | pic_str = pic_str.decode()
35 |
36 | image_data = "data:image/jpeg;base64," + pic_str
37 |
38 | emit('out-image-event', {'image_data': image_data}, namespace='/test')
39 |
40 |
41 | @app.route('/')
42 | def index():
43 |
44 | return render_template('index.html')
45 |
46 |
47 |
48 | if __name__ == '__main__':
49 | socketio.run(app, host="0.0.0.0", port=443, ssl_context=('cert.pem', 'key.pem'))
50 |
--------------------------------------------------------------------------------
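A minimal client sketch for exercising the 'input image' handler above (not part of the repo: the test.jpg file name, the localhost URL, and the python-socketio client package are assumptions, and the server must already be running with its cert.pem/key.pem pair):

import base64
import socketio  # pip install "python-socketio[client]"

sio = socketio.Client(ssl_verify=False)  # the server uses a self-signed certificate

@sio.on('out-image-event', namespace='/test')
def on_out_image(data):
    # The server replies with the annotated frame as a base64 data URL.
    print(data['image_data'][:60] + '...')
    sio.disconnect()

with open('test.jpg', 'rb') as f:  # any JPEG frame
    payload = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()

sio.connect('https://localhost:443', namespaces=['/test'])
sio.emit('input image', payload, namespace='/test')
sio.wait()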
/modelConfig.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 | import numpy as np
4 | from object_detection.utils import ops as utils_ops
5 | from object_detection.utils import label_map_util
6 | from object_detection.utils import visualization_utils as vis_util
7 |
8 | # Label map: maps class ids to display names.
9 | category_index = label_map_util.create_category_index_from_labelmap(os.getcwd() + "/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/mscoco_label_map.pbtxt", use_display_name=True)
10 |
11 | # Model: load the exported SavedModel once at import time.
12 | tf.keras.backend.clear_session()
13 | saved_model_dir = os.getcwd() + "/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/"
14 | model = tf.saved_model.load(saved_model_dir)
15 |
16 |
17 | def run_inference_for_single_image(model, image):
18 |
19 | # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
20 | input_tensor = tf.convert_to_tensor(image)
21 | # The model expects a batch of images, so add an axis with `tf.newaxis`.
22 | input_tensor = input_tensor[tf.newaxis,...]
23 |
24 | # Run inference
25 | model_fn = model.signatures['serving_default']
26 | output_dict = model_fn(input_tensor)
27 |
28 |     # All outputs are batch tensors.
29 | # Convert to numpy arrays, and take index [0] to remove the batch dimension.
30 | # We're only interested in the first num_detections.
31 | num_detections = int(output_dict.pop('num_detections'))
32 | output_dict = {key:value[0, :num_detections].numpy()
33 | for key,value in output_dict.items()}
34 | output_dict['num_detections'] = num_detections
35 |
36 | # detection_classes should be ints.
37 | output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
38 |
39 | # Handle models with masks:
40 | if 'detection_masks' in output_dict:
41 |         # Reframe the bbox mask to the image size.
42 | detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
43 | output_dict['detection_masks'], output_dict['detection_boxes'],
44 | image.shape[0], image.shape[1])
45 | detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
46 | tf.uint8)
47 | output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
48 |
49 | return output_dict
50 |
51 |
52 | def main(img_np):
53 |
54 | output_dict = run_inference_for_single_image(model, img_np)
55 |
56 | vis_util.visualize_boxes_and_labels_on_image_array(
57 | img_np,
58 | output_dict['detection_boxes'],
59 | output_dict['detection_classes'],
60 | output_dict['detection_scores'],
61 | category_index,
62 | instance_masks=output_dict.get('detection_masks_reframed', None),
63 | use_normalized_coordinates=True,
64 | line_thickness=3)
65 |
66 | return img_np
--------------------------------------------------------------------------------
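A quick offline sanity check for modelConfig (a sketch: image1.jpg ships with the repo under tf/object_detection/test_images, and the script must run from the repository root because the paths above are built from os.getcwd()):

import cv2
from modelConfig import main

img = cv2.imread('tf/object_detection/test_images/image1.jpg')  # uint8 BGR frame
annotated = main(img)               # detections are drawn onto the array in place
cv2.imwrite('annotated.jpg', annotated)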
/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow-gpu==2.4.1
2 | tensorflow-object-detection-api==0.1.1
3 | tf_slim==1.1.0
4 | click==6.7
5 | enum-compat==0.0.2
6 | enum34==1.1.6
7 | eventlet==0.20.0
8 | Flask==0.12.2
9 | Flask-SocketIO==2.9.1
10 | gevent==1.2.2
11 | gevent-websocket==0.10.1
12 | greenlet==0.4.12
13 | gunicorn==19.7.1
14 | itsdangerous==0.24
15 | Jinja2==2.9.6
16 | MarkupSafe==1.0
17 | olefile==0.44
18 | Pillow==4.2.1
19 | python-engineio==1.7.0
20 | python-socketio==1.7.7
21 | redis==2.10.5
22 | six==1.10.0
23 | Werkzeug==0.12.2
24 |
--------------------------------------------------------------------------------
/static/js/main.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function(){
2 | let namespace = "/test";
3 | let video = document.querySelector("#videoElement");
4 | let canvas = document.querySelector("#canvasElement");
5 | let ctx = canvas.getContext('2d');
6 | photo = document.getElementById('photo');
7 | var localMediaStream = null;
8 |
9 | var socket = io.connect(location.protocol + '//' + document.domain + ':' + location.port + namespace);
10 |
11 |   function sendSnapshot() {
12 |     if (!localMediaStream) {
13 |       return;
14 |     }
15 |
16 |     ctx.drawImage(video, 0, 0, video.videoWidth, video.videoHeight, 0, 0, 300, 150);
17 |
18 |     let dataURL = canvas.toDataURL('image/jpeg');
19 |     socket.emit('input image', dataURL);
20 |   }
21 |
22 |   // Register the handler once: the server answers every 'input image'
23 |   // message with an 'out-image-event' that carries the annotated frame
24 |   // as a base64 data URL, which is displayed in the photo element.
25 |   socket.on('out-image-event', function(data) {
26 |     photo.setAttribute('src', data.image_data);
27 |   });
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 | socket.on('connect', function() {
36 | console.log('Connected!');
37 | });
38 |
39 | var constraints = {
40 | video: {
41 | width: { min: 480 },
42 | height: { min: 480 }
43 | }
44 | };
45 |
46 | navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
47 | video.srcObject = stream;
48 | localMediaStream = stream;
49 |
50 | setInterval(function () {
51 | sendSnapshot();
52 | }, 50);
53 | }).catch(function(error) {
54 | console.log(error);
55 | });
56 | });
57 |
58 |
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
(The template's HTML markup was lost in extraction; only the page title
"Move On" survives. From static/js/main.js, the page defines the
#videoElement <video>, #canvasElement <canvas>, and #photo <img> elements
and loads jQuery and the Socket.IO client script.)
--------------------------------------------------------------------------------
/tf/bin/protoc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/bin/protoc
--------------------------------------------------------------------------------
/tf/object_detection/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to the TensorFlow Object Detection API
2 |
3 | Patches to the TensorFlow Object Detection API are welcome!
4 |
5 | We require contributors to fill out either the individual or corporate
6 | Contributor License Agreement (CLA).
7 |
8 | * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
9 | * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
10 |
11 | Please follow the
12 | [TensorFlow contributing guidelines](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md)
13 | when submitting pull requests.
14 |
--------------------------------------------------------------------------------
/tf/object_detection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/anchor_generators/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/anchor_generators/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/box_coders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/box_coders/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/box_coders/mean_stddev_box_coder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Mean stddev box coder.
17 |
18 | This box coder uses the following coding schema to encode boxes:
19 | rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
20 | """
21 | from object_detection.core import box_coder
22 | from object_detection.core import box_list
23 |
24 |
25 | class MeanStddevBoxCoder(box_coder.BoxCoder):
26 | """Mean stddev box coder."""
27 |
28 | def __init__(self, stddev=0.01):
29 | """Constructor for MeanStddevBoxCoder.
30 |
31 | Args:
32 | stddev: The standard deviation used to encode and decode boxes.
33 | """
34 | self._stddev = stddev
35 |
36 | @property
37 | def code_size(self):
38 | return 4
39 |
40 | def _encode(self, boxes, anchors):
41 | """Encode a box collection with respect to anchor collection.
42 |
43 | Args:
44 | boxes: BoxList holding N boxes to be encoded.
45 | anchors: BoxList of N anchors.
46 |
47 | Returns:
48 | a tensor representing N anchor-encoded boxes
49 |
50 | Raises:
51 | ValueError: if the anchors still have deprecated stddev field.
52 | """
53 | box_corners = boxes.get()
54 | if anchors.has_field('stddev'):
55 | raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
56 | "should not be specified in the box list.")
57 | means = anchors.get()
58 | return (box_corners - means) / self._stddev
59 |
60 | def _decode(self, rel_codes, anchors):
61 | """Decode.
62 |
63 | Args:
64 | rel_codes: a tensor representing N anchor-encoded boxes.
65 | anchors: BoxList of anchors.
66 |
67 | Returns:
68 | boxes: BoxList holding N bounding boxes
69 |
70 | Raises:
71 | ValueError: if the anchors still have deprecated stddev field and expects
72 | the decode method to use stddev value from that field.
73 | """
74 | means = anchors.get()
75 | if anchors.has_field('stddev'):
76 | raise ValueError("'stddev' is a parameter of MeanStddevBoxCoder and "
77 | "should not be specified in the box list.")
78 | box_corners = rel_codes * self._stddev + means
79 | return box_list.BoxList(box_corners)
80 |
--------------------------------------------------------------------------------
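A worked instance of the coding schema above in plain numpy (a sketch; stddev=0.1 and the box/anchor values match the second row of the unit-test fixtures in the file that follows):

import numpy as np

boxes   = np.array([[0.0, 0.0, 0.5, 0.5]])  # box corners [ymin, xmin, ymax, xmax]
anchors = np.array([[0.5, 0.5, 1.0, 0.8]])  # anchor corners act as the means
stddev  = 0.1

rel_codes = (boxes - anchors) / stddev      # encode -> [[-5., -5., -5., -3.]]
decoded   = rel_codes * stddev + anchors    # decode recovers the box corners
print(rel_codes, decoded)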
/tf/object_detection/box_coders/mean_stddev_box_coder_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.box_coders.mean_stddev_box_coder."""
17 | import numpy as np
18 | import tensorflow.compat.v1 as tf
19 |
20 | from object_detection.box_coders import mean_stddev_box_coder
21 | from object_detection.core import box_list
22 | from object_detection.utils import test_case
23 |
24 |
25 | class MeanStddevBoxCoderTest(test_case.TestCase):
26 |
27 | def testGetCorrectRelativeCodesAfterEncoding(self):
28 | boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]], np.float32)
29 | anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32)
30 | expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]
31 |
32 | def graph_fn(boxes, anchors):
33 | anchors = box_list.BoxList(anchors)
34 | boxes = box_list.BoxList(boxes)
35 | coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
36 | rel_codes = coder.encode(boxes, anchors)
37 | return rel_codes
38 |
39 | rel_codes_out = self.execute(graph_fn, [boxes, anchors])
40 | self.assertAllClose(rel_codes_out, expected_rel_codes, rtol=1e-04,
41 | atol=1e-04)
42 |
43 | def testGetCorrectBoxesAfterDecoding(self):
44 | rel_codes = np.array([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]],
45 | np.float32)
46 | expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]]
47 | anchors = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]], np.float32)
48 |
49 | def graph_fn(rel_codes, anchors):
50 | anchors = box_list.BoxList(anchors)
51 | coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
52 | decoded_boxes = coder.decode(rel_codes, anchors).get()
53 | return decoded_boxes
54 |
55 | decoded_boxes_out = self.execute(graph_fn, [rel_codes, anchors])
56 | self.assertAllClose(decoded_boxes_out, expected_box_corners, rtol=1e-04,
57 | atol=1e-04)
58 |
59 |
60 | if __name__ == '__main__':
61 | tf.test.main()
62 |
--------------------------------------------------------------------------------
/tf/object_detection/builders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/builders/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/builders/graph_rewriter_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Functions for quantized training and evaluation."""
16 |
17 | import tensorflow.compat.v1 as tf
18 | import tf_slim as slim
19 | # pylint: disable=g-import-not-at-top
20 | try:
21 | from tensorflow.contrib import quantize as contrib_quantize
22 | except ImportError:
23 | # TF 2.0 doesn't ship with contrib.
24 | pass
25 | # pylint: enable=g-import-not-at-top
26 |
27 |
28 | def build(graph_rewriter_config, is_training):
29 | """Returns a function that modifies default graph based on options.
30 |
31 | Args:
32 | graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
33 |     is_training: whether in training or eval mode.
34 | """
35 | def graph_rewrite_fn():
36 | """Function to quantize weights and activation of the default graph."""
37 | if (graph_rewriter_config.quantization.weight_bits != 8 or
38 | graph_rewriter_config.quantization.activation_bits != 8):
39 | raise ValueError('Only 8bit quantization is supported')
40 |
41 | # Quantize the graph by inserting quantize ops for weights and activations
42 | if is_training:
43 | contrib_quantize.experimental_create_training_graph(
44 | input_graph=tf.get_default_graph(),
45 | quant_delay=graph_rewriter_config.quantization.delay
46 | )
47 | else:
48 | contrib_quantize.experimental_create_eval_graph(
49 | input_graph=tf.get_default_graph()
50 | )
51 | slim.summarize_collection('quant_vars')
52 |
53 | return graph_rewrite_fn
54 |
--------------------------------------------------------------------------------
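A sketch of assembling the GraphRewriter proto that build() above consumes (the delay value is an arbitrary example; calling the returned function requires TF1, since it relies on tensorflow.contrib.quantize):

from google.protobuf import text_format
from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2

config = graph_rewriter_pb2.GraphRewriter()
text_format.Merge("""
  quantization {
    delay: 48000       # float-training steps before fake-quant ops activate
    weight_bits: 8     # build() rejects anything other than 8/8
    activation_bits: 8
  }
""", config)

graph_rewrite_fn = graph_rewriter_builder.build(config, is_training=True)
graph_rewrite_fn()  # inserts fake-quant ops into the current default graph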
/tf/object_detection/builders/matcher_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """A function to build an object detection matcher from configuration."""
17 |
18 | from object_detection.matchers import argmax_matcher
19 | from object_detection.protos import matcher_pb2
20 | from object_detection.utils import tf_version
21 |
22 | if tf_version.is_tf1():
23 | from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
24 |
25 |
26 | def build(matcher_config):
27 | """Builds a matcher object based on the matcher config.
28 |
29 | Args:
30 | matcher_config: A matcher.proto object containing the config for the desired
31 | Matcher.
32 |
33 | Returns:
34 | Matcher based on the config.
35 |
36 | Raises:
37 | ValueError: On empty matcher proto.
38 | """
39 | if not isinstance(matcher_config, matcher_pb2.Matcher):
40 | raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
41 | if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
42 | matcher = matcher_config.argmax_matcher
43 | matched_threshold = unmatched_threshold = None
44 | if not matcher.ignore_thresholds:
45 | matched_threshold = matcher.matched_threshold
46 | unmatched_threshold = matcher.unmatched_threshold
47 | return argmax_matcher.ArgMaxMatcher(
48 | matched_threshold=matched_threshold,
49 | unmatched_threshold=unmatched_threshold,
50 | negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
51 | force_match_for_each_row=matcher.force_match_for_each_row,
52 | use_matmul_gather=matcher.use_matmul_gather)
53 | if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
54 | if tf_version.is_tf2():
55 | raise ValueError('bipartite_matcher is not supported in TF 2.X')
56 | matcher = matcher_config.bipartite_matcher
57 | return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather)
58 | raise ValueError('Empty matcher.')
59 |
--------------------------------------------------------------------------------
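A sketch of driving build() above with an argmax matcher text proto (threshold values are illustrative: rows whose best similarity clears matched_threshold are matched, and rows below unmatched_threshold become negatives):

from google.protobuf import text_format
from object_detection.builders import matcher_builder
from object_detection.protos import matcher_pb2

matcher_proto = matcher_pb2.Matcher()
text_format.Merge("""
  argmax_matcher {
    matched_threshold: 0.5
    unmatched_threshold: 0.5
    force_match_for_each_row: true
  }
""", matcher_proto)
matcher = matcher_builder.build(matcher_proto)  # an ArgMaxMatcher instance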
/tf/object_detection/builders/model_builder_tf1_test.py:
--------------------------------------------------------------------------------
1 | # Lint as: python2, python3
2 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | """Tests for model_builder under TensorFlow 1.X."""
17 | import unittest
18 | from absl.testing import parameterized
19 | import tensorflow.compat.v1 as tf
20 |
21 | from object_detection.builders import model_builder
22 | from object_detection.builders import model_builder_test
23 | from object_detection.meta_architectures import context_rcnn_meta_arch
24 | from object_detection.meta_architectures import ssd_meta_arch
25 | from object_detection.protos import losses_pb2
26 | from object_detection.utils import tf_version
27 |
28 |
29 | @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
30 | class ModelBuilderTF1Test(model_builder_test.ModelBuilderTest):
31 |
32 | def default_ssd_feature_extractor(self):
33 | return 'ssd_resnet50_v1_fpn'
34 |
35 | def default_faster_rcnn_feature_extractor(self):
36 | return 'faster_rcnn_resnet101'
37 |
38 | def ssd_feature_extractors(self):
39 | return model_builder.SSD_FEATURE_EXTRACTOR_CLASS_MAP
40 |
41 | def get_override_base_feature_extractor_hyperparams(self, extractor_type):
42 | return extractor_type in {'ssd_inception_v2', 'ssd_inception_v3'}
43 |
44 | def faster_rcnn_feature_extractors(self):
45 | return model_builder.FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP
46 |
47 |
48 | @parameterized.parameters(True, False)
49 | def test_create_context_rcnn_from_config_with_params(self, is_training):
50 | model_proto = self.create_default_faster_rcnn_model_proto()
51 | model_proto.faster_rcnn.context_config.attention_bottleneck_dimension = 10
52 | model_proto.faster_rcnn.context_config.attention_temperature = 0.5
53 | model = model_builder.build(model_proto, is_training=is_training)
54 | self.assertIsInstance(model, context_rcnn_meta_arch.ContextRCNNMetaArch)
55 |
56 |
57 | if __name__ == '__main__':
58 | tf.test.main()
59 |
--------------------------------------------------------------------------------
/tf/object_detection/builders/region_similarity_calculator_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Builder for region similarity calculators."""
17 |
18 | from object_detection.core import region_similarity_calculator
19 | from object_detection.protos import region_similarity_calculator_pb2
20 |
21 |
22 | def build(region_similarity_calculator_config):
23 | """Builds region similarity calculator based on the configuration.
24 |
25 | Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See
26 | protos/region_similarity_calculator.proto for details.
27 |
28 | Args:
29 | region_similarity_calculator_config: RegionSimilarityCalculator
30 | configuration proto.
31 |
32 | Returns:
33 | region_similarity_calculator: RegionSimilarityCalculator object.
34 |
35 | Raises:
36 | ValueError: On unknown region similarity calculator.
37 | """
38 |
39 | if not isinstance(
40 | region_similarity_calculator_config,
41 | region_similarity_calculator_pb2.RegionSimilarityCalculator):
42 | raise ValueError(
43 | 'region_similarity_calculator_config not of type '
44 |         'region_similarity_calculator_pb2.RegionSimilarityCalculator')
45 |
46 | similarity_calculator = region_similarity_calculator_config.WhichOneof(
47 | 'region_similarity')
48 | if similarity_calculator == 'iou_similarity':
49 | return region_similarity_calculator.IouSimilarity()
50 | if similarity_calculator == 'ioa_similarity':
51 | return region_similarity_calculator.IoaSimilarity()
52 | if similarity_calculator == 'neg_sq_dist_similarity':
53 | return region_similarity_calculator.NegSqDistSimilarity()
54 | if similarity_calculator == 'thresholded_iou_similarity':
55 | return region_similarity_calculator.ThresholdedIouSimilarity(
56 | region_similarity_calculator_config.thresholded_iou_similarity
57 | .iou_threshold)
58 |
59 | raise ValueError('Unknown region similarity calculator.')
60 |
--------------------------------------------------------------------------------
/tf/object_detection/builders/target_assigner_builder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """A function to build an object detection box coder from configuration."""
17 | from object_detection.builders import box_coder_builder
18 | from object_detection.builders import matcher_builder
19 | from object_detection.builders import region_similarity_calculator_builder
20 | from object_detection.core import target_assigner
21 |
22 |
23 | def build(target_assigner_config):
24 | """Builds a TargetAssigner object based on the config.
25 |
26 | Args:
27 | target_assigner_config: A target_assigner proto message containing config
28 | for the desired target assigner.
29 |
30 | Returns:
31 | TargetAssigner object based on the config.
32 | """
33 | matcher_instance = matcher_builder.build(target_assigner_config.matcher)
34 | similarity_calc_instance = region_similarity_calculator_builder.build(
35 | target_assigner_config.similarity_calculator)
36 | box_coder = box_coder_builder.build(target_assigner_config.box_coder)
37 | return target_assigner.TargetAssigner(
38 | matcher=matcher_instance,
39 | similarity_calc=similarity_calc_instance,
40 | box_coder_instance=box_coder)
41 |
--------------------------------------------------------------------------------
/tf/object_detection/builders/target_assigner_builder_test.py:
--------------------------------------------------------------------------------
1 | """Tests for google3.third_party.tensorflow_models.object_detection.builders.target_assigner_builder."""
2 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 |
17 | import tensorflow.compat.v1 as tf
18 |
19 | from google.protobuf import text_format
20 |
21 |
22 | from object_detection.builders import target_assigner_builder
23 | from object_detection.core import target_assigner
24 | from object_detection.protos import target_assigner_pb2
25 |
26 |
27 | class TargetAssignerBuilderTest(tf.test.TestCase):
28 |
29 | def test_build_a_target_assigner(self):
30 | target_assigner_text_proto = """
31 | matcher {
32 | argmax_matcher {matched_threshold: 0.5}
33 | }
34 | similarity_calculator {
35 | iou_similarity {}
36 | }
37 | box_coder {
38 | faster_rcnn_box_coder {}
39 | }
40 | """
41 | target_assigner_proto = target_assigner_pb2.TargetAssigner()
42 | text_format.Merge(target_assigner_text_proto, target_assigner_proto)
43 | target_assigner_instance = target_assigner_builder.build(
44 | target_assigner_proto)
45 | self.assertIsInstance(target_assigner_instance,
46 | target_assigner.TargetAssigner)
47 |
48 |
49 | if __name__ == '__main__':
50 | tf.test.main()
51 |
--------------------------------------------------------------------------------
/tf/object_detection/core/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/object_detection/core/box_coder_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.core.box_coder."""
17 | import tensorflow.compat.v1 as tf
18 |
19 | from object_detection.core import box_coder
20 | from object_detection.core import box_list
21 | from object_detection.utils import test_case
22 |
23 |
24 | class MockBoxCoder(box_coder.BoxCoder):
25 | """Test BoxCoder that encodes/decodes using the multiply-by-two function."""
26 |
27 | def code_size(self):
28 | return 4
29 |
30 | def _encode(self, boxes, anchors):
31 | return 2.0 * boxes.get()
32 |
33 | def _decode(self, rel_codes, anchors):
34 | return box_list.BoxList(rel_codes / 2.0)
35 |
36 |
37 | class BoxCoderTest(test_case.TestCase):
38 |
39 | def test_batch_decode(self):
40 |
41 | expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
42 | [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
43 |
44 | def graph_fn():
45 | mock_anchor_corners = tf.constant(
46 | [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
47 | mock_anchors = box_list.BoxList(mock_anchor_corners)
48 | mock_box_coder = MockBoxCoder()
49 |
50 | encoded_boxes_list = [mock_box_coder.encode(
51 | box_list.BoxList(tf.constant(boxes)), mock_anchors)
52 | for boxes in expected_boxes]
53 | encoded_boxes = tf.stack(encoded_boxes_list)
54 | decoded_boxes = box_coder.batch_decode(
55 | encoded_boxes, mock_box_coder, mock_anchors)
56 | return decoded_boxes
57 | decoded_boxes_result = self.execute(graph_fn, [])
58 | self.assertAllClose(expected_boxes, decoded_boxes_result)
59 |
60 |
61 | if __name__ == '__main__':
62 | tf.test.main()
63 |
--------------------------------------------------------------------------------
/tf/object_detection/core/data_decoder.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Interface for data decoders.
17 |
18 | Data decoders decode the input data and return a dictionary of tensors keyed by
19 | the entries in core.reader.Fields.
20 | """
21 | from __future__ import absolute_import
22 | from __future__ import division
23 | from __future__ import print_function
24 | from abc import ABCMeta
25 | from abc import abstractmethod
26 | import six
27 |
28 |
29 | class DataDecoder(six.with_metaclass(ABCMeta, object)):
30 | """Interface for data decoders."""
31 |
32 | @abstractmethod
33 | def decode(self, data):
34 | """Return a single image and associated labels.
35 |
36 | Args:
37 | data: a string tensor holding a serialized protocol buffer corresponding
38 | to data for a single image.
39 |
40 | Returns:
41 | tensor_dict: a dictionary containing tensors. Possible keys are defined in
42 | reader.Fields.
43 | """
44 | pass
45 |
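As a concrete illustration of the interface, here is a minimal decoder sketch for serialized tf.Example records. The feature key 'image/encoded' is an assumption for this sketch; the repository's real implementation lives in data_decoders/tf_example_decoder.py:

```python
import tensorflow.compat.v1 as tf

from object_detection.core import data_decoder


class MinimalImageDecoder(data_decoder.DataDecoder):
  """Decodes a serialized tf.Example that holds a single JPEG image."""

  def decode(self, data):
    # 'image/encoded' is an assumed feature key for this sketch.
    features = tf.parse_single_example(
        data, {'image/encoded': tf.FixedLenFeature((), tf.string)})
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return {'image': image}
```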
--------------------------------------------------------------------------------
/tf/object_detection/core/data_parser.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Interface for data parsers.
16 |
  17 | A data parser parses input data and returns a dictionary of numpy arrays
18 | keyed by the entries in standard_fields.py. Since the parser parses records
19 | to numpy arrays (materialized tensors) directly, it is used to read data for
20 | evaluation/visualization; to parse the data during training, DataDecoder should
21 | be used.
22 | """
23 | from __future__ import absolute_import
24 | from __future__ import division
25 | from __future__ import print_function
26 | from abc import ABCMeta
27 | from abc import abstractmethod
28 | import six
29 |
30 |
31 | class DataToNumpyParser(six.with_metaclass(ABCMeta, object)):
32 | """Abstract interface for data parser that produces numpy arrays."""
33 |
34 | @abstractmethod
35 | def parse(self, input_data):
36 | """Parses input and returns a numpy array or a dictionary of numpy arrays.
37 |
38 | Args:
  39 |       input_data: an input data record.
40 |
41 | Returns:
42 | A numpy array or a dictionary of numpy arrays or None, if input
43 | cannot be parsed.
44 | """
45 | pass
46 |
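A minimal concrete parser sketch, assuming the input record is a plain dict with a hypothetical 'boxes' entry; it returns None when the field is absent, matching the contract above:

```python
import numpy as np

from object_detection.core import data_parser


class BoxParser(data_parser.DataToNumpyParser):
  """Parses a hypothetical 'boxes' entry out of a dict-like record."""

  def parse(self, input_data):
    boxes = input_data.get('boxes')
    if boxes is None:
      return None  # input cannot be parsed
    return np.asarray(boxes, dtype=np.float32)
```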
--------------------------------------------------------------------------------
/tf/object_detection/core/prefetcher.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Provides functions to prefetch tensors to feed into models."""
17 | import tensorflow.compat.v1 as tf
18 |
19 |
20 | def prefetch(tensor_dict, capacity):
21 | """Creates a prefetch queue for tensors.
22 |
23 | Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
24 | dequeue op that evaluates to a tensor_dict. This function is useful in
25 | prefetching preprocessed tensors so that the data is readily available for
26 | consumers.
27 |
28 | Example input pipeline when you don't need batching:
29 | ----------------------------------------------------
30 | key, string_tensor = slim.parallel_reader.parallel_read(...)
31 | tensor_dict = decoder.decode(string_tensor)
32 | tensor_dict = preprocessor.preprocess(tensor_dict, ...)
33 | prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
34 | tensor_dict = prefetch_queue.dequeue()
35 | outputs = Model(tensor_dict)
36 | ...
37 | ----------------------------------------------------
38 |
39 | For input pipelines with batching, refer to core/batcher.py
40 |
41 | Args:
42 | tensor_dict: a dictionary of tensors to prefetch.
43 | capacity: the size of the prefetch queue.
44 |
45 | Returns:
46 | a FIFO prefetcher queue
47 | """
48 | names = list(tensor_dict.keys())
49 | dtypes = [t.dtype for t in tensor_dict.values()]
50 | shapes = [t.get_shape() for t in tensor_dict.values()]
51 | prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
52 | shapes=shapes,
53 | names=names,
54 | name='prefetch_queue')
55 | enqueue_op = prefetch_queue.enqueue(tensor_dict)
56 | tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
57 | prefetch_queue, [enqueue_op]))
58 | tf.summary.scalar(
59 | 'queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity),
60 | tf.cast(prefetch_queue.size(), dtype=tf.float32) * (1. / capacity))
61 | return prefetch_queue
62 |
--------------------------------------------------------------------------------
/tf/object_detection/data/face_label_map.pbtxt:
--------------------------------------------------------------------------------
1 | item {
2 | name: "face"
3 | id: 1
4 | display_name: "face"
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/tf/object_detection/data/face_person_with_keypoints_label_map.pbtxt:
--------------------------------------------------------------------------------
1 | item: {
2 | id: 1
3 | name: 'face'
4 | display_name: 'face'
5 | keypoints {
6 | id: 0
7 | label: "left_eye_center"
8 | }
9 | keypoints {
10 | id: 1
11 | label: "right_eye_center"
12 | }
13 | keypoints {
14 | id: 2
15 | label: "nose_tip"
16 | }
17 | keypoints {
18 | id: 3
19 | label: "mouth_center"
20 | }
21 | keypoints {
22 | id: 4
23 | label: "left_ear_tragion"
24 | }
25 | keypoints {
26 | id: 5
27 | label: "right_ear_tragion"
28 | }
29 | }
30 | item: {
31 | id: 2
32 | name: 'Person'
33 | display_name: 'PERSON'
34 | keypoints {
35 | id: 6
36 | label: "NOSE_TIP"
37 | }
38 | keypoints {
39 | id: 7
40 | label: "LEFT_EYE"
41 | }
42 | keypoints {
43 | id: 8
44 | label: "RIGHT_EYE"
45 | }
46 | keypoints {
47 | id: 9
48 | label: "LEFT_EAR_TRAGION"
49 | }
50 | keypoints {
51 | id: 10
52 | label: "RIGHT_EAR_TRAGION"
53 | }
54 | keypoints {
55 | id: 11
56 | label: "LEFT_SHOULDER"
57 | }
58 | keypoints {
59 | id: 12
60 | label: "RIGHT_SHOULDER"
61 | }
62 | keypoints {
63 | id: 13
64 | label: "LEFT_ELBOW"
65 | }
66 | keypoints {
67 | id: 14
68 | label: "RIGHT_ELBOW"
69 | }
70 | keypoints {
71 | id: 15
72 | label: "LEFT_WRIST"
73 | }
74 | keypoints {
75 | id: 16
76 | label: "RIGHT_WRIST"
77 | }
78 | keypoints {
79 | id: 17
80 | label: "LEFT_HIP"
81 | }
82 | keypoints {
83 | id: 18
84 | label: "RIGHT_HIP"
85 | }
86 | keypoints {
87 | id: 19
88 | label: "LEFT_KNEE"
89 | }
90 | keypoints {
91 | id: 20
92 | label: "RIGHT_KNEE"
93 | }
94 | keypoints {
95 | id: 21
96 | label: "LEFT_ANKLE"
97 | }
98 | keypoints {
99 | id: 22
100 | label: "RIGHT_ANKLE"
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/tf/object_detection/data/kitti_label_map.pbtxt:
--------------------------------------------------------------------------------
1 | item {
2 | id: 1
3 | name: 'car'
4 | }
5 |
6 | item {
7 | id: 2
8 | name: 'pedestrian'
9 | }
10 |
--------------------------------------------------------------------------------
/tf/object_detection/data/pascal_label_map.pbtxt:
--------------------------------------------------------------------------------
1 | item {
2 | id: 1
3 | name: 'aeroplane'
4 | }
5 |
6 | item {
7 | id: 2
8 | name: 'bicycle'
9 | }
10 |
11 | item {
12 | id: 3
13 | name: 'bird'
14 | }
15 |
16 | item {
17 | id: 4
18 | name: 'boat'
19 | }
20 |
21 | item {
22 | id: 5
23 | name: 'bottle'
24 | }
25 |
26 | item {
27 | id: 6
28 | name: 'bus'
29 | }
30 |
31 | item {
32 | id: 7
33 | name: 'car'
34 | }
35 |
36 | item {
37 | id: 8
38 | name: 'cat'
39 | }
40 |
41 | item {
42 | id: 9
43 | name: 'chair'
44 | }
45 |
46 | item {
47 | id: 10
48 | name: 'cow'
49 | }
50 |
51 | item {
52 | id: 11
53 | name: 'diningtable'
54 | }
55 |
56 | item {
57 | id: 12
58 | name: 'dog'
59 | }
60 |
61 | item {
62 | id: 13
63 | name: 'horse'
64 | }
65 |
66 | item {
67 | id: 14
68 | name: 'motorbike'
69 | }
70 |
71 | item {
72 | id: 15
73 | name: 'person'
74 | }
75 |
76 | item {
77 | id: 16
78 | name: 'pottedplant'
79 | }
80 |
81 | item {
82 | id: 17
83 | name: 'sheep'
84 | }
85 |
86 | item {
87 | id: 18
88 | name: 'sofa'
89 | }
90 |
91 | item {
92 | id: 19
93 | name: 'train'
94 | }
95 |
96 | item {
97 | id: 20
98 | name: 'tvmonitor'
99 | }
100 |
--------------------------------------------------------------------------------
/tf/object_detection/data/pet_label_map.pbtxt:
--------------------------------------------------------------------------------
1 | item {
2 | id: 1
3 | name: 'Abyssinian'
4 | }
5 |
6 | item {
7 | id: 2
8 | name: 'american_bulldog'
9 | }
10 |
11 | item {
12 | id: 3
13 | name: 'american_pit_bull_terrier'
14 | }
15 |
16 | item {
17 | id: 4
18 | name: 'basset_hound'
19 | }
20 |
21 | item {
22 | id: 5
23 | name: 'beagle'
24 | }
25 |
26 | item {
27 | id: 6
28 | name: 'Bengal'
29 | }
30 |
31 | item {
32 | id: 7
33 | name: 'Birman'
34 | }
35 |
36 | item {
37 | id: 8
38 | name: 'Bombay'
39 | }
40 |
41 | item {
42 | id: 9
43 | name: 'boxer'
44 | }
45 |
46 | item {
47 | id: 10
48 | name: 'British_Shorthair'
49 | }
50 |
51 | item {
52 | id: 11
53 | name: 'chihuahua'
54 | }
55 |
56 | item {
57 | id: 12
58 | name: 'Egyptian_Mau'
59 | }
60 |
61 | item {
62 | id: 13
63 | name: 'english_cocker_spaniel'
64 | }
65 |
66 | item {
67 | id: 14
68 | name: 'english_setter'
69 | }
70 |
71 | item {
72 | id: 15
73 | name: 'german_shorthaired'
74 | }
75 |
76 | item {
77 | id: 16
78 | name: 'great_pyrenees'
79 | }
80 |
81 | item {
82 | id: 17
83 | name: 'havanese'
84 | }
85 |
86 | item {
87 | id: 18
88 | name: 'japanese_chin'
89 | }
90 |
91 | item {
92 | id: 19
93 | name: 'keeshond'
94 | }
95 |
96 | item {
97 | id: 20
98 | name: 'leonberger'
99 | }
100 |
101 | item {
102 | id: 21
103 | name: 'Maine_Coon'
104 | }
105 |
106 | item {
107 | id: 22
108 | name: 'miniature_pinscher'
109 | }
110 |
111 | item {
112 | id: 23
113 | name: 'newfoundland'
114 | }
115 |
116 | item {
117 | id: 24
118 | name: 'Persian'
119 | }
120 |
121 | item {
122 | id: 25
123 | name: 'pomeranian'
124 | }
125 |
126 | item {
127 | id: 26
128 | name: 'pug'
129 | }
130 |
131 | item {
132 | id: 27
133 | name: 'Ragdoll'
134 | }
135 |
136 | item {
137 | id: 28
138 | name: 'Russian_Blue'
139 | }
140 |
141 | item {
142 | id: 29
143 | name: 'saint_bernard'
144 | }
145 |
146 | item {
147 | id: 30
148 | name: 'samoyed'
149 | }
150 |
151 | item {
152 | id: 31
153 | name: 'scottish_terrier'
154 | }
155 |
156 | item {
157 | id: 32
158 | name: 'shiba_inu'
159 | }
160 |
161 | item {
162 | id: 33
163 | name: 'Siamese'
164 | }
165 |
166 | item {
167 | id: 34
168 | name: 'Sphynx'
169 | }
170 |
171 | item {
172 | id: 35
173 | name: 'staffordshire_bull_terrier'
174 | }
175 |
176 | item {
177 | id: 36
178 | name: 'wheaten_terrier'
179 | }
180 |
181 | item {
182 | id: 37
183 | name: 'yorkshire_terrier'
184 | }
185 |
--------------------------------------------------------------------------------
/tf/object_detection/data_decoders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/data_decoders/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/dataset_tools/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/context_rcnn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/dataset_tools/context_rcnn/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/create_pycocotools_package.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 |
17 | # Script to download pycocotools and make package for CMLE jobs.
18 | #
19 | # usage:
20 | # bash object_detection/dataset_tools/create_pycocotools_package.sh \
21 | # /tmp/pycocotools
22 | set -e
23 |
24 | if [ -z "$1" ]; then
25 | echo "usage create_pycocotools_package.sh [output dir]"
26 | exit
27 | fi
28 |
29 | # Create the output directory.
30 | OUTPUT_DIR="${1%/}"
31 | SCRATCH_DIR="${OUTPUT_DIR}/raw"
32 | mkdir -p "${OUTPUT_DIR}"
33 | mkdir -p "${SCRATCH_DIR}"
34 |
35 | cd ${SCRATCH_DIR}
36 | git clone https://github.com/cocodataset/cocoapi.git
37 | cd cocoapi/PythonAPI && mv ../common ./
38 |
39 | sed "s/\.\.\/common/common/g" setup.py > setup.py.updated
40 | cp -f setup.py.updated setup.py
41 | rm setup.py.updated
42 |
43 | sed "s/\.\.\/common/common/g" pycocotools/_mask.pyx > _mask.pyx.updated
44 | cp -f _mask.pyx.updated pycocotools/_mask.pyx
45 | rm _mask.pyx.updated
46 |
47 | sed "s/import matplotlib\.pyplot as plt/import matplotlib;matplotlib\.use\(\'Agg\'\);import matplotlib\.pyplot as plt/g" pycocotools/coco.py > coco.py.updated
48 | cp -f coco.py.updated pycocotools/coco.py
49 | rm coco.py.updated
50 |
51 | cd "${OUTPUT_DIR}"
52 | tar -czf pycocotools-2.0.tar.gz -C "${SCRATCH_DIR}/cocoapi/" PythonAPI/
53 | rm -rf ${SCRATCH_DIR}
54 |
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/densepose/UV_symmetry_transforms.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/dataset_tools/densepose/UV_symmetry_transforms.mat
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/download_and_preprocess_ava.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script downloads the videos for the AVA dataset. There are no arguments.
  3 | # Copy this script into the desired parent directory of the ava_vids_raw/
  4 | # directory, which this script creates to store the raw videos.
5 |
6 | mkdir ava_vids_raw
7 | cd ava_vids_raw
8 |
9 | curl -O s3.amazonaws.com/ava-dataset/annotations/ava_file_names_trainval_v2.1.txt
10 |
11 | echo "Downloading all videos."
12 |
13 | cat "ava_file_names_trainval_v2.1.txt" | while read line
14 | do
15 | curl -O s3.amazonaws.com/ava-dataset/trainval/$line
16 | echo "Downloaded " $line
17 | done
18 |
19 | rm "ava_file_names_trainval_v2.1.txt"
20 | cd ..
21 |
22 | # Trimming causes issues with frame seeking in the python script, so it is best left out.
  23 | # If included, need to modify the python script to subtract 900 seconds when seeking.
24 |
25 | # echo "Trimming all videos."
26 |
27 | # mkdir ava_vids_trimmed
28 | # for filename in ava_vids_raw/*; do
29 | # ffmpeg -ss 900 -to 1800 -i $filename -c copy ava_vids_trimmed/${filename##*/}
30 | # done
31 |
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/tf_record_creation_util.py:
--------------------------------------------------------------------------------
1 | # Lint as: python2, python3
2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
17 | """
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | from six.moves import range
23 | import tensorflow.compat.v1 as tf
24 |
25 |
26 | def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
27 | """Opens all TFRecord shards for writing and adds them to an exit stack.
28 |
29 | Args:
  30 |     exit_stack: A contextlib2.ExitStack used to automatically close the
  31 |       TFRecords opened in this function.
  32 |     base_path: The base path for all shards.
  33 |     num_shards: The number of shards.
34 |
35 | Returns:
36 | The list of opened TFRecords. Position k in the list corresponds to shard k.
37 | """
38 | tf_record_output_filenames = [
39 | '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
40 | for idx in range(num_shards)
41 | ]
42 |
43 | tfrecords = [
44 | exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
45 | for file_name in tf_record_output_filenames
46 | ]
47 |
48 | return tfrecords
49 |
--------------------------------------------------------------------------------
/tf/object_detection/dataset_tools/tf_record_creation_util_test.py:
--------------------------------------------------------------------------------
1 | # Lint as: python2, python3
2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | """Tests for tf_record_creation_util.py."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import os
23 | import contextlib2
24 | import six
25 | from six.moves import range
26 | import tensorflow.compat.v1 as tf
27 |
28 | from object_detection.dataset_tools import tf_record_creation_util
29 |
30 |
31 | class OpenOutputTfrecordsTests(tf.test.TestCase):
32 |
33 | def test_sharded_tfrecord_writes(self):
34 | with contextlib2.ExitStack() as tf_record_close_stack:
35 | output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
36 | tf_record_close_stack,
37 | os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10)
38 | for idx in range(10):
39 | output_tfrecords[idx].write(six.ensure_binary('test_{}'.format(idx)))
40 |
41 | for idx in range(10):
42 | tf_record_path = '{}-{:05d}-of-00010'.format(
43 | os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx)
44 | records = list(tf.python_io.tf_record_iterator(tf_record_path))
45 | self.assertAllEqual(records, ['test_{}'.format(idx).encode('utf-8')])
46 |
47 |
48 | if __name__ == '__main__':
49 | tf.test.main()
50 |
--------------------------------------------------------------------------------
/tf/object_detection/dockerfiles/tf1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:1.15.2-gpu-py3
2 |
3 | ARG DEBIAN_FRONTEND=noninteractive
4 |
5 | # Install apt dependencies
6 | RUN apt-get update && apt-get install -y \
7 | git \
8 | gpg-agent \
9 | python3-cairocffi \
10 | protobuf-compiler \
11 | python3-pil \
12 | python3-lxml \
13 | python3-tk \
14 | wget
15 |
16 | # Install gcloud and gsutil commands
17 | # https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu
18 | RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
19 | echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
20 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
21 | apt-get update -y && apt-get install google-cloud-sdk -y
22 |
23 | # Add new user to avoid running as root
24 | RUN useradd -ms /bin/bash tensorflow
25 | USER tensorflow
26 | WORKDIR /home/tensorflow
27 |
  28 | # Copy this version of the model garden into the image
29 | COPY --chown=tensorflow . /home/tensorflow/models
30 |
31 | # Compile protobuf configs
32 | RUN (cd /home/tensorflow/models/research/ && protoc object_detection/protos/*.proto --python_out=.)
33 | WORKDIR /home/tensorflow/models/research/
34 |
35 | RUN cp object_detection/packages/tf1/setup.py ./
36 | ENV PATH="/home/tensorflow/.local/bin:${PATH}"
37 |
38 | RUN python -m pip install --user -U pip
39 | RUN python -m pip install --user .
40 |
41 | ENV TF_CPP_MIN_LOG_LEVEL 3
42 |
--------------------------------------------------------------------------------
/tf/object_detection/dockerfiles/tf1/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow Object Detection on Docker
2 |
3 | These instructions are experimental.
4 |
5 | ## Building and running:
6 |
7 | ```bash
8 | # From the root of the git repository
9 | docker build -f research/object_detection/dockerfiles/tf1/Dockerfile -t od .
10 | docker run -it od
11 | ```
12 |
--------------------------------------------------------------------------------
/tf/object_detection/dockerfiles/tf2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.2.0-gpu
2 |
3 | ARG DEBIAN_FRONTEND=noninteractive
4 |
5 | # Install apt dependencies
6 | RUN apt-get update && apt-get install -y \
7 | git \
8 | gpg-agent \
9 | python3-cairocffi \
10 | protobuf-compiler \
11 | python3-pil \
12 | python3-lxml \
13 | python3-tk \
14 | wget
15 |
16 | # Install gcloud and gsutil commands
17 | # https://cloud.google.com/sdk/docs/quickstart-debian-ubuntu
18 | RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \
19 | echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
20 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \
21 | apt-get update -y && apt-get install google-cloud-sdk -y
22 |
23 | # Add new user to avoid running as root
24 | RUN useradd -ms /bin/bash tensorflow
25 | USER tensorflow
26 | WORKDIR /home/tensorflow
27 |
  28 | # Copy this version of the model garden into the image
29 | COPY --chown=tensorflow . /home/tensorflow/models
30 |
31 | # Compile protobuf configs
32 | RUN (cd /home/tensorflow/models/research/ && protoc object_detection/protos/*.proto --python_out=.)
33 | WORKDIR /home/tensorflow/models/research/
34 |
35 | RUN cp object_detection/packages/tf2/setup.py ./
36 | ENV PATH="/home/tensorflow/.local/bin:${PATH}"
37 |
38 | RUN python -m pip install -U pip
39 | RUN python -m pip install .
40 |
41 | ENV TF_CPP_MIN_LOG_LEVEL 3
42 |
--------------------------------------------------------------------------------
/tf/object_detection/dockerfiles/tf2/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow Object Detection on Docker
2 |
3 | These instructions are experimental.
4 |
5 | ## Building and running:
6 |
7 | ```bash
8 | # From the root of the git repository
9 | docker build -f research/object_detection/dockerfiles/tf2/Dockerfile -t od .
10 | docker run -it od
11 | ```
12 |
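To make host GPUs visible inside the container, a run command along these lines should work, assuming Docker 19.03+ with the NVIDIA container toolkit installed:

```bash
# Expose all host GPUs to the container
docker run --gpus all -it od
```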
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/exporting_models.md:
--------------------------------------------------------------------------------
1 | # Exporting a trained model for inference
2 |
3 | [](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0)
4 |
5 | After your model has been trained, you should export it to a TensorFlow
6 | graph proto. A checkpoint will typically consist of three files:
7 |
8 | * model.ckpt-${CHECKPOINT_NUMBER}.data-00000-of-00001
9 | * model.ckpt-${CHECKPOINT_NUMBER}.index
10 | * model.ckpt-${CHECKPOINT_NUMBER}.meta
11 |
12 | After you've identified a candidate checkpoint to export, run the following
13 | command from tensorflow/models/research:
14 |
15 | ``` bash
16 | # From tensorflow/models/research/
17 | INPUT_TYPE=image_tensor
18 | PIPELINE_CONFIG_PATH={path to pipeline config file}
19 | TRAINED_CKPT_PREFIX={path to model.ckpt}
20 | EXPORT_DIR={path to folder that will be used for export}
21 | python object_detection/export_inference_graph.py \
22 | --input_type=${INPUT_TYPE} \
23 | --pipeline_config_path=${PIPELINE_CONFIG_PATH} \
24 | --trained_checkpoint_prefix=${TRAINED_CKPT_PREFIX} \
25 | --output_directory=${EXPORT_DIR}
26 | ```
27 |
28 | NOTE: We are configuring our exported model to ingest 4-D image tensors. We can
29 | also configure the exported model to take encoded images or serialized
30 | `tf.Example`s.
31 |
32 | After export, you should see the directory ${EXPORT_DIR} containing the following:
33 |
34 | * saved_model/, a directory containing the saved model format of the exported model
35 | * frozen_inference_graph.pb, the frozen graph format of the exported model
36 | * model.ckpt.*, the model checkpoints used for exporting
  37 | * checkpoint, a file specifying which checkpoint files to restore
38 | * pipeline.config, pipeline config file for the exported model
39 |
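To sanity-check an export, the frozen graph can be loaded and run directly. A minimal sketch, assuming the tensor names conventionally used by the API's exported detection graphs (`image_tensor`, `detection_boxes`); substitute your ${EXPORT_DIR} for the placeholder path:

```python
import numpy as np
import tensorflow.compat.v1 as tf

# Load the exported frozen graph (placeholder path).
graph_def = tf.GraphDef()
with tf.gfile.GFile('${EXPORT_DIR}/frozen_inference_graph.pb', 'rb') as f:
  graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
  tf.import_graph_def(graph_def, name='')
  with tf.Session(graph=graph) as sess:
    image_batch = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # dummy input
    boxes = sess.run('detection_boxes:0',
                     feed_dict={'image_tensor:0': image_batch})
```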
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/faq.md:
--------------------------------------------------------------------------------
1 | # Frequently Asked Questions
2 |
3 | ## Q: How can I ensure that all the groundtruth boxes are used during train and eval?
  4 | A: For the object detection framework to be TPU-compliant, we must pad our input
5 | tensors to static shapes. This means that we must pad to a fixed number of
6 | bounding boxes, configured by `InputReader.max_number_of_boxes`. It is
7 | important to set this value to a number larger than the maximum number of
  8 | groundtruth boxes in the dataset. If an image is encountered with more
  9 | bounding boxes than this value, the excess boxes will be clipped.
10 |
11 | ## Q: AttributeError: 'module' object has no attribute 'BackupHandler'
12 | A: This BackupHandler (tf_slim.tfexample_decoder.BackupHandler) was
  13 | introduced in tensorflow 1.5.0, so running with earlier versions may cause this
  14 | issue. It has now been replaced by
  15 | object_detection.data_decoders.tf_example_decoder.BackupHandler. If you see
  16 | this issue, you should be able to resolve it by syncing your fork to HEAD.
17 | Same for LookupTensor.
18 |
19 | ## Q: AttributeError: 'module' object has no attribute 'LookupTensor'
20 | A: Similar to BackupHandler, syncing your fork to HEAD should make it work.
21 |
  22 | ## Q: Why can't I get the inference time as reported in the model zoo?
  23 | A: The inference time reported in the model zoo is the mean time over hundreds of
  24 | test images on an internal machine. As mentioned in
25 | [TensorFlow detection model zoo](tf1_detection_zoo.md), this speed depends
26 | highly on one's specific hardware configuration and should be treated more as
27 | relative timing.
28 |
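For the padding question above, `max_number_of_boxes` is set on the input reader in the pipeline config. A hypothetical text-proto sketch (the paths and the value 200 are placeholders):

```
train_input_reader {
  max_number_of_boxes: 200
  label_map_path: "/path/to/label_map.pbtxt"
  tf_record_input_reader {
    input_path: "/path/to/train.record"
  }
}
```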
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/dogs_detections_output.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/dogs_detections_output.jpg
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/example_cat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/example_cat.jpg
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/groupof_case_eval.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/groupof_case_eval.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/kites_detections_output.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/kites_detections_output.jpg
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/kites_with_segment_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/kites_with_segment_overlay.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/mask_improvement.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/mask_improvement.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/nongroupof_case_eval.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/nongroupof_case_eval.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/oid_bus_72e19c28aac34ed8.jpg
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/oid_monkey_3b4168c89cecbc5b.jpg
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/oxford_pet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/oxford_pet.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/tensorboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/tensorboard.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/tensorboard2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/tensorboard2.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/img/tf-od-api-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/g3doc/img/tf-od-api-logo.png
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/preparing_inputs.md:
--------------------------------------------------------------------------------
1 | # Preparing Inputs
2 |
3 | TensorFlow Object Detection API reads data using the TFRecord file format. Two
4 | sample scripts (`create_pascal_tf_record.py` and `create_pet_tf_record.py`) are
5 | provided to convert from the PASCAL VOC dataset and Oxford-IIIT Pet dataset to
6 | TFRecords.
7 |
8 | ## Generating the PASCAL VOC TFRecord files.
9 |
10 | The raw 2012 PASCAL VOC data set is located
11 | [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar).
  12 | To download, extract, and convert it to TFRecords, run the commands
  13 | below:
14 |
15 | ```bash
16 | # From tensorflow/models/research/
17 | wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
18 | tar -xvf VOCtrainval_11-May-2012.tar
19 | python object_detection/dataset_tools/create_pascal_tf_record.py \
20 | --label_map_path=object_detection/data/pascal_label_map.pbtxt \
21 | --data_dir=VOCdevkit --year=VOC2012 --set=train \
22 | --output_path=pascal_train.record
23 | python object_detection/dataset_tools/create_pascal_tf_record.py \
24 | --label_map_path=object_detection/data/pascal_label_map.pbtxt \
25 | --data_dir=VOCdevkit --year=VOC2012 --set=val \
26 | --output_path=pascal_val.record
27 | ```
28 |
29 | You should end up with two TFRecord files named `pascal_train.record` and
30 | `pascal_val.record` in the `tensorflow/models/research/` directory.
31 |
32 | The label map for the PASCAL VOC data set can be found at
33 | `object_detection/data/pascal_label_map.pbtxt`.
34 |
35 | ## Generating the Oxford-IIIT Pet TFRecord files.
36 |
37 | The Oxford-IIIT Pet data set is located
  38 | [here](http://www.robots.ox.ac.uk/~vgg/data/pets/). To download, extract, and
  39 | convert it to TFRecords, run the commands below:
40 |
41 | ```bash
42 | # From tensorflow/models/research/
43 | wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
44 | wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz
45 | tar -xvf annotations.tar.gz
46 | tar -xvf images.tar.gz
47 | python object_detection/dataset_tools/create_pet_tf_record.py \
48 | --label_map_path=object_detection/data/pet_label_map.pbtxt \
49 | --data_dir=`pwd` \
50 | --output_dir=`pwd`
51 | ```
52 |
53 | You should end up with two 10-sharded TFRecord files named
54 | `pet_faces_train.record-?????-of-00010` and
55 | `pet_faces_val.record-?????-of-00010` in the `tensorflow/models/research/`
56 | directory.
57 |
58 | The label map for the Pet dataset can be found at
59 | `object_detection/data/pet_label_map.pbtxt`.
60 |
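To spot-check a generated file, you can iterate over its records directly. A minimal sketch, assuming `image/filename` is among the features written by the conversion scripts:

```python
import tensorflow.compat.v1 as tf

# Peek at the first few records of the generated TFRecord file.
for i, record in enumerate(
    tf.python_io.tf_record_iterator('pascal_train.record')):
  example = tf.train.Example.FromString(record)
  print(example.features.feature['image/filename'].bytes_list.value)
  if i >= 2:
    break
```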
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/running_notebook.md:
--------------------------------------------------------------------------------
1 | # Quick Start: Jupyter notebook for off-the-shelf inference
2 |
3 | [](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0)
4 | [](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0)
5 |
6 | If you'd like to hit the ground running and run detection on a few example
7 | images right out of the box, we recommend trying out the Jupyter notebook demo.
8 | To run the Jupyter notebook, run the following command from
9 | `tensorflow/models/research/object_detection`:
10 |
11 | ```
12 | # From tensorflow/models/research/object_detection
13 | jupyter notebook
14 | ```
15 |
16 | The notebook should open in your favorite web browser. Click the
17 | [`object_detection_tutorial.ipynb`](../object_detection_tutorial.ipynb) link to
18 | open the demo.
19 |
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/tf2_classification_zoo.md:
--------------------------------------------------------------------------------
1 | # TensorFlow 2 Classification Model Zoo
2 |
3 | [](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0)
4 | [](https://www.python.org/downloads/release/python-360/)
5 |
6 | We provide a collection of classification models pre-trained on the
  7 | [Imagenet](http://www.image-net.org). These can be used to initialize detection
8 | model parameters.
9 |
10 | Model name |
11 | ---------- |
12 | [EfficientNet B0](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b0.tar.gz) |
13 | [EfficientNet B1](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b1.tar.gz) |
14 | [EfficientNet B2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b2.tar.gz) |
15 | [EfficientNet B3](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b3.tar.gz) |
16 | [EfficientNet B4](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b4.tar.gz) |
17 | [EfficientNet B5](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b5.tar.gz) |
18 | [EfficientNet B6](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b6.tar.gz) |
19 | [EfficientNet B7](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/efficientnet_b7.tar.gz) |
20 | [Resnet V1 50](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet50_v1.tar.gz) |
21 | [Resnet V1 101](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet101_v1.tar.gz) |
22 | [Resnet V1 152](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/resnet152_v1.tar.gz) |
23 | [Inception Resnet V2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/inception_resnet_v2.tar.gz) |
24 | [MobileNet V1](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/mobilnet_v1.tar.gz) |
25 | [MobileNet V2](http://download.tensorflow.org/models/object_detection/classification/tf2/20200710/mobilnet_v2.tar.gz) |
26 |
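To use one of these checkpoints to initialize a detector, point the training config at the extracted checkpoint. A hypothetical snippet (the exact checkpoint path depends on the tarball's contents):

```
train_config {
  fine_tune_checkpoint: "/path/to/efficientnet_b0/ckpt-0"
  fine_tune_checkpoint_type: "classification"
}
```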
--------------------------------------------------------------------------------
/tf/object_detection/g3doc/tpu_exporters.md:
--------------------------------------------------------------------------------
1 | # Object Detection TPU Inference Exporter
2 |
3 | [](https://github.com/tensorflow/tensorflow/releases/tag/v1.15.0)
4 |
  5 | This package contains a SavedModel exporter for TPU inference of object detection
6 | models.
7 |
8 | ## Usage
9 |
10 | This Exporter is intended for users who have trained models with CPUs / GPUs,
11 | but would like to use them for inference on TPU without changing their code or
12 | re-training their models.
13 |
14 | Users are assumed to have:
15 |
16 | + `PIPELINE_CONFIG`: A pipeline_pb2.TrainEvalPipelineConfig config file;
17 | + `CHECKPOINT`: A model checkpoint trained on any device;
18 |
19 | and need to correctly set:
20 |
21 | + `EXPORT_DIR`: Path to export SavedModel;
22 | + `INPUT_PLACEHOLDER`: Name of input placeholder in model's signature_def_map;
23 | + `INPUT_TYPE`: Type of input node, which can be one of 'image_tensor',
24 | 'encoded_image_string_tensor', or 'tf_example';
25 | + `USE_BFLOAT16`: Whether to use bfloat16 instead of float32 on TPU.
26 |
27 | The model can be exported with:
28 |
29 | ```
30 | python object_detection/tpu_exporters/export_saved_model_tpu.py \
31 | --pipeline_config_file= \
32 | --ckpt_path= \
33 | --export_dir= \
34 | --input_placeholder_name= \
35 | --input_type= \
36 | --use_bfloat16=
37 | ```
38 |
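A filled-in example invocation with hypothetical values (the placeholder name in particular depends on your model's signature_def_map):

```bash
python object_detection/tpu_exporters/export_saved_model_tpu.py \
  --pipeline_config_file=/path/to/pipeline.config \
  --ckpt_path=/path/to/model.ckpt \
  --export_dir=/tmp/tpu_saved_model \
  --input_placeholder_name=placeholder_tensor \
  --input_type=image_tensor \
  --use_bfloat16=false
```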
--------------------------------------------------------------------------------
/tf/object_detection/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/inference/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/legacy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/legacy/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/matchers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/matchers/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/matchers/hungarian_matcher.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Hungarian bipartite matcher implementation."""
17 |
18 | import numpy as np
19 | from scipy.optimize import linear_sum_assignment
20 |
21 | import tensorflow.compat.v1 as tf
22 | from object_detection.core import matcher
23 |
24 |
25 | class HungarianBipartiteMatcher(matcher.Matcher):
26 | """Wraps a Hungarian bipartite matcher into TensorFlow."""
27 |
28 | def _match(self, similarity_matrix, valid_rows):
29 | """Optimally bipartite matches a collection rows and columns.
30 |
31 | Args:
32 | similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
33 | where higher values mean more similar.
34 | valid_rows: A boolean tensor of shape [N] indicating the rows that are
35 | valid.
36 |
37 | Returns:
38 | match_results: int32 tensor of shape [M] with match_results[i]=-1
39 | meaning that column i is not matched and otherwise that it is matched to
40 | row match_results[i].
41 | """
42 | valid_row_sim_matrix = tf.gather(similarity_matrix,
43 | tf.squeeze(tf.where(valid_rows), axis=-1))
44 | distance_matrix = -1 * valid_row_sim_matrix
45 |
46 | def numpy_wrapper(inputs):
47 | def numpy_matching(input_matrix):
48 | row_indices, col_indices = linear_sum_assignment(input_matrix)
49 | match_results = np.full(input_matrix.shape[1], -1)
50 | match_results[col_indices] = row_indices
51 | return match_results.astype(np.int32)
52 |
53 | return tf.numpy_function(numpy_matching, inputs, Tout=[tf.int32])
54 |
55 | matching_result = tf.autograph.experimental.do_not_convert(
56 | numpy_wrapper)([distance_matrix])
57 |
58 | return tf.reshape(matching_result, [-1])
59 |
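The core of this matcher is scipy's `linear_sum_assignment`, which minimizes cost, so similarities are negated. A small standalone sketch of the same column-to-row convention used in `_match`:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Similarities for 2 rows (groundtruth) x 3 columns (anchors/predictions).
similarity = np.array([[0.9, 0.1, 0.3],
                       [0.2, 0.8, 0.4]])
rows, cols = linear_sum_assignment(-similarity)  # negate to maximize similarity
match_results = np.full(similarity.shape[1], -1)
match_results[cols] = rows
print(match_results)  # [0 1 -1]: column 2 is left unmatched
```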
--------------------------------------------------------------------------------
/tf/object_detection/meta_architectures/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/meta_architectures/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/meta_architectures/rfcn_meta_arch_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.meta_architectures.rfcn_meta_arch."""
17 |
18 | import tensorflow.compat.v1 as tf
19 |
20 | from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib
21 | from object_detection.meta_architectures import rfcn_meta_arch
22 |
23 |
24 | class RFCNMetaArchTest(
25 | faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase):
26 |
27 | def _get_second_stage_box_predictor_text_proto(
28 | self, share_box_across_classes=False):
29 | del share_box_across_classes
30 | box_predictor_text_proto = """
31 | rfcn_box_predictor {
32 | conv_hyperparams {
33 | op: CONV
34 | activation: NONE
35 | regularizer {
36 | l2_regularizer {
37 | weight: 0.0005
38 | }
39 | }
40 | initializer {
41 | variance_scaling_initializer {
42 | factor: 1.0
43 | uniform: true
44 | mode: FAN_AVG
45 | }
46 | }
47 | }
48 | }
49 | """
50 | return box_predictor_text_proto
51 |
52 | def _get_model(self, box_predictor, **common_kwargs):
53 | return rfcn_meta_arch.RFCNMetaArch(
54 | second_stage_rfcn_box_predictor=box_predictor, **common_kwargs)
55 |
56 | def _get_box_classifier_features_shape(self,
57 | image_size,
58 | batch_size,
59 | max_num_proposals,
60 | initial_crop_size,
61 | maxpool_stride,
62 | num_features):
63 | return (batch_size, image_size, image_size, num_features)
64 |
65 |
66 | if __name__ == '__main__':
67 | tf.test.main()
68 |
--------------------------------------------------------------------------------
/tf/object_detection/metrics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/metrics/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/metrics/io_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Common IO utils used in offline metric computation.
16 | """
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import csv
23 |
24 |
25 | def write_csv(fid, metrics):
26 | """Writes metrics key-value pairs to CSV file.
27 |
28 | Args:
29 | fid: File identifier of an opened file.
30 | metrics: A dictionary with metrics to be written.
31 | """
32 | metrics_writer = csv.writer(fid, delimiter=',')
33 | for metric_name, metric_value in metrics.items():
34 | metrics_writer.writerow([metric_name, str(metric_value)])
35 |
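A minimal usage sketch for write_csv; the output path and metric values below are illustrative, not taken from the repo:

from object_detection.metrics import io_utils

metrics = {'mAP@0.5IOU': 0.73, 'CorLoc': 0.81}  # hypothetical values
# newline='' is the csv-module convention for files opened in text mode.
with open('/tmp/metrics.csv', 'w', newline='') as fid:
    io_utils.write_csv(fid, metrics)
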
--------------------------------------------------------------------------------
/tf/object_detection/metrics/offline_eval_map_corloc_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for utilities in offline_eval_map_corloc binary."""
16 |
17 | import tensorflow.compat.v1 as tf
18 |
19 | from object_detection.metrics import offline_eval_map_corloc as offline_eval
20 |
21 |
22 | class OfflineEvalMapCorlocTest(tf.test.TestCase):
23 |
24 | def test_generateShardedFilenames(self):
25 | test_filename = '/path/to/file'
26 | result = offline_eval._generate_sharded_filenames(test_filename)
27 | self.assertEqual(result, [test_filename])
28 |
29 | test_filename = '/path/to/file-00000-of-00050'
30 | result = offline_eval._generate_sharded_filenames(test_filename)
31 | self.assertEqual(result, [test_filename])
32 |
33 | result = offline_eval._generate_sharded_filenames('/path/to/@3.record')
34 | self.assertEqual(result, [
35 | '/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record',
36 | '/path/to/-00002-of-00003.record'
37 | ])
38 |
39 | result = offline_eval._generate_sharded_filenames('/path/to/abc@3')
40 | self.assertEqual(result, [
41 | '/path/to/abc-00000-of-00003', '/path/to/abc-00001-of-00003',
42 | '/path/to/abc-00002-of-00003'
43 | ])
44 |
45 | result = offline_eval._generate_sharded_filenames('/path/to/@1')
46 | self.assertEqual(result, ['/path/to/-00000-of-00001'])
47 |
48 | def test_generateFilenames(self):
49 | test_filenames = ['/path/to/file', '/path/to/@3.record']
50 | result = offline_eval._generate_filenames(test_filenames)
51 | self.assertEqual(result, [
52 | '/path/to/file', '/path/to/-00000-of-00003.record',
53 | '/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record'
54 | ])
55 |
56 |
57 | if __name__ == '__main__':
58 | tf.test.main()
59 |
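For reference, a standalone sketch of the '@N' shard expansion the tests above exercise; expand_sharded_filename is an illustrative re-implementation, not the module's actual helper:

import re

def expand_sharded_filename(spec):
    # 'prefix@N[suffix]' expands to N names of the form
    # 'prefix-0000i-of-0000N[suffix]'; specs without '@N' pass through.
    m = re.search(r'@(\d+)', spec)
    if m is None:
        return [spec]
    num_shards = int(m.group(1))
    prefix, suffix = spec[:m.start()], spec[m.end():]
    return ['%s-%05d-of-%05d%s' % (prefix, i, num_shards, suffix)
            for i in range(num_shards)]

assert expand_sharded_filename('/path/to/abc@3') == [
    '/path/to/abc-00000-of-00003',
    '/path/to/abc-00001-of-00003',
    '/path/to/abc-00002-of-00003']
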
--------------------------------------------------------------------------------
/tf/object_detection/model_hparams.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Hyperparameters for the object detection model in TF.learn.
16 |
17 | This file consolidates and documents the hyperparameters used by the model.
18 | """
19 |
20 | from __future__ import absolute_import
21 | from __future__ import division
22 | from __future__ import print_function
23 |
24 | # pylint: disable=g-import-not-at-top
25 | try:
26 | from tensorflow.contrib import training as contrib_training
27 | except ImportError:
28 | # TF 2.0 doesn't ship with contrib.
29 | pass
30 | # pylint: enable=g-import-not-at-top
31 |
32 |
33 | def create_hparams(hparams_overrides=None):
34 | """Returns hyperparameters, including any flag value overrides.
35 |
36 | Args:
37 | hparams_overrides: Optional hparams overrides, represented as a
38 | string containing comma-separated hparam_name=value pairs.
39 |
40 | Returns:
41 | The hyperparameters as a tf.HParams object.
42 | """
43 | hparams = contrib_training.HParams(
44 | # Whether a fine tuning checkpoint (provided in the pipeline config)
45 | # should be loaded for training.
46 | load_pretrained=True)
47 | # Override any of the preceding hyperparameter values.
48 | if hparams_overrides:
49 | hparams = hparams.parse(hparams_overrides)
50 | return hparams
51 |
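A short usage sketch for create_hparams; it assumes a TF1 environment where tensorflow.contrib is importable, and the override string is illustrative:

from object_detection import model_hparams

# Overrides are comma-separated name=value pairs, parsed by HParams.parse().
hparams = model_hparams.create_hparams('load_pretrained=false')
print(hparams.load_pretrained)  # False
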
--------------------------------------------------------------------------------
/tf/object_detection/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/models/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/models/center_net_hourglass_feature_extractor_tf2_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Testing hourglass feature extractor for CenterNet."""
16 | import unittest
17 | import numpy as np
18 | import tensorflow.compat.v1 as tf
19 |
20 | from object_detection.models import center_net_hourglass_feature_extractor as hourglass
21 | from object_detection.models.keras_models import hourglass_network
22 | from object_detection.utils import test_case
23 | from object_detection.utils import tf_version
24 |
25 |
26 | @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
27 | class CenterNetHourglassFeatureExtractorTest(test_case.TestCase):
28 |
29 | def test_center_net_hourglass_feature_extractor(self):
30 |
31 | net = hourglass_network.HourglassNetwork(
32 | num_stages=4, blocks_per_stage=[2, 3, 4, 5, 6],
33 | input_channel_dims=4, channel_dims_per_stage=[6, 8, 10, 12, 14],
34 | num_hourglasses=2)
35 |
36 | model = hourglass.CenterNetHourglassFeatureExtractor(net)
37 | def graph_fn():
38 | return model(tf.zeros((2, 64, 64, 3), dtype=np.float32))
39 | outputs = self.execute(graph_fn, [])
40 | self.assertEqual(outputs[0].shape, (2, 16, 16, 6))
41 | self.assertEqual(outputs[1].shape, (2, 16, 16, 6))
42 |
43 |
44 | if __name__ == '__main__':
45 | tf.test.main()
46 |
--------------------------------------------------------------------------------
/tf/object_detection/models/center_net_mobilenet_v2_feature_extractor_tf2_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Testing mobilenet_v2 feature extractor for CenterNet."""
16 | import unittest
17 | import numpy as np
18 | import tensorflow.compat.v1 as tf
19 |
20 | from object_detection.models import center_net_mobilenet_v2_feature_extractor
21 | from object_detection.models.keras_models import mobilenet_v2
22 | from object_detection.utils import test_case
23 | from object_detection.utils import tf_version
24 |
25 |
26 | @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
27 | class CenterNetMobileNetV2FeatureExtractorTest(test_case.TestCase):
28 |
29 | def test_center_net_mobilenet_v2_feature_extractor(self):
30 |
31 | net = mobilenet_v2.mobilenet_v2(True, include_top=False)
32 |
33 | model = center_net_mobilenet_v2_feature_extractor.CenterNetMobileNetV2FeatureExtractor(
34 | net)
35 |
36 | def graph_fn():
37 | img = np.zeros((8, 224, 224, 3), dtype=np.float32)
38 | processed_img = model.preprocess(img)
39 | return model(processed_img)
40 |
41 | outputs = self.execute(graph_fn, [])
42 | self.assertEqual(outputs.shape, (8, 56, 56, 64))
43 |
44 |
45 | if __name__ == '__main__':
46 | tf.test.main()
47 |
--------------------------------------------------------------------------------
/tf/object_detection/models/center_net_resnet_feature_extractor_tf2_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Testing ResNet v2 models for the CenterNet meta architecture."""
16 | import unittest
17 | import numpy as np
18 | import tensorflow.compat.v1 as tf
19 |
20 | from object_detection.models import center_net_resnet_feature_extractor
21 | from object_detection.utils import test_case
22 | from object_detection.utils import tf_version
23 |
24 |
25 | @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
26 | class CenterNetResnetFeatureExtractorTest(test_case.TestCase):
27 |
28 | def test_output_size(self):
29 | """Verify that shape of features returned by the backbone is correct."""
30 |
31 | model = center_net_resnet_feature_extractor.\
32 | CenterNetResnetFeatureExtractor('resnet_v2_101')
33 | def graph_fn():
34 | img = np.zeros((8, 512, 512, 3), dtype=np.float32)
35 | processed_img = model.preprocess(img)
36 | return model(processed_img)
37 | outputs = self.execute(graph_fn, [])
38 | self.assertEqual(outputs.shape, (8, 128, 128, 64))
39 |
40 | def test_output_size_resnet50(self):
41 | """Verify that shape of features returned by the backbone is correct."""
42 |
43 | model = center_net_resnet_feature_extractor.\
44 | CenterNetResnetFeatureExtractor('resnet_v2_50')
45 | def graph_fn():
46 | img = np.zeros((8, 224, 224, 3), dtype=np.float32)
47 | processed_img = model.preprocess(img)
48 | return model(processed_img)
49 | outputs = self.execute(graph_fn, [])
50 | self.assertEqual(outputs.shape, (8, 56, 56, 64))
51 |
52 |
53 | if __name__ == '__main__':
54 | tf.test.main()
55 |
--------------------------------------------------------------------------------
/tf/object_detection/models/center_net_resnet_v1_fpn_feature_extractor_tf2_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Testing ResNet v1 FPN models for the CenterNet meta architecture."""
16 | import unittest
17 | from absl.testing import parameterized
18 |
19 | import numpy as np
20 | import tensorflow.compat.v1 as tf
21 |
22 | from object_detection.models import center_net_resnet_v1_fpn_feature_extractor
23 | from object_detection.utils import test_case
24 | from object_detection.utils import tf_version
25 |
26 |
27 | @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
28 | class CenterNetResnetV1FpnFeatureExtractorTest(test_case.TestCase,
29 | parameterized.TestCase):
30 |
31 | @parameterized.parameters(
32 | {'resnet_type': 'resnet_v1_50'},
33 | {'resnet_type': 'resnet_v1_101'},
34 | {'resnet_type': 'resnet_v1_18'},
35 | {'resnet_type': 'resnet_v1_34'},
36 | )
37 | def test_correct_output_size(self, resnet_type):
38 | """Verify that shape of features returned by the backbone is correct."""
39 |
40 | model = center_net_resnet_v1_fpn_feature_extractor.\
41 | CenterNetResnetV1FpnFeatureExtractor(resnet_type)
42 | def graph_fn():
43 | img = np.zeros((8, 512, 512, 3), dtype=np.float32)
44 | processed_img = model.preprocess(img)
45 | return model(processed_img)
46 |
47 | self.assertEqual(self.execute(graph_fn, []).shape, (8, 128, 128, 64))
48 |
49 |
50 | if __name__ == '__main__':
51 | tf.test.main()
52 |
--------------------------------------------------------------------------------
/tf/object_detection/models/keras_models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/models/keras_models/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/models/keras_models/model_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Utils for Keras models."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import collections
23 | import tensorflow.compat.v1 as tf
24 |
25 | # This is to specify the custom config of model structures. For example,
26 | # ConvDefs(conv_name='conv_pw_12', filters=512) for Mobilenet V1 is to specify
27 | # the filters of the conv layer with name 'conv_pw_12' as 512.
28 | ConvDefs = collections.namedtuple('ConvDefs', ['conv_name', 'filters'])
29 |
30 |
31 | def get_conv_def(conv_defs, layer_name):
32 | """Get the custom config for some layer of the model structure.
33 |
34 | Args:
35 | conv_defs: A list of `ConvDefs` named tuples specifying the custom
36 | config of the model network. See `ConvDefs` for details.
37 | layer_name: A string, the name of the layer to be customized.
38 |
39 | Returns:
40 | The number of filters for the layer, or `None` if there is no custom
41 | config for the requested layer.
42 | """
43 | for conv_def in conv_defs:
44 | if layer_name == conv_def.conv_name:
45 | return conv_def.filters
46 | return None
47 |
48 |
49 | def input_layer(shape, placeholder_with_default):
50 | if tf.executing_eagerly():
51 | return tf.keras.layers.Input(shape=shape)
52 | else:
53 | return tf.keras.layers.Input(tensor=placeholder_with_default)
54 |
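A usage sketch for ConvDefs/get_conv_def; the layer names and filter counts are illustrative. (input_layer, above, returns a fresh Keras Input in eager mode and wraps the given placeholder when building a TF1-style graph.)

from object_detection.models.keras_models import model_utils

conv_defs = [
    model_utils.ConvDefs(conv_name='conv_pw_12', filters=512),
    model_utils.ConvDefs(conv_name='conv_pw_13', filters=256),
]
assert model_utils.get_conv_def(conv_defs, 'conv_pw_12') == 512
assert model_utils.get_conv_def(conv_defs, 'conv_dw_1') is None  # no override
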
--------------------------------------------------------------------------------
/tf/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """SSDFeatureExtractor for MobileNetEdgeTPU features."""
16 |
17 | from object_detection.models import ssd_mobilenet_v3_feature_extractor
18 | from nets.mobilenet import mobilenet_v3
19 |
20 |
21 | class SSDMobileNetEdgeTPUFeatureExtractor(
22 | ssd_mobilenet_v3_feature_extractor.SSDMobileNetV3FeatureExtractorBase):
23 | """MobileNetEdgeTPU feature extractor."""
24 |
25 | def __init__(self,
26 | is_training,
27 | depth_multiplier,
28 | min_depth,
29 | pad_to_multiple,
30 | conv_hyperparams_fn,
31 | reuse_weights=None,
32 | use_explicit_padding=False,
33 | use_depthwise=False,
34 | override_base_feature_extractor_hyperparams=False,
35 | scope_name='MobilenetEdgeTPU'):
36 | super(SSDMobileNetEdgeTPUFeatureExtractor, self).__init__(
37 | conv_defs=mobilenet_v3.V3_EDGETPU,
38 | from_layer=['layer_18/expansion_output', 'layer_23'],
39 | is_training=is_training,
40 | depth_multiplier=depth_multiplier,
41 | min_depth=min_depth,
42 | pad_to_multiple=pad_to_multiple,
43 | conv_hyperparams_fn=conv_hyperparams_fn,
44 | reuse_weights=reuse_weights,
45 | use_explicit_padding=use_explicit_padding,
46 | use_depthwise=use_depthwise,
47 | override_base_feature_extractor_hyperparams=override_base_feature_extractor_hyperparams,
48 | scope_name=scope_name
49 | )
50 |
--------------------------------------------------------------------------------
/tf/object_detection/models/ssd_mobilenet_edgetpu_feature_extractor_tf1_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for ssd_mobilenet_edgetpu_feature_extractor."""
16 | import unittest
17 | import tensorflow.compat.v1 as tf
18 |
19 | from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor
20 | from object_detection.models import ssd_mobilenet_edgetpu_feature_extractor_testbase
21 | from object_detection.utils import tf_version
22 |
23 |
24 | @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
25 | class SsdMobilenetEdgeTPUFeatureExtractorTest(
26 | ssd_mobilenet_edgetpu_feature_extractor_testbase
27 | ._SsdMobilenetEdgeTPUFeatureExtractorTestBase):
28 |
29 | def _get_input_sizes(self):
30 | """Return first two input feature map sizes."""
31 | return [384, 192]
32 |
33 | def _create_feature_extractor(self,
34 | depth_multiplier,
35 | pad_to_multiple,
36 | use_explicit_padding=False,
37 | use_keras=False):
38 | """Constructs a new MobileNetEdgeTPU feature extractor.
39 |
40 | Args:
41 | depth_multiplier: float depth multiplier for feature extractor
42 | pad_to_multiple: the nearest multiple to zero pad the input height and
43 | width dimensions to.
44 | use_explicit_padding: use 'VALID' padding for convolutions, but prepad
45 | inputs so that the output dimensions are the same as if 'SAME' padding
46 | were used.
47 | use_keras: if True builds a keras-based feature extractor, if False builds
48 | a slim-based one.
49 |
50 | Returns:
51 | an ssd_meta_arch.SSDFeatureExtractor object.
52 | """
53 | min_depth = 32
54 | return (ssd_mobilenet_edgetpu_feature_extractor
55 | .SSDMobileNetEdgeTPUFeatureExtractor(
56 | False,
57 | depth_multiplier,
58 | min_depth,
59 | pad_to_multiple,
60 | self.conv_hyperparams_fn,
61 | use_explicit_padding=use_explicit_padding))
62 |
63 |
64 | if __name__ == '__main__':
65 | tf.test.main()
66 |
--------------------------------------------------------------------------------
/tf/object_detection/packages/tf1/setup.py:
--------------------------------------------------------------------------------
1 | """Setup script for object_detection with TF1.0."""
2 | import os
3 | from setuptools import find_packages
4 | from setuptools import setup
5 |
6 | REQUIRED_PACKAGES = ['pillow', 'lxml', 'matplotlib', 'Cython',
7 | 'contextlib2', 'tf-slim', 'six', 'pycocotools', 'lvis',
8 | 'scipy', 'pandas']
9 |
10 | setup(
11 | name='object_detection',
12 | version='0.1',
13 | install_requires=REQUIRED_PACKAGES,
14 | include_package_data=True,
15 | packages=(
16 | [p for p in find_packages() if p.startswith('object_detection')] +
17 | find_packages(where=os.path.join('.', 'slim'))),
18 | package_dir={
19 | 'datasets': os.path.join('slim', 'datasets'),
20 | 'nets': os.path.join('slim', 'nets'),
21 | 'preprocessing': os.path.join('slim', 'preprocessing'),
22 | 'deployment': os.path.join('slim', 'deployment'),
23 | 'scripts': os.path.join('slim', 'scripts'),
24 | },
25 | description='Tensorflow Object Detection Library with TF1.0',
26 | python_requires='>3.6',
27 | )
28 |
--------------------------------------------------------------------------------
/tf/object_detection/packages/tf2/setup.py:
--------------------------------------------------------------------------------
1 | """Setup script for object_detection with TF2.0."""
2 | import os
3 | from setuptools import find_packages
4 | from setuptools import setup
5 |
6 | # Note: adding apache-beam to required packages causes conflict with
7 | # tf-models-official requirements. These packages request incompatible
8 | # versions of the oauth2client package.
9 | REQUIRED_PACKAGES = [
10 | # Required for apache-beam with PY3
11 | 'avro-python3',
12 | 'apache-beam',
13 | 'pillow',
14 | 'lxml',
15 | 'matplotlib',
16 | 'Cython',
17 | 'contextlib2',
18 | 'tf-slim',
19 | 'six',
20 | 'pycocotools',
21 | 'lvis',
22 | 'scipy',
23 | 'pandas',
24 | 'tf-models-official'
25 | ]
26 |
27 | setup(
28 | name='object_detection',
29 | version='0.1',
30 | install_requires=REQUIRED_PACKAGES,
31 | include_package_data=True,
32 | packages=(
33 | [p for p in find_packages() if p.startswith('object_detection')] +
34 | find_packages(where=os.path.join('.', 'slim'))),
35 | package_dir={
36 | 'datasets': os.path.join('slim', 'datasets'),
37 | 'nets': os.path.join('slim', 'nets'),
38 | 'preprocessing': os.path.join('slim', 'preprocessing'),
39 | 'deployment': os.path.join('slim', 'deployment'),
40 | 'scripts': os.path.join('slim', 'scripts'),
41 | },
42 | description='Tensorflow Object Detection Library',
43 | python_requires='>3.6',
44 | )
45 |
--------------------------------------------------------------------------------
/tf/object_detection/predictors/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/predictors/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/predictors/heads/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/predictors/heads/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/predictors/heads/keypoint_head_tf1_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.predictors.heads.keypoint_head."""
17 | import unittest
18 | import tensorflow.compat.v1 as tf
19 |
20 | from google.protobuf import text_format
21 | from object_detection.builders import hyperparams_builder
22 | from object_detection.predictors.heads import keypoint_head
23 | from object_detection.protos import hyperparams_pb2
24 | from object_detection.utils import test_case
25 | from object_detection.utils import tf_version
26 |
27 |
28 | @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
29 | class MaskRCNNKeypointHeadTest(test_case.TestCase):
30 |
31 | def _build_arg_scope_with_hyperparams(self,
32 | op_type=hyperparams_pb2.Hyperparams.FC):
33 | hyperparams = hyperparams_pb2.Hyperparams()
34 | hyperparams_text_proto = """
35 | activation: NONE
36 | regularizer {
37 | l2_regularizer {
38 | }
39 | }
40 | initializer {
41 | truncated_normal_initializer {
42 | }
43 | }
44 | """
45 | text_format.Merge(hyperparams_text_proto, hyperparams)
46 | hyperparams.op = op_type
47 | return hyperparams_builder.build(hyperparams, is_training=True)
48 |
49 | def test_prediction_size(self):
50 | keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead(
51 | conv_hyperparams_fn=self._build_arg_scope_with_hyperparams())
52 | roi_pooled_features = tf.random_uniform(
53 | [64, 14, 14, 1024], minval=-2.0, maxval=2.0, dtype=tf.float32)
54 | prediction = keypoint_prediction_head.predict(
55 | features=roi_pooled_features, num_predictions_per_location=1)
56 | self.assertAllEqual([64, 1, 17, 56, 56], prediction.get_shape().as_list())
57 |
58 |
59 | if __name__ == '__main__':
60 | tf.test.main()
61 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/protos/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/protos/anchor_generator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/flexible_grid_anchor_generator.proto";
6 | import "object_detection/protos/grid_anchor_generator.proto";
7 | import "object_detection/protos/multiscale_anchor_generator.proto";
8 | import "object_detection/protos/ssd_anchor_generator.proto";
9 |
10 | // Configuration proto for the anchor generator to use in the object detection
11 | // pipeline. See core/anchor_generator.py for details.
12 | message AnchorGenerator {
13 | oneof anchor_generator_oneof {
14 | GridAnchorGenerator grid_anchor_generator = 1;
15 | SsdAnchorGenerator ssd_anchor_generator = 2;
16 | MultiscaleAnchorGenerator multiscale_anchor_generator = 3;
17 | FlexibleGridAnchorGenerator flexible_grid_anchor_generator = 4;
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/argmax_matcher.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for ArgMaxMatcher. See
6 | // matchers/argmax_matcher.py for details.
7 | message ArgMaxMatcher {
8 | // Threshold for positive matches.
9 | optional float matched_threshold = 1 [default = 0.5];
10 |
11 | // Threshold for negative matches.
12 | optional float unmatched_threshold = 2 [default = 0.5];
13 |
14 | // Whether to construct ArgMaxMatcher without thresholds.
15 | optional bool ignore_thresholds = 3 [default = false];
16 |
17 | // If True then negative matches are the ones below the unmatched_threshold,
18 | // whereas ignored matches are in between the matched and unmatched
19 | // threshold. If False, then negative matches are in between the matched
20 | // and unmatched threshold, and everything lower than unmatched is ignored.
21 | optional bool negatives_lower_than_unmatched = 4 [default = true];
22 |
23 | // Whether to ensure each row is matched to at least one column.
24 | optional bool force_match_for_each_row = 5 [default = false];
25 |
26 | // Force constructed match objects to use matrix multiplication based gather
27 | // instead of standard tf.gather
28 | optional bool use_matmul_gather = 6 [default = false];
29 | }
30 |
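The threshold semantics above are easy to misread, so here is a pure-Python sketch of how one anchor's best similarity would be classified; classify_match is illustrative, not the matcher's actual code:

def classify_match(best_similarity, matched_threshold=0.5,
                   unmatched_threshold=0.5,
                   negatives_lower_than_unmatched=True):
    """Returns 'matched', 'negative', or 'ignored' for a single anchor."""
    if best_similarity >= matched_threshold:
        return 'matched'
    if negatives_lower_than_unmatched:
        # Below unmatched_threshold -> negative; in between -> ignored.
        return ('negative' if best_similarity < unmatched_threshold
                else 'ignored')
    # Flipped: in between -> negative; below unmatched_threshold -> ignored.
    return ('negative' if best_similarity >= unmatched_threshold
            else 'ignored')

assert classify_match(0.7) == 'matched'
assert classify_match(0.3) == 'negative'
assert classify_match(0.4, unmatched_threshold=0.3) == 'ignored'
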
--------------------------------------------------------------------------------
/tf/object_detection/protos/bipartite_matcher.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for bipartite matcher. See
6 | // matchers/bipartite_matcher.py for details.
7 | message BipartiteMatcher {
8 | // Force constructed match objects to use matrix multiplication based gather
9 | // instead of standard tf.gather
10 | optional bool use_matmul_gather = 6 [default = false];
11 | }
12 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/bipartite_matcher_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: object_detection/protos/bipartite_matcher.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf import descriptor as _descriptor
6 | from google.protobuf import message as _message
7 | from google.protobuf import reflection as _reflection
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 |
15 |
16 | DESCRIPTOR = _descriptor.FileDescriptor(
17 | name='object_detection/protos/bipartite_matcher.proto',
18 | package='object_detection.protos',
19 | syntax='proto2',
20 | serialized_options=None,
21 | create_key=_descriptor._internal_create_key,
22 | serialized_pb=b'\n/object_detection/protos/bipartite_matcher.proto\x12\x17object_detection.protos\"4\n\x10\x42ipartiteMatcher\x12 \n\x11use_matmul_gather\x18\x06 \x01(\x08:\x05\x66\x61lse'
23 | )
24 |
25 |
26 |
27 |
28 | _BIPARTITEMATCHER = _descriptor.Descriptor(
29 | name='BipartiteMatcher',
30 | full_name='object_detection.protos.BipartiteMatcher',
31 | filename=None,
32 | file=DESCRIPTOR,
33 | containing_type=None,
34 | create_key=_descriptor._internal_create_key,
35 | fields=[
36 | _descriptor.FieldDescriptor(
37 | name='use_matmul_gather', full_name='object_detection.protos.BipartiteMatcher.use_matmul_gather', index=0,
38 | number=6, type=8, cpp_type=7, label=1,
39 | has_default_value=True, default_value=False,
40 | message_type=None, enum_type=None, containing_type=None,
41 | is_extension=False, extension_scope=None,
42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
43 | ],
44 | extensions=[
45 | ],
46 | nested_types=[],
47 | enum_types=[
48 | ],
49 | serialized_options=None,
50 | is_extendable=False,
51 | syntax='proto2',
52 | extension_ranges=[],
53 | oneofs=[
54 | ],
55 | serialized_start=76,
56 | serialized_end=128,
57 | )
58 |
59 | DESCRIPTOR.message_types_by_name['BipartiteMatcher'] = _BIPARTITEMATCHER
60 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
61 |
62 | BipartiteMatcher = _reflection.GeneratedProtocolMessageType('BipartiteMatcher', (_message.Message,), {
63 | 'DESCRIPTOR' : _BIPARTITEMATCHER,
64 | '__module__' : 'object_detection.protos.bipartite_matcher_pb2'
65 | # @@protoc_insertion_point(class_scope:object_detection.protos.BipartiteMatcher)
66 | })
67 | _sym_db.RegisterMessage(BipartiteMatcher)
68 |
69 |
70 | # @@protoc_insertion_point(module_scope)
71 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/box_coder.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/faster_rcnn_box_coder.proto";
6 | import "object_detection/protos/keypoint_box_coder.proto";
7 | import "object_detection/protos/mean_stddev_box_coder.proto";
8 | import "object_detection/protos/square_box_coder.proto";
9 |
10 | // Configuration proto for the box coder to be used in the object detection
11 | // pipeline. See core/box_coder.py for details.
12 | message BoxCoder {
13 | oneof box_coder_oneof {
14 | FasterRcnnBoxCoder faster_rcnn_box_coder = 1;
15 | MeanStddevBoxCoder mean_stddev_box_coder = 2;
16 | SquareBoxCoder square_box_coder = 3;
17 | KeypointBoxCoder keypoint_box_coder = 4;
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/faster_rcnn_box_coder.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for FasterRCNNBoxCoder. See
6 | // box_coders/faster_rcnn_box_coder.py for details.
7 | message FasterRcnnBoxCoder {
8 | // Scale factor for anchor encoded box center.
9 | optional float y_scale = 1 [default = 10.0];
10 | optional float x_scale = 2 [default = 10.0];
11 |
12 | // Scale factor for anchor encoded box height.
13 | optional float height_scale = 3 [default = 5.0];
14 |
15 | // Scale factor for anchor encoded box width.
16 | optional float width_scale = 4 [default = 5.0];
17 | }
18 |
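A NumPy sketch of the encoding these scale factors enter into, following the standard Faster R-CNN box parameterization (see box_coders/faster_rcnn_box_coder.py); boxes are given as (y_center, x_center, height, width) and the values are illustrative:

import numpy as np

def encode(box, anchor, y_scale=10.0, x_scale=10.0,
           height_scale=5.0, width_scale=5.0):
    ycenter, xcenter, h, w = box
    ycenter_a, xcenter_a, ha, wa = anchor
    ty = y_scale * (ycenter - ycenter_a) / ha
    tx = x_scale * (xcenter - xcenter_a) / wa
    th = height_scale * np.log(h / ha)
    tw = width_scale * np.log(w / wa)
    return np.array([ty, tx, th, tw])

print(encode(box=(0.55, 0.5, 0.2, 0.2), anchor=(0.5, 0.5, 0.2, 0.2)))
# [2.5, 0.0, 0.0, 0.0] -- only the y-center offset is non-zero here.
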
--------------------------------------------------------------------------------
/tf/object_detection/protos/flexible_grid_anchor_generator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | message FlexibleGridAnchorGenerator {
6 | repeated AnchorGrid anchor_grid = 1;
7 |
8 | // Whether to produce anchors in normalized coordinates.
9 | optional bool normalize_coordinates = 2 [default = true];
10 | }
11 |
12 | message AnchorGrid {
13 | // The base sizes in pixels for each anchor in this anchor layer.
14 | repeated float base_sizes = 1;
15 |
16 | // The aspect ratios for each anchor in this anchor layer.
17 | repeated float aspect_ratios = 2;
18 |
19 | // The anchor height stride in pixels.
20 | optional uint32 height_stride = 3;
21 |
22 | // The anchor width stride in pixels.
23 | optional uint32 width_stride = 4;
24 |
25 | // The anchor height offset in pixels.
26 | optional uint32 height_offset = 5 [default = 0];
27 |
28 | // The anchor width offset in pixels.
29 | optional uint32 width_offset = 6 [default = 0];
30 | }
31 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/fpn.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration for Feature Pyramid Networks.
6 | message FeaturePyramidNetworks {
7 | // We recommend using multi_resolution_feature_map_generator with FPN; for
8 | // best performance, the levels there must match the levels defined
9 | // below.
10 | // Correspondence from FPN levels to Resnet/Mobilenet V1 feature maps:
11 | // FPN Level Resnet Feature Map Mobilenet-V1 Feature Map
12 | // 2 Block 1 Conv2d_3_pointwise
13 | // 3 Block 2 Conv2d_5_pointwise
14 | // 4 Block 3 Conv2d_11_pointwise
15 | // 5 Block 4 Conv2d_13_pointwise
16 | // 6 Bottomup_5 bottom_up_Conv2d_14
17 | // 7 Bottomup_6 bottom_up_Conv2d_15
18 | // 8 Bottomup_7 bottom_up_Conv2d_16
19 | // 9 Bottomup_8 bottom_up_Conv2d_17
20 |
21 | // minimum level in feature pyramid
22 | optional int32 min_level = 1 [default = 3];
23 |
24 | // maximum level in feature pyramid
25 | optional int32 max_level = 2 [default = 7];
26 |
27 | // channel depth for additional coarse feature layers.
28 | optional int32 additional_layer_depth = 3 [default = 256];
29 |
30 | }
31 |
32 | // Configuration for Bidirectional Feature Pyramid Networks.
33 | message BidirectionalFeaturePyramidNetworks {
34 | // minimum level in the feature pyramid.
35 | optional int32 min_level = 1 [default = 3];
36 |
37 | // maximum level in the feature pyramid.
38 | optional int32 max_level = 2 [default = 7];
39 |
40 | // The number of repeated top-down bottom-up iterations for BiFPN-based
41 | // feature extractors (bidirectional feature pyramid networks).
42 | optional int32 num_iterations = 3;
43 |
44 | // The number of filters (channels) to use in feature pyramid layers for
45 | // BiFPN-based feature extractors (bidirectional feature pyramid networks).
46 | optional int32 num_filters = 4;
47 |
48 | // Method used to combine inputs to BiFPN nodes.
49 | optional string combine_method = 5 [default = 'fast_attention'];
50 | }
51 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/graph_rewriter.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Message to configure graph rewriter for the tf graph.
6 | message GraphRewriter {
7 | optional Quantization quantization = 1;
8 | extensions 1000 to max;
9 | }
10 |
11 | // Message for quantization options. See
12 | // tensorflow/contrib/quantize/python/quantize.py for details.
13 | message Quantization {
14 | // Number of steps to delay before quantization takes effect during training.
15 | optional int32 delay = 1 [default = 500000];
16 |
17 | // Number of bits to use for quantizing weights.
18 | // Only 8-bit quantization is supported for now.
19 | optional int32 weight_bits = 2 [default = 8];
20 |
21 | // Number of bits to use for quantizing activations.
22 | // Only 8-bit quantization is supported for now.
23 | optional int32 activation_bits = 3 [default = 8];
24 |
25 | // Whether to use symmetric weight quantization.
26 | optional bool symmetric = 4 [default = false];
27 | }
28 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/grid_anchor_generator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for GridAnchorGenerator. See
6 | // anchor_generators/grid_anchor_generator.py for details.
7 | message GridAnchorGenerator {
8 | // Anchor height in pixels.
9 | optional int32 height = 1 [default = 256];
10 |
11 | // Anchor width in pixels.
12 | optional int32 width = 2 [default = 256];
13 |
14 | // Anchor stride in height dimension in pixels.
15 | optional int32 height_stride = 3 [default = 16];
16 |
17 | // Anchor stride in width dimension in pixels.
18 | optional int32 width_stride = 4 [default = 16];
19 |
20 | // Anchor height offset in pixels.
21 | optional int32 height_offset = 5 [default = 0];
22 |
23 | // Anchor width offset in pixels.
24 | optional int32 width_offset = 6 [default = 0];
25 |
26 | // At any given location, len(scales) * len(aspect_ratios) anchors are
27 | // generated with all possible combinations of scales and aspect ratios.
28 |
29 | // List of scales for the anchors.
30 | repeated float scales = 7;
31 |
32 | // List of aspect ratios for the anchors.
33 | repeated float aspect_ratios = 8;
34 | }
35 |
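The anchors-per-location count noted in the comment above is just the cross product of scales and aspect ratios; a tiny sketch with illustrative values:

scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.5, 1.0, 2.0]
anchors_per_location = len(scales) * len(aspect_ratios)   # 9
# On a 38x50 feature map (e.g. with a 16-pixel stride) this yields:
total_anchors = 38 * 50 * anchors_per_location            # 17100
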
--------------------------------------------------------------------------------
/tf/object_detection/protos/keypoint_box_coder.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for KeypointBoxCoder. See
6 | // box_coders/keypoint_box_coder.py for details.
7 | message KeypointBoxCoder {
8 | optional int32 num_keypoints = 1;
9 |
10 | // Scale factor for anchor encoded box center and keypoints.
11 | optional float y_scale = 2 [default = 10.0];
12 | optional float x_scale = 3 [default = 10.0];
13 |
14 | // Scale factor for anchor encoded box height.
15 | optional float height_scale = 4 [default = 5.0];
16 |
17 | // Scale factor for anchor encoded box width.
18 | optional float width_scale = 5 [default = 5.0];
19 | }
20 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/matcher.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/argmax_matcher.proto";
6 | import "object_detection/protos/bipartite_matcher.proto";
7 |
8 | // Configuration proto for the matcher to be used in the object detection
9 | // pipeline. See core/matcher.py for details.
10 | message Matcher {
11 | oneof matcher_oneof {
12 | ArgMaxMatcher argmax_matcher = 1;
13 | BipartiteMatcher bipartite_matcher = 2;
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/mean_stddev_box_coder.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for MeanStddevBoxCoder. See
6 | // box_coders/mean_stddev_box_coder.py for details.
7 | message MeanStddevBoxCoder {
8 | // The standard deviation used to encode and decode boxes.
9 | optional float stddev = 1 [default=0.01];
10 | }
11 |
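A sketch of the encoding this stddev controls, assuming corner-format coordinates as in box_coders/mean_stddev_box_coder.py: encode subtracts the anchor and divides by stddev, and decode inverts it; the boxes are illustrative:

import numpy as np

def encode(box, anchor, stddev=0.01):
    # Element-wise over [ymin, xmin, ymax, xmax].
    return (np.asarray(box) - np.asarray(anchor)) / stddev

def decode(codes, anchor, stddev=0.01):
    return np.asarray(codes) * stddev + np.asarray(anchor)

box = [0.10, 0.10, 0.50, 0.50]
anchor = [0.12, 0.10, 0.50, 0.48]
assert np.allclose(decode(encode(box, anchor), anchor), box)
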
--------------------------------------------------------------------------------
/tf/object_detection/protos/mean_stddev_box_coder_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: object_detection/protos/mean_stddev_box_coder.proto
4 | """Generated protocol buffer code."""
5 | from google.protobuf import descriptor as _descriptor
6 | from google.protobuf import message as _message
7 | from google.protobuf import reflection as _reflection
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 |
15 |
16 | DESCRIPTOR = _descriptor.FileDescriptor(
17 | name='object_detection/protos/mean_stddev_box_coder.proto',
18 | package='object_detection.protos',
19 | syntax='proto2',
20 | serialized_options=None,
21 | create_key=_descriptor._internal_create_key,
22 | serialized_pb=b'\n3object_detection/protos/mean_stddev_box_coder.proto\x12\x17object_detection.protos\"*\n\x12MeanStddevBoxCoder\x12\x14\n\x06stddev\x18\x01 \x01(\x02:\x04\x30.01'
23 | )
24 |
25 |
26 |
27 |
28 | _MEANSTDDEVBOXCODER = _descriptor.Descriptor(
29 | name='MeanStddevBoxCoder',
30 | full_name='object_detection.protos.MeanStddevBoxCoder',
31 | filename=None,
32 | file=DESCRIPTOR,
33 | containing_type=None,
34 | create_key=_descriptor._internal_create_key,
35 | fields=[
36 | _descriptor.FieldDescriptor(
37 | name='stddev', full_name='object_detection.protos.MeanStddevBoxCoder.stddev', index=0,
38 | number=1, type=2, cpp_type=6, label=1,
39 | has_default_value=True, default_value=float(0.01),
40 | message_type=None, enum_type=None, containing_type=None,
41 | is_extension=False, extension_scope=None,
42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
43 | ],
44 | extensions=[
45 | ],
46 | nested_types=[],
47 | enum_types=[
48 | ],
49 | serialized_options=None,
50 | is_extendable=False,
51 | syntax='proto2',
52 | extension_ranges=[],
53 | oneofs=[
54 | ],
55 | serialized_start=80,
56 | serialized_end=122,
57 | )
58 |
59 | DESCRIPTOR.message_types_by_name['MeanStddevBoxCoder'] = _MEANSTDDEVBOXCODER
60 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
61 |
62 | MeanStddevBoxCoder = _reflection.GeneratedProtocolMessageType('MeanStddevBoxCoder', (_message.Message,), {
63 | 'DESCRIPTOR' : _MEANSTDDEVBOXCODER,
64 | '__module__' : 'object_detection.protos.mean_stddev_box_coder_pb2'
65 | # @@protoc_insertion_point(class_scope:object_detection.protos.MeanStddevBoxCoder)
66 | })
67 | _sym_db.RegisterMessage(MeanStddevBoxCoder)
68 |
69 |
70 | # @@protoc_insertion_point(module_scope)
71 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/model.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/center_net.proto";
6 | import "object_detection/protos/faster_rcnn.proto";
7 | import "object_detection/protos/ssd.proto";
8 |
9 | // Top level configuration for DetectionModels.
10 | message DetectionModel {
11 | oneof model {
12 | FasterRcnn faster_rcnn = 1;
13 | Ssd ssd = 2;
14 |
15 | // This can be used to define experimental models. To define your own
16 | // experimental meta architecture, populate a key in the
17 | // model_builder.EXPERIMENTAL_META_ARCH_BUILDER_MAP dict and set its
18 | // value to a function that builds your model.
19 | ExperimentalModel experimental_model = 3;
20 |
21 | CenterNet center_net = 4;
22 | }
23 | }
24 |
25 | message ExperimentalModel {
26 | optional string name = 1;
27 | }
28 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/multiscale_anchor_generator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for RetinaNet anchor generator described in
6 | // https://arxiv.org/abs/1708.02002. See
7 | // anchor_generators/multiscale_grid_anchor_generator.py for details.
8 | message MultiscaleAnchorGenerator {
9 | // minimum level in feature pyramid
10 | optional int32 min_level = 1 [default = 3];
11 |
12 | // maximum level in feature pyramid
13 | optional int32 max_level = 2 [default = 7];
14 |
15 | // Scale of anchor to feature stride
16 | optional float anchor_scale = 3 [default = 4.0];
17 |
18 | // Aspect ratios for anchors at each grid point.
19 | repeated float aspect_ratios = 4;
20 |
21 | // Number of intermediate scales per scale octave.
22 | optional int32 scales_per_octave = 5 [default = 2];
23 |
24 | // Whether to produce anchors in normalized coordinates.
25 | optional bool normalize_coordinates = 6 [default = true];
26 | }
27 |
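A sketch of how scales_per_octave expands into per-level anchor sizes under the RetinaNet scheme (see anchor_generators/multiscale_grid_anchor_generator.py); the level chosen is illustrative:

# Each level gets anchors at scales 2**(i / scales_per_octave).
scales_per_octave = 2
anchor_scale = 4.0
octave_scales = [2 ** (i / float(scales_per_octave))
                 for i in range(scales_per_octave)]  # [1.0, 1.414...]
level = 3                       # feature stride = 2**level = 8 pixels
base_sizes = [2 ** level * anchor_scale * s for s in octave_scales]
print(base_sizes)               # [32.0, 45.25...]
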
--------------------------------------------------------------------------------
/tf/object_detection/protos/pipeline.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/eval.proto";
6 | import "object_detection/protos/graph_rewriter.proto";
7 | import "object_detection/protos/input_reader.proto";
8 | import "object_detection/protos/model.proto";
9 | import "object_detection/protos/train.proto";
10 |
11 | // Convenience message for configuring a training and eval pipeline. Allows all
12 | // of the pipeline parameters to be configured from one file.
13 | // Next id: 8
14 | message TrainEvalPipelineConfig {
15 | optional DetectionModel model = 1;
16 | optional TrainConfig train_config = 2;
17 | optional InputReader train_input_reader = 3;
18 | optional EvalConfig eval_config = 4;
19 | repeated InputReader eval_input_reader = 5;
20 | optional GraphRewriter graph_rewriter = 6;
21 | extensions 1000 to max;
22 | }
23 |
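A sketch of loading a config file into this message; it assumes the compiled pipeline_pb2 module is available, and the path is illustrative:

import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile('/path/to/pipeline.config', 'r') as f:
    text_format.Merge(f.read(), pipeline_config)
print(pipeline_config.model.WhichOneof('model'))  # e.g. 'ssd'
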
--------------------------------------------------------------------------------
/tf/object_detection/protos/region_similarity_calculator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for region similarity calculators. See
6 | // core/region_similarity_calculator.py for details.
7 | message RegionSimilarityCalculator {
8 | oneof region_similarity {
9 | NegSqDistSimilarity neg_sq_dist_similarity = 1;
10 | IouSimilarity iou_similarity = 2;
11 | IoaSimilarity ioa_similarity = 3;
12 | ThresholdedIouSimilarity thresholded_iou_similarity = 4;
13 | }
14 | }
15 |
16 | // Configuration for negative squared distance similarity calculator.
17 | message NegSqDistSimilarity {
18 | }
19 |
20 | // Configuration for intersection-over-union (IOU) similarity calculator.
21 | message IouSimilarity {
22 | }
23 |
24 | // Configuration for intersection-over-area (IOA) similarity calculator.
25 | message IoaSimilarity {
26 | }
27 |
28 | // Configuration for thresholded-intersection-over-union similarity calculator.
29 | message ThresholdedIouSimilarity {
30 |
31 | // IOU threshold used for filtering scores.
32 | optional float iou_threshold = 1 [default = 0.5];
33 | }
34 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/square_box_coder.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for SquareBoxCoder. See
6 | // box_coders/square_box_coder.py for details.
7 | message SquareBoxCoder {
8 | // Scale factor for anchor encoded box center.
9 | optional float y_scale = 1 [default = 10.0];
10 | optional float x_scale = 2 [default = 10.0];
11 |
12 | // Scale factor for anchor encoded box length.
13 | optional float length_scale = 3 [default = 5.0];
14 | }
15 |
--------------------------------------------------------------------------------
/tf/object_detection/protos/ssd_anchor_generator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | // Configuration proto for SSD anchor generator described in
6 | // https://arxiv.org/abs/1512.02325. See
7 | // anchor_generators/multiple_grid_anchor_generator.py for details.
8 | message SsdAnchorGenerator {
9 | // Number of grid layers to create anchors for.
10 | optional int32 num_layers = 1 [default = 6];
11 |
12 | // Scale of anchors corresponding to finest resolution.
13 | optional float min_scale = 2 [default = 0.2];
14 |
15 | // Scale of anchors corresponding to coarsest resolution
16 | optional float max_scale = 3 [default = 0.95];
17 |
18 | // Can be used to override min_scale->max_scale, with an explicitly defined
19 | // set of scales. If empty, then min_scale->max_scale is used.
20 | repeated float scales = 12;
21 |
22 | // Aspect ratios for anchors at each grid point.
23 | repeated float aspect_ratios = 4;
24 |
25 | // When this aspect ratio is greater than 0, an additional anchor with an
26 | // interpolated scale is added at this aspect ratio.
27 | optional float interpolated_scale_aspect_ratio = 13 [default = 1.0];
28 |
29 | // Whether to use the following aspect ratio and scale combinations for the
30 | // layer with the finest resolution: (scale=0.1, aspect_ratio=1.0),
31 | // (scale=min_scale, aspect_ratio=2.0), (scale=min_scale, aspect_ratio=0.5).
32 | optional bool reduce_boxes_in_lowest_layer = 5 [default = true];
33 |
34 | // The base anchor size in height dimension.
35 | optional float base_anchor_height = 6 [default = 1.0];
36 |
37 | // The base anchor size in width dimension.
38 | optional float base_anchor_width = 7 [default = 1.0];
39 |
40 | // Anchor stride in height dimension in pixels for each layer. The length of
41 | // this field is expected to be equal to the value of num_layers.
42 | repeated int32 height_stride = 8;
43 |
44 | // Anchor stride in width dimension in pixels for each layer. The length of
45 | // this field is expected to be equal to the value of num_layers.
46 | repeated int32 width_stride = 9;
47 |
48 | // Anchor height offset in pixels for each layer. The length of this field is
49 | // expected to be equal to the value of num_layers.
50 | repeated int32 height_offset = 10;
51 |
52 | // Anchor width offset in pixels for each layer. The length of this field is
53 | // expected to be equal to the value of num_layers.
54 | repeated int32 width_offset = 11;
55 | }
56 |
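When the explicit scales list is empty, min_scale and max_scale are interpolated linearly across the layers (see anchor_generators/multiple_grid_anchor_generator.py); a sketch with the defaults above:

num_layers, min_scale, max_scale = 6, 0.2, 0.95
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
          for i in range(num_layers)]
print(['%.2f' % s for s in scales])
# ['0.20', '0.35', '0.50', '0.65', '0.80', '0.95']
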
--------------------------------------------------------------------------------
/tf/object_detection/protos/string_int_label_map.proto:
--------------------------------------------------------------------------------
1 | // Message to store the mapping from class label strings to class id. Datasets
2 | // use string labels to represent classes while the object detection framework
3 | // works with class ids. This message maps them so they can be converted back
4 | // and forth as needed.
5 | syntax = "proto2";
6 |
7 | package object_detection.protos;
8 |
9 | // LVIS frequency:
10 | enum LVISFrequency {
11 | UNSPECIFIED = 0;
12 | FREQUENT = 1;
13 | COMMON = 2;
14 | RARE = 3;
15 | }
16 |
17 | message StringIntLabelMapItem {
18 | // String name. The most common practice is to set this to a MID or synset
19 | // id.
20 | optional string name = 1;
21 |
22 | // Integer id that maps to the string name above. Label ids should start from
23 | // 1.
24 | optional int32 id = 2;
25 |
26 | // Human readable string label.
27 | optional string display_name = 3;
28 |
29 | // Names of the class-specific keypoints for each class and their respective
30 | // keypoint IDs.
31 | message KeypointMap {
32 | // Id for the keypoint. The id must be unique within a given class, but it
33 | // may be shared across classes. For example, a "nose" keypoint can occur
34 | // in both the "face" and "person" classes, so both can be mapped to the
35 | // same id.
36 | //
37 | // Note: It is advised to assign ids in range [1, num_unique_keypoints] to
38 | // encode keypoint targets efficiently.
39 | optional int32 id = 1;
40 | // Label for the keypoint.
41 | optional string label = 2;
42 | }
43 | repeated KeypointMap keypoints = 4;
44 |
45 | // Label ids for the elements that are connected to the current element in
46 | // the hierarchy. Each value should correspond to the id of another item.
47 | repeated int32 ancestor_ids = 5;
48 | repeated int32 descendant_ids = 6;
49 |
50 | // LVIS specific label map fields
51 | optional LVISFrequency frequency = 7;
52 | optional int32 instance_count = 8;
53 | };
54 |
55 | message StringIntLabelMap {
56 | repeated StringIntLabelMapItem item = 1;
57 | };
58 |
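Label maps of this type are normally written as .pbtxt files and parsed with the generated proto bindings. A small sketch, assuming the protos have been compiled so that object_detection/protos/string_int_label_map_pb2 is importable; the MID-style names are example values:

```python
from google.protobuf import text_format
from object_detection.protos import string_int_label_map_pb2

LABEL_MAP_PBTXT = """
item {
  name: "/m/01g317"
  id: 1
  display_name: "person"
}
item {
  name: "/m/0199g"
  id: 2
  display_name: "bicycle"
}
"""

label_map = string_int_label_map_pb2.StringIntLabelMap()
text_format.Parse(LABEL_MAP_PBTXT, label_map)

# The id -> display_name mapping lets class ids be converted back to strings.
id_to_name = {item.id: item.display_name for item in label_map.item}
print(id_to_name)  # {1: 'person', 2: 'bicycle'}
```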
--------------------------------------------------------------------------------
/tf/object_detection/protos/target_assigner.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto2";
2 |
3 | package object_detection.protos;
4 |
5 | import "object_detection/protos/box_coder.proto";
6 | import "object_detection/protos/matcher.proto";
7 | import "object_detection/protos/region_similarity_calculator.proto";
8 |
9 | // Message to configure Target Assigner for object detectors.
10 | message TargetAssigner {
11 | optional Matcher matcher = 1;
12 | optional RegionSimilarityCalculator similarity_calculator = 2;
13 | optional BoxCoder box_coder = 3;
14 | }
15 |
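For illustration, the three sub-messages can be populated from text format with choices that appear throughout the shipped configs; a hedged sketch (field names assumed from the imported matcher, similarity-calculator, and box-coder protos):

```python
from google.protobuf import text_format
from object_detection.protos import target_assigner_pb2

TARGET_ASSIGNER_PBTXT = """
matcher {
  argmax_matcher {
    matched_threshold: 0.5
    unmatched_threshold: 0.5
  }
}
similarity_calculator {
  iou_similarity {
  }
}
box_coder {
  faster_rcnn_box_coder {
  }
}
"""

config = target_assigner_pb2.TargetAssigner()
text_format.Parse(TARGET_ASSIGNER_PBTXT, config)
print(config.matcher.argmax_matcher.matched_threshold)  # 0.5
```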
--------------------------------------------------------------------------------
/tf/object_detection/samples/cloud/cloud.yml:
--------------------------------------------------------------------------------
1 | trainingInput:
2 | runtimeVersion: "1.12"
3 | scaleTier: CUSTOM
4 | masterType: standard_gpu
5 | workerCount: 5
6 | workerType: standard_gpu
7 | parameterServerCount: 3
8 | parameterServerType: standard
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/tf/object_detection/test_data/pets_examples.record:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_data/pets_examples.record
--------------------------------------------------------------------------------
/tf/object_detection/test_data/snapshot_serengeti_sequence_examples.record:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_data/snapshot_serengeti_sequence_examples.record
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out1.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out10.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out11.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out12.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out13.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out14.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out15.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out16.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out17.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out18.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out19.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out2.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out20.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out21.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out21.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out22.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out23.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out23.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out24.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out24.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out25.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out25.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out26.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out26.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out27.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out27.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out28.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out28.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out29.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out29.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out3.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out30.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out30.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out31.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out31.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out32.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out32.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out33.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out33.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out34.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out34.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out35.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out35.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out36.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out36.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out37.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out37.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out38.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out38.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out39.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out39.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out4.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out40.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out40.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out41.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out41.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out42.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out42.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out43.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out43.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out44.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out44.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out45.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out45.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out46.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out46.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out47.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out47.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out48.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out48.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out49.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out49.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out5.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out6.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out7.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out8.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/test/out9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/test/out9.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/train/robertducky1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/train/robertducky1.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/train/robertducky2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/train/robertducky2.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/train/robertducky3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/train/robertducky3.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/train/robertducky4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/train/robertducky4.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/ducky/train/robertducky5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/ducky/train/robertducky5.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/image1.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/image2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/image2.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/image3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/image3.jpg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/image_info.txt:
--------------------------------------------------------------------------------
1 |
2 | Image provenance:
3 | image1.jpg: https://commons.wikimedia.org/wiki/File:Baegle_dwa.jpg
4 | image2.jpg: Michael Miley,
5 | https://www.flickr.com/photos/mike_miley/4678754542/in/photolist-88rQHL-88oBVp-88oC2B-88rS6J-88rSqm-88oBLv-88oBC4
6 | image3.jpg: Chris Briggs, https://unsplash.com/photos/ILBrHd6PFJA
7 |
--------------------------------------------------------------------------------
/tf/object_detection/test_images/snapshot_serengeti/README.md:
--------------------------------------------------------------------------------
1 | # Citation and license
2 |
3 | The images and metadata in this folder come from the Snapshot Serengeti dataset,
4 | and were accessed via [LILA.science](http://lila.science/datasets/snapshot-serengeti).
5 | The images and species-level labels are described in more detail in the
6 | associated manuscript:
7 |
8 | ```
9 | Swanson AB, Kosmala M, Lintott CJ, Simpson RJ, Smith A, Packer C (2015)
10 | Snapshot Serengeti, high-frequency annotated camera trap images of 40 mammalian
11 | species in an African savanna. Scientific Data 2: 150026. (DOI) (bibtex)
12 | ```
13 |
14 | Please cite this manuscript if you use this dataset.
15 |
16 | This data set is released under the
17 | [Community Data License Agreement (permissive variant)](https://cdla.io/permissive-1-0/).
18 |
--------------------------------------------------------------------------------
/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0038.jpeg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0039.jpeg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0040.jpeg
--------------------------------------------------------------------------------
/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/test_images/snapshot_serengeti/S1_E03_R3_PICT0041.jpeg
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/export_saved_model_tpu.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Python binary for exporting SavedModel, tailored for TPU inference."""
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import tensorflow.compat.v1 as tf
21 | from object_detection.tpu_exporters import export_saved_model_tpu_lib
22 |
23 | flags = tf.app.flags
24 | FLAGS = flags.FLAGS
25 |
26 | flags.DEFINE_string('pipeline_config_file', None,
27 | 'A pipeline_pb2.TrainEvalPipelineConfig config file.')
28 | flags.DEFINE_string(
29 | 'ckpt_path', None, 'Path to trained checkpoint, typically of the form '
30 | 'path/to/model.ckpt')
31 | flags.DEFINE_string('export_dir', None, 'Path to export SavedModel.')
32 | flags.DEFINE_string('input_placeholder_name', 'placeholder_tensor',
33 | 'Name of input placeholder in model\'s signature_def_map.')
34 | flags.DEFINE_string(
35 | 'input_type', 'tf_example', 'Type of input node. Can be '
36 | 'one of [`image_tensor`, `encoded_image_string_tensor`, '
37 | '`tf_example`]')
38 | flags.DEFINE_boolean('use_bfloat16', False, 'If true, use tf.bfloat16 on TPU.')
39 |
40 |
41 | def main(argv):
42 | if len(argv) > 1:
43 | raise tf.app.UsageError('Too many command-line arguments.')
44 | export_saved_model_tpu_lib.export(FLAGS.pipeline_config_file, FLAGS.ckpt_path,
45 | FLAGS.export_dir,
46 | FLAGS.input_placeholder_name,
47 | FLAGS.input_type, FLAGS.use_bfloat16)
48 |
49 |
50 | if __name__ == '__main__':
51 | tf.app.flags.mark_flag_as_required('pipeline_config_file')
52 | tf.app.flags.mark_flag_as_required('ckpt_path')
53 | tf.app.flags.mark_flag_as_required('export_dir')
54 | tf.app.run()
55 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/export_saved_model_tpu_lib_tf1_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Test for object detection's TPU exporter."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import os
22 | import unittest
23 |
24 | from absl.testing import parameterized
25 | import numpy as np
26 | import tensorflow.compat.v1 as tf
27 |
28 | from object_detection.tpu_exporters import export_saved_model_tpu_lib
29 | from object_detection.utils import tf_version
30 |
31 | flags = tf.app.flags
32 | FLAGS = flags.FLAGS
33 |
34 |
35 | def get_path(path_suffix):
36 | return os.path.join(tf.resource_loader.get_data_files_path(), 'testdata',
37 | path_suffix)
38 |
39 |
40 | @unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
41 | class ExportSavedModelTPUTest(tf.test.TestCase, parameterized.TestCase):
42 |
43 | @parameterized.named_parameters(
44 | ('ssd', get_path('ssd/ssd_pipeline.config'), 'image_tensor', True, 20),
45 | ('faster_rcnn',
46 | get_path('faster_rcnn/faster_rcnn_resnet101_atrous_coco.config'),
47 | 'image_tensor', True, 20))
48 | def testExportAndLoad(self,
49 | pipeline_config_file,
50 | input_type='image_tensor',
51 | use_bfloat16=False,
52 | repeat=1):
53 |
54 | input_placeholder_name = 'placeholder_tensor'
55 | export_dir = os.path.join(FLAGS.test_tmpdir, 'tpu_saved_model')
56 | if tf.gfile.Exists(export_dir):
57 | tf.gfile.DeleteRecursively(export_dir)
58 | ckpt_path = None
59 | export_saved_model_tpu_lib.export(pipeline_config_file, ckpt_path,
60 | export_dir, input_placeholder_name,
61 | input_type, use_bfloat16)
62 |
63 | inputs = np.random.rand(256, 256, 3)
64 | tensor_dict_out = export_saved_model_tpu_lib.run_inference_from_saved_model(
65 | inputs, export_dir, input_placeholder_name, repeat)
66 | for k, v in tensor_dict_out.items():
67 | tf.logging.info('{}: {}'.format(k, v))
68 |
69 |
70 | if __name__ == '__main__':
71 | tf.test.main()
72 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/testdata/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/testdata/faster_rcnn/faster_rcnn_resnet101_atrous_coco.config:
--------------------------------------------------------------------------------
1 | # Faster R-CNN with Resnet-101 (v1), Atrous version
2 | # Trained on COCO, initialized from Imagenet classification checkpoint
3 |
4 | model {
5 | faster_rcnn {
6 | num_classes: 90
7 | image_resizer {
8 | keep_aspect_ratio_resizer {
9 | min_dimension: 600
10 | max_dimension: 1024
11 | }
12 | }
13 | feature_extractor {
14 | type: 'faster_rcnn_resnet101'
15 | first_stage_features_stride: 8
16 | }
17 | first_stage_anchor_generator {
18 | grid_anchor_generator {
19 | scales: [0.25, 0.5, 1.0, 2.0]
20 | aspect_ratios: [0.5, 1.0, 2.0]
21 | height_stride: 8
22 | width_stride: 8
23 | }
24 | }
25 | first_stage_atrous_rate: 2
26 | first_stage_box_predictor_conv_hyperparams {
27 | op: CONV
28 | regularizer {
29 | l2_regularizer {
30 | weight: 0.0
31 | }
32 | }
33 | initializer {
34 | truncated_normal_initializer {
35 | stddev: 0.01
36 | }
37 | }
38 | }
39 | first_stage_nms_score_threshold: 0.0
40 | first_stage_nms_iou_threshold: 0.7
41 | first_stage_max_proposals: 300
42 | first_stage_localization_loss_weight: 2.0
43 | first_stage_objectness_loss_weight: 1.0
44 | initial_crop_size: 14
45 | maxpool_kernel_size: 2
46 | maxpool_stride: 2
47 | second_stage_box_predictor {
48 | mask_rcnn_box_predictor {
49 | use_dropout: false
50 | dropout_keep_probability: 1.0
51 | fc_hyperparams {
52 | op: FC
53 | regularizer {
54 | l2_regularizer {
55 | weight: 0.0
56 | }
57 | }
58 | initializer {
59 | variance_scaling_initializer {
60 | factor: 1.0
61 | uniform: true
62 | mode: FAN_AVG
63 | }
64 | }
65 | }
66 | }
67 | }
68 | second_stage_post_processing {
69 | batch_non_max_suppression {
70 | score_threshold: 0.0
71 | iou_threshold: 0.6
72 | max_detections_per_class: 100
73 | max_total_detections: 300
74 | }
75 | score_converter: SOFTMAX
76 | }
77 | second_stage_localization_loss_weight: 2.0
78 | second_stage_classification_loss_weight: 1.0
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Utilities for TPU inference."""
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import tensorflow.compat.v1 as tf
21 |
22 |
23 | def bfloat16_to_float32(tensor):
24 | """Converts a tensor to tf.float32 only if it is tf.bfloat16."""
25 | if tensor.dtype == tf.bfloat16:
26 | return tf.cast(tensor, dtype=tf.float32)
27 | else:
28 | return tensor
29 |
30 |
31 | def bfloat16_to_float32_nested(bfloat16_tensor_dict):
32 | """Converts bfloat16 tensors in a nested structure to float32.
33 |
34 | Other tensors not of dtype bfloat16 will be left as is.
35 |
36 | Args:
37 | bfloat16_tensor_dict: A Python dict, values being Tensor or Python
38 | list/tuple of Tensor.
39 |
40 | Returns:
41 | A Python dict with the same structure as `bfloat16_tensor_dict`,
42 | with all bfloat16 tensors converted to float32.
43 | """
44 | float32_tensor_dict = {}
45 | for k, v in bfloat16_tensor_dict.items():
46 | if isinstance(v, tf.Tensor):
47 | float32_tensor_dict[k] = bfloat16_to_float32(v)
48 | elif isinstance(v, (list, tuple)):
49 | float32_tensor_dict[k] = [bfloat16_to_float32(t) for t in v]
50 | return float32_tensor_dict
51 |
--------------------------------------------------------------------------------
/tf/object_detection/tpu_exporters/utils_test.py:
--------------------------------------------------------------------------------
1 | # Lint as: python2, python3
2 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | """Test for Utility functions."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | from six.moves import range
23 | import tensorflow.compat.v1 as tf
24 |
25 | from object_detection.tpu_exporters import utils
26 |
27 |
28 | class UtilsTest(tf.test.TestCase):
29 |
30 | def testBfloat16ToFloat32(self):
31 | bfloat16_tensor = tf.random.uniform([2, 3], dtype=tf.bfloat16)
32 | float32_tensor = utils.bfloat16_to_float32(bfloat16_tensor)
33 | self.assertEqual(float32_tensor.dtype, tf.float32)
34 |
35 | def testOtherDtypesNotConverted(self):
36 | int32_tensor = tf.ones([2, 3], dtype=tf.int32)
37 | converted_tensor = utils.bfloat16_to_float32(int32_tensor)
38 | self.assertEqual(converted_tensor.dtype, tf.int32)
39 |
40 | def testBfloat16ToFloat32Nested(self):
41 | tensor_dict = {
42 | 'key1': tf.random.uniform([2, 3], dtype=tf.bfloat16),
43 | 'key2': [
44 | tf.random.uniform([1, 2], dtype=tf.bfloat16) for _ in range(3)
45 | ],
46 | 'key3': tf.ones([2, 3], dtype=tf.int32),
47 | }
48 | tensor_dict = utils.bfloat16_to_float32_nested(tensor_dict)
49 |
50 | self.assertEqual(tensor_dict['key1'].dtype, tf.float32)
51 | for t in tensor_dict['key2']:
52 | self.assertEqual(t.dtype, tf.float32)
53 | self.assertEqual(tensor_dict['key3'].dtype, tf.int32)
54 |
55 |
56 | if __name__ == '__main__':
57 | tf.test.main()
58 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/object_detection/utils/__init__.py
--------------------------------------------------------------------------------
/tf/object_detection/utils/category_util.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Functions for importing/exporting Object Detection categories."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import csv
23 |
24 | import tensorflow.compat.v1 as tf
25 |
26 |
27 | def load_categories_from_csv_file(csv_path):
28 | """Loads categories from a csv file.
29 |
30 | The CSV file should have one comma delimited numeric category id and string
31 | category name pair per line. For example:
32 |
33 | 0,"cat"
34 | 1,"dog"
35 | 2,"bird"
36 | ...
37 |
38 | Args:
39 | csv_path: Path to the csv file to be parsed into categories.
40 | Returns:
41 | categories: A list of dictionaries representing all possible categories.
42 | The categories will contain an integer 'id' field and a string
43 | 'name' field.
44 | Raises:
45 | ValueError: If the csv file is incorrectly formatted.
46 | """
47 | categories = []
48 |
49 | with tf.gfile.Open(csv_path, 'r') as csvfile:
50 | reader = csv.reader(csvfile, delimiter=',', quotechar='"')
51 | for row in reader:
52 | if not row:
53 | continue
54 |
55 | if len(row) != 2:
56 | raise ValueError('Expected 2 fields per row in csv: %s' % ','.join(row))
57 |
58 | category_id = int(row[0])
59 | category_name = row[1]
60 | categories.append({'id': category_id, 'name': category_name})
61 |
62 | return categories
63 |
64 |
65 | def save_categories_to_csv_file(categories, csv_path):
66 | """Saves categories to a csv file.
67 |
68 | Args:
69 | categories: A list of dictionaries representing categories to save to file.
70 | Each category must contain an 'id' and 'name' field.
71 | csv_path: Path to the csv file to write the categories to.
72 | """
73 | categories.sort(key=lambda x: x['id'])
74 | with tf.gfile.Open(csv_path, 'w') as csvfile:
75 | writer = csv.writer(csvfile, delimiter=',', quotechar='"')
76 | for category in categories:
77 | writer.writerow([category['id'], category['name']])
78 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/category_util_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.utils.category_util."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import os
23 |
24 | import tensorflow.compat.v1 as tf
25 |
26 | from object_detection.utils import category_util
27 |
28 |
29 | class EvalUtilTest(tf.test.TestCase):
30 |
31 | def test_load_categories_from_csv_file(self):
32 | csv_data = """
33 | 0,"cat"
34 | 1,"dog"
35 | 2,"bird"
36 | """.strip(' ')
37 | csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
38 | with tf.gfile.Open(csv_path, 'wb') as f:
39 | f.write(csv_data)
40 |
41 | categories = category_util.load_categories_from_csv_file(csv_path)
42 | self.assertTrue({'id': 0, 'name': 'cat'} in categories)
43 | self.assertTrue({'id': 1, 'name': 'dog'} in categories)
44 | self.assertTrue({'id': 2, 'name': 'bird'} in categories)
45 |
46 | def test_save_categories_to_csv_file(self):
47 | categories = [
48 | {'id': 0, 'name': 'cat'},
49 | {'id': 1, 'name': 'dog'},
50 | {'id': 2, 'name': 'bird'},
51 | ]
52 | csv_path = os.path.join(self.get_temp_dir(), 'test.csv')
53 | category_util.save_categories_to_csv_file(categories, csv_path)
54 | saved_categories = category_util.load_categories_from_csv_file(csv_path)
55 | self.assertEqual(saved_categories, categories)
56 |
57 |
58 | if __name__ == '__main__':
59 | tf.test.main()
60 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/context_manager.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Python context management helper."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 |
22 | class IdentityContextManager(object):
23 | """Returns an identity context manager that does nothing.
24 |
25 | This is helpful in setting up a conditional `with` statement as below:
26 |
27 | with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
28 | do_stuff()
29 |
30 | """
31 |
32 | def __enter__(self):
33 | return None
34 |
35 | def __exit__(self, exec_type, exec_value, traceback):
36 | del exec_type
37 | del exec_value
38 | del traceback
39 | return False
40 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/context_manager_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for tensorflow_models.object_detection.utils.context_manager."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import tensorflow.compat.v1 as tf
22 | from object_detection.utils import context_manager
23 |
24 |
25 | class ContextManagerTest(tf.test.TestCase):
26 |
27 | def test_identity_context_manager(self):
28 | with context_manager.IdentityContextManager() as identity_context:
29 | self.assertIsNone(identity_context)
30 |
31 |
32 | if __name__ == '__main__':
33 | tf.test.main()
34 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/dataset_util_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.utils.dataset_util."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import os
23 | import tensorflow.compat.v1 as tf
24 |
25 | from object_detection.utils import dataset_util
26 |
27 |
28 | class DatasetUtilTest(tf.test.TestCase):
29 |
30 | def test_read_examples_list(self):
31 | example_list_data = """example1 1\nexample2 2"""
32 | example_list_path = os.path.join(self.get_temp_dir(), 'examples.txt')
33 | with tf.gfile.Open(example_list_path, 'wb') as f:
34 | f.write(example_list_data)
35 |
36 | examples = dataset_util.read_examples_list(example_list_path)
37 | self.assertListEqual(['example1', 'example2'], examples)
38 |
39 |
40 | if __name__ == '__main__':
41 | tf.test.main()
42 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/model_util_tf2_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Test utility functions for manipulating Keras models."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import unittest
23 | import tensorflow.compat.v1 as tf
24 |
25 | from object_detection.utils import model_util
26 | from object_detection.utils import tf_version
27 |
28 |
29 | @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
30 | class ExtractSubmodelUtilTest(tf.test.TestCase):
31 |
32 | def test_simple_model(self):
33 | inputs = tf.keras.Input(shape=(256,)) # Returns a placeholder tensor
34 |
35 | # A layer instance is callable on a tensor, and returns a tensor.
36 | x = tf.keras.layers.Dense(128, activation='relu', name='a')(inputs)
37 | x = tf.keras.layers.Dense(64, activation='relu', name='b')(x)
38 | x = tf.keras.layers.Dense(32, activation='relu', name='c')(x)
39 | x = tf.keras.layers.Dense(16, activation='relu', name='d')(x)
40 | x = tf.keras.layers.Dense(8, activation='relu', name='e')(x)
41 | predictions = tf.keras.layers.Dense(10, activation='softmax')(x)
42 |
43 | model = tf.keras.Model(inputs=inputs, outputs=predictions)
44 |
45 |     # Slice out the span between layer 'b' (its input) and layer 'd'
46 |     # (its output); extract_submodel rebuilds it as a standalone model.
47 |     new_in = model.get_layer(name='b').input
48 |     new_out = model.get_layer(name='d').output
49 |
50 | new_model = model_util.extract_submodel(
51 | model=model,
52 | inputs=new_in,
53 | outputs=new_out)
54 |
55 | batch_size = 3
56 | ones = tf.ones((batch_size, 128))
57 | final_out = new_model(ones)
58 | self.assertAllEqual(final_out.shape, (batch_size, 16))
59 |
60 | if __name__ == '__main__':
61 | tf.test.main()
62 |
--------------------------------------------------------------------------------
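A plain-Keras sketch of the slicing the test above checks: rebuilding the 'b'..'d' span on a fresh input by re-calling the original layers (an illustration only, not necessarily how model_util.extract_submodel is implemented):

    import tensorflow as tf

    inputs = tf.keras.Input(shape=(256,))
    x = tf.keras.layers.Dense(128, activation='relu', name='a')(inputs)
    x = tf.keras.layers.Dense(64, activation='relu', name='b')(x)
    x = tf.keras.layers.Dense(32, activation='relu', name='c')(x)
    x = tf.keras.layers.Dense(16, activation='relu', name='d')(x)
    model = tf.keras.Model(inputs, x)

    sub_in = tf.keras.Input(shape=(128,))  # matches the input of layer 'b'
    y = sub_in
    for name in ('b', 'c', 'd'):
        y = model.get_layer(name)(y)  # reuse the existing layers and weights
    submodel = tf.keras.Model(sub_in, y)

    print(submodel(tf.ones((3, 128))).shape)  # (3, 16)
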
/tf/object_detection/utils/np_box_mask_list.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Numpy BoxMaskList classes and functions."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import numpy as np
23 | from object_detection.utils import np_box_list
24 |
25 |
26 | class BoxMaskList(np_box_list.BoxList):
27 | """Convenience wrapper for BoxList with masks.
28 |
29 | BoxMaskList extends the np_box_list.BoxList to contain masks as well.
30 | In particular, its constructor receives both boxes and masks. Note that the
31 | masks correspond to the full image.
32 | """
33 |
34 | def __init__(self, box_data, mask_data):
35 | """Constructs box collection.
36 |
37 | Args:
38 | box_data: a numpy array of shape [N, 4] representing box coordinates
39 | mask_data: a numpy array of shape [N, height, width] representing masks
40 |         of dtype uint8 with values in {0, 1}. The masks correspond to the
41 |         full image; their height and width equal the image height and width.
42 |
43 | Raises:
44 | ValueError: if bbox data is not a numpy array
45 | ValueError: if invalid dimensions for bbox data
46 | ValueError: if mask data is not a numpy array
47 | ValueError: if invalid dimension for mask data
48 | """
49 | super(BoxMaskList, self).__init__(box_data)
50 | if not isinstance(mask_data, np.ndarray):
51 | raise ValueError('Mask data must be a numpy array.')
52 | if len(mask_data.shape) != 3:
53 | raise ValueError('Invalid dimensions for mask data.')
54 | if mask_data.dtype != np.uint8:
55 | raise ValueError('Invalid data type for mask data: uint8 is required.')
56 | if mask_data.shape[0] != box_data.shape[0]:
57 | raise ValueError('There should be the same number of boxes and masks.')
58 | self.data['masks'] = mask_data
59 |
60 | def get_masks(self):
61 | """Convenience function for accessing masks.
62 |
63 | Returns:
64 | a numpy array of shape [N, height, width] representing masks
65 | """
66 | return self.get_field('masks')
67 |
--------------------------------------------------------------------------------
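A hypothetical construction of the class above (assuming object_detection is importable), showing the shape and dtype the constructor enforces:

    import numpy as np
    from object_detection.utils import np_box_mask_list

    boxes = np.array([[0., 0., 2., 2.],
                      [1., 1., 3., 4.]], dtype=float)  # [ymin, xmin, ymax, xmax]
    masks = np.zeros((2, 5, 5), dtype=np.uint8)  # must be uint8, [N, height, width]
    masks[0, 0:2, 0:2] = 1  # values in {0, 1}

    box_mask_list = np_box_mask_list.BoxMaskList(box_data=boxes, mask_data=masks)
    print(box_mask_list.get_masks().shape)  # (2, 5, 5)
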
/tf/object_detection/utils/np_box_ops_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.np_box_ops."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import numpy as np
23 | import tensorflow.compat.v1 as tf
24 |
25 | from object_detection.utils import np_box_ops
26 |
27 |
28 | class BoxOpsTests(tf.test.TestCase):
29 |
30 | def setUp(self):
31 | boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
32 | dtype=float)
33 | boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
34 | [0.0, 0.0, 20.0, 20.0]],
35 | dtype=float)
36 | self.boxes1 = boxes1
37 | self.boxes2 = boxes2
38 |
39 | def testArea(self):
40 | areas = np_box_ops.area(self.boxes1)
41 | expected_areas = np.array([6.0, 5.0], dtype=float)
42 | self.assertAllClose(expected_areas, areas)
43 |
44 | def testIntersection(self):
45 | intersection = np_box_ops.intersection(self.boxes1, self.boxes2)
46 | expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
47 | dtype=float)
48 | self.assertAllClose(intersection, expected_intersection)
49 |
50 | def testIOU(self):
51 | iou = np_box_ops.iou(self.boxes1, self.boxes2)
52 | expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
53 | [1.0 / 16.0, 0.0, 5.0 / 400.0]],
54 | dtype=float)
55 | self.assertAllClose(iou, expected_iou)
56 |
57 | def testIOA(self):
58 | boxes1 = np.array([[0.25, 0.25, 0.75, 0.75],
59 | [0.0, 0.0, 0.5, 0.75]],
60 | dtype=np.float32)
61 | boxes2 = np.array([[0.5, 0.25, 1.0, 1.0],
62 | [0.0, 0.0, 1.0, 1.0]],
63 | dtype=np.float32)
64 | ioa21 = np_box_ops.ioa(boxes2, boxes1)
65 | expected_ioa21 = np.array([[0.5, 0.0],
66 | [1.0, 1.0]],
67 | dtype=np.float32)
68 | self.assertAllClose(ioa21, expected_ioa21)
69 |
70 |
71 | if __name__ == '__main__':
72 | tf.test.main()
73 |
--------------------------------------------------------------------------------
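The expected values above follow directly from the box geometry; a quick standalone check of the first IoU entry (2/16 = 0.125), using the same [ymin, xmin, ymax, xmax] convention:

    def iou_single(a, b):
        # Intersection height/width, clamped at zero for disjoint boxes.
        ih = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
        iw = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
        inter = ih * iw
        area = lambda box: (box[2] - box[0]) * (box[3] - box[1])
        return inter / (area(a) + area(b) - inter)

    # boxes1[0] has area 6, boxes2[0] has area 12, and they intersect in 2.
    print(iou_single([4., 3., 7., 5.], [3., 4., 6., 8.]))  # 2 / 16 = 0.125
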
/tf/object_detection/utils/static_shape.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Helper functions to access TensorShape values.
17 |
18 | The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
19 | """
20 |
21 | from __future__ import absolute_import
22 | from __future__ import division
23 | from __future__ import print_function
24 |
25 |
26 | def get_dim_as_int(dim):
27 | """Utility to get v1 or v2 TensorShape dim as an int.
28 |
29 | Args:
30 | dim: The TensorShape dimension to get as an int
31 |
32 | Returns:
33 | None or an int.
34 | """
35 | try:
36 | return dim.value
37 | except AttributeError:
38 | return dim
39 |
40 |
41 | def get_batch_size(tensor_shape):
42 | """Returns batch size from the tensor shape.
43 |
44 | Args:
45 | tensor_shape: A rank 4 TensorShape.
46 |
47 | Returns:
48 |     An integer representing the batch size of the tensor, or None if unknown.
49 | """
50 | tensor_shape.assert_has_rank(rank=4)
51 | return get_dim_as_int(tensor_shape[0])
52 |
53 |
54 | def get_height(tensor_shape):
55 | """Returns height from the tensor shape.
56 |
57 | Args:
58 | tensor_shape: A rank 4 TensorShape.
59 |
60 | Returns:
61 |     An integer representing the height of the tensor, or None if unknown.
62 | """
63 | tensor_shape.assert_has_rank(rank=4)
64 | return get_dim_as_int(tensor_shape[1])
65 |
66 |
67 | def get_width(tensor_shape):
68 | """Returns width from the tensor shape.
69 |
70 | Args:
71 | tensor_shape: A rank 4 TensorShape.
72 |
73 | Returns:
74 |     An integer representing the width of the tensor, or None if unknown.
75 | """
76 | tensor_shape.assert_has_rank(rank=4)
77 | return get_dim_as_int(tensor_shape[2])
78 |
79 |
80 | def get_depth(tensor_shape):
81 | """Returns depth from the tensor shape.
82 |
83 | Args:
84 | tensor_shape: A rank 4 TensorShape.
85 |
86 | Returns:
87 |     An integer representing the depth of the tensor, or None if unknown.
88 | """
89 | tensor_shape.assert_has_rank(rank=4)
90 | return get_dim_as_int(tensor_shape[3])
91 |
--------------------------------------------------------------------------------
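get_dim_as_int is what lets the accessors above work under both TF1, where shape entries are Dimension objects carrying a .value, and TF2, where they are plain ints (or None); a standalone illustration with a stand-in class:

    class Dimension:  # stand-in for the TF1-style tf.Dimension
        def __init__(self, value):
            self.value = value

    def get_dim_as_int(dim):
        try:
            return dim.value  # TF1: Dimension -> int (or None)
        except AttributeError:
            return dim  # TF2: already an int (or None)

    assert get_dim_as_int(Dimension(7)) == 7  # TF1-style dimension
    assert get_dim_as_int(7) == 7  # TF2-style dimension
    assert get_dim_as_int(None) is None  # unknown dimension
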
/tf/object_detection/utils/static_shape_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for object_detection.utils.static_shape."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import tensorflow.compat.v1 as tf
23 |
24 | from object_detection.utils import static_shape
25 |
26 |
27 | class StaticShapeTest(tf.test.TestCase):
28 |
29 | def test_return_correct_batchSize(self):
30 | tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
31 | self.assertEqual(32, static_shape.get_batch_size(tensor_shape))
32 |
33 | def test_return_correct_height(self):
34 | tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
35 | self.assertEqual(299, static_shape.get_height(tensor_shape))
36 |
37 | def test_return_correct_width(self):
38 | tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
39 | self.assertEqual(384, static_shape.get_width(tensor_shape))
40 |
41 | def test_return_correct_depth(self):
42 | tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])
43 | self.assertEqual(3, static_shape.get_depth(tensor_shape))
44 |
45 |   def test_die_on_tensor_shape_with_rank_three(self):
46 |     tensor_shape = tf.TensorShape(dims=[32, 299, 384])
47 |     # Each accessor must reject a rank-3 shape on its own.
48 |     for getter in (static_shape.get_batch_size, static_shape.get_height,
49 |                    static_shape.get_width, static_shape.get_depth):
50 |       with self.assertRaises(ValueError):
51 |         getter(tensor_shape)
52 |
53 | if __name__ == '__main__':
54 | tf.test.main()
55 |
--------------------------------------------------------------------------------
/tf/object_detection/utils/tf_version.py:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Functions to check the TensorFlow version."""
16 |
17 | from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
18 |
19 |
20 | def is_tf1():
21 |   """Whether the current TensorFlow version is 1.X."""
22 | return not tf2.enabled()
23 |
24 |
25 | def is_tf2():
26 |   """Whether the current TensorFlow version is 2.X."""
27 | return tf2.enabled()
28 |
--------------------------------------------------------------------------------
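These helpers are used throughout the test files in this repo to gate version-specific code, e.g. the skipIf decorator in model_util_tf2_test.py above; the same pattern in isolation (assuming object_detection is importable):

    import unittest
    from object_detection.utils import tf_version

    @unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
    class MyTf2OnlyTest(unittest.TestCase):

        def test_runs_only_under_tf2(self):
            self.assertTrue(tf_version.is_tf2())

    if __name__ == '__main__':
        unittest.main()
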
/tf/slim/WORKSPACE:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/WORKSPACE
--------------------------------------------------------------------------------
/tf/slim/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/__init__.py
--------------------------------------------------------------------------------
/tf/slim/datasets/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/slim/datasets/dataset_factory.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """A factory-pattern class which returns classification image/label pairs."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | from datasets import cifar10
22 | from datasets import flowers
23 | from datasets import imagenet
24 | from datasets import mnist
25 | from datasets import visualwakewords
26 |
27 | datasets_map = {
28 | 'cifar10': cifar10,
29 | 'flowers': flowers,
30 | 'imagenet': imagenet,
31 | 'mnist': mnist,
32 | 'visualwakewords': visualwakewords,
33 | }
34 |
35 |
36 | def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
37 | """Given a dataset name and a split_name returns a Dataset.
38 |
39 | Args:
40 | name: String, the name of the dataset.
41 | split_name: A train/test split name.
42 | dataset_dir: The directory where the dataset files are stored.
43 | file_pattern: The file pattern to use for matching the dataset source files.
44 | reader: The subclass of tf.ReaderBase. If left as `None`, then the default
45 | reader defined by each dataset is used.
46 |
47 | Returns:
48 |     A `Dataset` instance.
49 |
50 | Raises:
51 | ValueError: If the dataset `name` is unknown.
52 | """
53 | if name not in datasets_map:
54 |     raise ValueError('Unknown dataset name: %s' % name)
55 | return datasets_map[name].get_split(
56 | split_name,
57 | dataset_dir,
58 | file_pattern,
59 | reader)
60 |
--------------------------------------------------------------------------------
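A hypothetical lookup through the factory above, assuming the slim directory is on the path and the flowers TFRecords were produced under /tmp/flowers with download_and_convert_data.py:

    from datasets import dataset_factory

    # Resolves 'flowers' in datasets_map and delegates to flowers.get_split.
    dataset = dataset_factory.get_dataset(
        name='flowers', split_name='train', dataset_dir='/tmp/flowers')
    print(dataset.num_samples)  # slim Dataset objects expose num_samples

    # Unknown names raise ValueError:
    try:
        dataset_factory.get_dataset('no_such_set', 'train', '/tmp')
    except ValueError as e:
        print(e)
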
/tf/slim/deployment/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/slim/export_inference_graph_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Tests for export_inference_graph."""
17 |
18 | from __future__ import absolute_import
19 | from __future__ import division
20 | from __future__ import print_function
21 |
22 | import os
23 |
24 |
25 | import tensorflow.compat.v1 as tf
26 |
27 | from tensorflow.python.platform import gfile
28 | import export_inference_graph
29 |
30 |
31 | class ExportInferenceGraphTest(tf.test.TestCase):
32 |
33 | def testExportInferenceGraph(self):
34 | tmpdir = self.get_temp_dir()
35 | output_file = os.path.join(tmpdir, 'inception_v3.pb')
36 | flags = tf.app.flags.FLAGS
37 | flags.output_file = output_file
38 | flags.model_name = 'inception_v3'
39 | flags.dataset_dir = tmpdir
40 | export_inference_graph.main(None)
41 | self.assertTrue(gfile.Exists(output_file))
42 |
43 | if __name__ == '__main__':
44 | tf.test.main()
45 |
--------------------------------------------------------------------------------
/tf/slim/nets/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/slim/nets/inception.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Brings all inception models under one namespace."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | # pylint: disable=unused-import
22 | from nets.inception_resnet_v2 import inception_resnet_v2
23 | from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope
24 | from nets.inception_resnet_v2 import inception_resnet_v2_base
25 | from nets.inception_v1 import inception_v1
26 | from nets.inception_v1 import inception_v1_arg_scope
27 | from nets.inception_v1 import inception_v1_base
28 | from nets.inception_v2 import inception_v2
29 | from nets.inception_v2 import inception_v2_arg_scope
30 | from nets.inception_v2 import inception_v2_base
31 | from nets.inception_v3 import inception_v3
32 | from nets.inception_v3 import inception_v3_arg_scope
33 | from nets.inception_v3 import inception_v3_base
34 | from nets.inception_v4 import inception_v4
35 | from nets.inception_v4 import inception_v4_arg_scope
36 | from nets.inception_v4 import inception_v4_base
37 | # pylint: enable=unused-import
38 |
--------------------------------------------------------------------------------
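A hedged usage sketch of the namespace module above (assuming the slim nets package and tf_slim are importable): callers pick a network and its matching arg_scope from the same place:

    import tensorflow.compat.v1 as tf
    import tf_slim as slim
    from nets import inception

    tf.disable_v2_behavior()
    images = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        # num_classes=1001 matches the released ImageNet checkpoints
        # (class 0 is a background class).
        logits, end_points = inception.inception_v3(
            images, num_classes=1001, is_training=False)
    print(logits.shape)  # (?, 1001)
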
/tf/slim/nets/mobilenet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet/__init__.py
--------------------------------------------------------------------------------
/tf/slim/nets/mobilenet/g3doc/edgetpu_latency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet/g3doc/edgetpu_latency.png
--------------------------------------------------------------------------------
/tf/slim/nets/mobilenet/g3doc/latency_pixel1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet/g3doc/latency_pixel1.png
--------------------------------------------------------------------------------
/tf/slim/nets/mobilenet/g3doc/madds_top1_accuracy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet/g3doc/madds_top1_accuracy.png
--------------------------------------------------------------------------------
/tf/slim/nets/mobilenet/mnet_v1_vs_v2_pixel1_latency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet/mnet_v1_vs_v2_pixel1_latency.png
--------------------------------------------------------------------------------
/tf/slim/nets/mobilenet_v1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/slim/nets/mobilenet_v1.png
--------------------------------------------------------------------------------
/tf/slim/nets/nasnet/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/slim/nets/nasnet/nasnet_utils_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Tests for slim.nets.nasnet.nasnet_utils."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 | import tensorflow.compat.v1 as tf
21 |
22 | from nets.nasnet import nasnet_utils
23 |
24 |
25 | class NasnetUtilsTest(tf.test.TestCase):
26 |
27 | def testCalcReductionLayers(self):
28 | num_cells = 18
29 | num_reduction_layers = 2
30 | reduction_layers = nasnet_utils.calc_reduction_layers(
31 | num_cells, num_reduction_layers)
32 | self.assertEqual(len(reduction_layers), 2)
33 | self.assertEqual(reduction_layers[0], 6)
34 | self.assertEqual(reduction_layers[1], 12)
35 |
36 | def testGetChannelIndex(self):
37 | data_formats = ['NHWC', 'NCHW']
38 | for data_format in data_formats:
39 | index = nasnet_utils.get_channel_index(data_format)
40 | correct_index = 3 if data_format == 'NHWC' else 1
41 | self.assertEqual(index, correct_index)
42 |
43 | def testGetChannelDim(self):
44 | data_formats = ['NHWC', 'NCHW']
45 | shape = [10, 20, 30, 40]
46 | for data_format in data_formats:
47 | dim = nasnet_utils.get_channel_dim(shape, data_format)
48 | correct_dim = shape[3] if data_format == 'NHWC' else shape[1]
49 | self.assertEqual(dim, correct_dim)
50 |
51 | def testGlobalAvgPool(self):
52 | data_formats = ['NHWC', 'NCHW']
53 | inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
54 | for data_format in data_formats:
55 | output = nasnet_utils.global_avg_pool(
56 | inputs, data_format)
57 | self.assertEqual(output.shape, [5, 10])
58 |
59 | def test_factorized_reduction(self):
60 | data_format = 'NHWC'
61 | output_shape = (5, 10, 20, 16)
62 | inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
63 | output = nasnet_utils.factorized_reduction(
64 | inputs, 16, stride=1, data_format=data_format)
65 | self.assertSequenceEqual(output_shape, output.shape.as_list())
66 |
67 |
68 | if __name__ == '__main__':
69 | tf.test.main()
70 |
--------------------------------------------------------------------------------
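The expected positions 6 and 12 in testCalcReductionLayers come from spacing the reduction cells evenly through the network; a standalone sketch, on the assumption that it mirrors nasnet_utils.calc_reduction_layers:

    def calc_reduction_layers(num_cells, num_reduction_layers):
        # Place reduction cell k at floor(k / (n + 1) * num_cells).
        return [int(num_cells * pool_num / (num_reduction_layers + 1))
                for pool_num in range(1, num_reduction_layers + 1)]

    assert calc_reduction_layers(18, 2) == [6, 12]
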
/tf/slim/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tf/slim/preprocessing/lenet_preprocessing.py:
--------------------------------------------------------------------------------
1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Provides utilities for preprocessing."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import tensorflow.compat.v1 as tf
22 |
23 |
24 | def preprocess_image(image,
25 | output_height,
26 | output_width,
27 | is_training,
28 | use_grayscale=False):
29 | """Preprocesses the given image.
30 |
31 | Args:
32 | image: A `Tensor` representing an image of arbitrary size.
33 | output_height: The height of the image after preprocessing.
34 | output_width: The width of the image after preprocessing.
35 | is_training: `True` if we're preprocessing the image for training and
36 | `False` otherwise.
37 | use_grayscale: Whether to convert the image from RGB to grayscale.
38 |
39 | Returns:
40 | A preprocessed image.
41 | """
42 | del is_training # Unused argument
43 | image = tf.to_float(image)
44 | if use_grayscale:
45 | image = tf.image.rgb_to_grayscale(image)
46 |   image = tf.image.resize_image_with_crop_or_pad(
47 |       image, output_height, output_width)  # (target_height, target_width)
48 | image = tf.subtract(image, 128.0)
49 | image = tf.div(image, 128.0)
50 | return image
51 |
--------------------------------------------------------------------------------
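The closing subtract/divide pair above maps raw pixel values in [0, 255] into roughly [-1, 1]; a quick standalone check of the arithmetic:

    for pixel in (0.0, 128.0, 255.0):
        print(pixel, '->', (pixel - 128.0) / 128.0)  # -1.0, 0.0, ~0.992
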
/tf/slim/scripts/train_cifarnet_on_cifar10.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | #
17 | # This script performs the following operations:
18 | # 1. Downloads the Cifar10 dataset
19 | # 2. Trains a CifarNet model on the Cifar10 training set.
20 | # 3. Evaluates the model on the Cifar10 testing set.
21 | #
22 | # Usage:
23 | # cd slim
24 | # ./scripts/train_cifarnet_on_cifar10.sh
25 | set -e
26 |
27 | # Where the checkpoint and logs will be saved to.
28 | TRAIN_DIR=/tmp/cifarnet-model
29 |
30 | # Where the dataset is saved to.
31 | DATASET_DIR=/tmp/cifar10
32 |
33 | # Download the dataset
34 | python download_and_convert_data.py \
35 | --dataset_name=cifar10 \
36 | --dataset_dir=${DATASET_DIR}
37 |
38 | # Run training.
39 | python train_image_classifier.py \
40 | --train_dir=${TRAIN_DIR} \
41 | --dataset_name=cifar10 \
42 | --dataset_split_name=train \
43 | --dataset_dir=${DATASET_DIR} \
44 | --model_name=cifarnet \
45 | --preprocessing_name=cifarnet \
46 | --max_number_of_steps=100000 \
47 | --batch_size=128 \
48 | --save_interval_secs=120 \
49 | --save_summaries_secs=120 \
50 | --log_every_n_steps=100 \
51 | --optimizer=sgd \
52 | --learning_rate=0.1 \
53 | --learning_rate_decay_factor=0.1 \
54 | --num_epochs_per_decay=200 \
55 | --weight_decay=0.004
56 |
57 | # Run evaluation.
58 | python eval_image_classifier.py \
59 | --checkpoint_path=${TRAIN_DIR} \
60 | --eval_dir=${TRAIN_DIR} \
61 | --dataset_name=cifar10 \
62 | --dataset_split_name=test \
63 | --dataset_dir=${DATASET_DIR} \
64 | --model_name=cifarnet
65 |
--------------------------------------------------------------------------------
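Rough schedule arithmetic for the flags above, assuming the standard 50,000-image CIFAR-10 train split and slim's exponential decay, where decay_steps = (num_samples / batch_size) * num_epochs_per_decay:

    train_images = 50000  # CIFAR-10 train split
    batch_size = 128
    steps_per_epoch = train_images / batch_size  # ~390.6
    decay_steps = steps_per_epoch * 200  # num_epochs_per_decay=200
    print(round(decay_steps))  # 78125
    # So within max_number_of_steps=100000 the learning rate decays once:
    # 0.1 -> 0.01 (learning_rate_decay_factor=0.1).
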
/tf/slim/scripts/train_lenet_on_mnist.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | #
17 | # This script performs the following operations:
18 | # 1. Downloads the MNIST dataset
19 | # 2. Trains a LeNet model on the MNIST training set.
20 | # 3. Evaluates the model on the MNIST testing set.
21 | #
22 | # Usage:
23 | # cd slim
24 | # ./scripts/train_lenet_on_mnist.sh
25 | set -e
26 |
27 | # Where the checkpoint and logs will be saved to.
28 | TRAIN_DIR=/tmp/lenet-model
29 |
30 | # Where the dataset is saved to.
31 | DATASET_DIR=/tmp/mnist
32 |
33 | # Download the dataset
34 | python download_and_convert_data.py \
35 | --dataset_name=mnist \
36 | --dataset_dir=${DATASET_DIR}
37 |
38 | # Run training.
39 | python train_image_classifier.py \
40 | --train_dir=${TRAIN_DIR} \
41 | --dataset_name=mnist \
42 | --dataset_split_name=train \
43 | --dataset_dir=${DATASET_DIR} \
44 | --model_name=lenet \
45 | --preprocessing_name=lenet \
46 | --max_number_of_steps=20000 \
47 | --batch_size=50 \
48 | --learning_rate=0.01 \
49 | --save_interval_secs=60 \
50 | --save_summaries_secs=60 \
51 | --log_every_n_steps=100 \
52 | --optimizer=sgd \
53 | --learning_rate_decay_type=fixed \
54 | --weight_decay=0
55 |
56 | # Run evaluation.
57 | python eval_image_classifier.py \
58 | --checkpoint_path=${TRAIN_DIR} \
59 | --eval_dir=${TRAIN_DIR} \
60 | --dataset_name=mnist \
61 | --dataset_split_name=test \
62 | --dataset_dir=${DATASET_DIR} \
63 | --model_name=lenet
64 |
--------------------------------------------------------------------------------
/tf/slim/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | """Setup script for slim."""
16 |
17 | from setuptools import find_packages
18 | from setuptools import setup
19 |
20 | install_requires = [
21 | 'six',
22 | 'tf-slim>=1.1',
23 | ]
24 |
25 | setup(
26 | name='slim',
27 | version='0.1',
28 | install_requires=install_requires,
29 | include_package_data=True,
30 | packages=find_packages(),
31 | description='tf-slim',
32 | )
33 |
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/checkpoint/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "ckpt-0"
2 | all_model_checkpoint_paths: "ckpt-0"
3 | all_model_checkpoint_timestamps: 1594332515.4689217
4 | last_preserved_timestamp: 1594332510.8896894
5 |
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/checkpoint/ckpt-0.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/checkpoint/ckpt-0.data-00000-of-00001
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/checkpoint/ckpt-0.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/checkpoint/ckpt-0.index
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/saved_model.pb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/saved_model.pb
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/variables/variables.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/variables/variables.data-00000-of-00001
--------------------------------------------------------------------------------
/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/variables/variables.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AhmetFurkanDEMIR/Deploy-TensorFlow-2-Object-Detection-API-models-with-Python-Flask/92f599371782a50997ce4f27bc07524fdf8585bb/tf/ssd_mobilenet_v2_320x320_coco17_tpu-8/saved_model/variables/variables.index
--------------------------------------------------------------------------------