├── .gitignore ├── README.md ├── _init_paths.py ├── cfgs ├── c3d_activitynet.yml ├── c3d_thumos14.yml ├── res18_activitynet.yml ├── res18_thumos14.yml ├── res34_activitynet.yml ├── res34_thumos14.yml └── res50_activitynet.yml ├── evaluation ├── activitynet │ ├── Evaluation │ │ ├── README.md │ │ ├── __init__.py │ │ ├── data │ │ │ ├── activity_net.v1-3.min.json │ │ │ ├── sample_classification_prediction.json │ │ │ ├── sample_detection_prediction.json │ │ │ └── uniform_random_proposals.json │ │ ├── eval_classification.py │ │ ├── eval_detection.py │ │ ├── eval_kinetics.py │ │ ├── eval_proposal.py │ │ ├── get_classification_performance.py │ │ ├── get_detection_performance.py │ │ ├── get_kinetics_performance.py │ │ ├── get_proposal_performance.py │ │ └── utils.py │ └── activitynet_log_analysis.py └── thumos14 │ ├── Evaluation │ ├── TH14evalDet_Updated.m │ ├── TH14evalProp.m │ ├── annotation │ │ ├── annotation_test │ │ │ ├── Ambiguous_test.txt │ │ │ ├── BaseballPitch_test.txt │ │ │ ├── BasketballDunk_test.txt │ │ │ ├── Billiards_test.txt │ │ │ ├── CleanAndJerk_test.txt │ │ │ ├── CliffDiving_test.txt │ │ │ ├── CricketBowling_test.txt │ │ │ ├── CricketShot_test.txt │ │ │ ├── Diving_test.txt │ │ │ ├── FrisbeeCatch_test.txt │ │ │ ├── GolfSwing_test.txt │ │ │ ├── HammerThrow_test.txt │ │ │ ├── HighJump_test.txt │ │ │ ├── JavelinThrow_test.txt │ │ │ ├── LongJump_test.txt │ │ │ ├── PoleVault_test.txt │ │ │ ├── Shotput_test.txt │ │ │ ├── SoccerPenalty_test.txt │ │ │ ├── TennisSwing_test.txt │ │ │ ├── ThrowDiscus_test.txt │ │ │ ├── VolleyballSpiking_test.txt │ │ │ └── detclasslist.txt │ │ └── annotation_val │ │ │ ├── Ambiguous_val.txt │ │ │ ├── BaseballPitch_val.txt │ │ │ ├── BasketballDunk_val.txt │ │ │ ├── Billiards_val.txt │ │ │ ├── CleanAndJerk_val.txt │ │ │ ├── CliffDiving_val.txt │ │ │ ├── CricketBowling_val.txt │ │ │ ├── CricketShot_val.txt │ │ │ ├── Diving_val.txt │ │ │ ├── FrisbeeCatch_val.txt │ │ │ ├── GolfSwing_val.txt │ │ │ ├── HammerThrow_val.txt │ │ │ ├── HighJump_val.txt │ │ │ ├── JavelinThrow_val.txt │ │ │ ├── LongJump_val.txt │ │ │ ├── PoleVault_val.txt │ │ │ ├── Shotput_val.txt │ │ │ ├── SoccerPenalty_val.txt │ │ │ ├── TennisSwing_val.txt │ │ │ ├── ThrowDiscus_val.txt │ │ │ ├── VolleyballSpiking_val.txt │ │ │ └── detclasslist.txt │ ├── eval_proposal.m │ ├── eval_thumos14.m │ ├── intervaloverlapvalseconds.m │ ├── nms_temporal.m │ ├── python │ │ ├── TH14evalDet_Updated.py │ │ ├── eval_thumos14.py │ │ └── utils.py │ ├── res_thumos14.mat │ ├── soft_nms_temporal.m │ ├── temporal_act_grouping.m │ ├── tmp_run.txt │ ├── transform_reg_target_inv.m │ └── untitled.m │ ├── record │ ├── thumos14_log_analysis.py │ └── tmp.txt.bak ├── lib ├── datasets │ ├── VOCdevkit-matlab-wrapper │ │ ├── get_voc_opts.m │ │ ├── voc_eval.m │ │ └── xVOCap.m │ ├── __init__.py │ ├── coco.py │ ├── ds_utils.py │ ├── factory.py │ ├── imagenet.py │ ├── imdb.py │ ├── pascal_voc.py │ ├── pascal_voc_rbg.py │ ├── tools │ │ └── mcg_munge.py │ ├── vg.py │ ├── vg_eval.py │ └── voc_eval.py ├── make.sh ├── model │ ├── __init__.py │ ├── nms │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── _ext │ │ │ ├── __init__.py │ │ │ └── nms │ │ │ │ └── __init__.py │ │ ├── build.py │ │ ├── make.sh │ │ ├── nms_cpu.py │ │ ├── nms_gpu.py │ │ ├── nms_wrapper.py │ │ └── src │ │ │ ├── nms_cuda.c │ │ │ ├── nms_cuda.h │ │ │ ├── nms_cuda_kernel.cu │ │ │ └── nms_cuda_kernel.h │ ├── roi_temporal_pooling │ │ ├── __init__.py │ │ ├── _ext │ │ │ ├── __init__.py │ │ │ └── roi_temporal_pooling │ │ │ │ └── __init__.py │ │ ├── build.py │ │ ├── functions │ │ │ ├── 
__init__.py │ │ │ └── roi_temporal_pool.py │ │ ├── modules │ │ │ ├── __init__.py │ │ │ └── roi_temporal_pool.py │ │ └── src │ │ │ ├── roi_temporal_pooling.c │ │ │ ├── roi_temporal_pooling.h │ │ │ ├── roi_temporal_pooling_cuda.c │ │ │ ├── roi_temporal_pooling_cuda.h │ │ │ ├── roi_temporal_pooling_kernel.cu │ │ │ └── roi_temporal_pooling_kernel.h │ ├── rpn │ │ ├── __init__.py │ │ ├── anchor_target_layer.py │ │ ├── generate_anchors.py │ │ ├── proposal_layer.py │ │ ├── proposal_target_layer_cascade.py │ │ ├── resnet.py │ │ ├── rpn.py │ │ └── twin_transform.py │ ├── tdcnn │ │ ├── __init__.py │ │ ├── c3d.py │ │ ├── eco.py │ │ ├── i3d.py │ │ ├── resnet.py │ │ ├── tdcnn.py │ │ └── vgg16.py │ └── utils │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── blob.py │ │ ├── config.py │ │ ├── logger.py │ │ ├── net_utils.py │ │ ├── non_local_dot_product.py │ │ └── transforms.py ├── roi_data_layer │ ├── __init__.py │ ├── minibatch.py │ └── roibatchLoader.py ├── setup.py ├── test_model │ └── test_tdcnn.py └── tf_model_zoo │ ├── C3DRes18 │ ├── C3DRes18.yaml │ ├── __init__.py │ ├── layer_factory.py │ └── pytorch_load.py │ ├── ECO │ ├── ECO.yaml │ ├── ECO_bottom.yaml │ ├── ECO_top.yaml │ ├── __init__.py │ ├── layer_factory.py │ └── pytorch_load.py │ ├── ECOfull │ ├── ECOfull.yaml │ ├── __init__.py │ ├── layer_factory.py │ └── pytorch_load.py │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── bninception │ ├── __init__.py │ ├── bn_inception.yaml │ ├── bn_inception_bottom.yaml │ ├── caffe_pb2.py │ ├── inceptionv3.yaml │ ├── layer_factory.py │ ├── parse_caffe.py │ └── pytorch_load.py │ ├── eco-lite-4f-prec-1.png │ ├── inceptionresnetv2 │ ├── __init__.py │ ├── pytorch_load.py │ ├── tensorflow_dump.py │ └── torch_load.lua │ ├── inceptionv4 │ ├── __init__.py │ ├── pytorch_load.py │ ├── tensorflow_dump.py │ └── torch_load.lua │ ├── lena_224.png │ ├── lena_299.png │ ├── lena_origin.png │ └── models │ ├── .DS_Store │ ├── ._.DS_Store │ ├── .github │ └── ISSUE_TEMPLATE.md │ ├── .gitignore │ ├── .gitmodules │ ├── AUTHORS │ ├── CONTRIBUTING.md │ ├── LICENSE │ ├── README.md │ ├── WORKSPACE │ ├── autoencoder │ ├── AdditiveGaussianNoiseAutoencoderRunner.py │ ├── AutoencoderRunner.py │ ├── MaskingNoiseAutoencoderRunner.py │ ├── Utils.py │ ├── VariationalAutoencoderRunner.py │ ├── __init__.py │ └── autoencoder_models │ │ ├── Autoencoder.py │ │ ├── DenoisingAutoencoder.py │ │ ├── VariationalAutoencoder.py │ │ └── __init__.py │ ├── compression │ ├── README.md │ ├── decoder.py │ ├── encoder.py │ ├── example.png │ └── msssim.py │ ├── differential_privacy │ ├── README.md │ ├── __init__.py │ ├── dp_sgd │ │ ├── README.md │ │ ├── dp_mnist │ │ │ ├── BUILD │ │ │ └── dp_mnist.py │ │ ├── dp_optimizer │ │ │ ├── BUILD │ │ │ ├── dp_optimizer.py │ │ │ ├── dp_pca.py │ │ │ ├── sanitizer.py │ │ │ └── utils.py │ │ └── per_example_gradients │ │ │ ├── BUILD │ │ │ └── per_example_gradients.py │ ├── multiple_teachers │ │ ├── BUILD │ │ ├── README.md │ │ ├── aggregation.py │ │ ├── analysis.py │ │ ├── deep_cnn.py │ │ ├── input.py │ │ ├── metrics.py │ │ ├── train_student.py │ │ ├── train_student_mnist_250_lap_20_count_50_epochs_600.sh │ │ ├── train_teachers.py │ │ └── utils.py │ └── privacy_accountant │ │ ├── python │ │ ├── BUILD │ │ └── gaussian_moments.py │ │ └── tf │ │ ├── BUILD │ │ └── accountant.py │ ├── im2txt │ ├── .gitignore │ ├── README.md │ ├── WORKSPACE │ ├── g3doc │ │ ├── COCO_val2014_000000224477.jpg │ │ ├── example_captions.jpg │ │ └── show_and_tell_architecture.png │ └── im2txt │ │ ├── BUILD │ │ ├── configuration.py │ │ ├── data │ │ ├── 
build_mscoco_data.py │ │ └── download_and_preprocess_mscoco.sh │ │ ├── evaluate.py │ │ ├── inference_utils │ │ ├── BUILD │ │ ├── caption_generator.py │ │ ├── caption_generator_test.py │ │ ├── inference_wrapper_base.py │ │ └── vocabulary.py │ │ ├── inference_wrapper.py │ │ ├── ops │ │ ├── BUILD │ │ ├── image_embedding.py │ │ ├── image_embedding_test.py │ │ ├── image_processing.py │ │ └── inputs.py │ │ ├── run_inference.py │ │ ├── show_and_tell_model.py │ │ ├── show_and_tell_model_test.py │ │ └── train.py │ ├── inception │ ├── .gitignore │ ├── README.md │ ├── WORKSPACE │ ├── g3doc │ │ └── inception_v3_architecture.png │ └── inception │ │ ├── BUILD │ │ ├── data │ │ ├── build_image_data.py │ │ ├── build_imagenet_data.py │ │ ├── download_and_preprocess_flowers.sh │ │ ├── download_and_preprocess_flowers_mac.sh │ │ ├── download_and_preprocess_imagenet.sh │ │ ├── download_imagenet.sh │ │ ├── imagenet_2012_validation_synset_labels.txt │ │ ├── imagenet_lsvrc_2015_synsets.txt │ │ ├── imagenet_metadata.txt │ │ ├── preprocess_imagenet_validation_data.py │ │ └── process_bounding_boxes.py │ │ ├── dataset.py │ │ ├── flowers_data.py │ │ ├── flowers_eval.py │ │ ├── flowers_train.py │ │ ├── image_processing.py │ │ ├── imagenet_data.py │ │ ├── imagenet_distributed_train.py │ │ ├── imagenet_eval.py │ │ ├── imagenet_train.py │ │ ├── inception_distributed_train.py │ │ ├── inception_eval.py │ │ ├── inception_model.py │ │ ├── inception_train.py │ │ └── slim │ │ ├── BUILD │ │ ├── README.md │ │ ├── collections_test.py │ │ ├── inception_model.py │ │ ├── inception_test.py │ │ ├── losses.py │ │ ├── losses_test.py │ │ ├── ops.py │ │ ├── ops_test.py │ │ ├── scopes.py │ │ ├── scopes_test.py │ │ ├── slim.py │ │ ├── variables.py │ │ └── variables_test.py │ ├── lm_1b │ ├── BUILD │ ├── README.md │ ├── data_utils.py │ └── lm_1b_eval.py │ ├── namignizer │ ├── .gitignore │ ├── README.md │ ├── data_utils.py │ ├── model.py │ └── names.py │ ├── neural_gpu │ ├── README.md │ ├── data_utils.py │ ├── neural_gpu.py │ └── neural_gpu_trainer.py │ ├── neural_programmer │ ├── README.md │ ├── data_utils.py │ ├── model.py │ ├── neural_programmer.py │ ├── nn_utils.py │ ├── parameters.py │ └── wiki_data.py │ ├── resnet │ ├── BUILD │ ├── README.md │ ├── cifar_input.py │ ├── g3doc │ │ ├── cifar_resnet.gif │ │ └── cifar_resnet_legends.gif │ ├── resnet_main.py │ └── resnet_model.py │ ├── slim │ ├── .DS_Store │ ├── ._.DS_Store │ ├── BUILD │ ├── README.md │ ├── datasets │ │ ├── __init__.py │ │ ├── cifar10.py │ │ ├── dataset_factory.py │ │ ├── dataset_utils.py │ │ ├── download_and_convert_cifar10.py │ │ ├── download_and_convert_flowers.py │ │ ├── download_and_convert_mnist.py │ │ ├── flowers.py │ │ ├── imagenet.py │ │ └── mnist.py │ ├── deployment │ │ ├── __init__.py │ │ ├── model_deploy.py │ │ └── model_deploy_test.py │ ├── download_and_convert_data.py │ ├── eval_image_classifier.py │ ├── nets │ │ ├── __init__.py │ │ ├── alexnet.py │ │ ├── alexnet_test.py │ │ ├── cifarnet.py │ │ ├── inception.py │ │ ├── inception_resnet_v2.py │ │ ├── inception_resnet_v2_test.py │ │ ├── inception_utils.py │ │ ├── inception_v1.py │ │ ├── inception_v1_test.py │ │ ├── inception_v2.py │ │ ├── inception_v2_test.py │ │ ├── inception_v3.py │ │ ├── inception_v3_test.py │ │ ├── inception_v4.py │ │ ├── inception_v4_test.py │ │ ├── lenet.py │ │ ├── nets_factory.py │ │ ├── nets_factory_test.py │ │ ├── overfeat.py │ │ ├── overfeat_test.py │ │ ├── resnet_utils.py │ │ ├── resnet_v1.py │ │ ├── resnet_v1_test.py │ │ ├── resnet_v2.py │ │ ├── resnet_v2_test.py │ │ ├── vgg.py │ │ └── 
vgg_test.py │ ├── preprocessing │ │ ├── __init__.py │ │ ├── cifarnet_preprocessing.py │ │ ├── inception_preprocessing.py │ │ ├── lenet_preprocessing.py │ │ ├── preprocessing_factory.py │ │ └── vgg_preprocessing.py │ ├── scripts │ │ ├── finetune_inception_v1_on_flowers.sh │ │ ├── finetune_inception_v3_on_flowers.sh │ │ ├── train_cifarnet_on_cifar10.sh │ │ └── train_lenet_on_mnist.sh │ ├── slim_walkthough.ipynb │ └── train_image_classifier.py │ ├── street │ ├── README.md │ ├── cc │ │ └── rnn_ops.cc │ ├── g3doc │ │ ├── avdessapins.png │ │ └── vgslspecs.md │ ├── python │ │ ├── decoder.py │ │ ├── decoder_test.py │ │ ├── errorcounter.py │ │ ├── errorcounter_test.py │ │ ├── nn_ops.py │ │ ├── shapes.py │ │ ├── shapes_test.py │ │ ├── vgsl_eval.py │ │ ├── vgsl_input.py │ │ ├── vgsl_model.py │ │ ├── vgsl_model_test.py │ │ ├── vgsl_train.py │ │ ├── vgslspecs.py │ │ └── vgslspecs_test.py │ └── testdata │ │ ├── arial-32-tiny │ │ ├── arial.charset_size=105.txt │ │ ├── charset_size=134.txt │ │ ├── charset_size_10.txt │ │ ├── mnist-tiny │ │ ├── numbers-16-tiny │ │ └── numbers.charset_size=12.txt │ ├── swivel │ ├── .gitignore │ ├── README.md │ ├── analogy.cc │ ├── eval.mk │ ├── fastprep.cc │ ├── fastprep.mk │ ├── glove_to_shards.py │ ├── nearest.py │ ├── prep.py │ ├── swivel.py │ ├── text2bin.py │ ├── vecs.py │ └── wordsim.py │ ├── syntaxnet │ ├── .gitignore │ ├── Dockerfile │ ├── README.md │ ├── WORKSPACE │ ├── beam_search_training.png │ ├── ff_nn_schematic.png │ ├── looping-parser.gif │ ├── sawman.png │ ├── syntaxnet │ │ ├── BUILD │ │ ├── affix.cc │ │ ├── affix.h │ │ ├── arc_standard_transitions.cc │ │ ├── arc_standard_transitions_test.cc │ │ ├── base.h │ │ ├── beam_reader_ops.cc │ │ ├── beam_reader_ops_test.py │ │ ├── binary_segment_state.cc │ │ ├── binary_segment_state.h │ │ ├── binary_segment_state_test.cc │ │ ├── binary_segment_transitions.cc │ │ ├── binary_segment_transitions_test.cc │ │ ├── char_properties.cc │ │ ├── char_properties.h │ │ ├── char_properties_test.cc │ │ ├── conll2tree.py │ │ ├── context.pbtxt │ │ ├── demo.sh │ │ ├── dictionary.proto │ │ ├── document_filters.cc │ │ ├── document_format.cc │ │ ├── document_format.h │ │ ├── embedding_feature_extractor.cc │ │ ├── embedding_feature_extractor.h │ │ ├── feature_extractor.cc │ │ ├── feature_extractor.h │ │ ├── feature_extractor.proto │ │ ├── feature_types.h │ │ ├── fml_parser.cc │ │ ├── fml_parser.h │ │ ├── graph_builder.py │ │ ├── graph_builder_test.py │ │ ├── kbest_syntax.proto │ │ ├── lexicon_builder.cc │ │ ├── lexicon_builder_test.py │ │ ├── load_parser_ops.py │ │ ├── models │ │ │ ├── parsey_mcparseface │ │ │ │ ├── context.pbtxt │ │ │ │ ├── fine-to-universal.map │ │ │ │ ├── label-map │ │ │ │ ├── parser-params │ │ │ │ ├── prefix-table │ │ │ │ ├── suffix-table │ │ │ │ ├── tag-map │ │ │ │ ├── tagger-params │ │ │ │ └── word-map │ │ │ └── parsey_universal │ │ │ │ ├── context-tokenize-zh.pbtxt │ │ │ │ ├── context.pbtxt │ │ │ │ ├── parse.sh │ │ │ │ ├── tokenize.sh │ │ │ │ └── tokenize_zh.sh │ │ ├── morpher_transitions.cc │ │ ├── morphology_label_set.cc │ │ ├── morphology_label_set.h │ │ ├── morphology_label_set_test.cc │ │ ├── ops │ │ │ └── parser_ops.cc │ │ ├── parser_eval.py │ │ ├── parser_features.cc │ │ ├── parser_features.h │ │ ├── parser_features_test.cc │ │ ├── parser_state.cc │ │ ├── parser_state.h │ │ ├── parser_trainer.py │ │ ├── parser_trainer_test.sh │ │ ├── parser_transitions.cc │ │ ├── parser_transitions.h │ │ ├── populate_test_inputs.cc │ │ ├── populate_test_inputs.h │ │ ├── proto_io.h │ │ ├── reader_ops.cc │ │ ├── 
reader_ops_test.py │ │ ├── registry.cc │ │ ├── registry.h │ │ ├── segmenter_utils.cc │ │ ├── segmenter_utils.h │ │ ├── segmenter_utils_test.cc │ │ ├── sentence.proto │ │ ├── sentence_batch.cc │ │ ├── sentence_batch.h │ │ ├── sentence_features.cc │ │ ├── sentence_features.h │ │ ├── sentence_features_test.cc │ │ ├── shared_store.cc │ │ ├── shared_store.h │ │ ├── shared_store_test.cc │ │ ├── sparse.proto │ │ ├── structured_graph_builder.py │ │ ├── syntaxnet.bzl │ │ ├── tagger_transitions.cc │ │ ├── tagger_transitions_test.cc │ │ ├── task_context.cc │ │ ├── task_context.h │ │ ├── task_spec.proto │ │ ├── term_frequency_map.cc │ │ ├── term_frequency_map.h │ │ ├── test_main.cc │ │ ├── testdata │ │ │ ├── context.pbtxt │ │ │ ├── document │ │ │ └── mini-training-set │ │ ├── text_formats.cc │ │ ├── text_formats_test.py │ │ ├── unpack_sparse_features.cc │ │ ├── utils.cc │ │ ├── utils.h │ │ ├── workspace.cc │ │ └── workspace.h │ ├── third_party │ │ └── utf │ │ │ ├── BUILD │ │ │ ├── README │ │ │ ├── rune.c │ │ │ ├── runestrcat.c │ │ │ ├── runestrchr.c │ │ │ ├── runestrcmp.c │ │ │ ├── runestrcpy.c │ │ │ ├── runestrdup.c │ │ │ ├── runestrecpy.c │ │ │ ├── runestrlen.c │ │ │ ├── runestrncat.c │ │ │ ├── runestrncmp.c │ │ │ ├── runestrncpy.c │ │ │ ├── runestrrchr.c │ │ │ ├── runestrstr.c │ │ │ ├── runetype.c │ │ │ ├── runetypebody.c │ │ │ ├── utf.h │ │ │ ├── utfdef.h │ │ │ ├── utfecpy.c │ │ │ ├── utflen.c │ │ │ ├── utfnlen.c │ │ │ ├── utfrrune.c │ │ │ ├── utfrune.c │ │ │ └── utfutf.c │ ├── tools │ │ └── bazel.rc │ ├── universal.md │ └── util │ │ └── utf8 │ │ ├── BUILD │ │ ├── gtest_main.cc │ │ ├── unicodetext.cc │ │ ├── unicodetext.h │ │ ├── unicodetext_main.cc │ │ ├── unicodetext_unittest.cc │ │ ├── unilib.cc │ │ ├── unilib.h │ │ └── unilib_utf8_utils.h │ ├── textsum │ ├── BUILD │ ├── README.md │ ├── batch_reader.py │ ├── beam_search.py │ ├── data.py │ ├── data │ │ ├── data │ │ └── vocab │ ├── data_convert_example.py │ ├── seq2seq_attention.py │ ├── seq2seq_attention_decode.py │ ├── seq2seq_attention_model.py │ └── seq2seq_lib.py │ ├── transformer │ ├── README.md │ ├── cluttered_mnist.py │ ├── data │ │ └── README.md │ ├── example.py │ ├── spatial_transformer.py │ └── tf_utils.py │ └── video_prediction │ ├── README.md │ ├── download_data.sh │ ├── lstm_ops.py │ ├── prediction_input.py │ ├── prediction_model.py │ ├── prediction_train.py │ └── push_datafiles.txt ├── preprocess ├── activitynet │ ├── activity_net.v1-3.min.json │ ├── download_video.py │ ├── generate_frames.py │ ├── generate_roidb_training.py │ ├── generate_roidb_validation.py │ └── util.py ├── charades │ ├── generate_frames.py │ ├── generate_roidb_training.py │ ├── generate_roidb_validation.py │ ├── segment.txt │ └── util.py └── thumos14 │ ├── generate_frames.py │ ├── generate_roidb_training.py │ ├── generate_roidb_validation.py │ ├── segment.txt │ └── util.py ├── requirements.txt ├── script_test.sh ├── script_train.sh ├── test_net.py └── trainval_net.py /.gitignore: -------------------------------------------------------------------------------- 1 | ## General 2 | 3 | # Compiled Object files 4 | *.slo 5 | *.lo 6 | *.o 7 | *.cuo 8 | 9 | # Compiled Dynamic libraries 10 | *.so 11 | *.dylib 12 | 13 | # Compiled Static libraries 14 | *.lai 15 | *.la 16 | *.a 17 | 18 | # Compiled python 19 | *.pyc 20 | 21 | # Compiled MATLAB 22 | *.mex* 23 | 24 | # IPython notebook checkpoints 25 | .ipynb_checkpoints 26 | 27 | # Editor temporaries 28 | *.swp 29 | *~ 30 | 31 | # Sublime Text settings 32 | *.sublime-workspace 33 | *.sublime-project 34 | 35 | # Eclipse 
Project settings 36 | *.*project 37 | .settings 38 | 39 | # QtCreator files 40 | *.user 41 | 42 | # PyCharm files 43 | .idea 44 | 45 | # Data and models are either 46 | # 1. reference, and not casually committed 47 | # 2. custom, and live on their own unless they're deliberately contributed 48 | data/* 49 | models/* 50 | *.caffemodel 51 | *.caffemodel.h5 52 | *.solverstate 53 | *.solverstate.h5 54 | *.binaryproto 55 | *leveldb 56 | *lmdb 57 | *.pkl 58 | *.bin 59 | 60 | # LevelDB files 61 | *.sst 62 | *.ldb 63 | LOCK 64 | LOG* 65 | CURRENT 66 | MANIFEST-* 67 | 68 | # Log files 69 | logs 70 | logs/* 71 | 72 | # Output files 73 | output/* 74 | -------------------------------------------------------------------------------- /_init_paths.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | import sys 3 | 4 | def add_path(path): 5 | if path not in sys.path: 6 | sys.path.insert(0, path) 7 | 8 | this_dir = osp.dirname(__file__) 9 | 10 | # Add lib to PYTHONPATH 11 | lib_path = osp.join(this_dir, 'lib') 12 | add_path(lib_path) 13 | -------------------------------------------------------------------------------- /cfgs/c3d_activitynet.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | 19 | 20 | TEST: 21 | NMS: 0.4 22 | 23 | RPN_MIN_SIZE: 0 24 | RPN_NMS_THRESH: 0.9 25 | 26 | INPUT: "video" 27 | POOLING_LENGTH: 1 28 | POOLING_HEIGHT: 4 29 | POOLING_WIDTH: 4 30 | DEDUP_TWINS: 0.125 31 | FEAT_STRIDE: [8, ] 32 | #NUM_CLASSES: 21 33 | #ANCHOR_SCALES: [1,2,] 34 | -------------------------------------------------------------------------------- /cfgs/c3d_thumos14.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | 19 | 20 | TEST: 21 | NMS: 0.4 22 | 23 | RPN_MIN_SIZE: 0 24 | RPN_NMS_THRESH: 0.9 25 | 26 | INPUT: "video" 27 | POOLING_LENGTH: 4 28 | POOLING_HEIGHT: 2 29 | POOLING_WIDTH: 2 30 | DEDUP_TWINS: 0.125 31 | FEAT_STRIDE: [8, ] 32 | #NUM_CLASSES: 21 33 | #ANCHOR_SCALES: [1,2,] 34 | -------------------------------------------------------------------------------- /cfgs/res18_activitynet.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | BATCH_SIZE: 128 19 | HARD_MINING_RATIO: 1.0 20 | 21 | 22 | TEST: 23 | NMS: 0.4 24 | 25 | RPN_MIN_SIZE: 0 26 | RPN_NMS_THRESH: 0.9 27 | 28 | INPUT: "video" 29 | POOLING_LENGTH: 16
30 | POOLING_HEIGHT: 4 31 | POOLING_WIDTH: 4 32 | DEDUP_TWINS: 0.125 33 | FEAT_STRIDE: [8, ] 34 | 35 | RESNET: 36 | FIXED_BLOCKS: 0 37 | 38 | # session 2 39 | #ROI_CTX_SCALE: [1.5] 40 | RPN_HAS_MASK: False 41 | POOLING_MODE: pool 42 | USE_ATTENTION: False 43 | -------------------------------------------------------------------------------- /cfgs/res18_thumos14.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | BATCH_SIZE: 128 19 | HARD_MINING_RATIO: 1.0 20 | 21 | 22 | TEST: 23 | NMS: 0.4 24 | 25 | RPN_MIN_SIZE: 0 26 | RPN_NMS_THRESH: 0.9 27 | 28 | INPUT: "video" 29 | POOLING_LENGTH: 4 30 | POOLING_HEIGHT: 2 31 | POOLING_WIDTH: 2 32 | DEDUP_TWINS: 0.125 33 | FEAT_STRIDE: [8, ] 34 | 35 | RESNET: 36 | FIXED_BLOCKS: 0 37 | 38 | # session 2 39 | #ROI_CTX_SCALE: [1.5] 40 | RPN_HAS_MASK: False 41 | POOLING_MODE: pool 42 | USE_ATTENTION: False 43 | -------------------------------------------------------------------------------- /cfgs/res34_activitynet.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | 19 | 20 | TEST: 21 | NMS: 0.4 22 | 23 | RPN_MIN_SIZE: 0 24 | RPN_NMS_THRESH: 0.9 25 | 26 | INPUT: "video" 27 | POOLING_LENGTH: 16 28 | POOLING_HEIGHT: 4 29 | POOLING_WIDTH: 4 30 | DEDUP_TWINS: 0.125 31 | FEAT_STRIDE: [8, ] 32 | 33 | RESNET: 34 | FIXED_BLOCKS: 0 35 | #NUM_CLASSES: 21 36 | #ANCHOR_SCALES: [1,2,] 37 | -------------------------------------------------------------------------------- /cfgs/res34_thumos14.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | 19 | 20 | TEST: 21 | NMS: 0.4 22 | 23 | RPN_MIN_SIZE: 0 24 | RPN_NMS_THRESH: 0.9 25 | 26 | INPUT: "video" 27 | POOLING_LENGTH: 4 28 | POOLING_HEIGHT: 2 29 | POOLING_WIDTH: 2 30 | DEDUP_TWINS: 0.125 31 | FEAT_STRIDE: [8, ] 32 | 33 | RESNET: 34 | FIXED_BLOCKS: 0 35 | #NUM_CLASSES: 21 36 | #ANCHOR_SCALES: [1,2,] 37 | -------------------------------------------------------------------------------- /cfgs/res50_activitynet.yml: -------------------------------------------------------------------------------- 1 | TRAIN: 2 | FRAME_SIZE: [128, 171] # [h, w] 3 | CROP_SIZE: 112 4 | LENGTH: [768] 5 | TWIN_NORMALIZE_TARGETS_PRECOMPUTED: True 6 | 7 | RPN_POSITIVE_OVERLAP: 0.7 8 | RPN_NEGATIVE_OVERLAP: 0.3 9 | RPN_FG_FRACTION: 0.5 10 | RPN_BATCHSIZE: 64 11 | RPN_MIN_SIZE: 0 12 | RPN_NMS_THRESH: 0.8 13 | 14 | 
FG_FRACTION: 0.5 15 | FG_THRESH: 0.5 16 | BG_THRESH_LO: 0.1 17 | BG_THRESH_HI: 0.5 18 | 19 | 20 | TEST: 21 | NMS: 0.4 22 | 23 | RPN_MIN_SIZE: 0 24 | RPN_NMS_THRESH: 0.9 25 | 26 | INPUT: "video" 27 | POOLING_LENGTH: 16 28 | POOLING_HEIGHT: 4 29 | POOLING_WIDTH: 4 30 | DEDUP_TWINS: 0.125 31 | FEAT_STRIDE: [8, ] 32 | 33 | RESNET: 34 | FIXED_BLOCKS: 0 35 | #NUM_CLASSES: 21 36 | #ANCHOR_SCALES: [1,2,] 37 | -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/README.md: -------------------------------------------------------------------------------- 1 | # ActivityNet Large Scale Activity Recognition Challenge - Evaluation Toolkit 2 | This is the documentation of the ActivityNet Large Scale Activity Recognition 3 | Challenge Evaluation Toolkit. It includes APIs to evaluate the performance of a method on the two different tasks in the challenge: *untrimmed video classification* and *activity detection*. For more information about the challenge competitions, please read the [guidelines](http://activity-net.org/challenges/2016/guidelines.html). 4 | 5 | ## Dependencies 6 | The Evaluation Toolkit is written purely in Python (>=2.7) and requires the 7 | following third-party libraries: 8 | * [Numpy](http://www.numpy.org/) 9 | * [Pandas](http://pandas.pydata.org/) 10 | 11 | ## Getting started 12 | We include sample prediction files in the folder `data` to show how to evaluate your prediction results. Please follow these steps to obtain the performance evaluation on the provided sample files: 13 | * Clone this repository with `git clone`. 14 | * To evaluate classification performance, call: `python get_classification_performance.py data/activity_net.v1-3.min.json data/sample_classification_prediction.json` 15 | * To evaluate detection performance, call: `python get_detection_performance.py data/activity_net.v1-3.min.json data/sample_detection_prediction.json` 16 | 17 | ## Contributions and Troubleshooting 18 | We welcome contributions; please keep your pull request simple so we can get back to you as soon as possible. If you find a bug, please open a new issue and describe the problem.
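Both command-line scripts are thin wrappers around the evaluator classes, so the toolkit can also be driven from Python. Below is a minimal sketch of programmatic use, mirroring `get_detection_performance.py`; it assumes you run it from this folder with the sample files under `data/`:

```python
import numpy as np

from eval_detection import ANETdetection

# Detection mAP averaged over the challenge tIoU thresholds 0.5:0.05:0.95;
# pass a single threshold (e.g. np.array([0.5])) to evaluate one
# operating point only.
anet_detection = ANETdetection('data/activity_net.v1-3.min.json',
                               'data/sample_detection_prediction.json',
                               subset='validation',
                               tiou_thresholds=np.linspace(0.5, 0.95, 10),
                               verbose=True, check_status=True)
anet_detection.evaluate()
```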
19 | -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/evaluation/activitynet/Evaluation/__init__.py -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/get_classification_performance.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from eval_classification import ANETclassification 4 | 5 | def main(ground_truth_filename, prediction_filename, 6 | subset='validation', verbose=True, check_status=True): 7 | anet_classification = ANETclassification(ground_truth_filename, 8 | prediction_filename, 9 | subset=subset, verbose=verbose, 10 | check_status=check_status) 11 | anet_classification.evaluate() 12 | 13 | def parse_input(): 14 | description = ('This script allows you to evaluate the ActivityNet ' 15 | 'untrimmed video classification task which is intended to ' 16 | 'evaluate the ability of algorithms to predict activities ' 17 | 'in untrimmed video sequences.') 18 | p = argparse.ArgumentParser(description=description) 19 | p.add_argument('ground_truth_filename', 20 | help='Full path to json file containing the ground truth.') 21 | p.add_argument('prediction_filename', 22 | help='Full path to json file containing the predictions.') 23 | p.add_argument('--subset', default='validation', 24 | help=('String indicating subset to evaluate: ' 25 | '(training, validation)')) 26 | p.add_argument('--verbose', type=bool, default=True) 27 | p.add_argument('--check_status', type=bool, default=True) 28 | return p.parse_args() 29 | 30 | if __name__ == '__main__': 31 | args = parse_input() 32 | main(**vars(args)) 33 | -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/get_detection_performance.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | 4 | from eval_detection import ANETdetection 5 | 6 | def main(ground_truth_filename, prediction_filename, 7 | subset='validation', tiou_thresholds=np.linspace(0.5, 0.95, 10), 8 | verbose=True, check_status=True): 9 | 10 | anet_detection = ANETdetection(ground_truth_filename, prediction_filename, 11 | subset=subset, tiou_thresholds=tiou_thresholds, 12 | verbose=verbose, check_status=check_status) 13 | anet_detection.evaluate() 14 | 15 | def parse_input(): 16 | description = ('This script allows you to evaluate the ActivityNet ' 17 | 'detection task which is intended to evaluate the ability ' 18 | 'of algorithms to temporally localize activities in ' 19 | 'untrimmed video sequences.') 20 | p = argparse.ArgumentParser(description=description) 21 | p.add_argument('ground_truth_filename', 22 | help='Full path to json file containing the ground truth.') 23 | p.add_argument('prediction_filename', 24 | help='Full path to json file containing the predictions.') 25 | p.add_argument('--subset', default='validation', 26 | help=('String indicating subset to evaluate: ' 27 | '(training, validation)')) 28 | p.add_argument('--tiou_thresholds', type=float, default=np.linspace(0.5, 0.95, 10), 29 | help='Temporal intersection over union threshold.') 30 | p.add_argument('--verbose', type=bool, default=True) 31 | p.add_argument('--check_status', type=bool, default=True) 32
| return p.parse_args() 33 | 34 | if __name__ == '__main__': 35 | args = parse_input() 36 | main(**vars(args)) 37 | -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/get_kinetics_performance.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from eval_kinetics import ANETclassification 4 | 5 | def main(ground_truth_filename, prediction_filename, 6 | subset='validation', verbose=True, check_status=True): 7 | anet_classification = ANETclassification(ground_truth_filename, 8 | prediction_filename, 9 | subset=subset, verbose=verbose, 10 | check_status=check_status, top_k=1) 11 | anet_classification.evaluate() 12 | 13 | anet_classification = ANETclassification(ground_truth_filename, 14 | prediction_filename, 15 | subset=subset, verbose=verbose, 16 | check_status=check_status, top_k=5) 17 | anet_classification.evaluate() 18 | 19 | def parse_input(): 20 | description = ('This script allows you to evaluate the ActivityNet ' 21 | 'untrimmed video classification task which is intended to ' 22 | 'evaluate the ability of algorithms to predict activities ' 23 | 'in untrimmed video sequences.') 24 | p = argparse.ArgumentParser(description=description) 25 | p.add_argument('ground_truth_filename', 26 | help='Full path to json file containing the ground truth.') 27 | p.add_argument('prediction_filename', 28 | help='Full path to json file containing the predictions.') 29 | p.add_argument('--subset', default='validation', 30 | help=('String indicating subset to evaluate: ' 31 | '(training, validation)')) 32 | p.add_argument('--verbose', type=bool, default=True) 33 | p.add_argument('--check_status', type=bool, default=True) 34 | return p.parse_args() 35 | 36 | if __name__ == '__main__': 37 | args = parse_input() 38 | main(**vars(args)) 39 | -------------------------------------------------------------------------------- /evaluation/activitynet/Evaluation/get_proposal_performance.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | 4 | from eval_proposal import ANETproposal 5 | 6 | def main(ground_truth_filename, proposal_filename, max_avg_nr_proposals=100, 7 | tiou_thresholds=np.linspace(0.5, 0.95, 10), 8 | subset='validation', verbose=True, check_status=True): 9 | 10 | anet_proposal = ANETproposal(ground_truth_filename, proposal_filename, 11 | tiou_thresholds=tiou_thresholds, 12 | max_avg_nr_proposals=max_avg_nr_proposals, 13 | subset=subset, verbose=verbose, check_status=check_status) 14 | anet_proposal.evaluate() 15 | 16 | def parse_input(): 17 | description = ('This script allows you to evaluate the ActivityNet ' 18 | 'proposal task which is intended to evaluate the ability ' 19 | 'of algorithms to generate activity proposals that temporally ' 20 | 'localize activities in untrimmed video sequences.') 21 | p = argparse.ArgumentParser(description=description) 22 | p.add_argument('ground_truth_filename', 23 | help='Full path to json file containing the ground truth.') 24 | p.add_argument('proposal_filename', 25 | help='Full path to json file containing the proposals.') 26 | p.add_argument('--subset', default='validation', 27 | help=('String indicating subset to evaluate: ' 28 | '(training, validation)')) 29 | p.add_argument('--verbose', type=bool, default=True) 30 | p.add_argument('--check_status', type=bool, default=True) 31 | return p.parse_args() 32 | 33 | if __name__ == '__main__': 34 | args = parse_input() 35 |
main(**vars(args)) 36 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_test/BaseballPitch_test.txt: -------------------------------------------------------------------------------- 1 | video_test_0000324 49.2 53.5 2 | video_test_0000324 116.7 122.5 3 | video_test_0000664 1.3 4.8 4 | video_test_0000664 22.3 25.3 5 | video_test_0000664 56.8 59.7 6 | video_test_0000714 26.2 29.9 7 | video_test_0000714 37.0 39.1 8 | video_test_0000714 141.4 144.7 9 | video_test_0000714 150.1 152.7 10 | video_test_0000767 10.9 12.3 11 | video_test_0000873 6.3 9.0 12 | video_test_0000946 0.9 3.5 13 | video_test_0000946 13.1 17.4 14 | video_test_0000964 7.5 15.3 15 | video_test_0000964 42.5 47.3 16 | video_test_0000964 52.2 56.4 17 | video_test_0001038 50.6 54.2 18 | video_test_0001038 110.5 114.1 19 | video_test_0001182 1.3 3.7 20 | video_test_0001182 78.1 81.2 21 | video_test_0001182 89.9 92.9 22 | video_test_0001324 8.4 10.9 23 | video_test_0001324 18.2 20.2 24 | video_test_0001324 44.6 46.7 25 | video_test_0001324 54.7 57.3 26 | video_test_0001324 62.4 64.4 27 | video_test_0001324 72.2 75.2 28 | video_test_0001324 85.7 87.9 29 | video_test_0001324 95.9 98.5 30 | video_test_0001324 104.7 106.4 31 | video_test_0001324 119.7 121.0 32 | video_test_0001324 129.2 130.8 33 | video_test_0001324 131.0 132.2 34 | video_test_0001324 134.8 136.5 35 | video_test_0001324 136.7 137.7 36 | video_test_0001324 138.0 139.3 37 | video_test_0001324 139.9 141.5 38 | video_test_0001324 141.6 142.8 39 | video_test_0001324 143.0 144.1 40 | video_test_0001324 144.7 145.6 41 | video_test_0001447 172.7 175.3 42 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_test/FrisbeeCatch_test.txt: -------------------------------------------------------------------------------- 1 | video_test_0000413 2.5 4.8 2 | video_test_0000413 6.3 9.0 3 | video_test_0000413 9.6 11.4 4 | video_test_0000413 12.2 14.5 5 | video_test_0000413 17.2 19.8 6 | video_test_0000413 22.2 27.1 7 | video_test_0000413 32.0 35.2 8 | video_test_0000672 2.2 5.6 9 | video_test_0000672 7.7 10.6 10 | video_test_0000672 19.4 24.9 11 | video_test_0001135 7.9 10.4 12 | video_test_0001135 16.7 27.5 13 | video_test_0001135 29.7 38.4 14 | video_test_0001433 4.3 14.6 15 | video_test_0001508 4.9 7.6 16 | video_test_0001508 13.3 15.7 17 | video_test_0001508 39.3 42.3 18 | video_test_0001508 50.5 53.4 19 | video_test_0001508 56.5 59.7 20 | video_test_0001508 62.2 63.9 21 | video_test_0001508 64.4 66.0 22 | video_test_0001508 66.5 68.1 23 | video_test_0001508 68.8 72.0 24 | video_test_0001508 78.3 82.1 25 | video_test_0001508 84.3 87.4 26 | video_test_0001508 95.2 96.5 27 | video_test_0001508 97.0 101.7 28 | video_test_0001508 103.9 107.8 29 | video_test_0001508 110.3 112.1 30 | video_test_0001508 112.5 114.7 31 | video_test_0001508 115.8 118.3 32 | video_test_0001508 122.9 125.9 33 | video_test_0001508 126.1 128.9 34 | video_test_0001508 129.5 130.9 35 | video_test_0001508 136.8 138.6 36 | video_test_0001508 142.9 145.2 37 | video_test_0001508 155.5 157.2 38 | video_test_0001508 157.2 159.9 39 | video_test_0001508 166.2 168.5 40 | video_test_0001512 10.7 17.2 41 | video_test_0001512 25.5 33.7 42 | video_test_0001549 3.1 4.7 43 | video_test_0001549 4.9 5.9 44 | video_test_0001549 6.1 6.8 45 | video_test_0001549 7.8 10.2 46 | video_test_0001549 57.7 61.0 47 | video_test_0001549 70.6 74.5 48 | video_test_0001549 107.3 
111.0 49 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_test/GolfSwing_test.txt: -------------------------------------------------------------------------------- 1 | video_test_0000028 20.0 24.0 2 | video_test_0000028 29.4 32.8 3 | video_test_0000028 38.2 40.2 4 | video_test_0000028 42.2 44.4 5 | video_test_0000028 45.4 47.4 6 | video_test_0000028 55.0 58.5 7 | video_test_0000028 61.4 64.2 8 | video_test_0000028 66.6 70.2 9 | video_test_0000028 72.1 76.8 10 | video_test_0000028 93.5 96.8 11 | video_test_0000028 99.7 104.0 12 | video_test_0000028 105.4 109.4 13 | video_test_0000028 113.9 117.4 14 | video_test_0000028 131.9 135.8 15 | video_test_0000046 128.1 132.7 16 | video_test_0000046 147.4 150.9 17 | video_test_0000046 79.2 84.0 18 | video_test_0000046 170.5 173.6 19 | video_test_0000113 60.8 79.0 20 | video_test_0000238 9.6 15.7 21 | video_test_0000238 21.0 31.8 22 | video_test_0000238 38.5 78.2 23 | video_test_0000238 81.6 89.3 24 | video_test_0000611 88.6 90.9 25 | video_test_0000611 124.1 129.6 26 | video_test_0000611 158.2 159.9 27 | video_test_0000847 0.5 3.3 28 | video_test_0000847 221.6 232.1 29 | video_test_0001127 25.8 49.8 30 | video_test_0001127 50.1 69.9 31 | video_test_0001127 165.6 169.8 32 | video_test_0001163 5.2 8.0 33 | video_test_0001163 22.4 25.6 34 | video_test_0001163 33.8 40.5 35 | video_test_0001163 46.9 66.0 36 | video_test_0001163 78.4 83.0 37 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_test/SoccerPenalty_test.txt: -------------------------------------------------------------------------------- 1 | video_test_0000292 65.9 67.4 2 | video_test_0000541 1.7 4.8 3 | video_test_0000541 10.1 12.0 4 | video_test_0000591 10.5 11.6 5 | video_test_0000591 24.3 25.9 6 | video_test_0000689 62.8 65.4 7 | video_test_0000689 101.6 103.7 8 | video_test_0000701 0.9 2.4 9 | video_test_0000701 16.2 26.2 10 | video_test_0000701 33.9 35.8 11 | video_test_0000701 41.4 44.0 12 | video_test_0000991 8.1 11.7 13 | video_test_0000991 14.8 17.6 14 | video_test_0000991 26.2 28.9 15 | video_test_0000991 29.8 33.6 16 | video_test_0000991 34.0 38.3 17 | video_test_0000991 38.4 38.9 18 | video_test_0000991 39.0 39.5 19 | video_test_0000991 39.7 40.3 20 | video_test_0000991 45.9 49.6 21 | video_test_0000991 54.8 57.9 22 | video_test_0000991 60.0 66.6 23 | video_test_0000991 66.8 69.7 24 | video_test_0001118 22.6 27.5 25 | video_test_0001118 48.7 56.8 26 | video_test_0001153 69.5 72.1 27 | video_test_0001153 97.5 100.5 28 | video_test_0001153 101.9 106.2 29 | video_test_0001409 19.6 22.2 30 | video_test_0001409 57.7 59.8 31 | video_test_0001409 80.3 84.6 32 | video_test_0001409 100.4 103.4 33 | video_test_0001409 121.1 123.6 34 | video_test_0001409 141.1 146.7 35 | video_test_0001409 163.5 167.1 36 | video_test_0001484 53.7 55.8 37 | video_test_0001484 56.7 58.9 38 | video_test_0001484 59.4 61.4 39 | video_test_0001484 61.7 65.2 40 | video_test_0001484 65.8 67.9 41 | video_test_0001484 129.9 132.2 42 | video_test_0001484 132.9 135.3 43 | video_test_0001484 135.9 137.7 44 | video_test_0001484 138.3 141.7 45 | video_test_0001484 142.5 143.9 46 | video_test_0001484 198.1 200.2 47 | video_test_0001556 53.0 55.3 48 | video_test_0001556 78.2 83.8 49 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_test/detclasslist.txt: 
-------------------------------------------------------------------------------- 1 | 1 BaseballPitch 2 | 2 BasketballDunk 3 | 3 Billiards 4 | 4 CleanAndJerk 5 | 5 CliffDiving 6 | 6 CricketBowling 7 | 7 CricketShot 8 | 8 Diving 9 | 9 FrisbeeCatch 10 | 10 GolfSwing 11 | 11 HammerThrow 12 | 12 HighJump 13 | 13 JavelinThrow 14 | 14 LongJump 15 | 15 PoleVault 16 | 16 Shotput 17 | 17 SoccerPenalty 18 | 18 TennisSwing 19 | 19 ThrowDiscus 20 | 20 VolleyballSpiking 21 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_val/BaseballPitch_val.txt: -------------------------------------------------------------------------------- 1 | video_validation_0000266 72.8 76.4 2 | video_validation_0000681 44.0 50.9 3 | video_validation_0000682 1.5 5.4 4 | video_validation_0000682 79.3 83.9 5 | video_validation_0000683 0.3 5.5 6 | video_validation_0000683 7.5 13.5 7 | video_validation_0000683 15.9 18.1 8 | video_validation_0000683 23.8 25.7 9 | video_validation_0000684 21.8 23.3 10 | video_validation_0000684 40.0 43.1 11 | video_validation_0000684 63.7 66.6 12 | video_validation_0000685 18.7 23.4 13 | video_validation_0000685 23.4 26.7 14 | video_validation_0000685 43.8 48.1 15 | video_validation_0000685 55.1 58.2 16 | video_validation_0000685 63.2 66.8 17 | video_validation_0000685 67.2 71.3 18 | video_validation_0000685 78.4 81.5 19 | video_validation_0000686 2.4 5.0 20 | video_validation_0000687 5.1 7.1 21 | video_validation_0000687 17.7 20.5 22 | video_validation_0000688 77.6 81.9 23 | video_validation_0000688 103.7 106.7 24 | video_validation_0000688 127.3 130.1 25 | video_validation_0000689 0.5 4.9 26 | video_validation_0000690 5.7 10.6 27 | video_validation_0000690 14.9 18.1 28 | video_validation_0000690 35.0 38.0 29 | video_validation_0000690 156.8 161.3 30 | video_validation_0000690 251.6 256.2 31 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_val/CleanAndJerk_val.txt: -------------------------------------------------------------------------------- 1 | video_validation_0000151 10.2 22.9 2 | video_validation_0000152 12.8 28.4 3 | video_validation_0000152 56.0 62.6 4 | video_validation_0000152 98.1 124.8 5 | video_validation_0000153 4.3 10.9 6 | video_validation_0000153 16.3 21.1 7 | video_validation_0000153 28.5 36.8 8 | video_validation_0000153 56.2 73.0 9 | video_validation_0000153 85.7 101.0 10 | video_validation_0000153 121.4 141.6 11 | video_validation_0000154 5.4 11.6 12 | video_validation_0000154 13.0 19.7 13 | video_validation_0000154 21.4 29.3 14 | video_validation_0000155 35.6 45.2 15 | video_validation_0000156 3.5 10.7 16 | video_validation_0000156 19.5 27.2 17 | video_validation_0000156 116.8 121.5 18 | video_validation_0000156 127.2 133.4 19 | video_validation_0000156 144.9 151.4 20 | video_validation_0000156 164.8 173.9 21 | video_validation_0000156 184.7 192.9 22 | video_validation_0000156 226.1 234.6 23 | video_validation_0000157 0.3 9.6 24 | video_validation_0000157 10.7 49.6 25 | video_validation_0000158 3.9 16.1 26 | video_validation_0000158 24.4 34.6 27 | video_validation_0000158 47.0 165.1 28 | video_validation_0000158 215.2 224.3 29 | video_validation_0000158 235.8 244.2 30 | video_validation_0000158 249.0 258.8 31 | video_validation_0000158 262.7 272.8 32 | video_validation_0000159 35.2 51.6 33 | video_validation_0000159 98.8 123.2 34 | video_validation_0000159 133.8 136.3 35 | video_validation_0000159 230.3 
246.1 36 | video_validation_0000159 266.5 274.0 37 | video_validation_0000159 368.2 388.6 38 | video_validation_0000159 400.2 411.6 39 | video_validation_0000160 46.3 67.3 40 | video_validation_0000160 94.9 108.5 41 | video_validation_0000160 237.8 255.2 42 | video_validation_0000160 301.2 319.6 43 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_val/GolfSwing_val.txt: -------------------------------------------------------------------------------- 1 | video_validation_0000281 169.7 175.3 2 | video_validation_0000281 221.6 225.3 3 | video_validation_0000282 7.7 15.5 4 | video_validation_0000282 29.3 32.4 5 | video_validation_0000282 32.8 35.8 6 | video_validation_0000283 0.0 2.3 7 | video_validation_0000283 11.1 61.3 8 | video_validation_0000284 13.0 19.2 9 | video_validation_0000284 22.4 42.7 10 | video_validation_0000284 43.9 47.0 11 | video_validation_0000284 47.6 50.8 12 | video_validation_0000285 74.2 81.8 13 | video_validation_0000286 172.4 178.6 14 | video_validation_0000287 29.2 33.5 15 | video_validation_0000287 39.9 44.9 16 | video_validation_0000287 57.9 63.8 17 | video_validation_0000287 77.3 88.7 18 | video_validation_0000288 79.5 84.0 19 | video_validation_0000288 105.8 109.2 20 | video_validation_0000289 3.0 25.9 21 | video_validation_0000289 32.8 123.3 22 | video_validation_0000290 2.6 5.8 23 | video_validation_0000290 10.3 43.4 24 | video_validation_0000290 45.5 53.9 25 | video_validation_0000290 55.5 60.3 26 | video_validation_0000290 63.0 65.6 27 | video_validation_0000932 2.8 3.3 28 | video_validation_0000932 16.0 16.6 29 | video_validation_0000932 16.7 17.3 30 | video_validation_0000932 17.4 18.0 31 | video_validation_0000932 27.1 28.5 32 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/annotation/annotation_val/detclasslist.txt: -------------------------------------------------------------------------------- 1 | 1 BaseballPitch 2 | 2 BasketballDunk 3 | 3 Billiards 4 | 4 CleanAndJerk 5 | 5 CliffDiving 6 | 6 CricketBowling 7 | 7 CricketShot 8 | 8 Diving 9 | 9 FrisbeeCatch 10 | 10 GolfSwing 11 | 11 HammerThrow 12 | 12 HighJump 13 | 13 JavelinThrow 14 | 14 LongJump 15 | 15 PoleVault 16 | 16 Shotput 17 | 17 SoccerPenalty 18 | 18 TennisSwing 19 | 19 ThrowDiscus 20 | 20 VolleyballSpiking 21 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/intervaloverlapvalseconds.m: -------------------------------------------------------------------------------- 1 | function ov=intervaloverlapvalseconds(i1,i2,normtype,gt,det) 2 | % 3 | 4 | 5 | if nargin<3 normtype=0; end 6 | 7 | ov=zeros(size(i1,1),size(i2,1)); 8 | for i=1:size(i1,1) 9 | for j=1:size(i2,1) 10 | ov(i,j)=intervalsingleoverlapvalseconds(i1(i,:),i2(j,:),normtype); 11 | if nargin==5 12 | ov(i,j)=ov(i,j)*strcmp(gt(i).class,det(j).class); 13 | end 14 | end 15 | end 16 | 17 | function ov=intervalsingleoverlapvalseconds(i1,i2,normtype) 18 | 19 | 20 | 21 | i1=[min(i1) max(i1)]; 22 | i2=[min(i2) max(i2)]; 23 | 24 | ov=0; 25 | if normtype<0 ua=1; 26 | elseif normtype==1 27 | ua=(i1(2)-i1(1)); 28 | elseif normtype==2 29 | ua=(i2(2)-i2(1)); 30 | else 31 | bu=[min(i1(1),i2(1)) ; max(i1(2),i2(2))]; 32 | ua=(bu(2)-bu(1)); 33 | end 34 | 35 | bi=[max(i1(1),i2(1)) ; min(i1(2),i2(2))]; 36 | iw=bi(2)-bi(1); 37 | if iw>0 38 | if normtype<0 % no normalization! 
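% ua above is the normalizer picked by normtype: the length of i1 for
% normtype==1, the length of i2 for normtype==2, and the length of the
% union of the two intervals otherwise, so the ov=iw/ua branch below is
% the usual temporal IoU in the default case; a negative normtype skips
% the division and returns the raw intersection width iw.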
39 | ov=iw; 40 | else 41 | ov=iw/ua; 42 | end 43 | end 44 | 45 | 46 | %i1=i1(:)'; 47 | %i2=i2(:)'; 48 | 49 | %ov=0; 50 | %[vs,is]=sort([i1(1:2) i2(1:2)]); 51 | %ind=[1 1 2 2]; 52 | %inds=ind(is); 53 | %if inds(1)~=inds(2) 54 | % ov=vs(3)-vs(2); 55 | %end 56 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/nms_temporal.m: -------------------------------------------------------------------------------- 1 | function pick = nms_temporal(boxes, overlap) 2 | % 3 | % top = nms(boxes, overlap) 4 | % Non-maximum suppression. (FAST VERSION) 5 | % Greedily select high-scoring detections and skip detections 6 | % that are significantly covered by a previously selected 7 | % detection. 8 | % 9 | % NOTE: This is adapted from Pedro Felzenszwalb's version (nms.m), 10 | % but an inner loop has been eliminated to significantly speed it 11 | % up in the case of a large number of boxes 12 | 13 | % Copyright (C) 2011-12 by Tomasz Malisiewicz 14 | % All rights reserved. 15 | % 16 | % This file is part of the Exemplar-SVM library and is made 17 | % available under the terms of the MIT license (see COPYING file). 18 | % Project homepage: https://github.com/quantombone/exemplarsvm 19 | 20 | 21 | if isempty(boxes) 22 | pick = []; 23 | return; 24 | end 25 | 26 | x1 = boxes(:,1); 27 | x2 = boxes(:,2); 28 | s = boxes(:,end); 29 | 30 | union = x2 - x1; 31 | [vals, I] = sort(s); 32 | 33 | pick = s*0; 34 | counter = 1; 35 | while ~isempty(I) 36 | last = length(I); 37 | i = I(last); 38 | pick(counter) = i; 39 | counter = counter + 1; 40 | 41 | xx1 = max(x1(i), x1(I(1:last-1))); 42 | xx2 = min(x2(i), x2(I(1:last-1))); 43 | 44 | w = max(0.0, xx2-xx1); 45 | 46 | inter = w; 47 | o = inter ./ (union(i) + union(I(1:last-1)) - inter); 48 | 49 | I = I(find(o<=overlap)); 50 | end 51 | 52 | pick = pick(1:(counter-1)); 53 | -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/res_thumos14.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/evaluation/thumos14/Evaluation/res_thumos14.mat -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/transform_reg_target_inv.m: -------------------------------------------------------------------------------- 1 | function target_intervals = transform_reg_target_inv(src_intervals, reg_label) 2 | %input: src_intervals (one [start, end] row per interval), reg_label ([p_t, p_l] rows) 3 | %output: target_intervals 4 | %t = p_t * c_src + c_src, l = l_src * exp(p_l) 5 | %t* = p_t* * c_src + c_src, l* = l_src * exp(p_l*) 6 | %where c_src = 0.5 * (start + end) and l_src = end - start are the center and length of each source interval 7 | 8 | center_intervals_src = 0.5 * (src_intervals(:, 2) + src_intervals(:, 1)); 9 | len_intervals_src = src_intervals(:, 2) - src_intervals(:, 1); 10 | 11 | t_target = reg_label(:, 1) .* center_intervals_src + center_intervals_src; 12 | l_target = len_intervals_src .* exp(reg_label(:,2)); 13 | 14 | target_intervals = [t_target - 0.5 * l_target, t_target + 0.5 * l_target]; 15 | end -------------------------------------------------------------------------------- /evaluation/thumos14/Evaluation/untitled.m: -------------------------------------------------------------------------------- 1 | % variable: ov 2 | clear all; clf; 3 | sigma = 0.5; 4 | ov = 0:0.1:1; 5 | 6 | figure(1); 7 | hold on; 8 | for score=0.3:0.3:1 9 | f = score .* exp(ov.^2 ./sigma); 10 | plot(ov, f); 11 | end 12 | hold off; 13 | title('Adjust score along
overlap, with score fixed'); 14 | -------------------------------------------------------------------------------- /evaluation/thumos14/record: -------------------------------------------------------------------------------- 1 | mAP = 2 | 0.5938 0.5885 0.5585 0.4876 0.3735 0.2863 0.1678 3 | 4 | # 2gpu,epoch4 $(RC3D_ROOT)/output/c3d/thumos14/test_log_.txt.2018-10-18_11-26-14 5 | mAP = 6 | 0.5480 0.5416 0.5150 0.4629 0.3564 0.2762 0.1592 7 | 8 | # 2gpu,epoch5 $(RC3D_ROOT)/output/c3d/thumos14/test_log_.txt.2018-10-18_11-25-14 9 | mAP = 10 | 0.5527 0.5491 0.5244 0.4664 0.3549 0.2698 0.1644 11 | -------------------------------------------------------------------------------- /lib/datasets/VOCdevkit-matlab-wrapper/get_voc_opts.m: -------------------------------------------------------------------------------- 1 | function VOCopts = get_voc_opts(path) 2 | 3 | tmp = pwd; 4 | cd(path); 5 | try 6 | addpath('VOCcode'); 7 | VOCinit; 8 | catch 9 | rmpath('VOCcode'); 10 | cd(tmp); 11 | error(sprintf('VOCcode directory not found under %s', path)); 12 | end 13 | rmpath('VOCcode'); 14 | cd(tmp); 15 | -------------------------------------------------------------------------------- /lib/datasets/VOCdevkit-matlab-wrapper/voc_eval.m: -------------------------------------------------------------------------------- 1 | function res = voc_eval(path, comp_id, test_set, output_dir) 2 | 3 | VOCopts = get_voc_opts(path); 4 | VOCopts.testset = test_set; 5 | 6 | for i = 1:length(VOCopts.classes) 7 | cls = VOCopts.classes{i}; 8 | res(i) = voc_eval_cls(cls, VOCopts, comp_id, output_dir); 9 | end 10 | 11 | fprintf('\n~~~~~~~~~~~~~~~~~~~~\n'); 12 | fprintf('Results:\n'); 13 | aps = [res(:).ap]'; 14 | fprintf('%.1f\n', aps * 100); 15 | fprintf('%.1f\n', mean(aps) * 100); 16 | fprintf('~~~~~~~~~~~~~~~~~~~~\n'); 17 | 18 | function res = voc_eval_cls(cls, VOCopts, comp_id, output_dir) 19 | 20 | test_set = VOCopts.testset; 21 | year = VOCopts.dataset(4:end); 22 | 23 | addpath(fullfile(VOCopts.datadir, 'VOCcode')); 24 | 25 | res_fn = sprintf(VOCopts.detrespath, comp_id, cls); 26 | 27 | recall = []; 28 | prec = []; 29 | ap = 0; 30 | ap_auc = 0; 31 | 32 | do_eval = (str2num(year) <= 2007) | ~strcmp(test_set, 'test'); 33 | if do_eval 34 | % Bug in VOCevaldet requires that tic has been called first 35 | tic; 36 | [recall, prec, ap] = VOCevaldet(VOCopts, comp_id, cls, true); 37 | ap_auc = xVOCap(recall, prec); 38 | 39 | % force plot limits 40 | ylim([0 1]); 41 | xlim([0 1]); 42 | 43 | print(gcf, '-djpeg', '-r0', ... 44 | [output_dir '/' cls '_pr.jpg']); 45 | end 46 | fprintf('!!! %s : %.4f %.4f\n', cls, ap, ap_auc); 47 | 48 | res.recall = recall; 49 | res.prec = prec; 50 | res.ap = ap; 51 | res.ap_auc = ap_auc; 52 | 53 | save([output_dir '/' cls '_pr.mat'], ... 
54 | 'res', 'recall', 'prec', 'ap', 'ap_auc'); 55 | 56 | rmpath(fullfile(VOCopts.datadir, 'VOCcode')); 57 | -------------------------------------------------------------------------------- /lib/datasets/VOCdevkit-matlab-wrapper/xVOCap.m: -------------------------------------------------------------------------------- 1 | function ap = xVOCap(rec,prec) 2 | % From the PASCAL VOC 2011 devkit 3 | 4 | mrec=[0 ; rec ; 1]; 5 | mpre=[0 ; prec ; 0]; 6 | for i=numel(mpre)-1:-1:1 7 | mpre(i)=max(mpre(i),mpre(i+1)); 8 | end 9 | i=find(mrec(2:end)~=mrec(1:end-1))+1; 10 | ap=sum((mrec(i)-mrec(i-1)).*mpre(i)); 11 | -------------------------------------------------------------------------------- /lib/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /lib/datasets/ds_utils.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast/er R-CNN 3 | # Licensed under The MIT License [see LICENSE for details] 4 | # Written by Ross Girshick 5 | # -------------------------------------------------------- 6 | from __future__ import absolute_import 7 | from __future__ import division 8 | from __future__ import print_function 9 | 10 | import numpy as np 11 | 12 | 13 | def unique_boxes(boxes, scale=1.0): 14 | """Return indices of unique boxes.""" 15 | v = np.array([1, 1e3, 1e6, 1e9]) 16 | hashes = np.round(boxes * scale).dot(v) 17 | _, index = np.unique(hashes, return_index=True) 18 | return np.sort(index) 19 | 20 | 21 | def xywh_to_xyxy(boxes): 22 | """Convert [x y w h] box format to [x1 y1 x2 y2] format.""" 23 | return np.hstack((boxes[:, 0:2], boxes[:, 0:2] + boxes[:, 2:4] - 1)) 24 | 25 | 26 | def xyxy_to_xywh(boxes): 27 | """Convert [x1 y1 x2 y2] box format to [x y w h] format.""" 28 | return np.hstack((boxes[:, 0:2], boxes[:, 2:4] - boxes[:, 0:2] + 1)) 29 | 30 | 31 | def validate_boxes(boxes, width=0, height=0): 32 | """Check that a set of boxes are valid.""" 33 | x1 = boxes[:, 0] 34 | y1 = boxes[:, 1] 35 | x2 = boxes[:, 2] 36 | y2 = boxes[:, 3] 37 | assert (x1 >= 0).all() 38 | assert (y1 >= 0).all() 39 | assert (x2 >= x1).all() 40 | assert (y2 >= y1).all() 41 | assert (x2 < width).all() 42 | assert (y2 < height).all() 43 | 44 | 45 | def filter_small_boxes(boxes, min_size): 46 | w = boxes[:, 2] - boxes[:, 0] 47 | h = boxes[:, 3] - boxes[:, 1] 48 | keep = np.where((w >= min_size) & (h > min_size))[0] 49 | return keep 50 | -------------------------------------------------------------------------------- /lib/datasets/tools/mcg_munge.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import sys 4 | 5 | """Hacky tool to convert file system layout of MCG boxes downloaded from 6 | http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/mcg/ 7 | so that it's consistent with those computed by Jan Hosang (see: 8 | http://www.mpi-inf.mpg.de/departments/computer-vision-and-multimodal- 9 | computing/research/object-recognition-and-scene-understanding/how- 10 | good-are-detection-proposals-really/) 11 | 12 | NB: Boxes from the 
MCG website are in (y1, x1, y2, x2) order. 13 | Boxes from Hosang et al. are in (x1, y1, x2, y2) order. 14 | """ 15 | 16 | def munge(src_dir): 17 | # stored as: ./MCG-COCO-val2014-boxes/COCO_val2014_000000193401.mat 18 | # want: ./MCG/mat/COCO_val2014_0/COCO_val2014_000000141/COCO_val2014_000000141334.mat 19 | 20 | files = os.listdir(src_dir) 21 | for fn in files: 22 | base, ext = os.path.splitext(fn) 23 | # first 14 chars / first 22 chars / all chars + .mat 24 | # COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat 25 | first = base[:14] 26 | second = base[:22] 27 | dst_dir = os.path.join('MCG', 'mat', first, second) 28 | if not os.path.exists(dst_dir): 29 | os.makedirs(dst_dir) 30 | src = os.path.join(src_dir, fn) 31 | dst = os.path.join(dst_dir, fn) 32 | print('MV: {} -> {}'.format(src, dst)) 33 | os.rename(src, dst) 34 | 35 | if __name__ == '__main__': 36 | # src_dir should look something like: 37 | # src_dir = 'MCG-COCO-val2014-boxes' 38 | src_dir = sys.argv[1] 39 | munge(src_dir) 40 | -------------------------------------------------------------------------------- /lib/make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CUDA_PATH=/usr/local/cuda/ 4 | 5 | #python setup.py build_ext --inplace 6 | #rm -rf build 7 | 8 | CUDA_ARCH="-gencode arch=compute_30,code=sm_30 \ 9 | -gencode arch=compute_35,code=sm_35 \ 10 | -gencode arch=compute_50,code=sm_50 \ 11 | -gencode arch=compute_52,code=sm_52 \ 12 | -gencode arch=compute_60,code=sm_60 \ 13 | -gencode arch=compute_61,code=sm_61 " 14 | 15 | # compile NMS 16 | cd model/nms/src 17 | echo "Compiling nms kernels by nvcc..." 18 | nvcc -c -o nms_cuda_kernel.cu.o nms_cuda_kernel.cu \ 19 | -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH 20 | 21 | cd ../ 22 | python build.py 23 | 24 | # compile roi_temporal_pooling 25 | cd ../../ 26 | cd model/roi_temporal_pooling/src 27 | echo "Compiling roi temporal pooling kernels by nvcc..." 
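# NOTE (editor's note): the build.py scripts invoked below rely on torch.utils.ffi,
# which was removed in PyTorch 1.0, so this build flow assumes an older (<= 0.4) PyTorch.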
28 | nvcc -c -o roi_temporal_pooling_kernel.cu.o roi_temporal_pooling_kernel.cu \ 29 | -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH 30 | cd ../ 31 | python build.py 32 | -------------------------------------------------------------------------------- /lib/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/__init__.py -------------------------------------------------------------------------------- /lib/model/nms/.gitignore: -------------------------------------------------------------------------------- 1 | *.so 2 | -------------------------------------------------------------------------------- /lib/model/nms/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/nms/__init__.py -------------------------------------------------------------------------------- /lib/model/nms/_ext/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/nms/_ext/__init__.py -------------------------------------------------------------------------------- /lib/model/nms/_ext/nms/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from torch.utils.ffi import _wrap_function 3 | from ._nms import lib as _lib, ffi as _ffi 4 | 5 | __all__ = [] 6 | def _import_symbols(locals): 7 | for symbol in dir(_lib): 8 | fn = getattr(_lib, symbol) 9 | if callable(fn): 10 | locals[symbol] = _wrap_function(fn, _ffi) 11 | else: 12 | locals[symbol] = fn 13 | __all__.append(symbol) 14 | 15 | _import_symbols(locals()) 16 | -------------------------------------------------------------------------------- /lib/model/nms/build.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import torch 4 | from torch.utils.ffi import create_extension 5 | 6 | #this_file = os.path.dirname(__file__) 7 | 8 | sources = [] 9 | headers = [] 10 | defines = [] 11 | with_cuda = False 12 | 13 | if torch.cuda.is_available(): 14 | print('Including CUDA code.') 15 | sources += ['src/nms_cuda.c'] 16 | headers += ['src/nms_cuda.h'] 17 | defines += [('WITH_CUDA', None)] 18 | with_cuda = True 19 | 20 | this_file = os.path.dirname(os.path.realpath(__file__)) 21 | print(this_file) 22 | extra_objects = ['src/nms_cuda_kernel.cu.o'] 23 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects] 24 | print(extra_objects) 25 | 26 | ffi = create_extension( 27 | '_ext.nms', 28 | headers=headers, 29 | sources=sources, 30 | define_macros=defines, 31 | relative_to=__file__, 32 | with_cuda=with_cuda, 33 | extra_objects=extra_objects 34 | ) 35 | 36 | if __name__ == '__main__': 37 | ffi.build() 38 | -------------------------------------------------------------------------------- /lib/model/nms/make.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # CUDA_PATH=/usr/local/cuda/ 4 | 5 | cd src 6 | echo "Compiling nms kernels by nvcc..."
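# NOTE (editor's note): unlike lib/make.sh, which compiles for several
# architectures via $CUDA_ARCH, this standalone script hard-codes -arch=sm_52;
# adjust the flag below to match your GPU's compute capability.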
7 | nvcc -c -o nms_cuda_kernel.cu.o nms_cuda_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_52 8 | 9 | cd ../ 10 | python build.py 11 | -------------------------------------------------------------------------------- /lib/model/nms/nms_cpu.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | import torch 5 | 6 | def nms_cpu(dets, thresh): 7 | dets = dets.numpy() 8 | x1 = dets[:, 0] 9 | x2 = dets[:, 1] 10 | scores = dets[:, 2] 11 | 12 | length = (x2 - x1 + 1) 13 | order = scores.argsort()[::-1] 14 | 15 | keep = [] 16 | while order.size > 0: 17 | i = order.item(0) 18 | keep.append(i) 19 | xx1 = np.maximum(x1[i], x1[order[1:]]) 20 | #yy1 = np.maximum(y1[i], y1[order[1:]]) 21 | xx2 = np.minimum(x2[i], x2[order[1:]]) 22 | #yy2 = np.minimum(y2[i], y2[order[1:]]) 23 | 24 | inter = np.maximum(0.0, xx2 - xx1 + 1) 25 | ovr = inter / (length[i] + length[order[1:]] - inter) 26 | 27 | inds = np.where(ovr < thresh)[0] 28 | order = order[inds+1] 29 | 30 | return torch.IntTensor(keep) 31 | -------------------------------------------------------------------------------- /lib/model/nms/nms_gpu.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import torch 3 | import numpy as np 4 | from ._ext import nms 5 | import pdb 6 | 7 | def nms_gpu(dets, thresh): 8 | keep = dets.new(dets.size(0), 1).zero_().int() 9 | num_out = dets.new(1).zero_().int() 10 | nms.nms_cuda(keep, dets, num_out, thresh) 11 | keep = keep[:num_out[0]] 12 | return keep 13 | -------------------------------------------------------------------------------- /lib/model/nms/nms_wrapper.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | import torch 8 | from model.utils.config import cfg 9 | if torch.cuda.is_available(): 10 | from model.nms.nms_gpu import nms_gpu 11 | from model.nms.nms_cpu import nms_cpu 12 | 13 | def nms(dets, thresh, force_cpu=False): 14 | """Dispatch to either CPU or GPU NMS implementations.""" 15 | if dets.shape[0] == 0: 16 | return [] 17 | # ---numpy version--- 18 | # original: return gpu_nms(dets, thresh, device_id=cfg.GPU_ID) 19 | # ---pytorch version--- 20 | 21 | # nms_gpu is only imported above when CUDA is available, so fall back to the CPU path otherwise 22 | if force_cpu or not torch.cuda.is_available(): 23 | return nms_cpu(dets, thresh) 24 | return nms_gpu(dets, thresh) 25 | -------------------------------------------------------------------------------- /lib/model/nms/src/nms_cuda.c: -------------------------------------------------------------------------------- 1 | #include <THC/THC.h> 2 | #include <stdio.h> 3 | #include "nms_cuda_kernel.h" 4 | 5 | // this symbol will be resolved automatically from PyTorch libs 6 | extern THCState *state; 7 | 8 | int nms_cuda(THCudaIntTensor *keep_out, THCudaTensor *boxes_host, 9 | THCudaIntTensor *num_out, float nms_overlap_thresh) { 10 | 11 | nms_cuda_compute(THCudaIntTensor_data(state, keep_out), 12 | THCudaIntTensor_data(state, num_out), 13 | THCudaTensor_data(state, boxes_host), 14 | boxes_host->size[0], 15 | boxes_host->size[1], 16 | nms_overlap_thresh); 17 | 18 | return 1; 19 | } 20 | -------------------------------------------------------------------------------- /lib/model/nms/src/nms_cuda.h:
-------------------------------------------------------------------------------- 1 | // int nms_cuda(THCudaTensor *keep_out, THCudaTensor *num_out, 2 | // THCudaTensor *boxes_host, THCudaTensor *nms_overlap_thresh); 3 | 4 | int nms_cuda(THCudaIntTensor *keep_out, THCudaTensor *boxes_host, 5 | THCudaIntTensor *num_out, float nms_overlap_thresh); 6 | -------------------------------------------------------------------------------- /lib/model/nms/src/nms_cuda_kernel.h: -------------------------------------------------------------------------------- 1 | #ifdef __cplusplus 2 | extern "C" { 3 | #endif 4 | 5 | void nms_cuda_compute(int* keep_out, int *num_out, float* boxes_host, int boxes_num, 6 | int boxes_dim, float nms_overlap_thresh); 7 | 8 | #ifdef __cplusplus 9 | } 10 | #endif 11 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/roi_temporal_pooling/__init__.py -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/_ext/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/roi_temporal_pooling/_ext/__init__.py -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/_ext/roi_temporal_pooling/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from torch.utils.ffi import _wrap_function 3 | from ._roi_temporal_pooling import lib as _lib, ffi as _ffi 4 | 5 | __all__ = [] 6 | def _import_symbols(locals): 7 | for symbol in dir(_lib): 8 | fn = getattr(_lib, symbol) 9 | if callable(fn): 10 | locals[symbol] = _wrap_function(fn, _ffi) 11 | else: 12 | locals[symbol] = fn 13 | __all__.append(symbol) 14 | 15 | _import_symbols(locals()) 16 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/build.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import torch 4 | from torch.utils.ffi import create_extension 5 | 6 | 7 | sources = ['src/roi_temporal_pooling.c'] 8 | headers = ['src/roi_temporal_pooling.h'] 9 | extra_objects = [] 10 | defines = [] 11 | with_cuda = False 12 | 13 | this_file = os.path.dirname(os.path.realpath(__file__)) 14 | print(this_file) 15 | 16 | if torch.cuda.is_available(): 17 | print('Including CUDA code.') 18 | sources += ['src/roi_temporal_pooling_cuda.c'] 19 | headers += ['src/roi_temporal_pooling_cuda.h'] 20 | defines += [('WITH_CUDA', None)] 21 | with_cuda = True 22 | extra_objects = ['src/roi_temporal_pooling_kernel.cu.o'] 23 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects] 24 | 25 | ffi = create_extension( 26 | '_ext.roi_temporal_pooling', 27 | headers=headers, 28 | sources=sources, 29 | define_macros=defines, 30 | relative_to=__file__, 31 | with_cuda=with_cuda, 32 | extra_objects=extra_objects 33 | ) 34 | 35 | if __name__ == '__main__': 36 | ffi.build() 37 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/functions/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/roi_temporal_pooling/functions/__init__.py -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/roi_temporal_pooling/modules/__init__.py -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/modules/roi_temporal_pool.py: -------------------------------------------------------------------------------- 1 | from torch.nn.modules.module import Module 2 | from ..functions.roi_temporal_pool import RoITemporalPoolFunction 3 | 4 | 5 | class _RoITemporalPooling(Module): 6 | def __init__(self, pooled_length, pooled_height, pooled_width, temporal_scale, ctx_ratio=1.0): 7 | super(_RoITemporalPooling, self).__init__() 8 | 9 | self.pooled_width = int(pooled_width) 10 | self.pooled_height = int(pooled_height) 11 | self.pooled_length = int(pooled_length) 12 | self.temporal_scale = float(temporal_scale) 13 | self.ctx_ratio = float(ctx_ratio) 14 | 15 | def forward(self, features, rois): 16 | return RoITemporalPoolFunction(self.pooled_length, self.pooled_height, self.pooled_width, self.temporal_scale, self.ctx_ratio)(features, rois) 17 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/src/roi_temporal_pooling.h: -------------------------------------------------------------------------------- 1 | int roi_temporal_pooling_forward(int pooled_length, int pooled_height, int pooled_width, float temporal_scale, float ctx_ratio, 2 | THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output); 3 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/src/roi_temporal_pooling_cuda.h: -------------------------------------------------------------------------------- 1 | int roi_temporal_pooling_forward_cuda(int pooled_length, int pooled_height, int pooled_width, float temporal_scale, float ctx_ratio, 2 | THCudaTensor * features, THCudaTensor * rois, THCudaTensor * output, THCudaIntTensor * argmax); 3 | 4 | int roi_temporal_pooling_backward_cuda(int pooled_length, int pooled_height, int pooled_width, float temporal_scale, float ctx_ratio, 5 | THCudaTensor * top_grad, THCudaTensor * rois, THCudaTensor * bottom_grad, THCudaIntTensor * argmax); 6 | -------------------------------------------------------------------------------- /lib/model/roi_temporal_pooling/src/roi_temporal_pooling_kernel.h: -------------------------------------------------------------------------------- 1 | #ifndef _ROI_TEMPORAL_POOLING_KERNEL 2 | #define _ROI_TEMPORAL_POOLING_KERNEL 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | int ROITemporalPoolForwardLaucher( 9 | const float* bottom_data, const float temporal_scale, const float ctx_ratio, const int num_rois, const int length, const int height, 10 | const int width, const int channels, const int pooled_length, const int pooled_height, 11 | const int pooled_width, const float* bottom_rois, 12 | float* top_data, int* argmax_data, cudaStream_t stream); 13 | 14 | 15 | int ROITemporalPoolBackwardLaucher( 16 | const float* top_diff, const float 
temporal_scale, const float ctx_ratio, const int batch_size, const int num_rois, 17 | const int length, const int height, const int width, const int channels, const int pooled_length, const int pooled_height, 18 | const int pooled_width, const float* bottom_rois, 19 | float* bottom_diff, const int* argmax_data, cudaStream_t stream); 20 | 21 | #ifdef __cplusplus 22 | } 23 | #endif 24 | 25 | #endif 26 | 27 | -------------------------------------------------------------------------------- /lib/model/rpn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/rpn/__init__.py -------------------------------------------------------------------------------- /lib/model/rpn/generate_anchors.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # R-C3D 3 | # Copyright (c) 2017 Boston University 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Huijuan Xu 6 | # -------------------------------------------------------- 7 | 8 | import numpy as np 9 | import pdb 10 | 11 | def generate_anchors(base_size=8, scales=2**np.arange(3, 6)): 12 | """ 13 | Generate anchor (reference) windows by enumerating 14 | scales wrt a reference (0, 7) window. 15 | """ 16 | 17 | base_anchor = np.array([1, base_size]) - 1 18 | anchors = _scale_enum(base_anchor, scales) 19 | return anchors 20 | 21 | def _whctrs(anchor): 22 | """ 23 | Return the length and center of an anchor (window). 24 | """ 25 | 26 | l = anchor[1] - anchor[0] + 1 27 | x_ctr = anchor[0] + 0.5 * (l - 1) 28 | return l, x_ctr 29 | 30 | def _mkanchors(ls, x_ctr): 31 | """ 32 | Given a vector of lengths (ls) around a center 33 | (x_ctr), output a set of anchors (windows). 34 | """ 35 | 36 | ls = ls[:, np.newaxis] 37 | anchors = np.hstack((x_ctr - 0.5 * (ls - 1), 38 | x_ctr + 0.5 * (ls - 1))) 39 | return anchors 40 | 41 | def _scale_enum(anchor, scales): 42 | """ 43 | Enumerate a set of anchors for each scale wrt an anchor.
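For example, the (0, 7) base anchor has length 8 and center 3.5, so scales (2, 4) yield the windows (-4, 11) and (-12, 19).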
44 | """ 45 | 46 | l, x_ctr = _whctrs(anchor) 47 | ls = l * scales 48 | anchors = _mkanchors(ls, x_ctr) 49 | return anchors 50 | 51 | if __name__ == '__main__': 52 | import time 53 | t = time.time() 54 | a = generate_anchors(scales=np.array([2, 4, 5, 6, 8, 9, 10, 12, 14, 16])) 55 | print (time.time() - t) 56 | print (a) 57 | from IPython import embed; embed() 58 | -------------------------------------------------------------------------------- /lib/model/tdcnn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/tdcnn/__init__.py -------------------------------------------------------------------------------- /lib/model/utils/.gitignore: -------------------------------------------------------------------------------- 1 | *.so 2 | -------------------------------------------------------------------------------- /lib/model/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/model/utils/__init__.py -------------------------------------------------------------------------------- /lib/model/utils/blob.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # R-C3D 3 | # Copyright (c) 2017 Boston University 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Huijuan Xu 6 | # -------------------------------------------------------- 7 | 8 | """Blob helper functions.""" 9 | 10 | import numpy as np 11 | # from scipy.misc import imread, imresize 12 | import cv2 13 | 14 | try: 15 | xrange # Python 2 16 | except NameError: 17 | xrange = range # Python 3 18 | 19 | 20 | def video_list_to_blob(videos): 21 | """Convert a list of videos into a network input. 22 | 23 | Assumes videos are already prepared (means subtracted, BGR order, ...). 
24 | """ 25 | shape = videos[0].shape 26 | num_videos = len(videos) 27 | blob = np.zeros((num_videos, shape[0], shape[1], shape[2], shape[3]), 28 | dtype=np.float32) 29 | for i in xrange(num_videos): 30 | blob[i] = videos[i] 31 | # Move channels (axis 3) to axis 1 32 | # Axis order will become: (batch elem, channel, length, height, width) 33 | channel_swap = (0, 4, 1, 2, 3) 34 | blob = blob.transpose(channel_swap) 35 | return blob 36 | 37 | def prep_im_for_blob(im, pixel_means, target_size, crop_size, random_idx): 38 | """Mean subtract, resize and crop an frame for use in a blob.""" 39 | im = im.astype(np.float32, copy=False) 40 | im = cv2.resize(im, target_size, interpolation=cv2.INTER_LINEAR) 41 | im -= pixel_means 42 | x = random_idx[1] 43 | y = random_idx[0] 44 | return im[x:x+crop_size, y:y+crop_size] 45 | -------------------------------------------------------------------------------- /lib/roi_data_layer/__init__.py: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------- 2 | # Fast R-CNN 3 | # Copyright (c) 2015 Microsoft 4 | # Licensed under The MIT License [see LICENSE for details] 5 | # Written by Ross Girshick 6 | # -------------------------------------------------------- 7 | -------------------------------------------------------------------------------- /lib/roi_data_layer/roibatchLoader.py: -------------------------------------------------------------------------------- 1 | 2 | """The data layer used during training to train a Fast R-CNN network. 3 | """ 4 | 5 | from __future__ import absolute_import 6 | from __future__ import division 7 | from __future__ import print_function 8 | 9 | import torch.utils.data as data 10 | import torch 11 | 12 | from model.utils.config import cfg 13 | from roi_data_layer.minibatch import get_minibatch 14 | 15 | import numpy as np 16 | import random 17 | import time 18 | import pdb 19 | 20 | class roibatchLoader(data.Dataset): 21 | def __init__(self, roidb, normalize=None, phase='train'): 22 | self._roidb = roidb 23 | self.max_num_box = cfg.MAX_NUM_GT_TWINS 24 | self.normalize = normalize 25 | self.phase = phase 26 | 27 | def __getitem__(self, index): 28 | # get the anchor index for current sample index 29 | item = self._roidb[index] 30 | blobs = get_minibatch([item], self.phase) 31 | data = torch.from_numpy(blobs['data']) 32 | length, height, width = data.shape[-3:] 33 | data = data.contiguous().view(3, length, height, width) 34 | 35 | gt_windows = torch.from_numpy(blobs['gt_windows']) 36 | gt_windows_padding = gt_windows.new(self.max_num_box, gt_windows.size(1)).zero_() 37 | num_gt = min(gt_windows.size(0), self.max_num_box) 38 | gt_windows_padding[:num_gt, :] = gt_windows[:num_gt] 39 | 40 | if self.phase == 'test': 41 | video_info = '' 42 | for key, value in item.items(): 43 | video_info = video_info + " {}: {}\n".format(key, value) 44 | # drop the last "\n" 45 | video_info = video_info[:-1] 46 | return data, gt_windows_padding, num_gt, video_info 47 | else: 48 | return data, gt_windows_padding, num_gt 49 | 50 | def __len__(self): 51 | return len(self._roidb) 52 | -------------------------------------------------------------------------------- /lib/test_model/test_tdcnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn.modules.module import Module 3 | from torch.autograd import Variable 4 | from model.psroi_pooling.modules.psroi_pool import _PSRoIPooling 5 | import numpy as np 6 | 7 | 
class test_PSRoIPooling(Module): 8 | def __init__(self, output_dim, group_size, spatial_scale): 9 | super(test_PSRoIPooling, self).__init__() 10 | self.psroi_pool = _PSRoIPooling(output_dim, group_size, spatial_scale) 11 | 12 | def forward(self, feature, rois): 13 | return self.psroi_pool(feature, rois) 14 | 15 | 16 | if __name__ == '__main__': 17 | feature = Variable(torch.FloatTensor(1, 18, 12, 10), requires_grad=True) 18 | rois = Variable(torch.from_numpy(np.array([[0, 20, 50, 120, 150], [0, 80, 100, 150, 200]])).float()) 19 | psroi_pool = test_PSRoIPooling(2, 3, 1./16) 20 | if torch.cuda.is_available(): 21 | feature = feature.cuda() 22 | rois = rois.cuda() 23 | psroi_pool = psroi_pool.cuda() 24 | print(feature) 25 | print(rois) 26 | print(psroi_pool) 27 | pooled_feature = psroi_pool(feature, rois) 28 | 29 | 30 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/C3DRes18/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/C3DRes18/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/ECO/ECO_top.yaml: -------------------------------------------------------------------------------- 1 | inputs: [] 2 | layers: 3 | # (N, 256, K/2, 7, 7) 4 | - attrs: {} 5 | expr: res4b_bn<=Identity<=bottom_input 6 | id: res4b_bn 7 | # (N, 256, K/2, 7, 7) 8 | - attrs: {kernel_size: 3, num_output: 512, pad: 1, stride: 2} 9 | expr: res5a_1<=Conv3d<=res4b_bn 10 | id: res5a_1 11 | - attrs: {frozen: false} 12 | expr: res5a_1_bn<=BN3d<=res5a_1 13 | id: res5a_1_bn 14 | - {expr: res5a_1_bn<=ReLU<=res5a_1_bn, id: res5a_1_relu} 15 | - attrs: {kernel_size: 3, num_output: 512, pad: 1, stride: 1} 16 | expr: res5a_2<=Conv3d<=res5a_1_bn 17 | id: res5a_2 18 | - attrs: {kernel_size: 3, num_output: 512, pad: 1, stride: 2} 19 | expr: res5a_down<=Conv3d<=res4b_bn 20 | id: res5a_down 21 | - {expr: 'res5a<=Eltwise<=res5a_2,res5a_down', id: res5a} 22 | # (N, 512, K/4, 4, 4) 23 | - attrs: {frozen: false} 24 | expr: res5a_bn<=BN3d<=res5a 25 | id: res5a_bn 26 | - {expr: res5a_bn<=ReLU<=res5a_bn, id: res5a_relu} 27 | - attrs: {kernel_size: 3, num_output: 512, pad: 1, stride: 1} 28 | expr: res5b_1<=Conv3d<=res5a_bn 29 | id: res5b_1 30 | - attrs: {frozen: false} 31 | expr: res5b_1_bn<=BN3d<=res5b_1 32 | id: res5b_1_bn 33 | - {expr: res5b_1_bn<=ReLU<=res5b_1_bn, id: res5b_1_relu} 34 | - attrs: {kernel_size: 3, num_output: 512, pad: 1, stride: 1} 35 | expr: res5b_2<=Conv3d<=res5b_1_bn 36 | id: res5b_2 37 | - {expr: 'res5b<=Eltwise<=res5b_2,res5a', id: res5b} 38 | - attrs: {frozen: false} 39 | expr: res5b_bn<=BN3d<=res5b 40 | id: res5b_bn 41 | - {expr: res5b_bn<=ReLU<=res5b_bn, id: res5b_relu} 42 | # (N, 512, K/4, 4, 4) 43 | #- attrs: {kernel_d: 1, kernel_h: 4, kernel_w: 4, stride: 1, mode: ave} 44 | # expr: global_pool<=Pooling3d<=res5b_bn 45 | # id: global_pool 46 | #- attrs: {num_output: 400} 47 | # expr: fc_action<=InnerProduct<=global_pool 48 | # id: fc_final 49 | name: ECO_top 50 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/ECO/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/ECO/__init__.py 
-------------------------------------------------------------------------------- /lib/tf_model_zoo/ECOfull/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/ECOfull/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 LIP6 Lab 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | 22 | ------------------------------------------------- 23 | 24 | This product contains portions of third party software provided under this license: 25 | 26 | dump_filters.py (x) 27 | =============== 28 | 29 | Copyright 2015 Google Inc. All Rights Reserved. 30 | 31 | Licensed under the Apache License, Version 2.0 (the "License"); 32 | you may not use this file except in compliance with the License. 33 | You may obtain a copy of the License at 34 | 35 | http://www.apache.org/licenses/LICENSE-2.0 36 | 37 | Unless required by applicable law or agreed to in writing, software 38 | distributed under the License is distributed on an "AS IS" BASIS, 39 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 40 | See the License for the specific language governing permissions and 41 | limitations under the License. 
42 | 43 | 44 | (x) adapted from https://github.com/tensorflow/tensorflow/blob/411f57e/tensorflow/models/image/imagenet/classify_image.py 45 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/__init__.py: -------------------------------------------------------------------------------- 1 | from .bninception.pytorch_load import BNInception, InceptionV3 2 | from .ECO.pytorch_load import ECO 3 | from .C3DRes18.pytorch_load import C3DRes18 4 | from .ECOfull.pytorch_load import ECOfull 5 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/bninception/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/bninception/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/eco-lite-4f-prec-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/eco-lite-4f-prec-1.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/inceptionresnetv2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/inceptionresnetv2/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/inceptionv4/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/inceptionv4/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/lena_224.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/lena_224.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/lena_299.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/lena_299.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/lena_origin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/lena_origin.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/.DS_Store -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/._.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/._.DS_Store -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Please let us know which model this issue is about (specify the top-level directory) 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "tensorflow"] 2 | path = syntaxnet/tensorflow 3 | url = https://github.com/tensorflow/tensorflow.git 4 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | 5 | # Names should be added to this file as: 6 | # Name or Organization <email address> 7 | # The email address is not required for organizations. 8 | 9 | Google Inc. 10 | David Dao 11 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | If you have created a model and would like to publish it here, please send us a 4 | pull request. For those just getting started with pull requests, GitHub has a 5 | [howto](https://help.github.com/articles/using-pull-requests/). 6 | 7 | The code for any model in this repository is licensed under the Apache License 8 | 2.0.
9 | 10 | In order to accept your code, we have to make sure that we can publish it: 11 | you have to sign a Contributor License Agreement (CLA). 12 | 13 | ### Contributor License Agreements 14 | 15 | Please fill out either the individual or corporate Contributor License Agreement (CLA). 16 | 17 | * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). 18 | * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). 19 | 20 | Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. 21 | 22 | ***NOTE***: Only original source code from you and other people who have signed the CLA can be accepted into the repository. 23 | 24 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow Models 2 | 3 | This repository contains machine learning models implemented in 4 | [TensorFlow](https://tensorflow.org). The models are maintained by their 5 | respective authors. 6 | 7 | To propose a model for inclusion, please submit a pull request. 8 | 9 | 10 | ## Models 11 | - [autoencoder](autoencoder) -- various autoencoders 12 | - [inception](inception) -- deep convolutional networks for computer vision 13 | - [namignizer](namignizer) -- recognize and generate names 14 | - [neural_gpu](neural_gpu) -- highly parallel neural computer 15 | - [privacy](privacy) -- privacy-preserving student models from multiple teachers 16 | - [resnet](resnet) -- deep and wide residual networks 17 | - [slim](slim) -- image classification models in TF-Slim 18 | - [swivel](swivel) -- the Swivel algorithm for generating word embeddings 19 | - [syntaxnet](syntaxnet) -- neural models of natural language syntax 20 | - [textsum](textsum) -- sequence-to-sequence with attention model for text summarization 21 | - [transformer](transformer) -- spatial transformer network, which allows the spatial manipulation of data within the network 22 | - [im2txt](im2txt) -- image-to-text neural network for image captioning 23 | - [neural_programmer](neural_programmer) -- neural network augmented with logic and mathematical operations
24 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/WORKSPACE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/WORKSPACE -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import sklearn.preprocessing as prep 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | from autoencoder.autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder 8 | 9 | mnist = input_data.read_data_sets('MNIST_data', one_hot = True) 10 | 11 | def standard_scale(X_train, X_test): 12 | preprocessor = prep.StandardScaler().fit(X_train) 13 | X_train = preprocessor.transform(X_train) 14 | X_test = preprocessor.transform(X_test) 15 | return X_train, X_test 16 | 17 | def get_random_block_from_data(data, batch_size): 18 | start_index = np.random.randint(0, len(data) - batch_size) 19 | return data[start_index:(start_index + batch_size)] 20 | 21 | X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) 22 | 23 | n_samples = int(mnist.train.num_examples) 24 | training_epochs = 20 25 | batch_size = 128 26 | display_step = 1 27 | 28 | autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784, 29 | n_hidden = 200, 30 | transfer_function = tf.nn.softplus, 31 | optimizer = tf.train.AdamOptimizer(learning_rate = 0.001), 32 | scale = 0.01) 33 | 34 | for epoch in range(training_epochs): 35 | avg_cost = 0. 
36 | total_batch = int(n_samples / batch_size) 37 | # Loop over all batches 38 | for i in range(total_batch): 39 | batch_xs = get_random_block_from_data(X_train, batch_size) 40 | 41 | # Fit training using batch data 42 | cost = autoencoder.partial_fit(batch_xs) 43 | # Compute average loss 44 | avg_cost += cost / n_samples * batch_size 45 | 46 | # Display logs per epoch step 47 | if epoch % display_step == 0: 48 | print "Epoch:", '%04d' % (epoch + 1), \ 49 | "cost=", "{:.9f}".format(avg_cost) 50 | 51 | print "Total cost: " + str(autoencoder.calc_total_cost(X_test)) 52 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/AutoencoderRunner.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import sklearn.preprocessing as prep 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | from autoencoder.autoencoder_models.Autoencoder import Autoencoder 8 | 9 | mnist = input_data.read_data_sets('MNIST_data', one_hot = True) 10 | 11 | def standard_scale(X_train, X_test): 12 | preprocessor = prep.StandardScaler().fit(X_train) 13 | X_train = preprocessor.transform(X_train) 14 | X_test = preprocessor.transform(X_test) 15 | return X_train, X_test 16 | 17 | def get_random_block_from_data(data, batch_size): 18 | start_index = np.random.randint(0, len(data) - batch_size) 19 | return data[start_index:(start_index + batch_size)] 20 | 21 | X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) 22 | 23 | n_samples = int(mnist.train.num_examples) 24 | training_epochs = 20 25 | batch_size = 128 26 | display_step = 1 27 | 28 | autoencoder = Autoencoder(n_input = 784, 29 | n_hidden = 200, 30 | transfer_function = tf.nn.softplus, 31 | optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)) 32 | 33 | for epoch in range(training_epochs): 34 | avg_cost = 0. 
35 | total_batch = int(n_samples / batch_size) 36 | # Loop over all batches 37 | for i in range(total_batch): 38 | batch_xs = get_random_block_from_data(X_train, batch_size) 39 | 40 | # Fit training using batch data 41 | cost = autoencoder.partial_fit(batch_xs) 42 | # Compute average loss 43 | avg_cost += cost / n_samples * batch_size 44 | 45 | # Display logs per epoch step 46 | if epoch % display_step == 0: 47 | print "Epoch:", '%04d' % (epoch + 1), \ 48 | "cost=", "{:.9f}".format(avg_cost) 49 | 50 | print "Total cost: " + str(autoencoder.calc_total_cost(X_test)) 51 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/MaskingNoiseAutoencoderRunner.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import sklearn.preprocessing as prep 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder 8 | 9 | mnist = input_data.read_data_sets('MNIST_data', one_hot = True) 10 | 11 | def standard_scale(X_train, X_test): 12 | preprocessor = prep.StandardScaler().fit(X_train) 13 | X_train = preprocessor.transform(X_train) 14 | X_test = preprocessor.transform(X_test) 15 | return X_train, X_test 16 | 17 | def get_random_block_from_data(data, batch_size): 18 | start_index = np.random.randint(0, len(data) - batch_size) 19 | return data[start_index:(start_index + batch_size)] 20 | 21 | X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) 22 | 23 | 24 | n_samples = int(mnist.train.num_examples) 25 | training_epochs = 100 26 | batch_size = 128 27 | display_step = 1 28 | 29 | autoencoder = MaskingNoiseAutoencoder(n_input = 784, 30 | n_hidden = 200, 31 | transfer_function = tf.nn.softplus, 32 | optimizer = tf.train.AdamOptimizer(learning_rate = 0.001), 33 | dropout_probability = 0.95) 34 | 35 | for epoch in range(training_epochs): 36 | avg_cost = 0. 
37 | total_batch = int(n_samples / batch_size) 38 | for i in range(total_batch): 39 | batch_xs = get_random_block_from_data(X_train, batch_size) 40 | 41 | cost = autoencoder.partial_fit(batch_xs) 42 | 43 | avg_cost += cost / n_samples * batch_size 44 | 45 | if epoch % display_step == 0: 46 | print "Epoch:", '%04d' % (epoch + 1), \ 47 | "cost=", "{:.9f}".format(avg_cost) 48 | 49 | print "Total cost: " + str(autoencoder.calc_total_cost(X_test)) 50 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/Utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | def xavier_init(fan_in, fan_out, constant = 1): 5 | low = -constant * np.sqrt(6.0 / (fan_in + fan_out)) 6 | high = constant * np.sqrt(6.0 / (fan_in + fan_out)) 7 | return tf.random_uniform((fan_in, fan_out), 8 | minval = low, maxval = high, 9 | dtype = tf.float32) 10 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/VariationalAutoencoderRunner.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import sklearn.preprocessing as prep 4 | import tensorflow as tf 5 | from tensorflow.examples.tutorials.mnist import input_data 6 | 7 | from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder 8 | 9 | mnist = input_data.read_data_sets('MNIST_data', one_hot = True) 10 | 11 | 12 | 13 | def min_max_scale(X_train, X_test): 14 | preprocessor = prep.MinMaxScaler().fit(X_train) 15 | X_train = preprocessor.transform(X_train) 16 | X_test = preprocessor.transform(X_test) 17 | return X_train, X_test 18 | 19 | 20 | def get_random_block_from_data(data, batch_size): 21 | start_index = np.random.randint(0, len(data) - batch_size) 22 | return data[start_index:(start_index + batch_size)] 23 | 24 | 25 | X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images) 26 | 27 | n_samples = int(mnist.train.num_examples) 28 | training_epochs = 20 29 | batch_size = 128 30 | display_step = 1 31 | 32 | autoencoder = VariationalAutoencoder(n_input = 784, 33 | n_hidden = 200, 34 | optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)) 35 | 36 | for epoch in range(training_epochs): 37 | avg_cost = 0. 
38 | total_batch = int(n_samples / batch_size) 39 | # Loop over all batches 40 | for i in range(total_batch): 41 | batch_xs = get_random_block_from_data(X_train, batch_size) 42 | 43 | # Fit training using batch data 44 | cost = autoencoder.partial_fit(batch_xs) 45 | # Compute average loss 46 | avg_cost += cost / n_samples * batch_size 47 | 48 | # Display logs per epoch step 49 | if epoch % display_step == 0: 50 | print "Epoch:", '%04d' % (epoch + 1), \ 51 | "cost=", "{:.9f}".format(avg_cost) 52 | 53 | print "Total cost: " + str(autoencoder.calc_total_cost(X_test)) 54 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/autoencoder/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/autoencoder/autoencoder_models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/autoencoder/autoencoder_models/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/compression/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/compression/example.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/README.md: -------------------------------------------------------------------------------- 1 | Deep Learning with Differential Privacy 2 | 3 | Open Sourced By: Xin Pan (xpan@google.com, github: panyx0718) 4 | 5 | 6 | ### Introduction for dp_sgd/README.md 7 | 8 | Machine learning techniques based on neural networks are achieving remarkable 9 | results in a wide variety of domains. Often, the training of models requires 10 | large, representative datasets, which may be crowdsourced and contain sensitive 11 | information. The models should not expose private information in these datasets. 12 | Addressing this goal, we develop new algorithmic techniques for learning and a 13 | refined analysis of privacy costs within the framework of differential privacy. 14 | Our implementation and experiments demonstrate that we can train deep neural 15 | networks with non-convex objectives, under a modest privacy budget, and at a 16 | manageable cost in software complexity, training efficiency, and model quality. 17 | 18 | paper: https://arxiv.org/abs/1607.00133 19 | 20 | 21 | ### Introduction for multiple_teachers/README.md 22 | 23 | This repository contains code to create a setup for learning privacy-preserving 24 | student models by transferring knowledge from an ensemble of teachers trained 25 | on disjoint subsets of the data for which privacy guarantees are to be provided. 26 | 27 | Knowledge acquired by teachers is transferred to the student in a differentially 28 | private manner by noisily aggregating the teacher decisions before feeding them 29 | to the student during training.
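As a rough sketch of that aggregation step (an editor's illustration with hypothetical names, not the mechanism implemented in aggregation.py): each teacher votes for a class, the per-class vote counts are perturbed with Laplace noise, and the noisy argmax becomes the student's training label.

import numpy as np

def noisy_max_label(teacher_votes, num_classes, noise_scale, rng=np.random):
    """Differentially private label: Laplace-noised argmax of teacher votes."""
    # teacher_votes: one predicted class id per teacher for a single query.
    counts = np.bincount(teacher_votes, minlength=num_classes).astype(np.float64)
    # Perturb each per-class count with independent Laplace noise.
    counts += rng.laplace(loc=0.0, scale=float(noise_scale), size=num_classes)
    return int(np.argmax(counts))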
30 | 31 | paper: https://arxiv.org/abs/1610.05755 32 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/differential_privacy/__init__.py -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/dp_sgd/dp_mnist/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//differential_privacy/...", 11 | ], 12 | ) 13 | 14 | py_binary( 15 | name = "dp_mnist", 16 | srcs = [ 17 | "dp_mnist.py", 18 | ], 19 | deps = [ 20 | "//differential_privacy/dp_sgd/dp_optimizer", 21 | "//differential_privacy/dp_sgd/dp_optimizer:dp_pca", 22 | "//differential_privacy/dp_sgd/dp_optimizer:utils", 23 | ], 24 | ) 25 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/dp_sgd/dp_optimizer/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//differential_privacy/...", 11 | ], 12 | ) 13 | 14 | py_library( 15 | name = "utils", 16 | srcs = [ 17 | "utils.py", 18 | ], 19 | deps = [ 20 | ], 21 | ) 22 | 23 | py_library( 24 | name = "dp_pca", 25 | srcs = [ 26 | "dp_pca.py", 27 | ], 28 | deps = [ 29 | ], 30 | ) 31 | 32 | py_library( 33 | name = "dp_optimizer", 34 | srcs = [ 35 | "dp_optimizer.py", 36 | "sanitizer.py", 37 | ], 38 | deps = [ 39 | ":utils", 40 | "//differential_privacy/dp_sgd/per_example_gradients", 41 | "//differential_privacy/privacy_accountant/tf:accountant", 42 | ], 43 | ) 44 | 45 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/dp_sgd/per_example_gradients/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//differential_privacy/...", 11 | ], 12 | ) 13 | 14 | py_library( 15 | name = "per_example_gradients", 16 | srcs = [ 17 | "per_example_gradients.py", 18 | ], 19 | deps = [ 20 | ], 21 | ) 22 | 23 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/multiple_teachers/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//differential_privacy/...", 11 | ], 12 | ) 13 | 14 | py_library( 15 | name = "aggregation", 16 | srcs = [ 17 | "aggregation.py", 18 | ], 19 | deps = [ 20 | ], 21 | ) 22 | 23 | py_library( 24 | name = "deep_cnn", 25 | srcs = [ 26 | "deep_cnn.py", 27 | ], 28 | deps = [ 29 | ":utils", 30 | ], 31 | ) 32 | 33 | 
py_library( 34 | name = "input", 35 | srcs = [ 36 | "input.py", 37 | ], 38 | deps = [ 39 | ], 40 | ) 41 | 42 | py_library( 43 | name = "metrics", 44 | srcs = [ 45 | "metrics.py", 46 | ], 47 | deps = [ 48 | ], 49 | ) 50 | 51 | py_library( 52 | name = "utils", 53 | srcs = [ 54 | "utils.py", 55 | ], 56 | deps = [ 57 | ], 58 | ) 59 | 60 | py_binary( 61 | name = "train_student", 62 | srcs = [ 63 | "train_student.py", 64 | ], 65 | deps = [ 66 | ":aggregation", 67 | ":deep_cnn", 68 | ":input", 69 | ":metrics", 70 | ], 71 | ) 72 | 73 | py_binary( 74 | name = "train_teachers", 75 | srcs = [ 76 | "train_teachers.py", 77 | ], 78 | deps = [ 79 | ":deep_cnn", 80 | ":input", 81 | ":metrics", 82 | ], 83 | ) 84 | 85 | py_library( 86 | name = "analysis", 87 | srcs = [ 88 | "analysis.py", 89 | ], 90 | deps = [ 91 | "//differential_privacy/multiple_teachers:input", 92 | ], 93 | ) 94 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/multiple_teachers/metrics.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | from __future__ import absolute_import 17 | from __future__ import division 18 | from __future__ import print_function 19 | 20 | import numpy as np 21 | 22 | 23 | def accuracy(logits, labels): 24 | """ 25 | Return accuracy of the array of logits (or label predictions) wrt the labels 26 | :param logits: this can either be logits, probabilities, or a single label 27 | :param labels: the correct labels to match against 28 | :return: the accuracy as a float 29 | """ 30 | assert len(logits) == len(labels) 31 | 32 | if len(np.shape(logits)) > 1: 33 | # Predicted labels are the argmax over axis 1 34 | predicted_labels = np.argmax(logits, axis=1) 35 | else: 36 | # Input was already labels 37 | assert len(np.shape(logits)) == 1 38 | predicted_labels = logits 39 | 40 | # Check against correct labels to compute correct guesses 41 | correct = np.sum(predicted_labels == labels.reshape(len(labels))) 42 | 43 | # Divide by number of labels to obtain accuracy 44 | accuracy = float(correct) / len(labels) 45 | 46 | # Return float value 47 | return accuracy 48 | 49 | 50 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/multiple_teachers/train_student_mnist_250_lap_20_count_50_epochs_600.sh: -------------------------------------------------------------------------------- 1 | # Be sure to clone https://github.com/openai/improved-gan 2 | # and add improved-gan/mnist_svhn_cifar10 to your PATH variable 3 | 4 | # Download labels used to train the student 5 | wget https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_student_labels_lap_20.npy 6 | 7 | # Train the student using
improved-gan 8 | THEANO_FLAGS='floatX=float32,device=gpu,lib.cnmem=1' train_mnist_fm_custom_labels.py --labels mnist_250_student_labels_lap_20.npy --count 50 --epochs 600 9 | 10 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/multiple_teachers/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | 17 | def batch_indices(batch_nb, data_length, batch_size): 18 | """ 19 | This helper function computes a batch start and end index 20 | :param batch_nb: the batch number 21 | :param data_length: the total length of the data being parsed by batches 22 | :param batch_size: the number of inputs in each batch 23 | :return: pair of (start, end) indices 24 | """ 25 | # Batch start and end index 26 | start = int(batch_nb * batch_size) 27 | end = int((batch_nb + 1) * batch_size) 28 | 29 | # When there are not enough inputs left, we reuse some to complete the batch 30 | if end > data_length: 31 | shift = end - data_length 32 | start -= shift 33 | end -= shift 34 | 35 | return start, end 36 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/privacy_accountant/python/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//third_party/tensorflow_models/...", 11 | ], 12 | ) 13 | 14 | py_binary( 15 | name = "gaussian_moments", 16 | srcs = [ 17 | "gaussian_moments.py", 18 | ], 19 | deps = [ 20 | ], 21 | ) 22 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/differential_privacy/privacy_accountant/tf/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//differential_privacy/...", 11 | ], 12 | ) 13 | 14 | py_library( 15 | name = "accountant", 16 | srcs = [ 17 | "accountant.py", 18 | ], 19 | deps = [ 20 | "//differential_privacy/dp_sgd/dp_optimizer:utils", 21 | ], 22 | ) 23 | 24 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/.gitignore: -------------------------------------------------------------------------------- 1 | /bazel-bin 2 | /bazel-ci_build-cache 3 | /bazel-genfiles 4 | /bazel-out 5 | /bazel-im2txt 6 | /bazel-testlogs 7 | /bazel-tf 8 | 
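A minimal sketch (not a file from this repo) of how the two `multiple_teachers` helpers above fit together: `batch_indices` from `utils.py` carves a dataset into fixed-size batches, and `accuracy` from `metrics.py` scores each batch. The import paths assume the `differential_privacy` package is on `PYTHONPATH`; the data is random and purely illustrative.

```python
import numpy as np

from differential_privacy.multiple_teachers.metrics import accuracy
from differential_privacy.multiple_teachers.utils import batch_indices

# Fake per-class scores and ground-truth labels, just to exercise the helpers.
probs = np.random.rand(100, 10)
labels = np.random.randint(0, 10, 100)

batch_size = 32
nb_batches = int(np.ceil(len(probs) / float(batch_size)))

for batch_nb in range(nb_batches):
  # The final batch is shifted back so it stays full, re-using a few
  # samples from the previous batch instead of coming up short.
  start, end = batch_indices(batch_nb, len(probs), batch_size)
  print('batch %d accuracy: %.4f' %
        (batch_nb, accuracy(probs[start:end], labels[start:end])))
```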
-------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "im2txt") 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/g3doc/COCO_val2014_000000224477.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/im2txt/g3doc/COCO_val2014_000000224477.jpg -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/g3doc/example_captions.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/im2txt/g3doc/example_captions.jpg -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/g3doc/show_and_tell_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/im2txt/g3doc/show_and_tell_architecture.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/im2txt/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//im2txt/...", 11 | ], 12 | ) 13 | 14 | py_binary( 15 | name = "build_mscoco_data", 16 | srcs = [ 17 | "data/build_mscoco_data.py", 18 | ], 19 | ) 20 | 21 | sh_binary( 22 | name = "download_and_preprocess_mscoco", 23 | srcs = ["data/download_and_preprocess_mscoco.sh"], 24 | data = [ 25 | ":build_mscoco_data", 26 | ], 27 | ) 28 | 29 | py_library( 30 | name = "configuration", 31 | srcs = ["configuration.py"], 32 | srcs_version = "PY2AND3", 33 | ) 34 | 35 | py_library( 36 | name = "show_and_tell_model", 37 | srcs = ["show_and_tell_model.py"], 38 | srcs_version = "PY2AND3", 39 | deps = [ 40 | "//im2txt/ops:image_embedding", 41 | "//im2txt/ops:image_processing", 42 | "//im2txt/ops:inputs", 43 | ], 44 | ) 45 | 46 | py_test( 47 | name = "show_and_tell_model_test", 48 | size = "large", 49 | srcs = ["show_and_tell_model_test.py"], 50 | deps = [ 51 | ":configuration", 52 | ":show_and_tell_model", 53 | ], 54 | ) 55 | 56 | py_library( 57 | name = "inference_wrapper", 58 | srcs = ["inference_wrapper.py"], 59 | srcs_version = "PY2AND3", 60 | deps = [ 61 | ":show_and_tell_model", 62 | "//im2txt/inference_utils:inference_wrapper_base", 63 | ], 64 | ) 65 | 66 | py_binary( 67 | name = "train", 68 | srcs = ["train.py"], 69 | srcs_version = "PY2AND3", 70 | deps = [ 71 | ":configuration", 72 | ":show_and_tell_model", 73 | ], 74 | ) 75 | 76 | py_binary( 77 | name = "evaluate", 78 | srcs = ["evaluate.py"], 79 | srcs_version = "PY2AND3", 80 | deps = [ 81 | ":configuration", 82 | ":show_and_tell_model", 83 | ], 84 | ) 85 | 86 | py_binary( 87 | name = "run_inference", 88 | srcs = ["run_inference.py"], 89 | srcs_version = "PY2AND3", 90 | deps = [ 91 | ":configuration", 92 | ":inference_wrapper", 93 | 
"//im2txt/inference_utils:caption_generator", 94 | "//im2txt/inference_utils:vocabulary", 95 | ], 96 | ) 97 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/im2txt/inference_utils/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//im2txt:internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | py_library( 8 | name = "inference_wrapper_base", 9 | srcs = ["inference_wrapper_base.py"], 10 | srcs_version = "PY2AND3", 11 | ) 12 | 13 | py_library( 14 | name = "vocabulary", 15 | srcs = ["vocabulary.py"], 16 | srcs_version = "PY2AND3", 17 | ) 18 | 19 | py_library( 20 | name = "caption_generator", 21 | srcs = ["caption_generator.py"], 22 | srcs_version = "PY2AND3", 23 | ) 24 | 25 | py_test( 26 | name = "caption_generator_test", 27 | srcs = ["caption_generator_test.py"], 28 | deps = [ 29 | ":caption_generator", 30 | ], 31 | ) 32 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/im2txt/inference_wrapper.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | """Model wrapper class for performing inference with a ShowAndTellModel.""" 17 | 18 | from __future__ import absolute_import 19 | from __future__ import division 20 | from __future__ import print_function 21 | 22 | 23 | 24 | from im2txt import show_and_tell_model 25 | from im2txt.inference_utils import inference_wrapper_base 26 | 27 | 28 | class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase): 29 | """Model wrapper class for performing inference with a ShowAndTellModel.""" 30 | 31 | def __init__(self): 32 | super(InferenceWrapper, self).__init__() 33 | 34 | def build_model(self, model_config): 35 | model = show_and_tell_model.ShowAndTellModel(model_config, mode="inference") 36 | model.build() 37 | return model 38 | 39 | def feed_image(self, sess, encoded_image): 40 | initial_state = sess.run(fetches="lstm/initial_state:0", 41 | feed_dict={"image_feed:0": encoded_image}) 42 | return initial_state 43 | 44 | def inference_step(self, sess, input_feed, state_feed): 45 | softmax_output, state_output = sess.run( 46 | fetches=["softmax:0", "lstm/state:0"], 47 | feed_dict={ 48 | "input_feed:0": input_feed, 49 | "lstm/state_feed:0": state_feed, 50 | }) 51 | return softmax_output, state_output, None 52 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/im2txt/im2txt/ops/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = ["//im2txt:internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | py_library( 8 | name = "image_processing", 9 | srcs = ["image_processing.py"], 10 | srcs_version = "PY2AND3", 11 | ) 12 | 13 | py_library( 14 | name = "image_embedding", 15 | srcs = ["image_embedding.py"], 16 | srcs_version = "PY2AND3", 17 | ) 18 | 19 | py_test( 20 | name = "image_embedding_test", 21 | size = "small", 22 | srcs = ["image_embedding_test.py"], 23 | deps = [ 24 | ":image_embedding", 25 | ], 26 | ) 27 | 28 | py_library( 29 | name = "inputs", 30 | srcs = ["inputs.py"], 31 | srcs_version = "PY2AND3", 32 | ) 33 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/.gitignore: -------------------------------------------------------------------------------- 1 | /bazel-bin 2 | /bazel-ci_build-cache 3 | /bazel-genfiles 4 | /bazel-out 5 | /bazel-inception 6 | /bazel-testlogs 7 | /bazel-tf 8 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/WORKSPACE: -------------------------------------------------------------------------------- 1 | workspace(name = "inception") 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/g3doc/inception_v3_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/inception/g3doc/inception_v3_architecture.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/flowers_data.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Small library that points to the flowers data set. 16 | """ 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | 22 | 23 | from inception.dataset import Dataset 24 | 25 | 26 | class FlowersData(Dataset): 27 | """Flowers data set.""" 28 | 29 | def __init__(self, subset): 30 | super(FlowersData, self).__init__('Flowers', subset) 31 | 32 | def num_classes(self): 33 | """Returns the number of classes in the data set.""" 34 | return 5 35 | 36 | def num_examples_per_epoch(self): 37 | """Returns the number of examples in the data subset.""" 38 | if self.subset == 'train': 39 | return 3170 40 | if self.subset == 'validation': 41 | return 500 42 | 43 | def download_message(self): 44 | """Instruction to download and extract the tarball from Flowers website.""" 45 | 46 | print('Failed to find any Flowers %s files'% self.subset) 47 | print('') 48 | print('If you have already downloaded and processed the data, then make ' 49 | 'sure to set --data_dir to point to the directory containing the ' 50 | 'location of the sharded TFRecords.\n') 51 | print('Please see README.md for instructions on how to build ' 52 | 'the flowers dataset using download_and_preprocess_flowers.\n') 53 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/flowers_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A binary to evaluate Inception on the flowers data set. 
16 | """ 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | 22 | import tensorflow as tf 23 | 24 | from inception import inception_eval 25 | from inception.flowers_data import FlowersData 26 | 27 | FLAGS = tf.app.flags.FLAGS 28 | 29 | 30 | def main(unused_argv=None): 31 | dataset = FlowersData(subset=FLAGS.subset) 32 | assert dataset.data_files() 33 | if tf.gfile.Exists(FLAGS.eval_dir): 34 | tf.gfile.DeleteRecursively(FLAGS.eval_dir) 35 | tf.gfile.MakeDirs(FLAGS.eval_dir) 36 | inception_eval.evaluate(dataset) 37 | 38 | 39 | if __name__ == '__main__': 40 | tf.app.run() 41 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/flowers_train.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A binary to train Inception on the flowers data set. 16 | """ 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | 22 | 23 | import tensorflow as tf 24 | 25 | from inception import inception_train 26 | from inception.flowers_data import FlowersData 27 | 28 | FLAGS = tf.app.flags.FLAGS 29 | 30 | 31 | def main(_): 32 | dataset = FlowersData(subset=FLAGS.subset) 33 | assert dataset.data_files() 34 | if tf.gfile.Exists(FLAGS.train_dir): 35 | tf.gfile.DeleteRecursively(FLAGS.train_dir) 36 | tf.gfile.MakeDirs(FLAGS.train_dir) 37 | inception_train.train(dataset) 38 | 39 | 40 | if __name__ == '__main__': 41 | tf.app.run() 42 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/imagenet_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A binary to evaluate Inception on the flowers data set. 
16 | 17 | Note that using the supplied pre-trained inception checkpoint, the eval should 18 | achieve: 19 | precision @ 1 = 0.7874 recall @ 5 = 0.9436 [50000 examples] 20 | 21 | See the README.md for more details. 22 | """ 23 | from __future__ import absolute_import 24 | from __future__ import division 25 | from __future__ import print_function 26 | 27 | 28 | import tensorflow as tf 29 | 30 | from inception import inception_eval 31 | from inception.imagenet_data import ImagenetData 32 | 33 | FLAGS = tf.app.flags.FLAGS 34 | 35 | 36 | def main(unused_argv=None): 37 | dataset = ImagenetData(subset=FLAGS.subset) 38 | assert dataset.data_files() 39 | if tf.gfile.Exists(FLAGS.eval_dir): 40 | tf.gfile.DeleteRecursively(FLAGS.eval_dir) 41 | tf.gfile.MakeDirs(FLAGS.eval_dir) 42 | inception_eval.evaluate(dataset) 43 | 44 | 45 | if __name__ == '__main__': 46 | tf.app.run() 47 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/imagenet_train.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A binary to train Inception on the ImageNet data set. 16 | """ 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | 22 | 23 | import tensorflow as tf 24 | 25 | from inception import inception_train 26 | from inception.imagenet_data import ImagenetData 27 | 28 | FLAGS = tf.app.flags.FLAGS 29 | 30 | 31 | def main(_): 32 | dataset = ImagenetData(subset=FLAGS.subset) 33 | assert dataset.data_files() 34 | if tf.gfile.Exists(FLAGS.train_dir): 35 | tf.gfile.DeleteRecursively(FLAGS.train_dir) 36 | tf.gfile.MakeDirs(FLAGS.train_dir) 37 | inception_train.train(dataset) 38 | 39 | 40 | if __name__ == '__main__': 41 | tf.app.run() 42 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/inception/inception/slim/slim.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """TF-Slim grouped API. 
Please see README.md for details and usage.""" 16 | # pylint: disable=unused-import 17 | 18 | # Collapse tf-slim into a single namespace. 19 | from inception.slim import inception_model as inception 20 | from inception.slim import losses 21 | from inception.slim import ops 22 | from inception.slim import scopes 23 | from inception.slim import variables 24 | from inception.slim.scopes import arg_scope 25 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/lm_1b/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//lm_1b/...", 11 | ], 12 | ) 13 | 14 | py_library( 15 | name = "data_utils", 16 | srcs = ["data_utils.py"], 17 | ) 18 | 19 | py_binary( 20 | name = "lm_1b_eval", 21 | srcs = [ 22 | "lm_1b_eval.py", 23 | ], 24 | deps = [ 25 | ":data_utils", 26 | ], 27 | ) 28 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/namignizer/.gitignore: -------------------------------------------------------------------------------- 1 | # Remove the pyc files 2 | *.pyc 3 | 4 | # Ignore the model and the data 5 | model/ 6 | data/ 7 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/neural_gpu/README.md: -------------------------------------------------------------------------------- 1 | # NeuralGPU 2 | Code for the Neural GPU model as described 3 | in <http://arxiv.org/abs/1511.08228>. 4 | 5 | Requirements: 6 | * TensorFlow (see tensorflow.org for how to install) 7 | * Matplotlib for Python (sudo apt-get install python-matplotlib) 8 | 9 | The model can be trained on the following algorithmic tasks: 10 | 11 | * `sort` - Sort a symbol list 12 | * `kvsort` - Sort symbol keys in dictionary 13 | * `id` - Return the same symbol list 14 | * `rev` - Reverse a symbol list 15 | * `rev2` - Reverse a symbol dictionary by key 16 | * `incr` - Add one to a symbol value 17 | * `add` - Long decimal addition 18 | * `left` - First symbol in list 19 | * `right` - Last symbol in list 20 | * `left-shift` - Left shift a symbol list 21 | * `right-shift` - Right shift a symbol list 22 | * `bmul` - Long binary multiplication 23 | * `mul` - Long decimal multiplication 24 | * `dup` - Duplicate a symbol list with padding 25 | * `badd` - Long binary addition 26 | * `qadd` - Long quaternary addition 27 | * `search` - Search for symbol key in dictionary 28 | 29 | The value range for symbols is defined by the `niclass` and `noclass` flags. 30 | In particular, the values lie in the range `[0, min(niclass, noclass) - 1]`. 31 | So if you set `--niclass=33` and `--noclass=33` (the default) then `--task=rev` 32 | will reverse lists of 32 symbols, and `--task=id` will be the identity on a 33 | list of up to 32 symbols. 34 | 35 | 36 | To train the model on the reverse task run: 37 | 38 | ``` 39 | python neural_gpu_trainer.py --task=rev 40 | ``` 41 | 42 | While training, interim / checkpoint model parameters will be 43 | written to `/tmp/neural_gpu/`. 44 | 45 | Once the amount of error gets down to what you're comfortable 46 | with, hit `Ctrl-C` to stop the training process. The latest 47 | model parameters will be in `/tmp/neural_gpu/neural_gpu.ckpt-` 48 | and used on any subsequent run.
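For instance (an illustrative invocation, not one from the original docs), shrinking both flags trains the same task on a smaller symbol set; by the range rule above, `--niclass=17 --noclass=17` means `rev` reverses lists of 16 symbols:

```
python neural_gpu_trainer.py --task=rev --niclass=17 --noclass=17
```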
49 | 50 | To test a trained model on how well it decodes run: 51 | 52 | ``` 53 | python neural_gpu_trainer.py --task=rev --mode=1 54 | ``` 55 | 56 | To produce an animation of the result run: 57 | 58 | ``` 59 | python neural_gpu_trainer.py --task=rev --mode=1 --animate=True 60 | ``` 61 | 62 | Maintained by Lukasz Kaiser (lukaszkaiser) 63 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/neural_programmer/README.md: -------------------------------------------------------------------------------- 1 | Implementation of the Neural Programmer model described in https://openreview.net/pdf?id=ry2YOrcge 2 | 3 | Download the data from http://www-nlp.stanford.edu/software/sempre/wikitable/ Change the data_dir FLAG to the location of the data 4 | 5 | Training: python neural_programmer.py 6 | 7 | The models are written to FLAGS.output_dir 8 | 9 | Testing: python neural_programmer.py --evaluator_job=True 10 | 11 | The models are loaded from FLAGS.output_dir. The evaluation is done on development data. 12 | 13 | Maintained by Arvind Neelakantan (arvind2505) 14 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/resnet/BUILD: -------------------------------------------------------------------------------- 1 | package(default_visibility = [":internal"]) 2 | 3 | licenses(["notice"]) # Apache 2.0 4 | 5 | exports_files(["LICENSE"]) 6 | 7 | package_group( 8 | name = "internal", 9 | packages = [ 10 | "//resnet/...", 11 | ], 12 | ) 13 | 14 | filegroup( 15 | name = "py_srcs", 16 | data = glob([ 17 | "**/*.py", 18 | ]), 19 | ) 20 | 21 | py_library( 22 | name = "resnet_model", 23 | srcs = ["resnet_model.py"], 24 | ) 25 | 26 | py_binary( 27 | name = "resnet_main", 28 | srcs = [ 29 | "resnet_main.py", 30 | ], 31 | deps = [ 32 | ":cifar_input", 33 | ":resnet_model", 34 | ], 35 | ) 36 | 37 | py_library( 38 | name = "cifar_input", 39 | srcs = ["cifar_input.py"], 40 | ) 41 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/resnet/g3doc/cifar_resnet.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/resnet/g3doc/cifar_resnet.gif -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/resnet/g3doc/cifar_resnet_legends.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/resnet/g3doc/cifar_resnet_legends.gif -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/slim/.DS_Store -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/._.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/slim/._.DS_Store -------------------------------------------------------------------------------- 
/lib/tf_model_zoo/models/slim/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/datasets/dataset_factory.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A factory-pattern class which returns classification image/label pairs.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | from datasets import cifar10 22 | from datasets import flowers 23 | from datasets import imagenet 24 | from datasets import mnist 25 | 26 | datasets_map = { 27 | 'cifar10': cifar10, 28 | 'flowers': flowers, 29 | 'imagenet': imagenet, 30 | 'mnist': mnist, 31 | } 32 | 33 | 34 | def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None): 35 | """Given a dataset name and a split_name returns a Dataset. 36 | 37 | Args: 38 | name: String, the name of the dataset. 39 | split_name: A train/test split name. 40 | dataset_dir: The directory where the dataset files are stored. 41 | file_pattern: The file pattern to use for matching the dataset source files. 42 | reader: The subclass of tf.ReaderBase. If left as `None`, then the default 43 | reader defined by each dataset is used. 44 | 45 | Returns: 46 | A `Dataset` class. 47 | 48 | Raises: 49 | ValueError: If the dataset `name` is unknown. 50 | """ 51 | if name not in datasets_map: 52 | raise ValueError('Name of dataset unknown %s' % name) 53 | return datasets_map[name].get_split( 54 | split_name, 55 | dataset_dir, 56 | file_pattern, 57 | reader) 58 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/deployment/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/nets/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/nets/inception.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Brings all inception models under one namespace.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | # pylint: disable=unused-import 22 | from nets.inception_resnet_v2 import inception_resnet_v2 23 | from nets.inception_resnet_v2 import inception_resnet_v2_arg_scope 24 | from nets.inception_v1 import inception_v1 25 | from nets.inception_v1 import inception_v1_arg_scope 26 | from nets.inception_v1 import inception_v1_base 27 | from nets.inception_v2 import inception_v2 28 | from nets.inception_v2 import inception_v2_arg_scope 29 | from nets.inception_v2 import inception_v2_base 30 | from nets.inception_v3 import inception_v3 31 | from nets.inception_v3 import inception_v3_arg_scope 32 | from nets.inception_v3 import inception_v3_base 33 | from nets.inception_v4 import inception_v4 34 | from nets.inception_v4 import inception_v4_arg_scope 35 | from nets.inception_v4 import inception_v4_base 36 | # pylint: enable=unused-import 37 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/nets/nets_factory_test.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | # ============================================================================== 15 | 16 | """Tests for slim.inception.""" 17 | 18 | from __future__ import absolute_import 19 | from __future__ import division 20 | from __future__ import print_function 21 | 22 | 23 | import tensorflow as tf 24 | 25 | from nets import nets_factory 26 | 27 | 28 | class NetworksTest(tf.test.TestCase): 29 | 30 | def testGetNetworkFn(self): 31 | batch_size = 5 32 | num_classes = 1000 33 | for net in nets_factory.networks_map: 34 | with self.test_session(): 35 | net_fn = nets_factory.get_network_fn(net, num_classes) 36 | # Most networks use 224 as their default_image_size 37 | image_size = getattr(net_fn, 'default_image_size', 224) 38 | inputs = tf.random_uniform((batch_size, image_size, image_size, 3)) 39 | logits, end_points = net_fn(inputs) 40 | self.assertTrue(isinstance(logits, tf.Tensor)) 41 | self.assertTrue(isinstance(end_points, dict)) 42 | self.assertEqual(logits.get_shape().as_list()[0], batch_size) 43 | self.assertEqual(logits.get_shape().as_list()[-1], num_classes) 44 | 45 | if __name__ == '__main__': 46 | tf.test.main() 47 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/preprocessing/lenet_preprocessing.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Provides utilities for preprocessing.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import tensorflow as tf 22 | 23 | slim = tf.contrib.slim 24 | 25 | 26 | def preprocess_image(image, output_height, output_width, is_training): 27 | """Preprocesses the given image. 28 | 29 | Args: 30 | image: A `Tensor` representing an image of arbitrary size. 31 | output_height: The height of the image after preprocessing. 32 | output_width: The width of the image after preprocessing. 33 | is_training: `True` if we're preprocessing the image for training and 34 | `False` otherwise. 35 | 36 | Returns: 37 | A preprocessed image. 
38 | """ 39 | image = tf.to_float(image) 40 | image = tf.image.resize_image_with_crop_or_pad( 41 | image, output_width, output_height) 42 | image = tf.sub(image, 128.0) 43 | image = tf.div(image, 128.0) 44 | return image 45 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/scripts/train_cifarnet_on_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script performs the following operations: 4 | # 1. Downloads the Cifar10 dataset 5 | # 2. Trains a CifarNet model on the Cifar10 training set. 6 | # 3. Evaluates the model on the Cifar10 testing set. 7 | # 8 | # Usage: 9 | # cd slim 10 | # ./scripts/train_cifar_net_on_mnist.sh 11 | 12 | # Where the checkpoint and logs will be saved to. 13 | TRAIN_DIR=/tmp/cifarnet-model 14 | 15 | # Where the dataset is saved to. 16 | DATASET_DIR=/tmp/cifar10 17 | 18 | # Download the dataset 19 | python download_and_convert_data.py \ 20 | --dataset_name=cifar10 \ 21 | --dataset_dir=${DATASET_DIR} 22 | 23 | # Run training. 24 | python train_image_classifier.py \ 25 | --train_dir=${TRAIN_DIR} \ 26 | --dataset_name=cifar10 \ 27 | --dataset_split_name=train \ 28 | --dataset_dir=${DATASET_DIR} \ 29 | --model_name=cifarnet \ 30 | --preprocessing_name=cifarnet \ 31 | --max_number_of_steps=100000 \ 32 | --batch_size=128 \ 33 | --save_interval_secs=120 \ 34 | --save_summaries_secs=120 \ 35 | --log_every_n_steps=100 \ 36 | --optimizer=sgd \ 37 | --learning_rate=0.1 \ 38 | --learning_rate_decay_factor=0.1 \ 39 | --num_epochs_per_decay=200 \ 40 | --weight_decay=0.004 41 | 42 | # Run evaluation. 43 | python eval_image_classifier.py \ 44 | --checkpoint_path=${TRAIN_DIR} \ 45 | --eval_dir=${TRAIN_DIR} \ 46 | --dataset_name=cifar10 \ 47 | --dataset_split_name=test \ 48 | --dataset_dir=${DATASET_DIR} \ 49 | --model_name=cifarnet 50 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/slim/scripts/train_lenet_on_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script performs the following operations: 4 | # 1. Downloads the MNIST dataset 5 | # 2. Trains a LeNet model on the MNIST training set. 6 | # 3. Evaluates the model on the MNIST testing set. 7 | # 8 | # Usage: 9 | # cd slim 10 | # ./slim/scripts/train_lenet_on_mnist.sh 11 | 12 | # Where the checkpoint and logs will be saved to. 13 | TRAIN_DIR=/tmp/lenet-model 14 | 15 | # Where the dataset is saved to. 16 | DATASET_DIR=/tmp/mnist 17 | 18 | # Download the dataset 19 | python download_and_convert_data.py \ 20 | --dataset_name=mnist \ 21 | --dataset_dir=${DATASET_DIR} 22 | 23 | # Run training. 24 | python train_image_classifier.py \ 25 | --train_dir=${TRAIN_DIR} \ 26 | --dataset_name=mnist \ 27 | --dataset_split_name=train \ 28 | --dataset_dir=${DATASET_DIR} \ 29 | --model_name=lenet \ 30 | --preprocessing_name=lenet \ 31 | --max_number_of_steps=20000 \ 32 | --batch_size=50 \ 33 | --learning_rate=0.01 \ 34 | --save_interval_secs=60 \ 35 | --save_summaries_secs=60 \ 36 | --log_every_n_steps=100 \ 37 | --optimizer=sgd \ 38 | --learning_rate_decay_type=fixed \ 39 | --weight_decay=0 40 | 41 | # Run evaluation. 
42 | python eval_image_classifier.py \ 43 | --checkpoint_path=${TRAIN_DIR} \ 44 | --eval_dir=${TRAIN_DIR} \ 45 | --dataset_name=mnist \ 46 | --dataset_split_name=test \ 47 | --dataset_dir=${DATASET_DIR} \ 48 | --model_name=lenet 49 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/g3doc/avdessapins.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/street/g3doc/avdessapins.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/python/vgsl_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Model eval separate from training.""" 16 | from tensorflow import app 17 | from tensorflow.python.platform import flags 18 | 19 | import vgsl_model 20 | 21 | flags.DEFINE_string('eval_dir', '/tmp/mdir/eval', 22 | 'Directory where to write event logs.') 23 | flags.DEFINE_string('graph_def_file', None, 24 | 'Output eval graph definition file.') 25 | flags.DEFINE_string('train_dir', '/tmp/mdir', 26 | 'Directory where to find training checkpoints.') 27 | flags.DEFINE_string('model_str', 28 | '1,150,600,3[S2(4x150)0,2 Ct5,5,16 Mp2,2 Ct5,5,64 Mp3,3' 29 | '([Lrys64 Lbx128][Lbys64 Lbx128][Lfys64 Lbx128])S3(3x0)2,3' 30 | 'Lfx128 Lrx128 S0(1x4)0,3 Do Lfx256]O1c134', 31 | 'Network description.') 32 | flags.DEFINE_integer('num_steps', 1000, 'Number of steps to run evaluation.') 33 | flags.DEFINE_integer('eval_interval_secs', 60, 34 | 'Time interval between eval runs.') 35 | flags.DEFINE_string('eval_data', None, 'Evaluation data filepattern') 36 | flags.DEFINE_string('decoder', None, 'Charset decoder') 37 | 38 | FLAGS = flags.FLAGS 39 | 40 | 41 | def main(argv): 42 | del argv 43 | vgsl_model.Eval(FLAGS.train_dir, FLAGS.eval_dir, FLAGS.model_str, 44 | FLAGS.eval_data, FLAGS.decoder, FLAGS.num_steps, 45 | FLAGS.graph_def_file, FLAGS.eval_interval_secs) 46 | 47 | 48 | if __name__ == '__main__': 49 | app.run() 50 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/arial-32-tiny: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/street/testdata/arial-32-tiny -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/arial.charset_size=105.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 104 3 | 1 G 4 | 2 r 5 | 3 a 6 | 4 s 7 | 5 l 8 
| 6 n 9 | 7 d 10 | 8 . 11 | 9 B 12 | 10 C 13 | 11 O 14 | 12 W 15 | 13 Y 16 | 14 , 17 | 15 ( 18 | 16 u 19 | 17 z 20 | 18 i 21 | 19 e 22 | 20 ) 23 | 21 1 24 | 22 9 25 | 23 2 26 | 24 - 27 | 25 6 28 | 26 o 29 | 27 L 30 | 28 P 31 | 29 ' 32 | 30 t 33 | 31 m 34 | 32 K 35 | 33 c 36 | 34 k 37 | 35 V 38 | 36 S 39 | 37 D 40 | 38 J 41 | 39 h 42 | 40 M 43 | 41 x 44 | 42 E 45 | 43 q 46 | 44 ; 47 | 45 A 48 | 46 y 49 | 47 f 50 | 48 5 51 | 49 7 52 | 50 b 53 | 51 4 54 | 52 0 55 | 53 3 56 | 54 N 57 | 55 I 58 | 56 T 59 | 57 / 60 | 58 p 61 | 59 w 62 | 60 g 63 | 61 H 64 | 62 “ 65 | 63 F 66 | 62 ” 67 | 62 " 68 | 29 ’ 69 | 64 R 70 | 24 — 71 | 65 8 72 | 66 v 73 | 67 ? 74 | 68 é 75 | 69 % 76 | 70 : 77 | 71 j 78 | 72 \ 79 | 73 { 80 | 74 } 81 | 75 | 82 | 76 U 83 | 77 $ 84 | 78 ° 85 | 79 * 86 | 80 ! 87 | 81 ] 88 | 82 Q 89 | 29 ‘ 90 | 83 Z 91 | 84 X 92 | 85 [ 93 | 86 = 94 | 87 + 95 | 88 § 96 | 89 _ 97 | 90 £ 98 | 91 & 99 | 92 # 100 | 93 > 101 | 94 < 102 | 95 ~ 103 | 96 € 104 | 97 @ 105 | 98 ¢ 106 | 99 » 107 | 100 « 108 | 47,5 fl 109 | 47,18 fi 110 | 101 ® 111 | 102 © 112 | 103 ¥ 113 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/charset_size=134.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 133 3 | 1 l 4 | 2 ’ 5 | 3 é 6 | 4 t 7 | 5 e 8 | 6 i 9 | 7 n 10 | 8 s 11 | 9 x 12 | 10 g 13 | 11 u 14 | 12 o 15 | 13 1 16 | 14 8 17 | 15 7 18 | 16 0 19 | 17 - 20 | 18 . 21 | 19 p 22 | 20 a 23 | 21 r 24 | 22 è 25 | 23 d 26 | 24 c 27 | 25 V 28 | 26 v 29 | 27 b 30 | 28 m 31 | 29 ) 32 | 30 C 33 | 31 z 34 | 32 S 35 | 33 y 36 | 34 , 37 | 35 k 38 | 36 É 39 | 37 A 40 | 38 h 41 | 39 E 42 | 40 » 43 | 41 D 44 | 42 / 45 | 43 H 46 | 44 M 47 | 45 ( 48 | 46 G 49 | 47 P 50 | 48 ç 51 | 2 ' 52 | 49 R 53 | 50 f 54 | 51 " 55 | 52 2 56 | 53 j 57 | 54 | 58 | 55 N 59 | 56 6 60 | 57 ° 61 | 58 5 62 | 59 T 63 | 60 O 64 | 61 U 65 | 62 3 66 | 63 % 67 | 64 9 68 | 65 q 69 | 66 Z 70 | 67 B 71 | 68 K 72 | 69 w 73 | 70 W 74 | 71 : 75 | 72 4 76 | 73 L 77 | 74 F 78 | 75 ] 79 | 76 ï 80 | 2 ‘ 81 | 77 I 82 | 78 J 83 | 79 ä 84 | 80 î 85 | 81 ; 86 | 82 à 87 | 83 ê 88 | 84 X 89 | 85 ü 90 | 86 Y 91 | 87 ô 92 | 88 = 93 | 89 + 94 | 90 \ 95 | 91 { 96 | 92 } 97 | 93 _ 98 | 94 Q 99 | 95 œ 100 | 96 ñ 101 | 97 * 102 | 98 ! 103 | 99 Ü 104 | 51 “ 105 | 100 â 106 | 101 Ç 107 | 102 Œ 108 | 103 û 109 | 104 ? 110 | 105 $ 111 | 106 ë 112 | 107 « 113 | 108 € 114 | 109 & 115 | 110 < 116 | 51 ” 117 | 111 æ 118 | 112 # 119 | 113 ® 120 | 114  121 | 115 È 122 | 116 > 123 | 117 [ 124 | 17 — 125 | 118 Æ 126 | 119 ù 127 | 120 Î 128 | 121 Ô 129 | 122 ÿ 130 | 123 À 131 | 124 Ê 132 | 125 @ 133 | 126 Ï 134 | 127 © 135 | 128 Ë 136 | 129 Ù 137 | 130 £ 138 | 131 Ÿ 139 | 132 Û 140 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/charset_size_10.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 9 3 | 1 a 4 | 2 b 5 | 3 r 6 | 4 n 7 | 4,5 m 8 | 6 f 9 | 7 . 
10 | 8 , 11 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/mnist-tiny: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/street/testdata/mnist-tiny -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/numbers-16-tiny: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/street/testdata/numbers-16-tiny -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/street/testdata/numbers.charset_size=12.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 11 3 | 1 9 4 | 2 8 5 | 3 7 6 | 4 6 7 | 5 1 8 | 6 4 9 | 7 0 10 | 8 3 11 | 9 5 12 | 10 2 13 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/swivel/.gitignore: -------------------------------------------------------------------------------- 1 | *.an.tab 2 | *.pyc 3 | *.ws.tab 4 | MEN.tar.gz 5 | Mtruk.csv 6 | SimLex-999.zip 7 | analogy 8 | fastprep 9 | myz_naacl13_test_set.tgz 10 | questions-words.txt 11 | rw.zip 12 | ws353simrel.tar.gz 13 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/swivel/nearest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2016 Google Inc. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | """Simple tool for inspecting nearest neighbors and analogies.""" 18 | 19 | import re 20 | import sys 21 | from getopt import GetoptError, getopt 22 | 23 | from vecs import Vecs 24 | 25 | try: 26 | opts, args = getopt(sys.argv[1:], 'v:e:', ['vocab=', 'embeddings=']) 27 | except GetoptError, e: 28 | print >> sys.stderr, e 29 | sys.exit(2) 30 | 31 | opt_vocab = 'vocab.txt' 32 | opt_embeddings = None 33 | 34 | for o, a in opts: 35 | if o in ('-v', '--vocab'): 36 | opt_vocab = a 37 | if o in ('-e', '--embeddings'): 38 | opt_embeddings = a 39 | 40 | vecs = Vecs(opt_vocab, opt_embeddings) 41 | 42 | while True: 43 | sys.stdout.write('query> ') 44 | sys.stdout.flush() 45 | 46 | query = sys.stdin.readline().strip() 47 | if not query: 48 | break 49 | 50 | parts = re.split(r'\s+', query) 51 | 52 | if len(parts) == 1: 53 | res = vecs.neighbors(parts[0]) 54 | 55 | elif len(parts) == 3: 56 | vs = [vecs.lookup(w) for w in parts] 57 | if any(v is None for v in vs): 58 | print 'not in vocabulary: %s' % ( 59 | ', '.join(tok for tok, v in zip(parts, vs) if v is None)) 60 | 61 | continue 62 | 63 | res = vecs.neighbors(vs[2] - vs[0] + vs[1]) 64 | 65 | else: 66 | print 'use a single word to query neighbors, or three words for analogy' 67 | continue 68 | 69 | if not res: 70 | continue 71 | 72 | for word, sim in res[:20]: 73 | print '%0.4f: %s' % (sim, word) 74 | 75 | print 76 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/.gitignore: -------------------------------------------------------------------------------- 1 | /bazel-bin 2 | /bazel-genfiles 3 | /bazel-out 4 | /bazel-tensorflow 5 | /bazel-testlogs 6 | /bazel-tf 7 | /bazel-syntaxnet 8 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM java:8 2 | 3 | ENV SYNTAXNETDIR=/opt/tensorflow PATH=$PATH:/root/bin 4 | 5 | RUN mkdir -p $SYNTAXNETDIR \ 6 | && cd $SYNTAXNETDIR \ 7 | && apt-get update \ 8 | && apt-get install git zlib1g-dev file swig python2.7 python-dev python-pip python-mock -y \ 9 | && pip install --upgrade pip \ 10 | && pip install -U protobuf==3.0.0 \ 11 | && pip install asciitree \ 12 | && pip install numpy \ 13 | && wget https://github.com/bazelbuild/bazel/releases/download/0.3.1/bazel-0.3.1-installer-linux-x86_64.sh \ 14 | && chmod +x bazel-0.3.1-installer-linux-x86_64.sh \ 15 | && ./bazel-0.3.1-installer-linux-x86_64.sh --user \ 16 | && git clone --recursive https://github.com/tensorflow/models.git \ 17 | && cd $SYNTAXNETDIR/models/syntaxnet/tensorflow \ 18 | && echo "\n\n\n\n" | ./configure \ 19 | && apt-get autoremove -y \ 20 | && apt-get clean 21 | 22 | RUN cd $SYNTAXNETDIR/models/syntaxnet \ 23 | && bazel test --genrule_strategy=standalone syntaxnet/... util/utf8/... 24 | 25 | WORKDIR $SYNTAXNETDIR/models/syntaxnet 26 | 27 | CMD [ "sh", "-c", "echo 'Bob brought the pizza to Alice.' | syntaxnet/demo.sh" ] 28 | 29 | # COMMANDS to build and run 30 | # =============================== 31 | # mkdir build && cp Dockerfile build/ && cd build 32 | # docker build -t syntaxnet . 
33 | # docker run syntaxnet 34 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/WORKSPACE: -------------------------------------------------------------------------------- 1 | local_repository( 2 | name = "org_tensorflow", 3 | path = "tensorflow", 4 | ) 5 | 6 | load('@org_tensorflow//tensorflow:workspace.bzl', 'tf_workspace') 7 | tf_workspace() 8 | 9 | # Specify the minimum required Bazel version. 10 | load("@org_tensorflow//tensorflow:tensorflow.bzl", "check_version") 11 | check_version("0.3.0") 12 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/beam_search_training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/beam_search_training.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/ff_nn_schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/ff_nn_schematic.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/looping-parser.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/looping-parser.gif -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/sawman.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/sawman.png -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/base.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef SYNTAXNET_BASE_H_ 17 | #define SYNTAXNET_BASE_H_ 18 | 19 | #include <map> 20 | #include <string> 21 | #include <unordered_map> 22 | #include <unordered_set> 23 | #include <vector> 24 | #include "tensorflow/core/lib/core/status.h" 25 | #include "tensorflow/core/lib/strings/strcat.h" 26 | #include "tensorflow/core/lib/strings/stringprintf.h" 27 | #include "tensorflow/core/platform/default/integral_types.h" 28 | #include "tensorflow/core/platform/mutex.h" 29 | #include "tensorflow/core/platform/protobuf.h" 30 | 31 | 32 | 33 | using tensorflow::int32; 34 | using tensorflow::int64; 35 | using tensorflow::uint64; 36 | using tensorflow::uint32; 37 | using tensorflow::uint32; 38 | using tensorflow::protobuf::TextFormat; 39 | using tensorflow::mutex_lock; 40 | using tensorflow::mutex; 41 | using std::map; 42 | using std::pair; 43 | using std::vector; 44 | using std::unordered_map; 45 | using std::unordered_set; 46 | typedef signed int char32; 47 | 48 | using tensorflow::StringPiece; 49 | using std::string; 50 | 51 | // namespace syntaxnet 52 | 53 | #endif // SYNTAXNET_BASE_H_ 54 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2016 Google Inc. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ============================================================================== 16 | 17 | # A script that runs a tokenizer, a part-of-speech tagger and a dependency 18 | # parser on an English text file, with one sentence per line. 19 | # 20 | # Example usage: 21 | # echo "Parsey McParseface is my favorite parser!" | syntaxnet/demo.sh 22 | 23 | # To run on a conll formatted file, add the --conll command line argument.
24 | # 25 | 26 | PARSER_EVAL=bazel-bin/syntaxnet/parser_eval 27 | MODEL_DIR=syntaxnet/models/parsey_mcparseface 28 | [[ "$1" == "--conll" ]] && INPUT_FORMAT=stdin-conll || INPUT_FORMAT=stdin 29 | 30 | $PARSER_EVAL \ 31 | --input=$INPUT_FORMAT \ 32 | --output=stdout-conll \ 33 | --hidden_layer_sizes=64 \ 34 | --arg_prefix=brain_tagger \ 35 | --graph_builder=structured \ 36 | --task_context=$MODEL_DIR/context.pbtxt \ 37 | --model_path=$MODEL_DIR/tagger-params \ 38 | --slim_model \ 39 | --batch_size=1024 \ 40 | --alsologtostderr \ 41 | | \ 42 | $PARSER_EVAL \ 43 | --input=stdin-conll \ 44 | --output=stdout-conll \ 45 | --hidden_layer_sizes=512,512 \ 46 | --arg_prefix=brain_parser \ 47 | --graph_builder=structured \ 48 | --task_context=$MODEL_DIR/context.pbtxt \ 49 | --model_path=$MODEL_DIR/parser-params \ 50 | --slim_model \ 51 | --batch_size=1024 \ 52 | --alsologtostderr \ 53 | | \ 54 | bazel-bin/syntaxnet/conll2tree \ 55 | --task_context=$MODEL_DIR/context.pbtxt \ 56 | --alsologtostderr 57 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/dictionary.proto: -------------------------------------------------------------------------------- 1 | // Protocol buffers for serializing string<=>index dictionaries. 2 | 3 | syntax = "proto2"; 4 | 5 | package syntaxnet; 6 | 7 | // Serializable representation of a string=>string pair. 8 | message StringToStringPair { 9 | // String representing the key. 10 | required string key = 1; 11 | 12 | // String representing the value. 13 | required string value = 2; 14 | } 15 | 16 | // Serializable representation of a string=>string mapping. 17 | message StringToStringMap { 18 | // Key=>value pairs. 19 | repeated StringToStringPair pair = 1; 20 | } 21 | 22 | // Affix table entry, for serialization of the affix tables. 23 | message AffixTableEntry { 24 | // Nested message for serializing a single affix. 25 | message AffixEntry { 26 | // The affix as a string. 27 | required string form = 1; 28 | 29 | // The length of the affix (this is non-trivial to compute due to UTF-8). 30 | required int32 length = 2; 31 | 32 | // The ID of the affix that is one character shorter, or -1 if none exists. 33 | required int32 shorter_id = 3; 34 | } 35 | 36 | // The type of affix table, as a string. 37 | required string type = 1; 38 | 39 | // The maximum affix length. 40 | required int32 max_length = 2; 41 | 42 | // The list of affixes, in order of affix ID. 43 | repeated AffixEntry affix = 3; 44 | } 45 | 46 | // A light-weight proto to store vectors in binary format. 47 | message TokenEmbedding { 48 | required bytes token = 1; // can be word or phrase, or URL, etc. 49 | 50 | // If available, raw count of this token in the training corpus. 51 | optional int64 count = 3; 52 | 53 | message Vector { 54 | repeated float values = 1 [packed = true]; 55 | } 56 | optional Vector vector = 2; 57 | }; 58 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/document_format.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "syntaxnet/document_format.h" 17 | 18 | namespace syntaxnet { 19 | 20 | // Component registry for document formatters. 21 | REGISTER_CLASS_REGISTRY("document format", DocumentFormat); 22 | 23 | } // namespace syntaxnet 24 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/feature_extractor.proto: -------------------------------------------------------------------------------- 1 | // Protocol buffers for feature extractor. 2 | 3 | syntax = "proto2"; 4 | 5 | package syntaxnet; 6 | 7 | message Parameter { 8 | optional string name = 1; 9 | optional string value = 2; 10 | } 11 | 12 | // Descriptor for feature function. 13 | message FeatureFunctionDescriptor { 14 | // Feature function type. 15 | required string type = 1; 16 | 17 | // Feature function name. 18 | optional string name = 2; 19 | 20 | // Default argument for feature function. 21 | optional int32 argument = 3 [default = 0]; 22 | 23 | // Named parameters for feature descriptor. 24 | repeated Parameter parameter = 4; 25 | 26 | // Nested sub-feature function descriptors. 27 | repeated FeatureFunctionDescriptor feature = 7; 28 | }; 29 | 30 | // Descriptor for feature extractor. 31 | message FeatureExtractorDescriptor { 32 | // Top-level feature function for extractor. 33 | repeated FeatureFunctionDescriptor feature = 1; 34 | }; 35 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/load_parser_ops.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | """Loads parser_ops shared library.""" 17 | 18 | import os.path 19 | import tensorflow as tf 20 | 21 | tf.load_op_library( 22 | os.path.join(tf.resource_loader.get_data_files_path(), 23 | 'parser_ops.so')) 24 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/fine-to-universal.map: -------------------------------------------------------------------------------- 1 | # . 2 | $ . 3 | '' . 4 | -LRB- . 5 | -RRB- . 6 | , . 7 | . . 8 | : . 9 | ADD X 10 | AFX PRT 11 | CC CONJ 12 | CD NUM 13 | DT DET 14 | EX DET 15 | FW X 16 | GW X 17 | HYPH . 
18 | IN ADP 19 | JJ ADJ 20 | JJR ADJ 21 | JJS ADJ 22 | LS X 23 | MD VERB 24 | NFP . 25 | NN NOUN 26 | NNP NOUN 27 | NNPS NOUN 28 | NNS NOUN 29 | PDT DET 30 | POS PRT 31 | PRP PRON 32 | PRP$ PRON 33 | RB ADV 34 | RBR ADV 35 | RBS ADV 36 | RP PRT 37 | SYM X 38 | TO PRT 39 | UH X 40 | VB VERB 41 | VBD VERB 42 | VBG VERB 43 | VBN VERB 44 | VBP VERB 45 | VBZ VERB 46 | WDT DET 47 | WP PRON 48 | WP$ PRON 49 | WRB ADV 50 | `` . 51 | X X 52 | XX X 53 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/label-map: -------------------------------------------------------------------------------- 1 | 46 2 | punct 243160 3 | prep 194627 4 | pobj 186958 5 | det 170592 6 | nsubj 144821 7 | nn 144800 8 | amod 117242 9 | ROOT 90592 10 | dobj 88551 11 | aux 76523 12 | advmod 72893 13 | conj 59384 14 | cc 57532 15 | num 36350 16 | poss 35117 17 | dep 34986 18 | ccomp 29470 19 | cop 25991 20 | mark 25141 21 | xcomp 25111 22 | rcmod 16234 23 | auxpass 15740 24 | advcl 14996 25 | possessive 14866 26 | nsubjpass 14133 27 | pcomp 12488 28 | appos 11112 29 | partmod 11106 30 | neg 11090 31 | number 10658 32 | prt 7123 33 | quantmod 6653 34 | tmod 5418 35 | infmod 5134 36 | npadvmod 3213 37 | parataxis 3012 38 | mwe 2793 39 | expl 2712 40 | iobj 1642 41 | acomp 1632 42 | discourse 1381 43 | csubj 1225 44 | predet 1160 45 | preconj 749 46 | goeswith 146 47 | csubjpass 41 48 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/parser-params: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/parser-params -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/prefix-table: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/prefix-table -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/suffix-table: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/suffix-table -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/tag-map: -------------------------------------------------------------------------------- 1 | 49 2 | NN 285194 3 | IN 228165 4 | DT 179147 5 | NNP 175147 6 | JJ 125667 7 | NNS 115732 8 | , 97481 9 | . 
85938 10 | RB 78513 11 | VB 63952 12 | CC 57554 13 | VBD 56635 14 | CD 55674 15 | PRP 55244 16 | VBZ 48126 17 | VBN 44458 18 | VBG 34524 19 | VBP 33669 20 | TO 28772 21 | MD 22364 22 | PRP$ 20706 23 | HYPH 18526 24 | POS 14905 25 | `` 12193 26 | '' 12154 27 | WDT 10267 28 | : 8713 29 | $ 7993 30 | WP 7336 31 | RP 7335 32 | WRB 6634 33 | JJR 6295 34 | NNPS 5917 35 | -RRB- 3904 36 | -LRB- 3840 37 | JJS 3596 38 | RBR 3186 39 | EX 2733 40 | UH 1521 41 | RBS 1467 42 | PDT 1271 43 | FW 928 44 | NFP 844 45 | SYM 652 46 | ADD 476 47 | LS 392 48 | WP$ 332 49 | GW 184 50 | AFX 42 51 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/tagger-params: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_mcparseface/tagger-params -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_universal/context-tokenize-zh.pbtxt: -------------------------------------------------------------------------------- 1 | Parameter { 2 | name: "brain_tokenizer_zh_embedding_dims" 3 | value: "32;32" 4 | } 5 | Parameter { 6 | name: "brain_tokenizer_zh_embedding_names" 7 | value: "chars;words" 8 | } 9 | Parameter { 10 | name: "brain_tokenizer_zh_features" 11 | value: "input.char " 12 | "input(1).char " 13 | "input(2).char " 14 | "input(3).char " 15 | "input(-1).char " 16 | "input(-2).char " 17 | "input(-3).char " 18 | "stack.char " 19 | "stack.offset(1).char " 20 | "stack.offset(-1).char " 21 | "stack(1).char " 22 | "stack(1).offset(1).char " 23 | "stack(1).offset(-1).char " 24 | "stack(2).char; " 25 | "last-word(1,min-freq=2) " 26 | "last-word(2,min-freq=2) " 27 | "last-word(3,min-freq=2)" 28 | } 29 | Parameter { 30 | name: "brain_tokenizer_zh_transition_system" 31 | value: "binary-segment-transitions" 32 | } 33 | input { 34 | name: "word-map" 35 | Part { 36 | file_pattern: "last-word-map" 37 | } 38 | } 39 | input { 40 | name: "char-map" 41 | Part { 42 | file_pattern: "char-map" 43 | } 44 | } 45 | input { 46 | name: "label-map" 47 | Part { 48 | file_pattern: "label-map" 49 | } 50 | } 51 | input { 52 | name: 'stdin-untoken' 53 | record_format: 'untokenized-text' 54 | Part { 55 | file_pattern: '-' 56 | } 57 | } 58 | input { 59 | name: 'stdout-conll' 60 | record_format: 'conll-sentence' 61 | Part { 62 | file_pattern: '-' 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_universal/parse.sh: -------------------------------------------------------------------------------- 1 | # A script that runs a morphological analyzer, a part-of-speech tagger and a 2 | # dependency parser on a text file, with one sentence per line. 
3 | # 4 | # Example usage: 5 | # bazel build syntaxnet:parser_eval 6 | # cat sentences.txt | 7 | # syntaxnet/models/parsey_universal/parse.sh \ 8 | # $MODEL_DIRECTORY > output.conll 9 | # 10 | # To run on a conll formatted file, add the --conll command line argument: 11 | # cat sentences.conll | 12 | # syntaxnet/models/parsey_universal/parse.sh \ 13 | # --conll $MODEL_DIRECTORY > output.conll 14 | # 15 | # Models can be downloaded from 16 | # http://download.tensorflow.org/models/parsey_universal/<language>.zip 17 | # for the languages listed at 18 | # https://github.com/tensorflow/models/blob/master/syntaxnet/universal.md 19 | # 20 | 21 | PARSER_EVAL=bazel-bin/syntaxnet/parser_eval 22 | CONTEXT=syntaxnet/models/parsey_universal/context.pbtxt 23 | if [[ "$1" == "--conll" ]]; then 24 | INPUT_FORMAT=stdin-conll 25 | shift 26 | else 27 | INPUT_FORMAT=stdin 28 | fi 29 | MODEL_DIR=$1 30 | 31 | $PARSER_EVAL \ 32 | --input=$INPUT_FORMAT \ 33 | --output=stdout-conll \ 34 | --hidden_layer_sizes=64 \ 35 | --arg_prefix=brain_morpher \ 36 | --graph_builder=structured \ 37 | --task_context=$CONTEXT \ 38 | --resource_dir=$MODEL_DIR \ 39 | --model_path=$MODEL_DIR/morpher-params \ 40 | --slim_model \ 41 | --batch_size=1024 \ 42 | --alsologtostderr \ 43 | | \ 44 | $PARSER_EVAL \ 45 | --input=stdin-conll \ 46 | --output=stdout-conll \ 47 | --hidden_layer_sizes=64 \ 48 | --arg_prefix=brain_tagger \ 49 | --graph_builder=structured \ 50 | --task_context=$CONTEXT \ 51 | --resource_dir=$MODEL_DIR \ 52 | --model_path=$MODEL_DIR/tagger-params \ 53 | --slim_model \ 54 | --batch_size=1024 \ 55 | --alsologtostderr \ 56 | | \ 57 | $PARSER_EVAL \ 58 | --input=stdin-conll \ 59 | --output=stdout-conll \ 60 | --hidden_layer_sizes=512,512 \ 61 | --arg_prefix=brain_parser \ 62 | --graph_builder=structured \ 63 | --task_context=$CONTEXT \ 64 | --resource_dir=$MODEL_DIR \ 65 | --model_path=$MODEL_DIR/parser-params \ 66 | --slim_model \ 67 | --batch_size=1024 \ 68 | --alsologtostderr 69 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_universal/tokenize.sh: -------------------------------------------------------------------------------- 1 | # A script that runs a tokenizer on a text file with one sentence per line.
2 | # 3 | # Example usage: 4 | # bazel build syntaxnet:parser_eval 5 | # cat untokenized-sentences.txt | 6 | # syntaxnet/models/parsey_universal/tokenize.sh \ 7 | # $MODEL_DIRECTORY > output.conll 8 | # 9 | # Models can be downloaded from 10 | # http://download.tensorflow.org/models/parsey_universal/<language>.zip 11 | # for the languages listed at 12 | # https://github.com/tensorflow/models/blob/master/syntaxnet/universal.md 13 | # 14 | 15 | PARSER_EVAL=bazel-bin/syntaxnet/parser_eval 16 | CONTEXT=syntaxnet/models/parsey_universal/context.pbtxt 17 | INPUT_FORMAT=stdin-untoken 18 | MODEL_DIR=$1 19 | 20 | $PARSER_EVAL \ 21 | --input=$INPUT_FORMAT \ 22 | --output=stdin-untoken \ 23 | --hidden_layer_sizes=128,128 \ 24 | --arg_prefix=brain_tokenizer \ 25 | --graph_builder=greedy \ 26 | --task_context=$CONTEXT \ 27 | --resource_dir=$MODEL_DIR \ 28 | --model_path=$MODEL_DIR/tokenizer-params \ 29 | --batch_size=32 \ 30 | --alsologtostderr \ 31 | --slim_model 32 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/models/parsey_universal/tokenize_zh.sh: -------------------------------------------------------------------------------- 1 | # A script that runs a traditional Chinese tokenizer on a text file with one 2 | # sentence per line. 3 | # 4 | # Example usage: 5 | # bazel build syntaxnet:parser_eval 6 | # cat untokenized-sentences.txt | 7 | # syntaxnet/models/parsey_universal/tokenize_zh.sh \ 8 | # $MODEL_DIRECTORY > output.conll 9 | # 10 | # The traditional Chinese model can be downloaded from 11 | # http://download.tensorflow.org/models/parsey_universal/Chinese.zip 12 | # 13 | 14 | PARSER_EVAL=bazel-bin/syntaxnet/parser_eval 15 | CONTEXT=syntaxnet/models/parsey_universal/context-tokenize-zh.pbtxt 16 | INPUT_FORMAT=stdin-untoken 17 | MODEL_DIR=$1 18 | 19 | $PARSER_EVAL \ 20 | --input=$INPUT_FORMAT \ 21 | --output=stdin-untoken \ 22 | --hidden_layer_sizes=256,256 \ 23 | --arg_prefix=brain_tokenizer_zh \ 24 | --graph_builder=structured \ 25 | --task_context=$CONTEXT \ 26 | --resource_dir=$MODEL_DIR \ 27 | --model_path=$MODEL_DIR/tokenizer-params \ 28 | --batch_size=1024 \ 29 | --alsologtostderr \ 30 | --slim_model 31 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/parser_transitions.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "syntaxnet/parser_transitions.h" 17 | 18 | #include "syntaxnet/parser_state.h" 19 | 20 | namespace syntaxnet { 21 | 22 | // Transition system registry.
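// The macro below creates the global registry through which concrete
// ParserTransitionSystem subclasses are registered and later instantiated
// by name (for example, the "binary-segment-transitions" system referenced
// in context-tokenize-zh.pbtxt above); the registry chaining itself is
// implemented in syntaxnet/registry.cc below.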
23 | REGISTER_CLASS_REGISTRY("transition system", ParserTransitionSystem); 24 | 25 | void ParserTransitionSystem::PerformAction(ParserAction action, 26 | ParserState *state) const { 27 | PerformActionWithoutHistory(action, state); 28 | } 29 | 30 | } // namespace syntaxnet 31 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/registry.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "syntaxnet/registry.h" 17 | 18 | namespace syntaxnet { 19 | 20 | // Global list of all component registries. 21 | RegistryMetadata *global_registry_list = nullptr; 22 | 23 | void RegistryMetadata::Register(RegistryMetadata *registry) { 24 | registry->set_link(global_registry_list); 25 | global_registry_list = registry; 26 | } 27 | 28 | } // namespace syntaxnet 29 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/sentence_batch.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "syntaxnet/sentence_batch.h" 17 | 18 | #include <memory> 19 | #include <string> 20 | #include <vector> 21 | 22 | #include "syntaxnet/task_context.h" 23 | 24 | namespace syntaxnet { 25 | 26 | void SentenceBatch::Init(TaskContext *context) { 27 | reader_.reset(new TextReader(*context->GetInput(input_name_), context)); 28 | size_ = 0; 29 | } 30 | 31 | bool SentenceBatch::AdvanceSentence(int index) { 32 | if (sentences_[index] == nullptr) ++size_; 33 | sentences_[index].reset(); 34 | std::unique_ptr<Sentence> sentence(reader_->Read()); 35 | if (sentence == nullptr) { 36 | --size_; 37 | return false; 38 | } 39 | 40 | // Preprocess the new sentence for the parser state.
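// (size_ tracks the number of non-null slots: it was incremented above when
// this slot was empty, and is decremented again whenever the reader reaches
// end-of-input, so at this point it already counts the sentence stored next.)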
41 | sentences_[index] = std::move(sentence); 42 | return true; 43 | } 44 | 45 | } // namespace syntaxnet 46 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/sparse.proto: -------------------------------------------------------------------------------- 1 | // Protocol for passing around sparse sets of features. 2 | 3 | syntax = "proto2"; 4 | 5 | package syntaxnet; 6 | 7 | // A sparse set of features. 8 | // 9 | // If using SparseStringToIdTransformer, description is required and id should 10 | // be omitted; otherwise, id is required and description optional. 11 | // 12 | // id, weight, and description fields are all aligned if present (ie, any of 13 | // these that are non-empty should have the same # items). If weight is omitted, 14 | // 1.0 is used. 15 | message SparseFeatures { 16 | repeated uint64 id = 1; 17 | repeated float weight = 2; 18 | repeated string description = 3; 19 | }; 20 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/test_main.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // A program with a main that is suitable for unittests, including those 17 | // that also define microbenchmarks. Based on whether the user specified 18 | // the --benchmark_filter flag which specifies which benchmarks to run, 19 | // we will either run benchmarks or run the gtest tests in the program. 
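// For instance (the test binary name here is hypothetical):
//   ./parser_test                    # runs the gtest tests
//   ./parser_test --benchmarks=all   # runs the registered microbenchmarks instead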
20 | 21 | #include "tensorflow/core/platform/platform.h" 22 | #include "tensorflow/core/platform/types.h" 23 | 24 | #if defined(PLATFORM_GOOGLE) || defined(__ANDROID__) 25 | 26 | // main() is supplied by gunit_main 27 | #else 28 | #include "gtest/gtest.h" 29 | #include "tensorflow/core/lib/core/stringpiece.h" 30 | #include "tensorflow/core/platform/test_benchmark.h" 31 | 32 | GTEST_API_ int main(int argc, char **argv) { 33 | std::cout << "Running main() from test_main.cc\n"; 34 | 35 | testing::InitGoogleTest(&argc, argv); 36 | for (int i = 1; i < argc; i++) { 37 | if (tensorflow::StringPiece(argv[i]).starts_with("--benchmarks=")) { 38 | const char *pattern = argv[i] + strlen("--benchmarks="); 39 | tensorflow::testing::Benchmark::Run(pattern); 40 | return 0; 41 | } 42 | } 43 | return RUN_ALL_TESTS(); 44 | } 45 | #endif 46 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/testdata/context.pbtxt: -------------------------------------------------------------------------------- 1 | Parameter { 2 | name: 'brain_parser_embedding_dims' 3 | value: '8;8;8' 4 | } 5 | Parameter { 6 | name: 'brain_parser_features' 7 | value: 'input.token.word input(1).token.word input(2).token.word stack.token.word stack(1).token.word stack(2).token.word;input.tag input(1).tag input(2).tag stack.tag stack(1).tag stack(2).tag;stack.child(1).label stack.child(1).sibling(-1).label stack.child(-1).label stack.child(-1).sibling(1).label' 8 | } 9 | Parameter { 10 | name: 'brain_parser_embedding_names' 11 | value: 'words;tags;labels' 12 | } 13 | input { 14 | name: 'training-corpus' 15 | record_format: 'conll-sentence' 16 | Part { 17 | file_pattern: 'syntaxnet/testdata/mini-training-set' 18 | } 19 | } 20 | input { 21 | name: 'tuning-corpus' 22 | record_format: 'conll-sentence' 23 | Part { 24 | file_pattern: 'syntaxnet/testdata/mini-training-set' 25 | } 26 | } 27 | input { 28 | name: 'parsed-tuning-corpus' 29 | creator: 'brain_parser/greedy' 30 | record_format: 'conll-sentence' 31 | } 32 | input { 33 | name: 'label-map' 34 | file_format: 'text' 35 | Part { 36 | file_pattern: 'OUTPATH/label-map' 37 | } 38 | } 39 | input { 40 | name: 'word-map' 41 | Part { 42 | file_pattern: 'OUTPATH/word-map' 43 | } 44 | } 45 | input { 46 | name: 'lcword-map' 47 | Part { 48 | file_pattern: 'OUTPATH/lcword-map' 49 | } 50 | } 51 | input { 52 | name: 'tag-map' 53 | Part { 54 | file_pattern: 'OUTPATH/tag-map' 55 | } 56 | } 57 | input { 58 | name: 'category-map' 59 | Part { 60 | file_pattern: 'OUTPATH/category-map' 61 | } 62 | } 63 | input { 64 | name: 'char-map' 65 | Part { 66 | file_pattern: 'OUTPATH/char-map' 67 | } 68 | } 69 | input { 70 | name: 'prefix-table' 71 | Part { 72 | file_pattern: 'OUTPATH/prefix-table' 73 | } 74 | } 75 | input { 76 | name: 'suffix-table' 77 | Part { 78 | file_pattern: 'OUTPATH/suffix-table' 79 | } 80 | } 81 | input { 82 | name: 'tag-to-category' 83 | Part { 84 | file_pattern: 'OUTPATH/tag-to-category' 85 | } 86 | } 87 | input { 88 | name: 'stdout' 89 | record_format: 'conll-sentence' 90 | Part { 91 | file_pattern: '-' 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/syntaxnet/workspace.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "syntaxnet/workspace.h" 17 | 18 | #include "tensorflow/core/lib/strings/strcat.h" 19 | 20 | namespace syntaxnet { 21 | 22 | string WorkspaceRegistry::DebugString() const { 23 | string str; 24 | for (auto &it : workspace_names_) { 25 | const string &type_name = workspace_types_.at(it.first); 26 | for (size_t index = 0; index < it.second.size(); ++index) { 27 | const string &workspace_name = it.second[index]; 28 | tensorflow::strings::StrAppend(&str, "\n ", type_name, " :: ", 29 | workspace_name); 30 | } 31 | } 32 | return str; 33 | } 34 | 35 | VectorIntWorkspace::VectorIntWorkspace(int size) : elements_(size) {} 36 | 37 | VectorIntWorkspace::VectorIntWorkspace(int size, int value) 38 | : elements_(size, value) {} 39 | 40 | VectorIntWorkspace::VectorIntWorkspace(const vector<int> &elements) 41 | : elements_(elements) {} 42 | 43 | string VectorIntWorkspace::TypeName() { return "Vector<int>"; } 44 | 45 | VectorVectorIntWorkspace::VectorVectorIntWorkspace(int size) 46 | : elements_(size) {} 47 | 48 | string VectorVectorIntWorkspace::TypeName() { return "VectorVector<int>"; } 49 | 50 | } // namespace syntaxnet 51 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | cc_library( 4 | name = "utf", 5 | srcs = [ 6 | "rune.c", 7 | "runestrcat.c", 8 | "runestrchr.c", 9 | "runestrcmp.c", 10 | "runestrcpy.c", 11 | "runestrdup.c", 12 | "runestrecpy.c", 13 | "runestrlen.c", 14 | "runestrncat.c", 15 | "runestrncmp.c", 16 | "runestrncpy.c", 17 | "runestrrchr.c", 18 | "runestrstr.c", 19 | "runetype.c", 20 | "utfecpy.c", 21 | "utflen.c", 22 | "utfnlen.c", 23 | "utfrrune.c", 24 | "utfrune.c", 25 | "utfutf.c", 26 | ], 27 | hdrs = [ 28 | "runetypebody.c", 29 | "utf.h", 30 | "utfdef.h", 31 | ], 32 | includes = ["."], 33 | visibility = ["//visibility:public"], 34 | ) 35 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/README: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 1998-2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY.
IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrcat.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | Rune* 20 | runestrcat(Rune *s1, const Rune *s2) 21 | { 22 | 23 | runestrcpy((Rune*)runestrchr(s1, 0), s2); 24 | return s1; 25 | } 26 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrchr.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | const 20 | Rune* 21 | runestrchr(const Rune *s, Rune c) 22 | { 23 | Rune c0 = c; 24 | Rune c1; 25 | 26 | if(c == 0) { 27 | while(*s++) 28 | ; 29 | return s-1; 30 | } 31 | 32 | while((c1 = *s++) != 0) 33 | if(c1 == c0) 34 | return s-1; 35 | return 0; 36 | } 37 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrcmp.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies.
4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | int 20 | runestrcmp(const Rune *s1, const Rune *s2) 21 | { 22 | Rune c1, c2; 23 | 24 | for(;;) { 25 | c1 = *s1++; 26 | c2 = *s2++; 27 | if(c1 != c2) { 28 | if(c1 > c2) 29 | return 1; 30 | return -1; 31 | } 32 | if(c1 == 0) 33 | return 0; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrcpy.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | Rune* 20 | runestrcpy(Rune *s1, const Rune *s2) 21 | { 22 | Rune *os1; 23 | 24 | os1 = s1; 25 | while((*s1++ = *s2++) != 0) 26 | ; 27 | return os1; 28 | } 29 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrdup.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include <stdlib.h> 17 | #include "third_party/utf/utf.h" 18 | #include "third_party/utf/utfdef.h" 19 | 20 | Rune* 21 | runestrdup(const Rune *s) 22 | { 23 | Rune *ns; 24 | 25 | ns = (Rune*)malloc(sizeof(Rune)*(runestrlen(s) + 1)); 26 | if(ns == 0) 27 | return 0; 28 | 29 | return runestrcpy(ns, s); 30 | } 31 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrecpy.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | Rune* 20 | runestrecpy(Rune *s1, Rune *es1, const Rune *s2) 21 | { 22 | if(s1 >= es1) 23 | return s1; 24 | 25 | while((*s1++ = *s2++) != 0){ 26 | if(s1 == es1){ 27 | *--s1 = '\0'; 28 | break; 29 | } 30 | } 31 | return s1; 32 | } 33 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrlen.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | long 20 | runestrlen(const Rune *s) 21 | { 22 | 23 | return runestrchr(s, 0) - s; 24 | } 25 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrncat.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies.
4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | Rune* 20 | runestrncat(Rune *s1, const Rune *s2, long n) 21 | { 22 | Rune *os1; 23 | 24 | os1 = s1; 25 | s1 = (Rune*)runestrchr(s1, 0); 26 | while((*s1++ = *s2++) != 0) 27 | if(--n < 0) { 28 | s1[-1] = 0; 29 | break; 30 | } 31 | return os1; 32 | } 33 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrncmp.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | int 20 | runestrncmp(const Rune *s1, const Rune *s2, long n) 21 | { 22 | Rune c1, c2; 23 | 24 | while(n > 0) { 25 | c1 = *s1++; 26 | c2 = *s2++; 27 | n--; 28 | if(c1 != c2) { 29 | if(c1 > c2) 30 | return 1; 31 | return -1; 32 | } 33 | if(c1 == 0) 34 | break; 35 | } 36 | return 0; 37 | } 38 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrncpy.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | Rune* 20 | runestrncpy(Rune *s1, const Rune *s2, long n) 21 | { 22 | int i; 23 | Rune *os1; 24 | 25 | os1 = s1; 26 | for(i = 0; i < n; i++) 27 | if((*s1++ = *s2++) == 0) { 28 | while(++i < n) 29 | *s1++ = 0; 30 | return os1; 31 | } 32 | return os1; 33 | } 34 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrrchr.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | const 20 | Rune* 21 | runestrrchr(const Rune *s, Rune c) 22 | { 23 | const Rune *r; 24 | 25 | if(c == 0) 26 | return runestrchr(s, 0); 27 | r = 0; 28 | while((s = runestrchr(s, c)) != 0) 29 | r = s++; 30 | return r; 31 | } 32 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/runestrstr.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | /* 20 | * Return pointer to first occurrence of s2 in s1, 21 | * 0 if none 22 | */ 23 | const 24 | Rune* 25 | runestrstr(const Rune *s1, const Rune *s2) 26 | { 27 | const Rune *p, *pa, *pb; 28 | int c0, c; 29 | 30 | c0 = *s2; 31 | if(c0 == 0) 32 | return s1; 33 | s2++; 34 | for(p=runestrchr(s1, c0); p; p=runestrchr(p+1, c0)) { 35 | pa = p; 36 | for(pb=s2;; pb++) { 37 | c = *pb; 38 | if(c == 0) 39 | return p; 40 | if(c != *++pa) 41 | break; 42 | } 43 | } 44 | return 0; 45 | } 46 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfdef.h: -------------------------------------------------------------------------------- 1 | #define uchar _utfuchar 2 | #define ushort _utfushort 3 | #define uint _utfuint 4 | #define ulong _utfulong 5 | #define vlong _utfvlong 6 | #define uvlong _utfuvlong 7 | 8 | typedef unsigned char uchar; 9 | typedef unsigned short ushort; 10 | typedef unsigned int uint; 11 | typedef unsigned long ulong; 12 | 13 | #define nelem(x) (sizeof(x)/sizeof((x)[0])) 14 | #define nil ((void*)0) 15 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfecpy.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | char* 20 | utfecpy(char *to, char *e, const char *from) 21 | { 22 | char *end; 23 | 24 | if(to >= e) 25 | return to; 26 | end = (char*)memccpy(to, from, '\0', e - to); 27 | if(end == nil){ 28 | end = e-1; 29 | while(end>to && (*--end&0xC0)==0x80) 30 | ; 31 | *end = '\0'; 32 | }else{ 33 | end--; 34 | } 35 | return end; 36 | } 37 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utflen.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY.
IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | int 20 | utflen(const char *s) 21 | { 22 | int c; 23 | long n; 24 | Rune rune; 25 | 26 | n = 0; 27 | for(;;) { 28 | c = *(uchar*)s; 29 | if(c < Runeself) { 30 | if(c == 0) 31 | return n; 32 | s++; 33 | } else 34 | s += chartorune(&rune, s); 35 | n++; 36 | } 37 | return 0; 38 | } 39 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfnlen.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | int 20 | utfnlen(const char *s, long m) 21 | { 22 | int c; 23 | long n; 24 | Rune rune; 25 | const char *es; 26 | 27 | es = s + m; 28 | for(n = 0; s < es; n++) { 29 | c = *(uchar*)s; 30 | if(c < Runeself){ 31 | if(c == '\0') 32 | break; 33 | s++; 34 | continue; 35 | } 36 | if(!fullrune(s, es-s)) 37 | break; 38 | s += chartorune(&rune, s); 39 | } 40 | return n; 41 | } 42 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfrrune.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | const 20 | char* 21 | utfrrune(const char *s, Rune c) 22 | { 23 | long c1; 24 | Rune r; 25 | const char *s1; 26 | 27 | if(c < Runesync) /* not part of utf sequence */ 28 | return strrchr(s, c); 29 | 30 | s1 = 0; 31 | for(;;) { 32 | c1 = *(uchar*)s; 33 | if(c1 < Runeself) { /* one byte rune */ 34 | if(c1 == 0) 35 | return s1; 36 | if(c1 == c) 37 | s1 = s; 38 | s++; 39 | continue; 40 | } 41 | c1 = chartorune(&r, s); 42 | if(r == c) 43 | s1 = s; 44 | s += c1; 45 | } 46 | return 0; 47 | } 48 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfrune.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. 13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | const 20 | char* 21 | utfrune(const char *s, Rune c) 22 | { 23 | long c1; 24 | Rune r; 25 | int n; 26 | 27 | if(c < Runesync) /* not part of utf sequence */ 28 | return strchr(s, c); 29 | 30 | for(;;) { 31 | c1 = *(uchar*)s; 32 | if(c1 < Runeself) { /* one byte rune */ 33 | if(c1 == 0) 34 | return 0; 35 | if(c1 == c) 36 | return s; 37 | s++; 38 | continue; 39 | } 40 | n = chartorune(&r, s); 41 | if(r == c) 42 | return s; 43 | s += n; 44 | } 45 | return 0; 46 | } 47 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/third_party/utf/utfutf.c: -------------------------------------------------------------------------------- 1 | /* 2 | * The authors of this software are Rob Pike and Ken Thompson. 3 | * Copyright (c) 2002 by Lucent Technologies. 4 | * Permission to use, copy, modify, and distribute this software for any 5 | * purpose without fee is hereby granted, provided that this entire notice 6 | * is included in all copies of any software which is or includes a copy 7 | * or modification of this software and in all copies of the supporting 8 | * documentation for such software. 9 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED 10 | * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY 11 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY 12 | * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
13 | */ 14 | #include <stdarg.h> 15 | #include <string.h> 16 | #include "third_party/utf/utf.h" 17 | #include "third_party/utf/utfdef.h" 18 | 19 | 20 | /* 21 | * Return pointer to first occurrence of s2 in s1, 22 | * 0 if none 23 | */ 24 | const 25 | char* 26 | utfutf(const char *s1, const char *s2) 27 | { 28 | const char *p; 29 | long f, n1, n2; 30 | Rune r; 31 | 32 | n1 = chartorune(&r, s2); 33 | f = r; 34 | if(f <= Runesync) /* represents self */ 35 | return strstr(s1, s2); 36 | 37 | n2 = strlen(s2); 38 | for(p=s1; (p=utfrune(p, f)) != 0; p+=n1) 39 | if(strncmp(p, s2, n2) == 0) 40 | return p; 41 | return 0; 42 | } 43 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/tools/bazel.rc: -------------------------------------------------------------------------------- 1 | build:cuda --crosstool_top=//third_party/gpus/crosstool 2 | 3 | build --define=use_fast_cpp_protos=true 4 | build --define=allow_oversize_protos=true 5 | build --copt -funsigned-char 6 | build -c opt 7 | 8 | build --spawn_strategy=standalone 9 | test --spawn_strategy=standalone 10 | run --spawn_strategy=standalone 11 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/util/utf8/BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) 2 | 3 | # Requires --copt -funsigned-char when compiling (unsigned chars). 4 | 5 | cc_library( 6 | name = "unicodetext", 7 | srcs = [ 8 | "unicodetext.cc", 9 | "unilib.cc", 10 | ], 11 | hdrs = [ 12 | "unicodetext.h", 13 | "unilib.h", 14 | "unilib_utf8_utils.h", 15 | ], 16 | visibility = ["//visibility:public"], 17 | deps = [ 18 | "//syntaxnet:base", 19 | "//third_party/utf", 20 | ], 21 | ) 22 | 23 | cc_test( 24 | name = "unicodetext_unittest", 25 | srcs = [ 26 | "gtest_main.cc", 27 | "unicodetext_unittest.cc", 28 | ], 29 | deps = [ 30 | "@org_tensorflow//tensorflow/core:testlib", 31 | ":unicodetext", 32 | ], 33 | ) 34 | 35 | cc_binary( 36 | name = "unicodetext_main", 37 | srcs = ["unicodetext_main.cc"], 38 | deps = [":unicodetext"], 39 | ) 40 | -------------------------------------------------------------------------------- /lib/tf_model_zoo/models/syntaxnet/util/utf8/gtest_main.cc: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2010 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | // Author: sligocki@google.com (Shawn Ligocki) 18 | // 19 | // Build all tests with this main to run all tests.
20 | 
21 | #include "gtest/gtest.h"
22 | 
23 | int main(int argc, char **argv) {
24 |     ::testing::InitGoogleTest(&argc, argv);
25 |     return RUN_ALL_TESTS();
26 | }
27 | 
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/syntaxnet/util/utf8/unicodetext_main.cc:
--------------------------------------------------------------------------------
1 | /**
2 |  * Copyright 2010 Google Inc.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  * http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | // Author: sligocki@google.com (Shawn Ligocki)
18 | //
19 | // A basic main function to test that UnicodeText builds.
20 | 
21 | #include <stdio.h>
22 | #include <stdlib.h>
23 | 
24 | #include <string>
25 | 
26 | #include "util/utf8/unicodetext.h"
27 | 
28 | int main(int argc, char** argv) {
29 |     if (argc > 1) {
30 |         printf("Bytes:\n");
31 |         std::string bytes(argv[1]);
32 |         for (std::string::const_iterator iter = bytes.begin();
33 |              iter < bytes.end(); ++iter) {
34 |             // Cast to unsigned char so bytes >= 0x80 are not sign-extended.
35 |             printf(" 0x%02X\n", static_cast<unsigned char>(*iter));
36 |         }
37 | 
38 |         printf("Unicode codepoints:\n");
39 |         UnicodeText text(UTF8ToUnicodeText(bytes));
40 |         for (UnicodeText::const_iterator iter = text.begin();
41 |              iter < text.end(); ++iter) {
42 |             printf(" U+%X\n", *iter);
43 |         }
44 |     }
45 |     return EXIT_SUCCESS;
46 | }
47 | 
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/syntaxnet/util/utf8/unilib.cc:
--------------------------------------------------------------------------------
1 | /**
2 |  * Copyright 2010 Google Inc.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  * http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | 
17 | // Author: sligocki@google.com (Shawn Ligocki)
18 | 
19 | #include "util/utf8/unilib.h"
20 | 
21 | #include "syntaxnet/base.h"
22 | #include "third_party/utf/utf.h"
23 | 
24 | namespace UniLib {
25 | 
26 | // Codepoints not allowed for interchange are:
27 | //   C0 (ASCII) controls: U+0000 to U+001F excluding Space (SP, U+0020),
28 | //     Horizontal Tab (HT, U+0009), Line-Feed (LF, U+000A),
29 | //     Form Feed (FF, U+000C) and Carriage-Return (CR, U+000D)
30 | //   C1 controls: U+007F to U+009F
31 | //   Surrogates: U+D800 to U+DFFF
32 | //   Non-characters: U+FDD0 to U+FDEF and U+xxFFFE to U+xxFFFF for all xx
33 | bool IsInterchangeValid(char32 c) {
34 |     return !((c >= 0x00 && c <= 0x08) || c == 0x0B || (c >= 0x0E && c <= 0x1F) ||
35 |              (c >= 0x7F && c <= 0x9F) ||
36 |              (c >= 0xD800 && c <= 0xDFFF) ||
37 |              (c >= 0xFDD0 && c <= 0xFDEF) || (c&0xFFFE) == 0xFFFE);
38 | }
39 | 
40 | int SpanInterchangeValid(const char* begin, int byte_length) {
41 |     char32 rune;
42 |     const char* p = begin;
43 |     const char* end = begin + byte_length;
44 |     while (p < end) {
45 |         int bytes_consumed = charntorune(&rune, p, end - p);
46 |         // We want to accept Runeerror == U+FFFD as a valid char, but it is used
47 |         // by chartorune to indicate error. Luckily, the real codepoint is size 3
48 |         // while errors return bytes_consumed <= 1.
49 |         if ((rune == Runeerror && bytes_consumed <= 1) ||
50 |             !IsInterchangeValid(rune)) {
51 |             break;  // Found an interchange-invalid codepoint; the span ends here.
52 |         }
53 |         p += bytes_consumed;
54 |     }
55 |     return p - begin;
56 | }
57 | 
58 | }  // namespace UniLib
59 | 
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/textsum/BUILD:
--------------------------------------------------------------------------------
1 | package(default_visibility = [":internal"])
2 | 
3 | licenses(["notice"])  # Apache 2.0
4 | 
5 | exports_files(["LICENSE"])
6 | 
7 | package_group(
8 |     name = "internal",
9 |     packages = [
10 |         "//textsum/...",
11 |     ],
12 | )
13 | 
14 | py_library(
15 |     name = "seq2seq_attention_model",
16 |     srcs = ["seq2seq_attention_model.py"],
17 |     deps = [
18 |         ":seq2seq_lib",
19 |     ],
20 | )
21 | 
22 | py_library(
23 |     name = "seq2seq_lib",
24 |     srcs = ["seq2seq_lib.py"],
25 | )
26 | 
27 | py_binary(
28 |     name = "seq2seq_attention",
29 |     srcs = ["seq2seq_attention.py"],
30 |     deps = [
31 |         ":batch_reader",
32 |         ":data",
33 |         ":seq2seq_attention_decode",
34 |         ":seq2seq_attention_model",
35 |     ],
36 | )
37 | 
38 | py_library(
39 |     name = "batch_reader",
40 |     srcs = ["batch_reader.py"],
41 |     deps = [
42 |         ":data",
43 |         ":seq2seq_attention_model",
44 |     ],
45 | )
46 | 
47 | py_library(
48 |     name = "beam_search",
49 |     srcs = ["beam_search.py"],
50 | )
51 | 
52 | py_library(
53 |     name = "seq2seq_attention_decode",
54 |     srcs = ["seq2seq_attention_decode.py"],
55 |     deps = [
56 |         ":beam_search",
57 |         ":data",
58 |     ],
59 | )
60 | 
61 | py_library(
62 |     name = "data",
63 |     srcs = ["data.py"],
64 | )
65 | 
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/textsum/data/data:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sunnyxiaohu/R-C3D.pytorch/e8731af7b95f1dc934f6604f9c09e3c4ead74db5/lib/tf_model_zoo/models/textsum/data/data
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/transformer/README.md:
--------------------------------------------------------------------------------
1 | # Spatial Transformer Network
2 | 
3 | The Spatial
Transformer Network [1] allows the spatial manipulation of data within the network. 4 | 5 |
6 | *(figure: spatial transformer illustration; image omitted from this dump)*
7 | 
8 | 
9 | ### API
10 | 
11 | A Spatial Transformer Network implemented in TensorFlow 0.7 and based on [2].
12 | 
13 | #### How to use
14 | 
15 | 
16 | *(figure: usage illustration; image omitted from this dump)*
17 | 
18 | 
19 | ```python
20 | transformer(U, theta, out_size)
21 | ```
22 | 
23 | #### Parameters
24 | 
25 | U : float
26 |     The output of a convolutional net should have the
27 |     shape [num_batch, height, width, num_channels].
28 | theta : float
29 |     The output of the
30 |     localisation network should be [num_batch, 6].
31 | out_size : tuple of two ints
32 |     The size of the output of the network.
33 | 
34 | 
35 | #### Notes
36 | To initialize the network to the identity transform, initialize ``theta`` to:
37 | 
38 | ```python
39 | identity = np.array([[1., 0., 0.],
40 |                      [0., 1., 0.]])
41 | identity = identity.flatten()
42 | theta = tf.Variable(initial_value=identity)
43 | ```
44 | 
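For concreteness, a minimal sketch of how `U`, `theta`, and `out_size` fit together. This example is not from the repo: the single-layer localisation network, the shapes, and the `tf.placeholder`-style graph API are illustrative assumptions; only `transformer` itself comes from this module.

```python
import numpy as np
import tensorflow as tf

num_batch, height, width, num_channels = 32, 40, 40, 1
U = tf.placeholder(tf.float32, [num_batch, height, width, num_channels])

# Localisation network: here just one zero-initialized linear layer that
# regresses the 6 affine parameters; the identity bias makes the module
# start out as the identity transform, as the Notes above recommend.
identity = np.array([[1., 0., 0.],
                     [0., 1., 0.]], dtype=np.float32).flatten()
W_loc = tf.Variable(tf.zeros([height * width * num_channels, 6]))
b_loc = tf.Variable(initial_value=identity)
theta = tf.matmul(tf.reshape(U, [num_batch, -1]), W_loc) + b_loc

# Sample a 20x20 output from U under the predicted affine transform.
h_trans = transformer(U, theta, out_size=(20, 20))
```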
45 | #### Experiments
46 | 
47 | 
48 | *(figure: cluttered MNIST results; image omitted from this dump)*
49 | 
50 | 
51 | We used cluttered MNIST. The left column shows the input images; the right column shows the parts of the image attended to by the STN.
52 | 
53 | All experiments were run in TensorFlow 0.7.
54 | 
55 | ### References
56 | 
57 | [1] Jaderberg, Max, et al. "Spatial Transformer Networks." arXiv preprint arXiv:1506.02025 (2015)
58 | 
59 | [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
60 | 
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/transformer/data/README.md:
--------------------------------------------------------------------------------
1 | ### How to get the data
2 | 
3 | #### Cluttered MNIST
4 | 
5 | The cluttered MNIST dataset can be found here [1] or can be generated via [2].
6 | 
7 | Settings used for `cluttered_mnist.py`:
8 | 
9 | ```python
10 | 
11 | ORG_SHP = [28, 28]
12 | OUT_SHP = [40, 40]
13 | NUM_DISTORTIONS = 8
14 | dist_size = (5, 5)
15 | 
16 | ```
17 | 
18 | [1] https://github.com/daviddao/spatial-transformer-tensorflow
19 | 
20 | [2] https://github.com/skaae/recurrent-spatial-transformer-code/blob/master/MNIST_SEQUENCE/create_mnist_sequence.py
--------------------------------------------------------------------------------
/lib/tf_model_zoo/models/video_prediction/download_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ==============================================================================
16 | 
17 | 
18 | # Example:
19 | #
20 | #   download_data.sh datafiles.txt ./tmp
21 | #
22 | # will download all of the files listed in the file, datafiles.txt, into
23 | # a directory, "./tmp".
24 | #
25 | # Each line of the datafiles.txt file should contain the path from the
26 | # bucket root to a file.
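# For instance, a listing file containing the hypothetical line
#
#   push/push_train/push_train.tfrecord-00000-of-00264
#
# would make the loop below fetch that path from the
# https://storage.googleapis.com/brain-robotics-data bucket into
# "${OUTPUT_DIR}/push/push_train/push_train.tfrecord-00000-of-00264".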
27 | 
28 | ARGC="$#"
29 | LISTING_FILE=push_datafiles.txt
30 | if [ "${ARGC}" -ge 1 ]; then
31 |     LISTING_FILE=$1
32 | fi
33 | OUTPUT_DIR="./"
34 | if [ "${ARGC}" -ge 2 ]; then
35 |     OUTPUT_DIR=$2
36 | fi
37 | 
38 | echo "OUTPUT_DIR=$OUTPUT_DIR"
39 | 
40 | mkdir "${OUTPUT_DIR}"
41 | 
42 | function download_file {
43 |     FILE=$1
44 |     BUCKET="https://storage.googleapis.com/brain-robotics-data"
45 |     URL="${BUCKET}/${FILE}"
46 |     OUTPUT_FILE="${OUTPUT_DIR}/${FILE}"
47 |     DIRECTORY=`dirname ${OUTPUT_FILE}`
48 |     echo DIRECTORY=$DIRECTORY
49 |     mkdir -p "${DIRECTORY}"
50 |     curl --output ${OUTPUT_FILE} ${URL}
51 | }
52 | 
53 | while read filename; do
54 |     download_file $filename
55 | done <${LISTING_FILE}
56 | 
--------------------------------------------------------------------------------
/preprocess/activitynet/download_video.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # R-C3D
3 | # Copyright (c) 2017 Boston University
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Huijuan Xu
6 | # --------------------------------------------------------
7 | 
8 | import json
9 | import os
10 | 
11 | annotation_file = open('activity_net.v1-3.min.json')
12 | annotation = json.load(annotation_file)
13 | 
14 | video_database = annotation['database']
15 | videos = annotation['database'].keys()
16 | 
17 | # Download the ActivityNet videos into the ./videos folder
18 | command1 = 'mkdir '+'videos'
19 | os.system(command1)
20 | 
21 | for i in videos:
22 |     url = video_database[i]['url']
23 |     command3 = 'youtube-dl -o '+'videos/'+i+' '+url
24 |     print(command3)
25 |     os.system(command3)
26 | 
27 | 
28 | 
29 | 
--------------------------------------------------------------------------------
/preprocess/charades/generate_frames.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # --------------------------------------------------------
3 | # R-C3D
4 | # Copyright (c) 2017 Boston University
5 | # Licensed under The MIT License [see LICENSE for details]
6 | # Written by Huijuan Xu
7 | # --------------------------------------------------------
8 | 
9 | import os
10 | from util import *
11 | import json
12 | import glob
13 | 
14 | fps = 25
15 | ext = '.mp4'
16 | VIDEO_DIR = '/media/agwang/新加卷/DataSets/Charades/Charades_v1_480'  # '新加卷' is a Windows drive label ("New Volume")
17 | FRAME_DIR = '/media/agwang/03c94b1e-c46c-4c7b-8d3f-47e316fdee74/home/ksnzh/Videos/action-datasets/Charades'
18 | 
19 | META_DIR = os.path.join(VIDEO_DIR, '../Charades_meta')
20 | 
21 | def generate_frame(split, keep_empty):
22 |     SUB_FRAME_DIR = os.path.join(FRAME_DIR, split)
23 |     mkdir(SUB_FRAME_DIR)
24 |     segment = dataset_label_parser(META_DIR, split, keep_empty)
25 |     video_list = segment.keys()
26 | 
27 |     for vid in video_list:
28 |         filename = os.path.join(VIDEO_DIR, vid+ext)
29 |         outpath = os.path.join(FRAME_DIR, split, vid)
30 |         outfile = os.path.join(outpath, "image_%5d.jpg")
31 |         mkdir(outpath)
32 |         ffmpeg(filename, outfile, fps)
33 |         for framename in os.listdir(outpath):
34 |             resize(os.path.join(outpath, framename))
35 |         frame_size = len(os.listdir(outpath))
36 |         print(filename, fps, frame_size)
37 | 
38 | generate_frame('train', keep_empty=False)
39 | generate_frame('test', keep_empty=True)
40 | #generate_frame('testing')
41 | 
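Both generate_frames.py preprocessing scripts import `mkdir`, `ffmpeg`, `resize`, and `dataset_label_parser` from a `util` module that is not included in this listing. A rough sketch of what the first three helpers presumably look like; the bodies, the resize target, and the use of the ffmpeg CLI are assumptions, not the repo's actual code:

```python
import os
import subprocess

import cv2  # opencv-python, already listed in requirements.txt


def mkdir(path):
    # Create the directory if it does not already exist.
    if not os.path.exists(path):
        os.makedirs(path)


def ffmpeg(filename, outfile, fps):
    # Decode the video at a fixed frame rate into numbered JPEGs; `outfile`
    # is a pattern such as ".../image_%5d.jpg", as used by the callers above.
    subprocess.call(['ffmpeg', '-i', filename, '-r', str(fps), outfile])


def resize(framename, size=(171, 128)):
    # Rescale one extracted frame in place. The 171x128 target is a guess:
    # C3D-style pipelines usually resize to 171x128 before taking 112x112 crops.
    img = cv2.imread(framename)
    if img is not None:
        cv2.imwrite(framename, cv2.resize(img, size))
```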
--------------------------------------------------------------------------------
/preprocess/thumos14/generate_frames.py:
--------------------------------------------------------------------------------
1 | #coding=utf-8
2 | # --------------------------------------------------------
3 | # R-C3D
4 | # Copyright (c) 2017 Boston University
5 | # Licensed under The MIT License [see LICENSE for details]
6 | # Written by Huijuan Xu
7 | # --------------------------------------------------------
8 | 
9 | import os
10 | from util import *
11 | import json
12 | import glob
13 | 
14 | fps = 25
15 | ext = '.mp4'
16 | VIDEO_DIR = '/media/G/DataSets/THUMOS'
17 | FRAME_DIR = '/media/F/THUMOS14'
18 | 
19 | META_DIR = os.path.join(FRAME_DIR, 'annotation_')
20 | 
21 | def generate_frame(split):
22 |     SUB_FRAME_DIR = os.path.join(FRAME_DIR, split)
23 |     mkdir(SUB_FRAME_DIR)
24 |     segment = dataset_label_parser(META_DIR+split, split, use_ambiguous=True)
25 |     video_list = segment.keys()
26 |     for vid in video_list:
27 |         filename = os.path.join(VIDEO_DIR, split, vid+ext)
28 |         outpath = os.path.join(FRAME_DIR, split, vid)
29 |         outfile = os.path.join(outpath, "image_%5d.jpg")
30 |         mkdir(outpath)
31 |         ffmpeg(filename, outfile, fps)
32 |         for framename in os.listdir(outpath):
33 |             resize(os.path.join(outpath, framename))
34 |         frame_size = len(os.listdir(outpath))
35 |         print(filename, fps, frame_size)
36 | 
37 | generate_frame('val')
38 | #generate_frame('test')
39 | #generate_frame('testing')
40 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cython
2 | cffi
3 | opencv-python
4 | scipy
5 | easydict
6 | matplotlib
7 | pyyaml
8 | 
--------------------------------------------------------------------------------
/script_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 | NET_DIR=$1
4 | EX_DIR=$2
5 | FRAMERATE=25
6 | array=( $@ )
7 | len=${#array[@]}
8 | EXTRA_ARGS=${array[@]:2:$len}
9 | 
10 | export PYTHONUNBUFFERED=true
11 | 
12 | LOG="output/${NET_DIR}/${EX_DIR}/test_log.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
13 | exec &> >(tee -a "$LOG")
14 | echo Logging output to "$LOG"
15 | 
16 | python ./test_net.py \
17 |     --net ${NET_DIR} \
18 |     --dataset ${EX_DIR} \
19 |     --cuda \
20 |     ${EXTRA_ARGS}
21 | 
22 | #evaluation
23 | python ./evaluation/${EX_DIR}/${EX_DIR}_log_analysis.py $LOG --framerate ${FRAMERATE}
24 | 
--------------------------------------------------------------------------------
/script_train.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 | NET_DIR=$1
4 | DATASET=$2
5 | 
6 | array=( $@ )
7 | len=${#array[@]}
8 | EXTRA_ARGS=${array[@]:2:$len}
9 | 
10 | export PYTHONUNBUFFERED=true
11 | #checkpoint 30103 checkepoch 3 --r true --lr_decay_step 10 --epochs 12
12 | LOG="output/${NET_DIR}/${DATASET}/train_log.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
13 | exec &> >(tee -a "$LOG")
14 | echo Logging output to "$LOG"
15 | 
16 | python ./trainval_net.py \
17 |     --net ${NET_DIR} \
18 |     --dataset ${DATASET} \
19 |     ${EXTRA_ARGS}
20 | 
21 | #2>&1 | tee $LOG
22 | 
23 | 
--------------------------------------------------------------------------------
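For orientation, a typical invocation of the two wrapper scripts above. This is a hypothetical session: the backbone and dataset names are inferred from the YAML files under cfgs/ (for example c3d_thumos14.yml), and the trailing flags are only the ones hinted at in script_train.sh's own comment; everything after the two positional arguments is forwarded untouched to trainval_net.py / test_net.py.

```bash
# Train a C3D backbone on THUMOS'14, then evaluate a saved epoch.
# Logs land in output/<net>/<dataset>/, which presumably must already exist.
bash script_train.sh c3d thumos14 --epochs 12 --lr_decay_step 10
bash script_test.sh  c3d thumos14 --checkepoch 3
```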