├── .bazelrc ├── .github └── issue_template.md ├── .gitignore ├── .gitmodules ├── AUTHORS ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── RELEASE.md ├── WORKSPACE ├── dockerfiles └── Dockerfile.base-py3.6-cu116-ubuntu18.04 ├── tensorflow_serving ├── BUILD ├── apis │ ├── BUILD │ ├── classification.proto │ ├── classifier.h │ ├── get_model_metadata.proto │ ├── get_model_status.proto │ ├── inference.proto │ ├── input.proto │ ├── internal │ │ ├── BUILD │ │ └── serialized_input.proto │ ├── model.proto │ ├── model_management.proto │ ├── model_service.proto │ ├── model_service_pb2.py │ ├── model_service_pb2_grpc.py │ ├── predict.proto │ ├── prediction_log.proto │ ├── prediction_service.proto │ ├── prediction_service_pb2.py │ ├── prediction_service_pb2_grpc.py │ ├── regression.proto │ ├── regressor.h │ └── session_service.proto ├── batching │ ├── BUILD │ ├── README.md │ ├── batch_scheduler_retrier.h │ ├── batch_scheduler_retrier_test.cc │ ├── batching_session.cc │ ├── batching_session.h │ ├── batching_session_test.cc │ ├── batching_util.cc │ ├── batching_util.h │ ├── batching_util_test.cc │ ├── streaming_batch_scheduler.cc │ ├── streaming_batch_scheduler.h │ ├── streaming_batch_scheduler_test.cc │ ├── test_util │ │ ├── BUILD │ │ ├── matrix_half_plus_two_saved_model.py │ │ ├── puppet_batch_scheduler.h │ │ └── puppet_batch_scheduler_test.cc │ └── testdata │ │ ├── BUILD │ │ └── matrix_half_plus_two │ │ └── 1 │ │ └── saved_model.pb ├── config │ ├── BUILD │ ├── log_collector_config.proto │ ├── logging_config.proto │ ├── model_server_config.proto │ ├── monitoring_config.proto │ ├── platform_config.proto │ └── ssl_config.proto ├── core │ ├── BUILD │ ├── README.md │ ├── aspired_version_policy.cc │ ├── aspired_version_policy.h │ ├── aspired_version_policy_test.cc │ ├── aspired_versions_manager.cc │ ├── aspired_versions_manager.h │ ├── aspired_versions_manager_benchmark.cc │ ├── aspired_versions_manager_builder.cc │ ├── aspired_versions_manager_builder.h │ ├── 
aspired_versions_manager_builder_test.cc │ ├── aspired_versions_manager_test.cc │ ├── availability_preserving_policy.cc │ ├── availability_preserving_policy.h │ ├── availability_preserving_policy_test.cc │ ├── basic_manager.cc │ ├── basic_manager.h │ ├── basic_manager_test.cc │ ├── caching_manager.cc │ ├── caching_manager.h │ ├── caching_manager_test.cc │ ├── dynamic_source_router.h │ ├── dynamic_source_router_test.cc │ ├── load_servables_fast.cc │ ├── load_servables_fast.h │ ├── loader.h │ ├── loader_harness.cc │ ├── loader_harness.h │ ├── loader_harness_test.cc │ ├── log_collector.cc │ ├── log_collector.h │ ├── log_collector_test.cc │ ├── logging.proto │ ├── manager.h │ ├── manager_test.cc │ ├── manager_wrapper.cc │ ├── manager_wrapper.h │ ├── request_logger.cc │ ├── request_logger.h │ ├── request_logger_test.cc │ ├── resource_preserving_policy.cc │ ├── resource_preserving_policy.h │ ├── resource_preserving_policy_test.cc │ ├── servable_data.h │ ├── servable_data_test.cc │ ├── servable_handle.h │ ├── servable_id.h │ ├── servable_id_test.cc │ ├── servable_state.h │ ├── servable_state_monitor.cc │ ├── servable_state_monitor.h │ ├── servable_state_monitor_test.cc │ ├── server_request_logger.cc │ ├── server_request_logger.h │ ├── server_request_logger_test.cc │ ├── simple_loader.h │ ├── simple_loader_test.cc │ ├── source.h │ ├── source_adapter.h │ ├── source_adapter_test.cc │ ├── source_router.h │ ├── source_router_test.cc │ ├── static_manager.cc │ ├── static_manager.h │ ├── static_manager_test.cc │ ├── static_source_router.h │ ├── static_source_router_test.cc │ ├── storage_path.h │ ├── storage_path_test.cc │ ├── target.h │ └── test_util │ │ ├── BUILD │ │ ├── availability_test_util.cc │ │ ├── availability_test_util.h │ │ ├── fake_loader.cc │ │ ├── fake_loader.h │ │ ├── fake_loader_source_adapter.cc │ │ ├── fake_loader_source_adapter.h │ │ ├── fake_loader_source_adapter.proto │ │ ├── fake_log_collector.h │ │ ├── fake_storage_path_source_adapter.cc │ │ ├── 
fake_storage_path_source_adapter.h │ │ ├── manager_test_util.cc │ │ ├── manager_test_util.h │ │ ├── mock_loader.h │ │ ├── mock_log_collector.h │ │ ├── mock_request_logger.h │ │ ├── mock_server_request_logger.h │ │ ├── mock_session.h │ │ ├── mock_storage_path_target.h │ │ ├── servable_handle_test_util.h │ │ ├── session_test_util.cc │ │ ├── session_test_util.h │ │ └── test_main.cc ├── example │ ├── BUILD │ ├── config_files │ │ ├── README │ │ ├── models.config │ │ ├── platform_config │ │ ├── session_group_multi_models_config │ │ └── session_group_multi_models_platform_config │ ├── mnist_client.py │ ├── mnist_input_data.py │ ├── mnist_saved_model.py │ ├── resnet_client.cc │ ├── resnet_client.py │ ├── resnet_client_grpc.py │ ├── resnet_k8s.yaml │ └── resnet_warmup.py ├── g3doc │ ├── _toc.yaml │ ├── api_rest.md │ ├── architecture.md │ ├── building_with_docker.md │ ├── custom_op.md │ ├── custom_servable.md │ ├── custom_source.md │ ├── docker.md │ ├── images │ │ ├── serving_architecture.svg │ │ └── tf_diagram.svg │ ├── saved_model_warmup.md │ ├── serving_advanced.md │ ├── serving_basic.md │ ├── serving_config.md │ ├── serving_kubernetes.md │ ├── setup.md │ ├── signature_defs.md │ └── tutorials │ │ ├── README.md │ │ └── Serving_REST_simple.ipynb ├── model_servers │ ├── BUILD │ ├── get_model_status_impl.cc │ ├── get_model_status_impl.h │ ├── get_model_status_impl_test.cc │ ├── grpc_status_util.cc │ ├── grpc_status_util.h │ ├── http_rest_api_handler.cc │ ├── http_rest_api_handler.h │ ├── http_rest_api_handler_test.cc │ ├── http_server.cc │ ├── http_server.h │ ├── main.cc │ ├── model_platform_types.h │ ├── model_service_impl.cc │ ├── model_service_impl.h │ ├── platform_config_util.cc │ ├── platform_config_util.h │ ├── prediction_service_impl.cc │ ├── prediction_service_impl.h │ ├── server.cc │ ├── server.h │ ├── server_core.cc │ ├── server_core.h │ ├── server_core_test.cc │ ├── tensorflow_model_server_beta_grpc_test.py │ ├── tensorflow_model_server_test.py │ ├── 
tensorflow_model_server_test_client.py │ ├── test_util │ │ ├── BUILD │ │ ├── mock_server_core.h │ │ ├── server_core_test_util.cc │ │ ├── server_core_test_util.h │ │ ├── storage_path_error_injecting_source_adapter.cc │ │ ├── storage_path_error_injecting_source_adapter.h │ │ └── storage_path_error_injecting_source_adapter.proto │ ├── version.cc │ └── version.h ├── repo.bzl ├── resources │ ├── BUILD │ ├── resource_tracker.cc │ ├── resource_tracker.h │ ├── resource_tracker_test.cc │ ├── resource_util.cc │ ├── resource_util.h │ ├── resource_util_test.cc │ ├── resource_values.cc │ ├── resource_values.h │ └── resources.proto ├── servables │ ├── hashmap │ │ ├── BUILD │ │ ├── hashmap_source_adapter.cc │ │ ├── hashmap_source_adapter.h │ │ ├── hashmap_source_adapter.proto │ │ └── hashmap_source_adapter_test.cc │ └── tensorflow │ │ ├── BUILD │ │ ├── bundle_factory_test.h │ │ ├── bundle_factory_test_util.cc │ │ ├── bundle_factory_test_util.h │ │ ├── bundle_factory_util.cc │ │ ├── bundle_factory_util.h │ │ ├── bundle_factory_util_test.cc │ │ ├── classification_service.cc │ │ ├── classification_service.h │ │ ├── classification_service_test.cc │ │ ├── classifier.cc │ │ ├── classifier.h │ │ ├── classifier_test.cc │ │ ├── curried_session.cc │ │ ├── curried_session.h │ │ ├── curried_session_test.cc │ │ ├── get_model_metadata_impl.cc │ │ ├── get_model_metadata_impl.h │ │ ├── get_model_metadata_impl_test.cc │ │ ├── multi_inference.cc │ │ ├── multi_inference.h │ │ ├── multi_inference_helper.cc │ │ ├── multi_inference_helper.h │ │ ├── multi_inference_helper_test.cc │ │ ├── multi_inference_test.cc │ │ ├── predict_impl.cc │ │ ├── predict_impl.h │ │ ├── predict_impl_test.cc │ │ ├── predict_util.cc │ │ ├── predict_util.h │ │ ├── predict_util_test.cc │ │ ├── regression_service.cc │ │ ├── regression_service.h │ │ ├── regression_service_test.cc │ │ ├── regressor.cc │ │ ├── regressor.h │ │ ├── regressor_test.cc │ │ ├── saved_model_bundle_factory.cc │ │ ├── saved_model_bundle_factory.h │ │ ├── 
saved_model_bundle_factory_test.cc │ │ ├── saved_model_bundle_source_adapter.cc │ │ ├── saved_model_bundle_source_adapter.h │ │ ├── saved_model_bundle_source_adapter.proto │ │ ├── saved_model_bundle_source_adapter_test.cc │ │ ├── saved_model_bundle_v2_factory.cc │ │ ├── saved_model_bundle_v2_factory.h │ │ ├── saved_model_bundle_v2_source_adapter.cc │ │ ├── saved_model_bundle_v2_source_adapter.h │ │ ├── saved_model_warmup.cc │ │ ├── saved_model_warmup.h │ │ ├── saved_model_warmup_test.cc │ │ ├── serving_session.cc │ │ ├── serving_session.h │ │ ├── session_bundle_config.proto │ │ ├── session_bundle_factory.cc │ │ ├── session_bundle_factory.h │ │ ├── session_bundle_factory_test.cc │ │ ├── session_bundle_source_adapter.cc │ │ ├── session_bundle_source_adapter.h │ │ ├── session_bundle_source_adapter.proto │ │ ├── session_bundle_source_adapter_test.cc │ │ ├── simple_servers.cc │ │ ├── simple_servers.h │ │ ├── simple_servers_test.cc │ │ ├── testdata │ │ ├── BUILD │ │ ├── bad_half_plus_two │ │ │ └── 00000123 │ │ │ │ ├── checkpoint │ │ │ │ ├── export │ │ │ │ └── export.meta │ │ ├── bad_model_config.txt │ │ ├── batching_config.txt │ │ ├── export_bad_half_plus_two.py │ │ ├── export_counter.py │ │ ├── export_half_plus_two.py │ │ ├── good_model_config.txt │ │ ├── half_plus_two │ │ │ └── 00000123 │ │ │ │ ├── export.data-00000-of-00001 │ │ │ │ ├── export.index │ │ │ │ └── export.meta │ │ ├── half_plus_two_2_versions │ │ │ ├── 00000123 │ │ │ │ ├── export.data-00000-of-00001 │ │ │ │ ├── export.index │ │ │ │ └── export.meta │ │ │ └── 00000124 │ │ │ │ ├── export.data-00000-of-00001 │ │ │ │ ├── export.index │ │ │ │ └── export.meta │ │ ├── half_plus_two_model_metadata.json │ │ ├── monitoring_config.txt │ │ ├── saved_model_counter │ │ │ └── 00000123 │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ ├── saved_model_half_plus_three │ │ │ └── 00000123 │ │ │ │ ├── assets │ │ │ │ └── foo.txt │ │ │ │ ├── saved_model.pb │ 
│ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ ├── saved_model_half_plus_two.py │ │ ├── saved_model_half_plus_two_2_versions │ │ │ ├── 00000123 │ │ │ │ ├── assets │ │ │ │ │ └── foo.txt │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ │ └── variables.index │ │ │ └── 00000124 │ │ │ │ ├── assets │ │ │ │ └── foo.txt │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ ├── saved_model_half_plus_two_2_versions_metadata.json │ │ ├── saved_model_half_plus_two_cpu │ │ │ └── 00000123 │ │ │ │ ├── assets │ │ │ │ └── foo.txt │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ ├── saved_model_half_plus_two_gpu │ │ │ └── 00000123 │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ ├── saved_model_half_plus_two_gpu_trt │ │ │ └── 00000123 │ │ │ │ └── saved_model.pb │ │ ├── saved_model_half_plus_two_mkl │ │ │ └── 00000123 │ │ │ │ ├── assets │ │ │ │ └── foo.txt │ │ │ │ ├── saved_model.pb │ │ │ │ └── variables │ │ │ │ ├── variables.data-00000-of-00001 │ │ │ │ └── variables.index │ │ └── saved_model_half_plus_two_tflite │ │ │ └── 00000123 │ │ │ └── model.tflite │ │ ├── tflite_session.cc │ │ ├── tflite_session.h │ │ ├── tflite_session_test.cc │ │ ├── util.cc │ │ ├── util.h │ │ └── util_test.cc ├── serving.bzl ├── sources │ └── storage_path │ │ ├── BUILD │ │ ├── file_system_storage_path_source.cc │ │ ├── file_system_storage_path_source.h │ │ ├── file_system_storage_path_source.proto │ │ ├── file_system_storage_path_source_test.cc │ │ ├── static_storage_path_source.cc │ │ ├── static_storage_path_source.h │ │ ├── static_storage_path_source.proto │ │ └── static_storage_path_source_test.cc ├── tensorflow_version.bzl ├── test_util │ ├── BUILD │ ├── test_util.cc │ └── test_util.h ├── 
tools │ ├── docker │ │ ├── Dockerfile │ │ ├── Dockerfile.devel │ │ ├── Dockerfile.devel-gpu │ │ ├── Dockerfile.devel-mkl │ │ ├── Dockerfile.gpu │ │ ├── Dockerfile.mkl │ │ ├── README.md │ │ └── tests │ │ │ ├── BUILD │ │ │ ├── docker_test_lib.sh │ │ │ ├── dockerfile_devel_gpu_test.sh │ │ │ ├── dockerfile_devel_gpu_trt_test.sh │ │ │ ├── dockerfile_devel_mkl_test.sh │ │ │ ├── dockerfile_devel_test.sh │ │ │ ├── dockerfile_gpu_test.sh │ │ │ ├── dockerfile_gpu_trt_test.sh │ │ │ ├── dockerfile_mkl_test.sh │ │ │ └── dockerfile_test.sh │ └── pip_package │ │ ├── BUILD │ │ ├── build_pip_package.sh │ │ └── setup.py ├── util │ ├── BUILD │ ├── any_ptr.h │ ├── any_ptr_test.cc │ ├── class_registration.h │ ├── class_registration_test.cc │ ├── class_registration_test.proto │ ├── class_registration_util.cc │ ├── class_registration_util.h │ ├── cleanup.h │ ├── cleanup_test.cc │ ├── event_bus.h │ ├── event_bus_test.cc │ ├── executor.h │ ├── fast_read_dynamic_ptr.h │ ├── fast_read_dynamic_ptr_benchmark.cc │ ├── fast_read_dynamic_ptr_test.cc │ ├── file_probing_env.cc │ ├── file_probing_env.h │ ├── hash.cc │ ├── hash.h │ ├── inline_executor.cc │ ├── inline_executor.h │ ├── inline_executor_test.cc │ ├── json_tensor.cc │ ├── json_tensor.h │ ├── json_tensor_test.cc │ ├── net_http │ │ ├── README.md │ │ ├── client │ │ │ ├── BUILD │ │ │ ├── README.md │ │ │ ├── evhttp_connection.cc │ │ │ ├── evhttp_connection.h │ │ │ └── testing │ │ │ │ ├── BUILD │ │ │ │ └── evhttp_echo_client.cc │ │ ├── compression │ │ │ ├── BUILD │ │ │ ├── README.md │ │ │ ├── gzip_zlib.cc │ │ │ ├── gzip_zlib.h │ │ │ └── gzip_zlib_test.cc │ │ ├── internal │ │ │ ├── BUILD │ │ │ ├── fixed_thread_pool.h │ │ │ ├── net_logging.cc │ │ │ ├── net_logging.h │ │ │ └── testing │ │ │ │ ├── BUILD │ │ │ │ └── net_logging_example.cc │ │ ├── server │ │ │ ├── internal │ │ │ │ ├── BUILD │ │ │ │ ├── evhttp_request.cc │ │ │ │ ├── evhttp_request.h │ │ │ │ ├── evhttp_request_test.cc │ │ │ │ ├── evhttp_server.cc │ │ │ │ ├── evhttp_server.h │ │ │ │ ├── 
evhttp_server_test.cc │ │ │ │ └── server_support.h │ │ │ ├── public │ │ │ │ ├── BUILD │ │ │ │ ├── header_names.cc │ │ │ │ ├── header_names.h │ │ │ │ ├── httpserver.h │ │ │ │ ├── httpserver_interface.h │ │ │ │ ├── response_code_enum.h │ │ │ │ └── server_request_interface.h │ │ │ └── testing │ │ │ │ ├── BUILD │ │ │ │ └── evhttp_echo_server.cc │ │ └── socket │ │ │ └── testing │ │ │ ├── BUILD │ │ │ ├── ev_fetch_client.cc │ │ │ └── ev_print_req_server.cc │ ├── observer.h │ ├── observer_test.cc │ ├── optional.cc │ ├── optional.h │ ├── optional_test.cc │ ├── prometheus_exporter.cc │ ├── prometheus_exporter.h │ ├── prometheus_exporter_test.cc │ ├── retrier.cc │ ├── retrier.h │ ├── retrier_test.cc │ ├── status.proto │ ├── status_util.cc │ ├── status_util.h │ ├── status_util_test.cc │ ├── test_util │ │ ├── BUILD │ │ └── mock_file_probing_env.h │ ├── threadpool_executor.cc │ ├── threadpool_executor.h │ ├── threadpool_executor_test.cc │ ├── tracer.h │ ├── unique_ptr_with_deps.h │ └── unique_ptr_with_deps_test.cc └── workspace.bzl ├── third_party ├── apr1 │ ├── BUILD │ ├── libapr1.BUILD │ └── libapr1.patch ├── aprutil1 │ ├── BUILD │ ├── libaprutil1.BUILD │ └── libaprutil1.patch ├── aws_util │ ├── BUILD │ ├── aws_c_cal.BUILD │ ├── aws_c_common.BUILD │ ├── aws_c_event_stream.BUILD │ ├── aws_c_io.BUILD │ └── aws_checksums.BUILD ├── expat │ ├── BUILD │ └── libexpat.BUILD ├── libevent │ └── BUILD ├── mxml │ ├── BUILD │ ├── mxml.BUILD │ └── mxml.patch ├── oss_c_sdk │ ├── BUILD │ └── oss_c_sdk.BUILD └── rapidjson │ └── BUILD └── tools ├── gen_status_stamp.sh └── run_in_docker.sh /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | /bazel-bin 3 | /bazel-ci_build-cache 4 | /bazel-genfiles 5 | /bazel-out 6 | /bazel-serving 7 | /bazel-tensorflow 8 | /bazel-tensorflow_serving 9 | /bazel-testlogs 10 | /bazel-tf 11 | /bazel-workspace 12 | /third_party/py/numpy/numpy_include 13 | /util/python/python_include 14 | 
/util/python/python_lib 15 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/.gitmodules -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of TensorFlow Serving authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | 5 | # Names should be added to this file as: 6 | # Name or Organization 7 | # The email address is not required for organizations. 8 | 9 | Google Inc. 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | ## How to become a contributor and submit your own code 4 | 5 | ### Contributor License Agreements 6 | 7 | We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles. 8 | 9 | Please fill out either the individual or corporate Contributor License Agreement (CLA). 10 | 11 | * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). 12 | * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). 13 | 14 | Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests. 
15 | 16 | ***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the main repository. 17 | 18 | ### Contributing code 19 | 20 | If you have improvements to TensorFlow Serving, send us your pull requests! 21 | For those just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/). 22 | 23 | If you want to contribute but you're not sure where to start, take a look at the 24 | [issues with the "contributions welcome" label](https://github.com/tensorflow/serving/labels/contributions%20welcome). 25 | These are issues that we believe are particularly well suited for outside 26 | contributions, often because we probably won't get to them right now. If you 27 | decide to start on an issue, leave a comment so that other people know that 28 | you're working on it. If you want to help out, but not alone, use the issue 29 | comment thread to coordinate. 30 | -------------------------------------------------------------------------------- /dockerfiles/Dockerfile.base-py3.6-cu116-ubuntu18.04: -------------------------------------------------------------------------------- 1 | FROM alideeprec/deeprec-base:deeprec-base-gpu-py36-cu116-ubuntu18.04 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y automake 5 | RUN apt-get install -y libtool 6 | 7 | RUN ln -s /usr/bin/python /home/pai/bin/python 8 | -------------------------------------------------------------------------------- /tensorflow_serving/BUILD: -------------------------------------------------------------------------------- 1 | # Description: Tensorflow Serving. 
2 | 3 | package( 4 | default_visibility = ["//tensorflow_serving:internal"], 5 | ) 6 | 7 | licenses(["notice"]) # Apache 2.0 8 | 9 | exports_files(["LICENSE"]) 10 | 11 | # open source marker; do not remove 12 | 13 | package_group( 14 | name = "internal", 15 | packages = [ 16 | "//tensorflow_serving/...", 17 | ], 18 | ) 19 | 20 | filegroup( 21 | name = "all_files", 22 | srcs = glob( 23 | ["**/*"], 24 | exclude = [ 25 | "**/METADATA", 26 | "**/OWNERS", 27 | "g3doc/sitemap.md", 28 | ], 29 | ), 30 | ) 31 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/classification.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow_serving/apis/input.proto"; 6 | import "tensorflow_serving/apis/model.proto"; 7 | 8 | package tensorflow.serving; 9 | 10 | // A single class. 11 | message Class { 12 | // Label or name of the class. 13 | string label = 1; 14 | // Score for this class (e.g., the probability the item belongs to this 15 | // class). As per the proto3 default-value semantics, if the score is missing, 16 | // it should be treated as 0. 17 | float score = 2; 18 | } 19 | 20 | // List of classes for a single item (tensorflow.Example). 21 | message Classifications { 22 | repeated Class classes = 1; 23 | } 24 | 25 | // Contains one result per input example, in the same order as the input in 26 | // ClassificationRequest. 27 | message ClassificationResult { 28 | repeated Classifications classifications = 1; 29 | } 30 | 31 | // RPC Interfaces 32 | 33 | message ClassificationRequest { 34 | // Model Specification. If version is not specified, will use the latest 35 | // (numerical) version. 36 | ModelSpec model_spec = 1; 37 | 38 | // Input data. 39 | tensorflow.serving.Input input = 2; 40 | } 41 | 42 | message ClassificationResponse { 43 | // Effective Model Specification used for classification. 
44 | ModelSpec model_spec = 2; 45 | 46 | // Result of the classification. 47 | ClassificationResult result = 1; 48 | } 49 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/classifier.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ 17 | #define TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ 18 | 19 | #include "tensorflow/core/lib/core/status.h" 20 | #include "tensorflow_serving/apis/classification.pb.h" 21 | 22 | namespace tensorflow { 23 | namespace serving { 24 | 25 | /// Model-type agnostic interface for performing classification. 26 | /// 27 | /// Specific implementations will exist for different model types 28 | /// (e.g. TensorFlow SavedModel) that can convert the request into a model 29 | /// specific input and know how to convert the output into a generic 30 | /// ClassificationResult. 31 | class ClassifierInterface { 32 | public: 33 | /// Given a ClassificationRequest, populates the ClassificationResult with the 34 | /// result. 35 | /// 36 | /// @param request Input request specifying the model/signature to query 37 | /// along with the data payload. 
38 | /// @param result The output classifications that will get populated. 39 | /// @return A status object indicating success or failure. 40 | virtual Status Classify(const ClassificationRequest& request, 41 | ClassificationResult* result) = 0; 42 | 43 | virtual ~ClassifierInterface() = default; 44 | }; 45 | 46 | } // namespace serving 47 | } // namespace tensorflow 48 | 49 | #endif // TENSORFLOW_SERVING_APIS_CLASSIFIER_H_ 50 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/get_model_metadata.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "google/protobuf/any.proto"; 7 | import "tensorflow/core/protobuf/meta_graph.proto"; 8 | import "tensorflow_serving/apis/model.proto"; 9 | 10 | // Message returned for "signature_def" field. 11 | message SignatureDefMap { 12 | map signature_def = 1; 13 | }; 14 | 15 | message GetModelMetadataRequest { 16 | // Model Specification indicating which model we are querying for metadata. 17 | // If version is not specified, will use the latest (numerical) version. 18 | ModelSpec model_spec = 1; 19 | // Metadata fields to get. Currently supported: "signature_def". 20 | repeated string metadata_field = 2; 21 | } 22 | 23 | message GetModelMetadataResponse { 24 | // Model Specification indicating which model this metadata belongs to. 25 | ModelSpec model_spec = 1; 26 | // Map of metadata field name to metadata field. The options for metadata 27 | // field name are listed in GetModelMetadataRequest. Currently supported: 28 | // "signature_def". 29 | map metadata = 2; 30 | } 31 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/internal/BUILD: -------------------------------------------------------------------------------- 1 | # Internal implementation details of serving APIs. 
2 | 3 | package( 4 | default_visibility = [ 5 | "//tensorflow_serving:internal", 6 | ], 7 | features = ["-layering_check"], 8 | ) 9 | 10 | licenses(["notice"]) # Apache 2.0 11 | 12 | load("//tensorflow_serving:serving.bzl", "serving_proto_library") 13 | 14 | serving_proto_library( 15 | name = "serialized_input_proto", 16 | srcs = ["serialized_input.proto"], 17 | cc_api_version = 2, 18 | visibility = [ 19 | "//tensorflow_serving:internal", 20 | "@org_tensorflow//tensorflow_ranking/google:__pkg__", 21 | ], 22 | deps = [ 23 | ], 24 | ) 25 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/internal/serialized_input.proto: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Serialized counterparts of the messages in input.proto. These protos enable 17 | // us to keep the original tensorflow.serving.Input's structure but with the 18 | // tensorflow.Examples in their serialized form. When combined with lazy 19 | // parsing, this improves performance by allowing us to skip a redundant 20 | // deserialization/serialization loop. 21 | // 22 | // WARNING: These are internal implementation details and not part of the public 23 | // API. 
24 | 25 | syntax = "proto3"; 26 | 27 | option cc_enable_arenas = true; 28 | 29 | package tensorflow.serving.internal; 30 | 31 | message SerializedExampleList { 32 | repeated bytes examples = 1; 33 | } 34 | 35 | message SerializedExampleListWithContext { 36 | repeated bytes examples = 1; 37 | bytes context = 2; 38 | } 39 | 40 | message SerializedInput { 41 | oneof kind { 42 | SerializedExampleList example_list = 1; 43 | SerializedExampleListWithContext example_list_with_context = 2; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/model.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "google/protobuf/wrappers.proto"; 7 | 8 | // Metadata for an inference request such as the model name and version. 9 | message ModelSpec { 10 | // Required servable name. 11 | string name = 1; 12 | 13 | // Optional choice of which version of the model to use. 14 | // 15 | // Recommended to be left unset in the common case. Should be specified only 16 | // when there is a strong version consistency requirement. 17 | // 18 | // When left unspecified, the system will serve the best available version. 19 | // This is typically the latest version, though during version transitions, 20 | // notably when serving on a fleet of instances, may be either the previous or 21 | // new version. 22 | oneof version_choice { 23 | // Use this specific version number. 24 | google.protobuf.Int64Value version = 2; 25 | 26 | // Use the version associated with the given label. 27 | string version_label = 4; 28 | } 29 | 30 | // A named signature to evaluate. If unspecified, the default signature will 31 | // be used. 
32 | string signature_name = 3; 33 | } 34 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/model_management.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "tensorflow_serving/config/model_server_config.proto"; 4 | import "tensorflow_serving/util/status.proto"; 5 | 6 | package tensorflow.serving; 7 | option cc_enable_arenas = true; 8 | 9 | message ReloadConfigRequest { 10 | ModelServerConfig config = 1; 11 | } 12 | 13 | message ReloadConfigResponse { 14 | StatusProto status = 1; 15 | } 16 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/model_service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow_serving/apis/get_model_status.proto"; 6 | import "tensorflow_serving/apis/model_management.proto"; 7 | 8 | package tensorflow.serving; 9 | 10 | // ModelService provides methods to query and update the state of the server, 11 | // e.g. which models/versions are being served. 12 | service ModelService { 13 | // Gets status of model. If the ModelSpec in the request does not specify 14 | // version, information about all versions of the model will be returned. If 15 | // the ModelSpec in the request does specify a version, the status of only 16 | // that version will be returned. 17 | rpc GetModelStatus(GetModelStatusRequest) returns (GetModelStatusResponse); 18 | 19 | // Reloads the set of served models. The new config supersedes the old one, 20 | // so if a model is omitted from the new config it will be unloaded and no 21 | // longer served. 
22 | rpc HandleReloadConfigRequest(ReloadConfigRequest) 23 | returns (ReloadConfigResponse); 24 | } 25 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/predict.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "tensorflow/core/framework/tensor.proto"; 7 | import "tensorflow_serving/apis/model.proto"; 8 | 9 | // PredictRequest specifies which TensorFlow model to run, as well as 10 | // how inputs are mapped to tensors and how outputs are filtered before 11 | // returning to user. 12 | message PredictRequest { 13 | // Model Specification. If version is not specified, will use the latest 14 | // (numerical) version. 15 | ModelSpec model_spec = 1; 16 | 17 | // Input tensors. 18 | // Names of input tensor are alias names. The mapping from aliases to real 19 | // input tensor names is stored in the SavedModel export as a prediction 20 | // SignatureDef under the 'inputs' field. 21 | map inputs = 2; 22 | 23 | // Output filter. 24 | // Names specified are alias names. The mapping from aliases to real output 25 | // tensor names is stored in the SavedModel export as a prediction 26 | // SignatureDef under the 'outputs' field. 27 | // Only tensors specified here will be run/fetched and returned, with the 28 | // exception that when none is specified, all tensors specified in the 29 | // named signature will be run/fetched and returned. 30 | repeated string output_filter = 3; 31 | } 32 | 33 | // Response for PredictRequest on successful run. 34 | message PredictResponse { 35 | // Effective Model Specification used to process PredictRequest. 36 | ModelSpec model_spec = 2; 37 | 38 | // Output tensors. 
39 | map<string, TensorProto> outputs = 1; 40 | } 41 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/prediction_log.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow_serving/apis/classification.proto"; 6 | import "tensorflow_serving/apis/inference.proto"; 7 | import "tensorflow_serving/apis/predict.proto"; 8 | import "tensorflow_serving/apis/regression.proto"; 9 | import "tensorflow_serving/apis/session_service.proto"; 10 | import "tensorflow_serving/core/logging.proto"; 11 | 12 | package tensorflow.serving; 13 | 14 | message ClassifyLog { 15 | ClassificationRequest request = 1; 16 | ClassificationResponse response = 2; 17 | } 18 | 19 | message RegressLog { 20 | RegressionRequest request = 1; 21 | RegressionResponse response = 2; 22 | } 23 | 24 | message PredictLog { 25 | PredictRequest request = 1; 26 | PredictResponse response = 2; 27 | } 28 | 29 | message MultiInferenceLog { 30 | MultiInferenceRequest request = 1; 31 | MultiInferenceResponse response = 2; 32 | } 33 | 34 | message SessionRunLog { 35 | SessionRunRequest request = 1; 36 | SessionRunResponse response = 2; 37 | } 38 | 39 | // Logged model inference request.
40 | message PredictionLog { 41 | LogMetadata log_metadata = 1; 42 | oneof log_type { 43 | ClassifyLog classify_log = 2; 44 | RegressLog regress_log = 3; 45 | PredictLog predict_log = 6; 46 | MultiInferenceLog multi_inference_log = 4; 47 | SessionRunLog session_run_log = 5; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/prediction_service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "tensorflow_serving/apis/classification.proto"; 7 | import "tensorflow_serving/apis/get_model_metadata.proto"; 8 | import "tensorflow_serving/apis/inference.proto"; 9 | import "tensorflow_serving/apis/predict.proto"; 10 | import "tensorflow_serving/apis/regression.proto"; 11 | 12 | // open source marker; do not remove 13 | // PredictionService provides access to machine-learned models loaded by 14 | // model_servers. 15 | service PredictionService { 16 | // Classify. 17 | rpc Classify(ClassificationRequest) returns (ClassificationResponse); 18 | 19 | // Regress. 20 | rpc Regress(RegressionRequest) returns (RegressionResponse); 21 | 22 | // Predict -- provides access to loaded TensorFlow model. 23 | rpc Predict(PredictRequest) returns (PredictResponse); 24 | 25 | // MultiInference API for multi-headed models. 26 | rpc MultiInference(MultiInferenceRequest) returns (MultiInferenceResponse); 27 | 28 | // GetModelMetadata - provides access to metadata for loaded models. 
29 | rpc GetModelMetadata(GetModelMetadataRequest) 30 | returns (GetModelMetadataResponse); 31 | } 32 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/regression.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow_serving/apis/input.proto"; 6 | import "tensorflow_serving/apis/model.proto"; 7 | 8 | package tensorflow.serving; 9 | 10 | // Regression result for a single item (tensorflow.Example). 11 | message Regression { 12 | float value = 1; 13 | } 14 | 15 | // Contains one result per input example, in the same order as the input in 16 | // RegressionRequest. 17 | message RegressionResult { 18 | repeated Regression regressions = 1; 19 | } 20 | 21 | // RPC interfaces. 22 | 23 | message RegressionRequest { 24 | // Model Specification. If version is not specified, will use the latest 25 | // (numerical) version. 26 | ModelSpec model_spec = 1; 27 | 28 | // Input data. 29 | tensorflow.serving.Input input = 2; 30 | } 31 | 32 | message RegressionResponse { 33 | // Effective Model Specification used for regression. 34 | ModelSpec model_spec = 2; 35 | 36 | RegressionResult result = 1; 37 | } 38 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/regressor.h: -------------------------------------------------------------------------------- 1 | 2 | /* Copyright 2016 Google Inc. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | ==============================================================================*/ 16 | 17 | #ifndef TENSORFLOW_SERVING_APIS_REGRESSOR_H_ 18 | #define TENSORFLOW_SERVING_APIS_REGRESSOR_H_ 19 | 20 | #include "tensorflow/core/lib/core/status.h" 21 | #include "tensorflow_serving/apis/regression.pb.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | 26 | /// Model agnostic interface for performing regression. 27 | /// 28 | /// Specific implementations will exist for different model types 29 | /// (e.g. TensorFlow SavedModel) that can convert the request into a model 30 | /// specific input and know how to convert the output into a generic 31 | /// RegressionResult. 32 | class RegressorInterface { 33 | public: 34 | /// Given a RegressionRequest, populates the RegressionResult with the 35 | /// result. 36 | /// 37 | /// @param request Input request specifying the model/signature to query 38 | /// along with the data payload. 39 | /// @param result The output regression results that will get populated. 40 | /// @return A status object indicating success or failure. 
41 | virtual Status Regress(const RegressionRequest& request, 42 | RegressionResult* result) = 0; 43 | 44 | virtual ~RegressorInterface() = default; 45 | }; 46 | 47 | } // namespace serving 48 | } // namespace tensorflow 49 | 50 | #endif // TENSORFLOW_SERVING_APIS_REGRESSOR_H_ 51 | -------------------------------------------------------------------------------- /tensorflow_serving/apis/session_service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow_serving/apis/model.proto"; 6 | import "tensorflow/core/protobuf/config.proto"; 7 | import "tensorflow/core/protobuf/named_tensor.proto"; 8 | 9 | package tensorflow.serving; 10 | 11 | message SessionRunRequest { 12 | // Model Specification. If version is not specified, will use the latest 13 | // (numerical) version. 14 | ModelSpec model_spec = 1; 15 | 16 | // Tensors to be fed in the step. Each feed is a named tensor. 17 | repeated NamedTensorProto feed = 2; 18 | 19 | // Fetches. A list of tensor names. The caller expects a tensor to 20 | // be returned for each fetch[i] (see RunResponse.tensor). The 21 | // order of specified fetches does not change the execution order. 22 | repeated string fetch = 3; 23 | 24 | // Target Nodes. A list of node names. The named nodes will be run 25 | // to but their outputs will not be fetched. 26 | repeated string target = 4; 27 | 28 | // Options for the run call. **Currently ignored.** 29 | RunOptions options = 5; 30 | } 31 | 32 | message SessionRunResponse { 33 | // Effective Model Specification used for session run. 34 | ModelSpec model_spec = 3; 35 | 36 | // NOTE: The order of the returned tensors may or may not match 37 | // the fetch order specified in RunRequest. 38 | repeated NamedTensorProto tensor = 1; 39 | 40 | // Returned metadata if requested in the options. 
41 | RunMetadata metadata = 2; 42 | } 43 | 44 | // SessionService defines a service with which a client can interact to execute 45 | // Tensorflow model inference. The SessionService::SessionRun method is similar 46 | // to MasterService::RunStep of Tensorflow, except that all sessions are ready 47 | // to run, and you request a specific model/session with ModelSpec. 48 | service SessionService { 49 | // Runs inference of a given model. 50 | rpc SessionRun(SessionRunRequest) returns (SessionRunResponse); 51 | } 52 | -------------------------------------------------------------------------------- /tensorflow_serving/batching/test_util/BUILD: -------------------------------------------------------------------------------- 1 | # Description: Tensorflow Serving batching test utilities. 2 | 3 | package( 4 | default_visibility = ["//tensorflow_serving:internal"], 5 | features = ["-layering_check"], 6 | ) 7 | 8 | licenses(["notice"]) # Apache 2.0 9 | 10 | filegroup( 11 | name = "all_files", 12 | srcs = glob( 13 | ["**/*"], 14 | exclude = [ 15 | "**/METADATA", 16 | "**/OWNERS", 17 | ], 18 | ), 19 | ) 20 | 21 | cc_library( 22 | name = "puppet_batch_scheduler", 23 | testonly = 1, 24 | hdrs = ["puppet_batch_scheduler.h"], 25 | visibility = ["//visibility:private"], 26 | deps = [ 27 | "@org_tensorflow//tensorflow/core:tensorflow", 28 | "@org_tensorflow//tensorflow/core/kernels/batching_util:batch_scheduler", 29 | ], 30 | ) 31 | 32 | cc_test( 33 | name = "puppet_batch_scheduler_test", 34 | srcs = [ 35 | "puppet_batch_scheduler_test.cc", 36 | ], 37 | deps = [ 38 | ":puppet_batch_scheduler", 39 | "//tensorflow_serving/core/test_util:test_main", 40 | "@org_tensorflow//tensorflow/core:lib", 41 | "@org_tensorflow//tensorflow/core:test", 42 | ], 43 | ) 44 | 45 | # script that generates saved_model for matrix_half_plus_two model. 
46 | py_binary( 47 | name = "matrix_half_plus_two_saved_model", 48 | srcs = ["matrix_half_plus_two_saved_model.py"], 49 | python_version = "PY2", 50 | srcs_version = "PY2AND3", 51 | deps = ["@org_tensorflow//tensorflow:tensorflow_py"], 52 | ) 53 | -------------------------------------------------------------------------------- /tensorflow_serving/batching/testdata/BUILD: -------------------------------------------------------------------------------- 1 | # Description: Tensorflow Serving batching test data. 2 | 3 | package( 4 | default_visibility = ["//tensorflow_serving:internal"], 5 | features = ["-layering_check"], 6 | ) 7 | 8 | licenses(["notice"]) # Apache 2.0 9 | 10 | filegroup( 11 | name = "matrix_half_plus_two", 12 | srcs = glob( 13 | ["matrix_half_plus_two/**/*"], 14 | ), 15 | ) 16 | -------------------------------------------------------------------------------- /tensorflow_serving/batching/testdata/matrix_half_plus_two/1/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/batching/testdata/matrix_half_plus_two/1/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/config/log_collector_config.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | message LogCollectorConfig { 7 | // Identifies the type of the LogCollector we will use to collect these logs. 8 | string type = 1; 9 | 10 | // The prefix to use for the filenames of the logs. 
11 | string filename_prefix = 2; 12 | } 13 | -------------------------------------------------------------------------------- /tensorflow_serving/config/logging_config.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "tensorflow_serving/config/log_collector_config.proto"; 7 | 8 | message SamplingConfig { 9 | // Requests will be logged uniformly at random with this probability. Valid 10 | // range: [0, 1.0]. 11 | double sampling_rate = 1; 12 | } 13 | 14 | // Configuration for logging query/responses. 15 | message LoggingConfig { 16 | LogCollectorConfig log_collector_config = 1; 17 | SamplingConfig sampling_config = 2; 18 | } 19 | -------------------------------------------------------------------------------- /tensorflow_serving/config/monitoring_config.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | // Configuration for Prometheus monitoring. 7 | message PrometheusConfig { 8 | // Whether to expose Prometheus metrics. 9 | bool enable = 1; 10 | 11 | // The endpoint to expose Prometheus metrics. 12 | // If not specified, PrometheusExporter::kPrometheusPath value is used. 13 | string path = 2; 14 | } 15 | 16 | // Configuration for monitoring. 17 | message MonitoringConfig { 18 | PrometheusConfig prometheus_config = 1; 19 | } 20 | -------------------------------------------------------------------------------- /tensorflow_serving/config/platform_config.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | import "google/protobuf/any.proto"; 7 | 8 | // Configuration for a servable platform e.g. tensorflow or other ML systems. 
9 | message PlatformConfig { 10 | // The config proto for a SourceAdapter in the StoragePathSourceAdapter 11 | // registry. 12 | google.protobuf.Any source_adapter_config = 1; 13 | }; 14 | 15 | message PlatformConfigMap { 16 | // A map from a platform name to a platform config. The platform name is used 17 | // in ModelConfig.model_platform. 18 | map<string, PlatformConfig> platform_configs = 1; 19 | }; 20 | -------------------------------------------------------------------------------- /tensorflow_serving/config/ssl_config.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | option cc_enable_arenas = true; 5 | 6 | // Configuration for a secure gRPC channel 7 | message SSLConfig { 8 | // private server key for SSL 9 | string server_key = 1; 10 | // public server certificate 11 | string server_cert = 2; 12 | // custom certificate authority 13 | string custom_ca = 3; 14 | // valid client certificate required ? 15 | bool client_verify = 4; 16 | }; 17 | -------------------------------------------------------------------------------- /tensorflow_serving/core/README.md: -------------------------------------------------------------------------------- 1 | Directory for non-application-specific modules. 2 | -------------------------------------------------------------------------------- /tensorflow_serving/core/aspired_version_policy.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/aspired_version_policy.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | optional<ServableId> AspiredVersionPolicy::GetHighestAspiredNewServableId( 22 | const std::vector<AspiredServableStateSnapshot>& all_versions) { 23 | optional<ServableId> highest_version_id; 24 | for (const auto& version : all_versions) { 25 | if (version.is_aspired && version.state == LoaderHarness::State::kNew) { 26 | if (!highest_version_id || 27 | version.id.version > highest_version_id.value().version) { 28 | highest_version_id = version.id; 29 | } 30 | } 31 | } 32 | return highest_version_id; 33 | } 34 | 35 | } // namespace serving 36 | } // namespace tensorflow 37 | -------------------------------------------------------------------------------- /tensorflow_serving/core/aspired_versions_manager_builder.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/aspired_versions_manager_builder.h" 17 | 18 | #include "tensorflow_serving/core/manager_wrapper.h" 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | 23 | Status AspiredVersionsManagerBuilder::Create( 24 | Options options, std::unique_ptr<AspiredVersionsManagerBuilder>* builder) { 25 | std::unique_ptr<AspiredVersionsManager> aspired_versions_manager; 26 | TF_RETURN_IF_ERROR(AspiredVersionsManager::Create(std::move(options), 27 | &aspired_versions_manager)); 28 | builder->reset( 29 | new AspiredVersionsManagerBuilder(std::move(aspired_versions_manager))); 30 | return Status::OK(); 31 | } 32 | 33 | AspiredVersionsManagerBuilder::AspiredVersionsManagerBuilder( 34 | std::unique_ptr<AspiredVersionsManager> manager) 35 | : aspired_versions_manager_(manager.get()) { 36 | manager_with_sources_.SetOwned(std::move(manager)); 37 | } 38 | 39 | std::unique_ptr<Manager> AspiredVersionsManagerBuilder::Build() { 40 | return std::unique_ptr<Manager>( 41 | new ManagerWrapper(std::move(manager_with_sources_))); 42 | } 43 | 44 | } // namespace serving 45 | } // namespace tensorflow 46 | -------------------------------------------------------------------------------- /tensorflow_serving/core/logging.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | 5 | import "tensorflow_serving/apis/model.proto"; 6 | import "tensorflow_serving/config/logging_config.proto"; 7 | 8 | option cc_enable_arenas = true; 9 | 10 | // Metadata logged along with the request logs. 11 | message LogMetadata { 12 | ModelSpec model_spec = 1; 13 | SamplingConfig sampling_config = 2; 14 | // List of tags used to load the relevant MetaGraphDef from SavedModel. 15 | repeated string saved_model_tags = 3; 16 | // TODO(b/33279154): Add more metadata as mentioned in the bug.
17 | } 18 | -------------------------------------------------------------------------------- /tensorflow_serving/core/manager_wrapper.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/manager_wrapper.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | ManagerWrapper::ManagerWrapper(UniquePtrWithDeps<Manager> wrapped) 22 | : wrapped_(std::move(wrapped)) {} 23 | 24 | std::vector<ServableId> ManagerWrapper::ListAvailableServableIds() const { 25 | return wrapped_->ListAvailableServableIds(); 26 | } 27 | 28 | Status ManagerWrapper::GetUntypedServableHandle( 29 | const ServableRequest& request, 30 | std::unique_ptr<UntypedServableHandle>* const untyped_handle) { 31 | return wrapped_->GetUntypedServableHandle(request, untyped_handle); 32 | } 33 | 34 | std::map<ServableId, std::unique_ptr<UntypedServableHandle>> 35 | ManagerWrapper::GetAvailableUntypedServableHandles() const { 36 | return wrapped_->GetAvailableUntypedServableHandles(); 37 | } 38 | 39 | } // namespace serving 40 | } // namespace tensorflow 41 | -------------------------------------------------------------------------------- /tensorflow_serving/core/manager_wrapper.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved.
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_MANAGER_WRAPPER_H_ 17 | #define TENSORFLOW_SERVING_CORE_MANAGER_WRAPPER_H_ 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include "tensorflow_serving/core/manager.h" 24 | #include "tensorflow_serving/core/servable_id.h" 25 | #include "tensorflow_serving/util/unique_ptr_with_deps.h" 26 | 27 | namespace tensorflow { 28 | namespace serving { 29 | 30 | // An implementation of Manager that delegates all calls to another Manager. 31 | // 32 | // May be useful to override just part of the functionality of another Manager 33 | // or storing a Manager with its dependencies. 
34 | class ManagerWrapper : public Manager { 35 | public: 36 | explicit ManagerWrapper(UniquePtrWithDeps<Manager> wrapped); 37 | ~ManagerWrapper() override = default; 38 | 39 | std::vector<ServableId> ListAvailableServableIds() const override; 40 | 41 | private: 42 | Status GetUntypedServableHandle( 43 | const ServableRequest& request, 44 | std::unique_ptr<UntypedServableHandle>* untyped_handle) override; 45 | 46 | std::map<ServableId, std::unique_ptr<UntypedServableHandle>> 47 | GetAvailableUntypedServableHandles() const override; 48 | 49 | const UniquePtrWithDeps<Manager> wrapped_; 50 | }; 51 | 52 | } // namespace serving 53 | } // namespace tensorflow 54 | 55 | #endif // TENSORFLOW_SERVING_CORE_MANAGER_WRAPPER_H_ 56 | -------------------------------------------------------------------------------- /tensorflow_serving/core/servable_data_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/servable_data.h" 17 | 18 | #include 19 | 20 | #include 21 | #include "tensorflow/core/lib/core/errors.h" 22 | #include "tensorflow/core/lib/core/status_test_util.h" 23 | #include "tensorflow/core/platform/types.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | namespace { 28 | 29 | TEST(ServableDataTest, NoError) { 30 | ServableId id = {"name", 42}; 31 | ServableData data(id, "yo"); 32 | EXPECT_EQ(id, data.id()); 33 | TF_EXPECT_OK(data.status()); 34 | EXPECT_EQ("yo", data.DataOrDie()); 35 | EXPECT_EQ("yo", data.ConsumeDataOrDie()); 36 | } 37 | 38 | TEST(ServableDataTest, StaticCreateNoError) { 39 | ServableId id = {"name", 42}; 40 | auto data = CreateServableData(id, "yo"); 41 | EXPECT_EQ(id, data.id()); 42 | TF_EXPECT_OK(data.status()); 43 | EXPECT_EQ("yo", data.DataOrDie()); 44 | EXPECT_EQ("yo", data.ConsumeDataOrDie()); 45 | } 46 | 47 | TEST(ServableDataTest, Error) { 48 | ServableId id = {"name", 42}; 49 | ServableData data(id, errors::Unknown("d'oh")); 50 | EXPECT_EQ(id, data.id()); 51 | EXPECT_EQ(errors::Unknown("d'oh"), data.status()); 52 | } 53 | 54 | } // namespace 55 | } // namespace serving 56 | } // namespace tensorflow 57 | -------------------------------------------------------------------------------- /tensorflow_serving/core/servable_id_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/servable_id.h" 17 | 18 | #include 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | namespace { 23 | 24 | // Note: these tests use EXPECT_TRUE/FALSE in conjunction with the specific 25 | // comparison operator being tested (e.g. ==), instead of e.g. EXPECT_EQ, to 26 | // ensure that the exact operator targeted for testing is being invoked. 27 | 28 | TEST(ServableIdTest, Equals) { 29 | EXPECT_TRUE((ServableId{"a", 1} == ServableId{"a", 1})); 30 | 31 | EXPECT_FALSE((ServableId{"b", 2} == ServableId{"a", 2})); 32 | EXPECT_FALSE((ServableId{"b", 1} == ServableId{"b", 2})); 33 | } 34 | 35 | TEST(ServableIdTest, NotEquals) { 36 | EXPECT_FALSE((ServableId{"a", 1} != ServableId{"a", 1})); 37 | 38 | EXPECT_TRUE((ServableId{"b", 2} != ServableId{"a", 2})); 39 | EXPECT_TRUE((ServableId{"b", 1} != ServableId{"b", 2})); 40 | } 41 | 42 | TEST(ServableIdTest, LessThan) { 43 | EXPECT_TRUE((ServableId{"a", 1} < ServableId{"b", 1})); 44 | EXPECT_TRUE((ServableId{"b", 1} < ServableId{"b", 2})); 45 | EXPECT_TRUE((ServableId{"a", 1} < ServableId{"b", 2})); 46 | 47 | EXPECT_FALSE((ServableId{"a", 1} < ServableId{"a", 1})); 48 | 49 | EXPECT_FALSE((ServableId{"b", 1} < ServableId{"a", 1})); 50 | EXPECT_FALSE((ServableId{"b", 2} < ServableId{"b", 1})); 51 | EXPECT_FALSE((ServableId{"b", 2} < ServableId{"a", 1})); 52 | } 53 | 54 | } // namespace 55 | } // namespace serving 56 | } // namespace tensorflow 57 | 
-------------------------------------------------------------------------------- /tensorflow_serving/core/static_manager.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/static_manager.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | StaticManagerBuilder::StaticManagerBuilder() { 22 | BasicManager::Options basic_manager_options; 23 | // We don't want multithreading. 24 | basic_manager_options.num_load_threads = 0; 25 | basic_manager_options.num_unload_threads = 0; 26 | const Status basic_manager_status = 27 | BasicManager::Create(std::move(basic_manager_options), &basic_manager_); 28 | if (!basic_manager_status.ok()) { 29 | LOG(ERROR) << "Error creating BasicManager: " << basic_manager_status; 30 | health_ = basic_manager_status; 31 | } 32 | } 33 | 34 | std::unique_ptr<Manager> StaticManagerBuilder::Build() { 35 | if (!health_.ok()) { 36 | LOG(ERROR) << health_; 37 | return nullptr; 38 | } 39 | 40 | // If Build() is called again, we'll produce the following error.
41 | health_ = errors::FailedPrecondition( 42 | "Build() already called on this StaticManagerBuilder."); 43 | 44 | return std::move(basic_manager_); 45 | } 46 | 47 | } // namespace serving 48 | } // namespace tensorflow 49 | -------------------------------------------------------------------------------- /tensorflow_serving/core/storage_path.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Typedefs and registries pertaining to storage system paths. 17 | 18 | #ifndef TENSORFLOW_SERVING_CORE_STORAGE_PATH_H_ 19 | #define TENSORFLOW_SERVING_CORE_STORAGE_PATH_H_ 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | #include "tensorflow/core/lib/core/status.h" 26 | #include "tensorflow/core/platform/types.h" 27 | #include "tensorflow_serving/core/servable_data.h" 28 | #include "tensorflow_serving/core/servable_id.h" 29 | 30 | namespace tensorflow { 31 | namespace serving { 32 | 33 | // Strings that represent paths in some storage system. 
34 | using StoragePath = string; 35 | 36 | inline bool operator==(const ServableData<StoragePath>& a, 37 | const ServableData<StoragePath>& b) { 38 | if (a.id() != b.id()) { 39 | return false; 40 | } 41 | if (a.status().ok() != b.status().ok()) { 42 | return false; 43 | } 44 | if (a.status().ok()) { 45 | return a.DataOrDie() == b.DataOrDie(); 46 | } else { 47 | return a.status() == b.status(); 48 | } 49 | } 50 | 51 | } // namespace serving 52 | } // namespace tensorflow 53 | 54 | #endif // TENSORFLOW_SERVING_CORE_STORAGE_PATH_H_ 55 | -------------------------------------------------------------------------------- /tensorflow_serving/core/storage_path_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/storage_path.h" 17 | 18 | #include 19 | #include "tensorflow/core/lib/core/errors.h" 20 | 21 | namespace tensorflow { 22 | namespace serving { 23 | namespace { 24 | 25 | TEST(StoragePathTest, ServableDataEquality) { 26 | ServableId id0 = {"0", 0}; 27 | ServableId id1 = {"1", 1}; 28 | 29 | ServableData a(id0, "x"); 30 | ServableData a2(id0, "x"); 31 | EXPECT_TRUE(a == a); 32 | EXPECT_TRUE(a == a2); 33 | EXPECT_TRUE(a2 == a); 34 | 35 | ServableData b(id0, "y"); 36 | ServableData c(id1, "x"); 37 | ServableData d(id0, errors::Unknown("error")); 38 | for (const ServableData& other : {b, c, d}) { 39 | EXPECT_TRUE(other == other); 40 | EXPECT_FALSE(a == other); 41 | EXPECT_FALSE(other == a); 42 | } 43 | } 44 | 45 | } // namespace 46 | } // namespace serving 47 | } // namespace tensorflow 48 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/availability_test_util.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/test_util/availability_test_util.h" 17 | #include "tensorflow/core/platform/env.h" 18 | 19 | namespace tensorflow { 20 | namespace serving { 21 | namespace test_util { 22 | 23 | namespace { 24 | 25 | // Determines whether WaitUntilServableManagerStateIsOneOf()'s condition is 26 | // satisfied. (See that function's documentation.) 27 | bool ServableManagerStateIsOneOf( 28 | const ServableStateMonitor& monitor, const ServableId& servable, 29 | const std::vector& states) { 30 | optional maybe_state = monitor.GetState(servable); 31 | if (!maybe_state) { 32 | return false; 33 | } 34 | const ServableState state = *maybe_state; 35 | 36 | for (const ServableState::ManagerState& desired_manager_state : states) { 37 | if (state.manager_state == desired_manager_state) { 38 | return true; 39 | } 40 | } 41 | return false; 42 | } 43 | 44 | } // namespace 45 | 46 | void WaitUntilServableManagerStateIsOneOf( 47 | const ServableStateMonitor& monitor, const ServableId& servable, 48 | const std::vector& states) { 49 | while (!ServableManagerStateIsOneOf(monitor, servable, states)) { 50 | Env::Default()->SleepForMicroseconds(50 * 1000 /* 50 ms */); 51 | } 52 | } 53 | 54 | } // namespace test_util 55 | } // namespace serving 56 | } // namespace tensorflow 57 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/availability_test_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // Methods related to the availability of servables, that are useful in writing 17 | // tests. (Not intended for production use.) 18 | 19 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_AVAILABILITY_TEST_UTIL_H_ 20 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_AVAILABILITY_TEST_UTIL_H_ 21 | 22 | #include "tensorflow_serving/core/servable_state_monitor.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | namespace test_util { 27 | 28 | // Waits until 'monitor' shows that the manager state of 'servable' is one of 29 | // 'states'. 30 | void WaitUntilServableManagerStateIsOneOf( 31 | const ServableStateMonitor& monitor, const ServableId& servable, 32 | const std::vector& states); 33 | 34 | } // namespace test_util 35 | } // namespace serving 36 | } // namespace tensorflow 37 | 38 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_AVAILABILITY_TEST_UTIL_H_ 39 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/fake_loader.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow_serving/core/test_util/fake_loader.h"

#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {
namespace serving {
namespace test_util {

// Per-thread flag recording whether a FakeLoader was destroyed on the
// current thread; global count of live FakeLoader instances, guarded by
// num_fake_loaders_mu_.
thread_local bool FakeLoader::was_deleted_in_this_thread_;
int FakeLoader::num_fake_loaders_ = 0;
mutex FakeLoader::num_fake_loaders_mu_(LINKER_INITIALIZED);

// Constructing a loader bumps the global live-instance count and clears the
// current thread's deletion flag.
FakeLoader::FakeLoader(int64 servable, const Status load_status)
    : servable_(servable), load_status_(load_status) {
  was_deleted_in_this_thread_ = false;
  {
    mutex_lock l(num_fake_loaders_mu_);
    ++num_fake_loaders_;
  }
}

FakeLoader::~FakeLoader() {
  {
    mutex_lock l(num_fake_loaders_mu_);
    --num_fake_loaders_;
  }
  // Set last so tests can observe which thread ran the destructor.
  was_deleted_in_this_thread_ = true;
}

// Load() simply reports the status injected at construction time.
Status FakeLoader::load_status() { return load_status_; }

Status FakeLoader::Load() { return load_status_; }

void FakeLoader::Unload() {}

// Exposes the fake servable value by pointer, as the Loader API requires.
AnyPtr FakeLoader::servable() { return AnyPtr(&servable_); }

bool FakeLoader::was_deleted_in_this_thread() {
  return was_deleted_in_this_thread_;
}

int FakeLoader::num_fake_loaders() {
  mutex_lock l(num_fake_loaders_mu_);
  return num_fake_loaders_;
}

}  // namespace test_util
}  // namespace serving
}  // namespace tensorflow
--------------------------------------------------------------------------------
/tensorflow_serving/core/test_util/fake_loader_source_adapter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving.test_util; 4 | 5 | // Config proto for FakeLoaderSourceAdapter. 6 | message FakeLoaderSourceAdapterConfig { 7 | // FakeLoaderSourceAdapter's 'suffix' ctor parameter. 8 | string suffix = 1; 9 | } 10 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/fake_log_collector.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_FAKE_LOG_COLLECTOR_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_FAKE_LOG_COLLECTOR_H_ 18 | 19 | #include "google/protobuf/message.h" 20 | #include "tensorflow/core/lib/core/status.h" 21 | #include "tensorflow_serving/core/log_collector.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | 26 | // FakeLogCollector which does nothing except count the number of times 27 | // CollectMessage has been called on it. 
class FakeLogCollector : public LogCollector {
 public:
  // Counts the message and discards it; always reports success.
  Status CollectMessage(const google::protobuf::Message& message) override {
    ++collect_count_;
    return Status::OK();
  }

  // Nothing is buffered, so flushing is a no-op.
  Status Flush() override { return Status::OK(); }

  // Number of times CollectMessage() has been called.
  int collect_count() const { return collect_count_; }

 private:
  int collect_count_ = 0;  // incremented once per CollectMessage() call
};

}  // namespace serving
}  // namespace tensorflow

#endif  // TENSORFLOW_SERVING_CORE_TEST_UTIL_FAKE_LOG_COLLECTOR_H_
--------------------------------------------------------------------------------
/tensorflow_serving/core/test_util/fake_storage_path_source_adapter.cc:
--------------------------------------------------------------------------------
/* Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/core/test_util/fake_storage_path_source_adapter.h" 17 | 18 | #include "tensorflow/core/lib/core/errors.h" 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | namespace test_util { 23 | 24 | FakeStoragePathSourceAdapter::FakeStoragePathSourceAdapter( 25 | const string& suffix, std::function call_on_destruct) 26 | : suffix_(suffix), call_on_destruct_(call_on_destruct) {} 27 | 28 | FakeStoragePathSourceAdapter::~FakeStoragePathSourceAdapter() { 29 | Detach(); 30 | if (call_on_destruct_) { 31 | call_on_destruct_(suffix_); 32 | } 33 | } 34 | 35 | Status FakeStoragePathSourceAdapter::Convert( 36 | const StoragePath& data, StoragePath* const converted_data) { 37 | if (data == "invalid") { 38 | return errors::InvalidArgument( 39 | "FakeStoragePathSourceAdapter Convert() dutifully failing on " 40 | "\"invalid\" " 41 | "data"); 42 | } 43 | *converted_data = 44 | suffix_.empty() ? data : strings::StrCat(data, "/", suffix_); 45 | return Status::OK(); 46 | } 47 | 48 | } // namespace test_util 49 | } // namespace serving 50 | } // namespace tensorflow 51 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/mock_loader.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOADER_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOADER_H_ 18 | 19 | #include 20 | #include "tensorflow/core/lib/core/status.h" 21 | #include "tensorflow_serving/core/loader.h" 22 | #include "tensorflow_serving/util/any_ptr.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | namespace test_util { 27 | 28 | class MockLoader : public Loader { 29 | public: 30 | MOCK_CONST_METHOD1(EstimateResources, Status(ResourceAllocation* estimate)); 31 | MOCK_METHOD0(Load, Status()); 32 | MOCK_METHOD1(LoadWithMetadata, Status(const Metadata&)); 33 | MOCK_METHOD0(Unload, void()); 34 | MOCK_METHOD0(servable, AnyPtr()); 35 | }; 36 | 37 | } // namespace test_util 38 | } // namespace serving 39 | } // namespace tensorflow 40 | 41 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOADER_H_ 42 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/mock_log_collector.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOG_COLLECTOR_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOG_COLLECTOR_H_ 18 | 19 | #include "google/protobuf/message.h" 20 | #include 21 | #include "tensorflow/core/lib/core/status.h" 22 | #include "tensorflow_serving/core/log_collector.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | class MockLogCollector : public LogCollector { 28 | public: 29 | MockLogCollector() = default; 30 | MOCK_METHOD1(CollectMessage, Status(const google::protobuf::Message& message)); 31 | MOCK_METHOD0(Flush, Status()); 32 | }; 33 | 34 | } // namespace serving 35 | } // namespace tensorflow 36 | 37 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_LOG_COLLECTOR_H_ 38 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/mock_server_request_logger.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_SERVER_REQUEST_LOGGER_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_SERVER_REQUEST_LOGGER_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include 23 | #include "tensorflow_serving/core/server_request_logger.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | namespace test_util { 28 | 29 | class MockServerRequestLogger : public ServerRequestLogger { 30 | public: 31 | MockServerRequestLogger() : ServerRequestLogger({}) {} 32 | 33 | MOCK_METHOD1(Update, 34 | Status(const std::map>& 35 | logging_config_map)); 36 | 37 | MOCK_METHOD3(Log, Status(const google::protobuf::Message& request, 38 | const google::protobuf::Message& response, 39 | const LogMetadata& log_metadata)); 40 | }; 41 | 42 | } // namespace test_util 43 | } // namespace serving 44 | } // namespace tensorflow 45 | 46 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_SERVER_REQUEST_LOGGER_H_ 47 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/mock_storage_path_target.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_STORAGE_PATH_TARGET_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_STORAGE_PATH_TARGET_H_ 18 | 19 | #include 20 | 21 | #include 22 | #include "tensorflow/core/lib/core/status.h" 23 | #include "tensorflow_serving/core/storage_path.h" 24 | #include "tensorflow_serving/core/target.h" 25 | 26 | namespace tensorflow { 27 | namespace serving { 28 | namespace test_util { 29 | 30 | class MockStoragePathTarget : public TargetBase { 31 | public: 32 | ~MockStoragePathTarget() override { Detach(); } 33 | MOCK_METHOD2(SetAspiredVersions, 34 | void(const StringPiece, std::vector>)); 35 | }; 36 | 37 | } // namespace test_util 38 | } // namespace serving 39 | } // namespace tensorflow 40 | 41 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_MOCK_STORAGE_PATH_TARGET_H_ 42 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/session_test_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_CORE_TEST_UTIL_SESSION_TEST_UTIL_H_ 17 | #define TENSORFLOW_SERVING_CORE_TEST_UTIL_SESSION_TEST_UTIL_H_ 18 | 19 | #include 20 | 21 | #include "absl/base/attributes.h" 22 | #include "tensorflow/core/lib/core/status.h" 23 | #include "tensorflow/core/public/session_options.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | namespace test_util { 28 | 29 | // Sets a 'hook' function, which will be called when a new session is created 30 | // via the tensorflow::NewSession() API. If the hook returns an error status, 31 | // the session creation fails. 32 | // 33 | // For this hook to be enabled, create a session by setting 34 | // SessionOptions::target as "new_session_hook/". This 35 | // will call the hook as well as return the session created when target is 36 | // "". 37 | // 38 | // Calling this method again replaces the previous hook. 39 | // 40 | // This method is NOT thread-safe. 41 | ABSL_CONST_INIT extern const char kNewSessionHookSessionTargetPrefix[]; 42 | void SetNewSessionHook(std::function hook); 43 | 44 | } // namespace test_util 45 | } // namespace serving 46 | } // namespace tensorflow 47 | 48 | #endif // TENSORFLOW_SERVING_CORE_TEST_UTIL_SESSION_TEST_UTIL_H_ 49 | -------------------------------------------------------------------------------- /tensorflow_serving/core/test_util/test_main.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// A program with a main that is suitable for unittests, including those
// that also define microbenchmarks. Based on whether the user specified
// the --benchmark_filter flag which specifies which benchmarks to run,
// we will either run benchmarks or run the gtest tests in the program.

#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/types.h"

#if defined(PLATFORM_GOOGLE) || defined(__ANDROID__)
// main() is supplied by gunit_main
#else
#include "gtest/gtest.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test_benchmark.h"

// Scans argv for a --benchmarks= flag: if present, runs only the matching
// benchmarks and exits; otherwise falls through to the full gtest suite.
GTEST_API_ int main(int argc, char** argv) {
  std::cout << "Running main() from test_main.cc\n";

  testing::InitGoogleTest(&argc, argv);
  for (int i = 1; i < argc; i++) {
    if (tensorflow::str_util::StartsWith(argv[i], "--benchmarks=")) {
      // Everything after "--benchmarks=" is the benchmark-name pattern.
      const char* pattern = argv[i] + strlen("--benchmarks=");
      tensorflow::testing::Benchmark::Run(pattern);
      return 0;
    }
  }
  return RUN_ALL_TESTS();
}
#endif
--------------------------------------------------------------------------------
/tensorflow_serving/example/BUILD:
--------------------------------------------------------------------------------
# Description: Tensorflow Serving examples.
2 | 3 | package( 4 | default_visibility = ["//tensorflow_serving:internal"], 5 | features = ["no_layering_check"], 6 | ) 7 | 8 | licenses(["notice"]) # Apache 2.0 9 | 10 | load("//tensorflow_serving:serving.bzl", "serving_proto_library") 11 | 12 | filegroup( 13 | name = "all_files", 14 | srcs = glob( 15 | ["**/*"], 16 | exclude = [ 17 | "**/METADATA", 18 | "**/OWNERS", 19 | ], 20 | ), 21 | ) 22 | 23 | cc_binary( 24 | name = "resnet_client_cc", 25 | srcs = [ 26 | "resnet_client.cc", 27 | ], 28 | deps = [ 29 | "//tensorflow_serving/apis:prediction_service_proto", 30 | "@com_google_protobuf//:protobuf_lite", 31 | "@grpc//:grpc++", 32 | "@org_tensorflow//tensorflow/core:framework", 33 | "@org_tensorflow//tensorflow/core:lib", 34 | ], 35 | ) 36 | -------------------------------------------------------------------------------- /tensorflow_serving/example/config_files/README: -------------------------------------------------------------------------------- 1 | Usage: 2 | 3 | CUDA_VISIBLE_DEVICES=0,1 tensorflow_model_server --use_session_group=true --model_config_file=session_group_multi_models_config --platform_config_file=session_group_multi_models_platform_config 4 | -------------------------------------------------------------------------------- /tensorflow_serving/example/config_files/models.config: -------------------------------------------------------------------------------- 1 | model_config_list:{ 2 | config:{ 3 | name:"pb1", 4 | base_path:"/data/workspace/serving-model/multi_wdl_model/pb1", 5 | model_platform:"tensorflow" 6 | }, 7 | config:{ 8 | name:"pb2", 9 | base_path:"/data/workspace/serving-model/multi_wdl_model/pb2", 10 | model_platform:"tensorflow" 11 | }, 12 | } 13 | -------------------------------------------------------------------------------- /tensorflow_serving/example/config_files/platform_config: -------------------------------------------------------------------------------- 1 | platform_configs { 2 | key: "tensorflow" 3 | value { 4 | 
source_adapter_config { 5 | [type.googleapis.com/tensorflow.serving.SavedModelBundleSourceAdapterConfig] { 6 | legacy_config { 7 | session_config { 8 | gpu_options { 9 | allow_growth: true 10 | } 11 | } 12 | } 13 | } 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /tensorflow_serving/example/config_files/session_group_multi_models_config: -------------------------------------------------------------------------------- 1 | model_config_list:{ 2 | config:{ 3 | name:"pb1", 4 | base_path:"/data/workspace/serving-model/multi_wdl_model/pb1", 5 | model_platform:"tensorflow", 6 | model_id: 0 7 | }, 8 | config:{ 9 | name:"pb2", 10 | base_path:"/data/workspace/serving-model/multi_wdl_model/pb2", 11 | model_platform:"tensorflow", 12 | model_id: 1 13 | }, 14 | } 15 | -------------------------------------------------------------------------------- /tensorflow_serving/example/config_files/session_group_multi_models_platform_config: -------------------------------------------------------------------------------- 1 | platform_configs { 2 | key: "tensorflow" 3 | value { 4 | source_adapter_config { 5 | [type.googleapis.com/tensorflow.serving.SavedModelBundleV2SourceAdapterConfig] { 6 | legacy_config { 7 | model_session_config { 8 | session_config { 9 | gpu_options { 10 | allow_growth: true 11 | } 12 | intra_op_parallelism_threads: 8 13 | inter_op_parallelism_threads: 8 14 | use_per_session_threads: true 15 | use_per_session_stream: true 16 | } 17 | session_num: 2 18 | cpusets: "1,2;5,6" 19 | } 20 | model_session_config { 21 | session_config { 22 | gpu_options { 23 | allow_growth: true 24 | } 25 | intra_op_parallelism_threads: 16 26 | inter_op_parallelism_threads: 16 27 | use_per_session_threads: true 28 | use_per_session_stream: true 29 | } 30 | session_num: 4 31 | cpusets: "20,21;23,24;26,27;29,30" 32 | } 33 | } 34 | } 35 | } 36 | } 37 | } 38 | 39 | 
-------------------------------------------------------------------------------- /tensorflow_serving/example/resnet_k8s.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | apiVersion: extensions/v1beta1 17 | kind: Deployment 18 | metadata: 19 | name: resnet-deployment 20 | spec: 21 | replicas: 3 22 | template: 23 | metadata: 24 | labels: 25 | app: resnet-server 26 | spec: 27 | containers: 28 | - name: resnet-container 29 | image: gcr.io/tensorflow-serving/resnet 30 | ports: 31 | - containerPort: 8500 32 | --- 33 | apiVersion: v1 34 | kind: Service 35 | metadata: 36 | labels: 37 | run: resnet-service 38 | name: resnet-service 39 | spec: 40 | ports: 41 | - port: 8500 42 | targetPort: 8500 43 | selector: 44 | app: resnet-server 45 | type: LoadBalancer 46 | -------------------------------------------------------------------------------- /tensorflow_serving/g3doc/_toc.yaml: -------------------------------------------------------------------------------- 1 | toc: 2 | - title: TensorFlow Serving with Docker 3 | path: /tfx/serving/docker 4 | - title: Installation 5 | path: /tfx/serving/setup 6 | - title: Serve a TensorFlow model 7 | path: /tfx/serving/serving_basic 8 | - title: Architecture 9 | path: 
/tfx/serving/architecture 10 | - title: Advanced model server configuration 11 | path: /tfx/serving/serving_config 12 | - title: Build a TensorFlow ModelServer 13 | path: /tfx/serving/serving_advanced 14 | - title: Use TensorFlow Serving with Kubernetes 15 | path: /tfx/serving/serving_kubernetes 16 | - title: Create a new kind of servable 17 | path: /tfx/serving/custom_servable 18 | - title: Create a module that discovers new servable paths 19 | path: /tfx/serving/custom_source 20 | - title: Serving TensorFlow models with custom ops 21 | path: /tfx/serving/custom_op 22 | - title: SignatureDefs in SavedModel for TensorFlow Serving 23 | path: /tfx/serving/signature_defs 24 | -------------------------------------------------------------------------------- /tensorflow_serving/g3doc/tutorials/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow Serving tutorials 2 | 3 | Tutorials moved to: https://github.com/tensorflow/tfx/tree/master/docs/tutorials 4 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/get_model_status_impl.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_GET_MODEL_STATUS_IMPL_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_GET_MODEL_STATUS_IMPL_H_ 18 | 19 | #include "tensorflow/core/lib/core/status.h" 20 | #include "tensorflow_serving/apis/get_model_status.pb.h" 21 | #include "tensorflow_serving/model_servers/server_core.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | 26 | // Returns response with status information for model. If the request 27 | // specifies a model version, information about only that version will be 28 | // returned. If no version is specified, information about all versions of the 29 | // model will be returned. 30 | class GetModelStatusImpl { 31 | public: 32 | static Status GetModelStatus(ServerCore* core, 33 | const GetModelStatusRequest& request, 34 | GetModelStatusResponse* response); 35 | 36 | // Like GetModelStatus(), but uses 'model_spec' instead of the one embedded in 37 | // 'request'. 38 | static Status GetModelStatusWithModelSpec( 39 | ServerCore* core, const ModelSpec& model_spec, 40 | const GetModelStatusRequest& request, GetModelStatusResponse* response); 41 | }; 42 | 43 | } // namespace serving 44 | } // namespace tensorflow 45 | 46 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_GET_MODEL_STATUS_IMPL_H_ 47 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/grpc_status_util.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/model_servers/grpc_status_util.h" 17 | 18 | #include "grpcpp/support/status_code_enum.h" 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | 23 | ::grpc::Status ToGRPCStatus(const ::tensorflow::Status& status) { 24 | const int kErrorMessageLimit = 1024; 25 | string error_message; 26 | if (status.error_message().length() > kErrorMessageLimit) { 27 | error_message = 28 | status.error_message().substr(0, kErrorMessageLimit) + "...TRUNCATED"; 29 | } else { 30 | error_message = status.error_message(); 31 | } 32 | return ::grpc::Status(static_cast(status.code()), 33 | error_message); 34 | } 35 | 36 | } // namespace serving 37 | } // namespace tensorflow 38 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/grpc_status_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_GRPC_STATUS_UTIL_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_GRPC_STATUS_UTIL_H_ 18 | 19 | #include "grpcpp/support/status.h" 20 | #include "tensorflow/core/lib/core/status.h" 21 | 22 | namespace tensorflow { 23 | namespace serving { 24 | 25 | // Converts from tensorflow Status to GRPC Status. 26 | ::grpc::Status ToGRPCStatus(const ::tensorflow::Status& status); 27 | 28 | } // namespace serving 29 | } // namespace tensorflow 30 | 31 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_GRPC_STATUS_UTIL_H_ 32 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/http_server.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_HTTP_SERVER_H_ 16 | #define TENSORFLOW_SERVING_MODEL_SERVERS_HTTP_SERVER_H_ 17 | 18 | #include 19 | 20 | #include "tensorflow_serving/config/monitoring_config.pb.h" 21 | #include "tensorflow_serving/util/net_http/server/public/httpserver_interface.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | 26 | class ServerCore; 27 | 28 | // HTTP Server options. 29 | // Options for configuring a HttpServer object. 30 | struct HttpServerOptions { 31 | int port = 0; 32 | int num_threads = 1; 33 | int timeout_in_ms = 30000; // 30 seconds. 34 | bool use_saved_model = true; 35 | bool use_session_group = false; 36 | }; 37 | 38 | // Returns a HTTP Server that has following endpoints: 39 | // 40 | // o HTTP/REST API (under /v1/models/...) 41 | // 42 | // The returned server is in a state of accepting new requests. 43 | std::unique_ptr CreateAndStartHttpServer( 44 | int port, int num_threads, int timeout_in_ms, 45 | const MonitoringConfig& monitoring_config, ServerCore* core); 46 | 47 | std::unique_ptr CreateAndStartHttpServer( 48 | const HttpServerOptions& http_opt, 49 | const MonitoringConfig& monitoring_config, 50 | ServerCore* core); 51 | 52 | } // namespace serving 53 | } // namespace tensorflow 54 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_HTTP_SERVER_H_ 55 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/model_platform_types.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_PLATFORM_TYPES_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_PLATFORM_TYPES_H_ 18 | 19 | namespace tensorflow { 20 | namespace serving { 21 | 22 | constexpr char kTensorFlowModelPlatform[] = "tensorflow"; 23 | 24 | } // namespace serving 25 | } // namespace tensorflow 26 | 27 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_PLATFORM_TYPES_H_ 28 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/model_service_impl.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_SERVICE_IMPL_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_SERVICE_IMPL_H_ 18 | 19 | #include "grpcpp/server_context.h" 20 | #include "grpcpp/support/status.h" 21 | #include "tensorflow_serving/apis/model_management.pb.h" 22 | #include "tensorflow_serving/apis/model_service.grpc.pb.h" 23 | #include "tensorflow_serving/apis/model_service.pb.h" 24 | #include "tensorflow_serving/model_servers/server_core.h" 25 | 26 | namespace tensorflow { 27 | namespace serving { 28 | 29 | class ModelServiceImpl final : public ModelService::Service { 30 | public: 31 | explicit ModelServiceImpl(ServerCore *core) : core_(core) {} 32 | 33 | ::grpc::Status GetModelStatus(::grpc::ServerContext *context, 34 | const GetModelStatusRequest *request, 35 | GetModelStatusResponse *response) override; 36 | 37 | ::grpc::Status HandleReloadConfigRequest(::grpc::ServerContext *context, 38 | const ReloadConfigRequest *request, 39 | ReloadConfigResponse *response); 40 | 41 | private: 42 | ServerCore *core_; 43 | }; 44 | 45 | } // namespace serving 46 | } // namespace tensorflow 47 | 48 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_MODEL_SERVICE_IMPL_H_ 49 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/platform_config_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_PLATFORM_CONFIG_UTIL_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_PLATFORM_CONFIG_UTIL_H_ 18 | 19 | #include "tensorflow_serving/config/platform_config.pb.h" 20 | #include "tensorflow_serving/servables/tensorflow/session_bundle_config.pb.h" 21 | 22 | namespace tensorflow { 23 | namespace serving { 24 | 25 | // Creates a PlatformConfigMap containing a single entry with the key as 26 | // kTensorFlowModelPlatform and the value as a SourceAdapter config proto for 27 | // one of {SessionBundleSourceAdapter, SavedModelBundleSourceAdapter} (based 28 | // on 'use_saved_model' using 'session_bundle_config'. 29 | PlatformConfigMap CreateTensorFlowPlatformConfigMap( 30 | const SessionBundleConfig& session_bundle_config, bool use_saved_model); 31 | 32 | PlatformConfigMap CreateTensorFlowPlatformConfigMap( 33 | const SessionGroupBundleConfig& session_bundle_config, bool use_saved_model); 34 | 35 | } // namespace serving 36 | } // namespace tensorflow 37 | 38 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_PLATFORM_CONFIG_UTIL_H_ 39 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/tensorflow_model_server_test_client.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Manual test client for tensorflow_model_server.""" 16 | 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | # This is a placeholder for a Google-internal import. 22 | 23 | import grpc 24 | import tensorflow as tf 25 | 26 | from tensorflow.core.framework import types_pb2 27 | from tensorflow.python.platform import flags 28 | from tensorflow_serving.apis import predict_pb2 29 | from tensorflow_serving.apis import prediction_service_pb2_grpc 30 | 31 | 32 | tf.app.flags.DEFINE_string('server', 'localhost:8500', 33 | 'inception_inference service host:port') 34 | FLAGS = tf.app.flags.FLAGS 35 | 36 | 37 | def main(_): 38 | # Prepare request 39 | request = predict_pb2.PredictRequest() 40 | request.model_spec.name = 'default' 41 | request.inputs['x'].dtype = types_pb2.DT_FLOAT 42 | request.inputs['x'].float_val.append(2.0) 43 | request.output_filter.append('y') 44 | # Send request 45 | channel = grpc.insecure_channel(FLAGS.server) 46 | stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) 47 | print(stub.Predict(request, 5.0)) # 5 secs timeout 48 | 49 | 50 | if __name__ == '__main__': 51 | tf.app.run() 52 | -------------------------------------------------------------------------------- 
/tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.h" 17 | #include "tensorflow_serving/core/source_adapter.h" 18 | #include "tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.pb.h" 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | namespace test_util { 23 | 24 | // Register the source adapter. 
25 | class StoragePathErrorInjectingSourceAdapterCreator { 26 | public: 27 | static Status Create( 28 | const StoragePathErrorInjectingSourceAdapterConfig& config, 29 | std::unique_ptr>>* 30 | adapter) { 31 | adapter->reset( 32 | new ErrorInjectingSourceAdapter>( 33 | Status(error::CANCELLED, config.error_message()))); 34 | return Status::OK(); 35 | } 36 | }; 37 | REGISTER_STORAGE_PATH_SOURCE_ADAPTER( 38 | StoragePathErrorInjectingSourceAdapterCreator, 39 | StoragePathErrorInjectingSourceAdapterConfig); 40 | 41 | } // namespace test_util 42 | } // namespace serving 43 | } // namespace tensorflow 44 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_MODEL_SERVERS_TEST_UTIL_STORAGE_PATH_ERROR_INJECTING_SOURCE_ADAPTER_H_ 17 | #define TENSORFLOW_SERVING_MODEL_SERVERS_TEST_UTIL_STORAGE_PATH_ERROR_INJECTING_SOURCE_ADAPTER_H_ 18 | 19 | #include "tensorflow_serving/core/source_adapter.h" 20 | 21 | namespace tensorflow { 22 | namespace serving { 23 | namespace test_util { 24 | 25 | // An ErrorInjectingSourceAdapter> (see 26 | // source_adapter.h) registered in StoragePathSourceAdapterRegistry and keyed on 27 | // StoragePathErrorInjectingSourceAdapterConfig. 28 | using StoragePathErrorInjectingSourceAdapter = 29 | ErrorInjectingSourceAdapter>; 30 | 31 | } // namespace test_util 32 | } // namespace serving 33 | } // namespace tensorflow 34 | 35 | #endif // TENSORFLOW_SERVING_MODEL_SERVERS_TEST_UTIL_STORAGE_PATH_ERROR_INJECTING_SOURCE_ADAPTER_H_ 36 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/test_util/storage_path_error_injecting_source_adapter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving.test_util; 4 | 5 | // Config proto for StoragePathErrorInjectingSourceAdapter. 6 | message StoragePathErrorInjectingSourceAdapterConfig { 7 | // The error message the adapter emits. 8 | string error_message = 1; 9 | } 10 | -------------------------------------------------------------------------------- /tensorflow_serving/model_servers/version.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/model_servers/version.h" 17 | 18 | const char kTFS_SCM_Revision[] = TF_MODELSERVER_VERSION_STRING; 19 | 20 | extern "C" { 21 | const char* TF_Serving_Version() { return kTFS_SCM_Revision; } 22 | } 23 | -------------------------------------------------------------------------------- /tensorflow_serving/repo.bzl: -------------------------------------------------------------------------------- 1 | """ TensorFlow Http Archive 2 | 3 | Modified http_archive that allows us to override the TensorFlow commit that is 4 | downloaded by setting an environment variable. This override is to be used for 5 | testing purposes. 6 | 7 | Add the following to your Bazel build command in order to override the 8 | TensorFlow revision. 
9 | 10 | build: --action_env TF_REVISION="" 11 | 12 | * `TF_REVISION`: tensorflow revision override (git commit hash) 13 | """ 14 | 15 | _TF_REVISION = "TF_REVISION" 16 | 17 | def _tensorflow_http_archive(ctx): 18 | git_commit = ctx.attr.git_commit 19 | sha256 = ctx.attr.sha256 20 | 21 | override_git_commit = ctx.os.environ.get(_TF_REVISION) 22 | if override_git_commit: 23 | sha256 = "" 24 | git_commit = override_git_commit 25 | 26 | strip_prefix = "DeepRec-%s" % git_commit 27 | urls = [ 28 | "https://mirror.bazel.build/github.com/DeepRec-AI/DeepRec/archive/%s.tar.gz" % git_commit, 29 | "https://github.com/DeepRec-AI/DeepRec/archive/%s.tar.gz" % git_commit, 30 | ] 31 | ctx.download_and_extract( 32 | urls, 33 | "", 34 | sha256, 35 | "", 36 | strip_prefix, 37 | ) 38 | 39 | tensorflow_http_archive = repository_rule( 40 | implementation = _tensorflow_http_archive, 41 | attrs = { 42 | "git_commit": attr.string(mandatory = True), 43 | "sha256": attr.string(mandatory = True), 44 | }, 45 | ) 46 | -------------------------------------------------------------------------------- /tensorflow_serving/resources/resource_values.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/resources/resource_values.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | namespace device_types { 22 | const char* const kMain = "main"; 23 | const char* const kGpu = "gpu"; 24 | const char* const kTpu = "tpu"; 25 | } // namespace device_types 26 | 27 | namespace resource_kinds { 28 | const char* const kNumModelSlots = "num_model_slots"; 29 | const char* const kRamBytes = "ram_in_bytes"; 30 | const char* const kHeapRamBytes = "heap_ram_in_bytes"; 31 | const char* const kStackRamBytes = "stack_ram_in_bytes"; 32 | const char* const kProcessingMillis = "processing_in_millicores"; 33 | } // namespace resource_kinds 34 | 35 | } // namespace serving 36 | } // namespace tensorflow 37 | -------------------------------------------------------------------------------- /tensorflow_serving/resources/resources.proto: -------------------------------------------------------------------------------- 1 | // Representations for resources used by servables, and available in a system. 2 | // 3 | // Each of the string-typed values are free-form, so that they can be extended 4 | // by third parties. However we strongly recommend using the values defined in 5 | // resource_values.h when possible, for standardization. 6 | 7 | syntax = "proto3"; 8 | 9 | import "google/protobuf/wrappers.proto"; 10 | 11 | package tensorflow.serving; 12 | 13 | // One kind of resource on one device (or type of device). 14 | message Resource { 15 | // The type of device on which the resource resides, e.g. CPU or GPU. 16 | string device = 1; 17 | 18 | // A specific instance of the device of type 'device' to which the resources 19 | // are bound (instances are assumed to be numbered 0, 1, ...). 20 | // 21 | // When representing the resources required by a servable that has yet to be 22 | // loaded, this field is optional. 
If not set, it denotes that the servable's 23 | // resources are not (yet) bound to a specific instance. 24 | google.protobuf.UInt32Value device_instance = 2; 25 | 26 | // The kind of resource on the device (instance), e.g. RAM or compute share. 27 | // 28 | // A given type of resource should have a standard unit that represents the 29 | // smallest useful quantization. We strongly recommend including the unit 30 | // (e.g. bytes or millicores) in this string, as in "ram_bytes". 31 | string kind = 3; 32 | } 33 | 34 | // An allocation of one or more kinds of resources, along with the quantity of 35 | // each. Used to denote the resources that a servable (or collection of 36 | // servables) will use or is currently using. Also used to denote resources 37 | // available to the serving system for loading more servables. 38 | message ResourceAllocation { 39 | // A collection of resources, each with a quantity. Treated as a resource-> 40 | // quantity map, i.e. no resource can repeat and the order is immaterial. 41 | message Entry { 42 | Resource resource = 1; 43 | uint64 quantity = 2; 44 | } 45 | repeated Entry resource_quantities = 1; 46 | } 47 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/hashmap/BUILD: -------------------------------------------------------------------------------- 1 | # Description: Tensorflow Serving hashmap servable. 
2 | 3 | package( 4 | default_visibility = ["//tensorflow_serving:internal"], 5 | features = ["-layering_check"], 6 | ) 7 | 8 | licenses(["notice"]) # Apache 2.0 9 | 10 | filegroup( 11 | name = "all_files", 12 | srcs = glob( 13 | ["**/*"], 14 | exclude = [ 15 | "**/METADATA", 16 | "**/OWNERS", 17 | "g3doc/sitemap.md", 18 | ], 19 | ), 20 | ) 21 | 22 | cc_library( 23 | name = "hashmap_source_adapter", 24 | srcs = ["hashmap_source_adapter.cc"], 25 | hdrs = ["hashmap_source_adapter.h"], 26 | visibility = [ 27 | "//visibility:public", 28 | ], 29 | deps = [ 30 | ":hashmap_source_adapter_proto", 31 | "//tensorflow_serving/core:simple_loader", 32 | "//tensorflow_serving/core:source_adapter", 33 | "//tensorflow_serving/core:storage_path", 34 | "@org_tensorflow//tensorflow/core:lib", 35 | "@org_tensorflow//tensorflow/core:tensorflow", 36 | ], 37 | ) 38 | 39 | cc_test( 40 | name = "hashmap_source_adapter_test", 41 | size = "medium", 42 | srcs = ["hashmap_source_adapter_test.cc"], 43 | deps = [ 44 | ":hashmap_source_adapter", 45 | ":hashmap_source_adapter_proto", 46 | "//tensorflow_serving/core:loader", 47 | "//tensorflow_serving/core:servable_data", 48 | "//tensorflow_serving/core/test_util:test_main", 49 | "//tensorflow_serving/util:any_ptr", 50 | "@org_tensorflow//tensorflow/core:lib", 51 | "@org_tensorflow//tensorflow/core:test", 52 | ], 53 | ) 54 | 55 | load("//tensorflow_serving:serving.bzl", "serving_proto_library") 56 | 57 | serving_proto_library( 58 | name = "hashmap_source_adapter_proto", 59 | srcs = ["hashmap_source_adapter.proto"], 60 | cc_api_version = 2, 61 | ) 62 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/hashmap/hashmap_source_adapter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_SERVABLES_HASHMAP_HASHMAP_SOURCE_ADAPTER_H_ 17 | #define TENSORFLOW_SERVING_SERVABLES_HASHMAP_HASHMAP_SOURCE_ADAPTER_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow_serving/core/simple_loader.h" 23 | #include "tensorflow_serving/core/source_adapter.h" 24 | #include "tensorflow_serving/core/storage_path.h" 25 | #include "tensorflow_serving/servables/hashmap/hashmap_source_adapter.pb.h" 26 | 27 | namespace tensorflow { 28 | namespace serving { 29 | 30 | // A SourceAdapter for string-string hashmaps. It takes storage paths that give 31 | // the locations of serialized hashmaps (in the format indicated in the config) 32 | // and produces loaders for them. 
33 | class HashmapSourceAdapter final 34 | : public SimpleLoaderSourceAdapter> { 36 | public: 37 | explicit HashmapSourceAdapter(const HashmapSourceAdapterConfig& config); 38 | ~HashmapSourceAdapter() override; 39 | 40 | private: 41 | TF_DISALLOW_COPY_AND_ASSIGN(HashmapSourceAdapter); 42 | }; 43 | 44 | } // namespace serving 45 | } // namespace tensorflow 46 | 47 | #endif // TENSORFLOW_SERVING_SERVABLES_HASHMAP_HASHMAP_SOURCE_ADAPTER_H_ 48 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/hashmap/hashmap_source_adapter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package tensorflow.serving; 4 | 5 | // Config proto for HashmapSourceAdapter. 6 | message HashmapSourceAdapterConfig { 7 | // The format used by the file containing a serialized hashmap. 8 | enum Format { 9 | // A simple kind of CSV text file of the form: 10 | // key0,value0\n 11 | // key1,value1\n 12 | // ... 13 | SIMPLE_CSV = 0; 14 | } 15 | Format format = 1; 16 | } 17 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/classification_service.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_CLASSIFICATION_SERVICE_H_ 17 | #define TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_CLASSIFICATION_SERVICE_H_ 18 | 19 | #include "tensorflow/core/lib/core/status.h" 20 | #include "tensorflow/core/protobuf/config.pb.h" 21 | #include "tensorflow_serving/apis/classification.pb.h" 22 | #include "tensorflow_serving/model_servers/server_core.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | // Utility methods for implementation of 28 | // tensorflow_serving/apis/classification-service.proto. 29 | class TensorflowClassificationServiceImpl { 30 | public: 31 | static Status Classify(const RunOptions& run_options, ServerCore* core, 32 | const ClassificationRequest& request, 33 | ClassificationResponse* response); 34 | 35 | // Like Classify(), but uses 'model_spec' instead of the one embedded in 36 | // 'request'. 37 | static Status ClassifyWithModelSpec(const RunOptions& run_options, 38 | ServerCore* core, 39 | const ModelSpec& model_spec, 40 | const ClassificationRequest& request, 41 | ClassificationResponse* response); 42 | }; 43 | 44 | } // namespace serving 45 | } // namespace tensorflow 46 | 47 | #endif // TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_CLASSIFICATION_SERVICE_H_ 48 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/get_model_metadata_impl.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_GET_MODEL_METADATA_IMPL_H_ 17 | #define TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_GET_MODEL_METADATA_IMPL_H_ 18 | 19 | #include "tensorflow/core/lib/core/status.h" 20 | #include "tensorflow_serving/apis/get_model_metadata.pb.h" 21 | #include "tensorflow_serving/model_servers/server_core.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | 26 | struct ModelMetaOption { 27 | bool use_session_group = false; 28 | }; 29 | 30 | class GetModelMetadataImpl { 31 | public: 32 | static constexpr const char kSignatureDef[] = "signature_def"; 33 | 34 | static Status GetModelMetadata(ServerCore* core, 35 | const GetModelMetadataRequest& request, 36 | GetModelMetadataResponse* response, 37 | ModelMetaOption opt = ModelMetaOption()); 38 | 39 | // Like GetModelMetadata(), but uses 'model_spec' instead of the one embedded 40 | // in 'request'. 
41 | static Status GetModelMetadataWithModelSpec( 42 | ServerCore* core, const ModelSpec& model_spec, 43 | const GetModelMetadataRequest& request, 44 | GetModelMetadataResponse* response, 45 | ModelMetaOption opt = ModelMetaOption()); 46 | }; 47 | 48 | } // namespace serving 49 | } // namespace tensorflow 50 | 51 | #endif // TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_GET_MODEL_METADATA_IMPL_H_ 52 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/multi_inference_helper.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_MULTI_INFERENCE_HELPER_H_ 17 | #define TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_MULTI_INFERENCE_HELPER_H_ 18 | 19 | #include "tensorflow/contrib/session_bundle/session_bundle.h" 20 | #include "tensorflow/core/lib/core/status.h" 21 | #include "tensorflow_serving/apis/inference.pb.h" 22 | #include "tensorflow_serving/model_servers/server_core.h" 23 | #include "tensorflow_serving/util/optional.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | 28 | // Runs MultiInference 29 | Status RunMultiInferenceWithServerCore(const RunOptions& run_options, 30 | ServerCore* core, 31 | const MultiInferenceRequest& request, 32 | MultiInferenceResponse* response); 33 | 34 | // Like RunMultiInferenceWithServerCore(), but uses 'model_spec' instead of the 35 | // one(s) embedded in 'request'. 36 | Status RunMultiInferenceWithServerCoreWithModelSpec( 37 | const RunOptions& run_options, ServerCore* core, 38 | const ModelSpec& model_spec, const MultiInferenceRequest& request, 39 | MultiInferenceResponse* response); 40 | 41 | } // namespace serving 42 | } // namespace tensorflow 43 | 44 | #endif // TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_MULTI_INFERENCE_HELPER_H_ 45 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/regression_service.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_REGRESSION_SERVICE_H_ 17 | #define TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_REGRESSION_SERVICE_H_ 18 | 19 | #include "tensorflow/core/lib/core/status.h" 20 | #include "tensorflow/core/protobuf/config.pb.h" 21 | #include "tensorflow_serving/apis/regression.pb.h" 22 | #include "tensorflow_serving/model_servers/server_core.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | // Utility methods for implementation of 28 | // tensorflow_serving/apis/regression-service.proto. 29 | class TensorflowRegressionServiceImpl final { 30 | public: 31 | static Status Regress(const RunOptions& run_options, ServerCore* core, 32 | const RegressionRequest& request, 33 | RegressionResponse* response); 34 | 35 | // Like Regress(), but uses 'model_spec' instead of the one embedded in 36 | // 'request'. 
37 | static Status RegressWithModelSpec(const RunOptions& run_options, 38 | ServerCore* core, 39 | const ModelSpec& model_spec, 40 | const RegressionRequest& request, 41 | RegressionResponse* response); 42 | }; 43 | 44 | } // namespace serving 45 | } // namespace tensorflow 46 | 47 | #endif // TENSORFLOW_SERVING_SERVABLES_TENSORFLOW_REGRESSION_SERVICE_H_ 48 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "tensorflow_serving/servables/tensorflow/session_bundle_config.proto"; 4 | 5 | package tensorflow.serving; 6 | 7 | // Config proto for SavedModelBundleSourceAdapter. 8 | message SavedModelBundleSourceAdapterConfig { 9 | // A SessionBundleConfig. 10 | // FOR INTERNAL USE ONLY DURING TRANSITION TO SAVED_MODEL. WILL BE DEPRECATED. 11 | // TODO(b/32248363): Replace this field with the "real" field(s). 12 | SessionBundleConfig legacy_config = 1000; 13 | } 14 | 15 | // Config proto for SavedModelBundleV2SourceAdapter. 16 | message SavedModelBundleV2SourceAdapterConfig { 17 | // A SessionGroupBundleConfig. 18 | // FOR INTERNAL USE ONLY DURING TRANSITION TO SAVED_MODEL. WILL BE DEPRECATED. 19 | // TODO(b/32248363): Replace this field with the "real" field(s). 20 | SessionGroupBundleConfig legacy_config = 1000; 21 | } 22 | 23 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/serving_session.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/servables/tensorflow/serving_session.h" 17 | 18 | #include "tensorflow/core/framework/graph.pb.h" 19 | #include "tensorflow/core/lib/core/errors.h" 20 | #include "tensorflow/core/lib/core/status.h" 21 | 22 | namespace tensorflow { 23 | namespace serving { 24 | 25 | Status ServingSession::Create(const GraphDef& graph) { 26 | return errors::PermissionDenied("State changes denied via ServingSession"); 27 | } 28 | 29 | Status ServingSession::Extend(const GraphDef& graph) { 30 | return errors::PermissionDenied("State changes denied via ServingSession"); 31 | } 32 | 33 | Status ServingSession::Close() { 34 | return errors::PermissionDenied("State changes denied via ServingSession"); 35 | } 36 | 37 | } // namespace serving 38 | } // namespace tensorflow 39 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/session_bundle_source_adapter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "tensorflow_serving/servables/tensorflow/session_bundle_config.proto"; 4 | 5 | package tensorflow.serving; 6 | 7 | // Config proto for SessionBundleSourceAdapter. 8 | message SessionBundleSourceAdapterConfig { 9 | SessionBundleConfig config = 1; 10 | } 11 | 12 | // Config proto for SessionGroupBundleSourceAdapter. 
13 | message SessionGroupBundleSourceAdapterConfig { 14 | SessionGroupBundleConfig config = 1; 15 | } 16 | 17 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/bad_half_plus_two/00000123/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "/tmp/bad_half_plus_two/00000123/export" 2 | all_model_checkpoint_paths: "/tmp/bad_half_plus_two/00000123/export" 3 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/bad_half_plus_two/00000123/export: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/bad_half_plus_two/00000123/export -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/bad_half_plus_two/00000123/export.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/bad_half_plus_two/00000123/export.meta -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/bad_model_config.txt: -------------------------------------------------------------------------------- 1 | improperly formatted file -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/batching_config.txt: -------------------------------------------------------------------------------- 1 | max_batch_size { value: 128 } 2 | batch_timeout_micros { value: 0 } 3 | max_enqueued_batches { value: 1000000 } 4 | 
num_batch_threads { value: 8 } 5 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/export_half_plus_two.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Exports a toy TensorFlow model. 16 | 17 | Exports a TensorFlow model to /tmp/half_plus_two/. 18 | 19 | This graph calculates, 20 | y = a*x + b 21 | where a and b are variables with a=0.5 and b=2. 22 | """ 23 | 24 | # This is a placeholder for a Google-internal import. 25 | 26 | import tensorflow as tf 27 | from tensorflow.contrib.session_bundle import exporter 28 | 29 | 30 | def Export(): 31 | export_path = "/tmp/half_plus_two" 32 | with tf.Session() as sess: 33 | # Make model parameters a&b variables instead of constants to 34 | # exercise the variable reloading mechanisms. 35 | a = tf.Variable(0.5) 36 | b = tf.Variable(2.0) 37 | 38 | # Calculate, y = a*x + b 39 | # here we use a placeholder 'x' which is fed at inference time. 40 | x = tf.placeholder(tf.float32) 41 | y = tf.add(tf.multiply(a, x), b) 42 | 43 | # Run an export. 
44 | tf.global_variables_initializer().run() 45 | export = exporter.Exporter(tf.train.Saver()) 46 | export.init(named_graph_signatures={ 47 | "inputs": exporter.generic_signature({"x": x}), 48 | "outputs": exporter.generic_signature({"y": y}), 49 | "regress": exporter.regression_signature(x, y) 50 | }) 51 | export.export(export_path, tf.constant(123), sess) 52 | 53 | 54 | def main(_): 55 | Export() 56 | 57 | 58 | if __name__ == "__main__": 59 | tf.app.run() 60 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/good_model_config.txt: -------------------------------------------------------------------------------- 1 | model_config_list: { 2 | config: { 3 | name: "half_plus_two", 4 | base_path: "${TEST_HALF_PLUS_TWO_DIR}", 5 | model_platform: "tensorflow" 6 | }, 7 | config: { 8 | name: "half_plus_three", 9 | base_path: "${TEST_HALF_PLUS_THREE_DIR}", 10 | model_platform: "tensorflow" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two/00000123/export.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two/00000123/export.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two/00000123/export.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two/00000123/export.meta: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two/00000123/export.meta -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000123/export.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000123/export.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000123/export.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000123/export.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000123/export.meta -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000124/export.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000124/export.index: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000124/export.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000124/export.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/half_plus_two_2_versions/00000124/export.meta -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/monitoring_config.txt: -------------------------------------------------------------------------------- 1 | prometheus_config: { 2 | enable: true, 3 | path: "/monitoring/prometheus/metrics" 4 | } 5 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_counter/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_counter/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_counter/00000123/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_counter/00000123/variables/variables.index: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_counter/00000123/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/assets/foo.txt: -------------------------------------------------------------------------------- 1 | asset-file-contents -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@@@@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_three/00000123/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/assets/foo.txt: 
-------------------------------------------------------------------------------- 1 | asset-file-contents -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@@@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000123/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/assets/foo.txt: -------------------------------------------------------------------------------- 1 | asset-file-contents -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/saved_model.pb: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@@@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_2_versions/00000124/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt: -------------------------------------------------------------------------------- 1 | asset-file-contents -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.data-00000-of-00001: 
-------------------------------------------------------------------------------- 1 | ??@@@@@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu/00000123/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 1 | ?@@@ -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu/00000123/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu/00000123/variables/variables.index -------------------------------------------------------------------------------- 
/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_gpu_trt/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/assets/foo.txt: -------------------------------------------------------------------------------- 1 | asset-file-contents -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/saved_model.pb -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/variables/variables.data-00000-of-00001 -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/variables/variables.index: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_mkl/00000123/variables/variables.index -------------------------------------------------------------------------------- /tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tflite/00000123/model.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeepRec-AI/serving/0173b673defe6247736516861e735fcbd85ad06f/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tflite/00000123/model.tflite -------------------------------------------------------------------------------- /tensorflow_serving/serving.bzl: -------------------------------------------------------------------------------- 1 | load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library") 2 | load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") 3 | 4 | def serving_proto_library( 5 | name, 6 | srcs = [], 7 | has_services = False, # pylint: disable=unused-argument 8 | deps = [], 9 | visibility = None, 10 | testonly = 0, 11 | cc_grpc_version = None, 12 | cc_api_version = 2): # pylint: disable=unused-argument 13 | native.filegroup( 14 | name = name + "_proto_srcs", 15 | srcs = srcs, 16 | testonly = testonly, 17 | ) 18 | 19 | use_grpc_plugin = None 20 | if cc_grpc_version: 21 | use_grpc_plugin = True 22 | cc_proto_library( 23 | name = name, 24 | srcs = srcs, 25 | deps = deps, 26 | cc_libs = ["@com_google_protobuf//:protobuf"], 27 | protoc = "@com_google_protobuf//:protoc", 28 | default_runtime = "@com_google_protobuf//:protobuf", 29 | use_grpc_plugin = use_grpc_plugin, 30 | testonly = testonly, 31 | visibility = visibility, 32 | ) 33 | 34 | def serving_go_grpc_library(**kwargs): # pylint: disable=unused-argument 35 | """Build the Go gRPC bindings for a service. 
Not yet implemented.""" 36 | return 37 | 38 | def serving_proto_library_py(name, proto_library, srcs = [], deps = [], visibility = None, testonly = 0): # pylint: disable=unused-argument 39 | py_proto_library( 40 | name = name, 41 | srcs = srcs, 42 | srcs_version = "PY2AND3", 43 | deps = ["@com_google_protobuf//:protobuf_python"] + deps, 44 | default_runtime = "@com_google_protobuf//:protobuf_python", 45 | protoc = "@com_google_protobuf//:protoc", 46 | visibility = visibility, 47 | testonly = testonly, 48 | ) 49 | -------------------------------------------------------------------------------- /tensorflow_serving/sources/storage_path/static_storage_path_source.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/sources/storage_path/static_storage_path_source.h" 17 | 18 | #include 19 | #include 20 | 21 | #include "tensorflow_serving/core/servable_data.h" 22 | #include "tensorflow_serving/core/servable_id.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | Status StaticStoragePathSource::Create( 28 | const StaticStoragePathSourceConfig& config, 29 | std::unique_ptr* result) { 30 | auto raw_result = new StaticStoragePathSource; 31 | raw_result->config_ = config; 32 | result->reset(raw_result); 33 | return Status::OK(); 34 | } 35 | 36 | void StaticStoragePathSource::SetAspiredVersionsCallback( 37 | AspiredVersionsCallback callback) { 38 | const ServableId id = {config_.servable_name(), config_.version_num()}; 39 | LOG(INFO) << "Aspiring servable " << id; 40 | callback(config_.servable_name(), 41 | {CreateServableData(id, config_.version_path())}); 42 | } 43 | 44 | } // namespace serving 45 | } // namespace tensorflow 46 | -------------------------------------------------------------------------------- /tensorflow_serving/sources/storage_path/static_storage_path_source.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
namespace tensorflow {
namespace serving {

// A StoragePathSource that calls the aspired-versions callback exactly once,
// with a single hard-coded servable and version path.
// Useful for testing and experimental deployments.
// NOTE(review): the dump stripped the base-class template argument;
// reconstructed as Source<StoragePath> -- confirm against the repo.
class StaticStoragePathSource : public Source<StoragePath> {
 public:
  // Creates a source that aspires exactly the servable described in 'config'.
  // Never fails; '*result' is always populated on return.
  static Status Create(const StaticStoragePathSourceConfig& config,
                       std::unique_ptr<StaticStoragePathSource>* result);
  ~StaticStoragePathSource() override = default;

  // Synchronously invokes 'callback' once with the configured servable
  // name/version/path (see the .cc); no background thread is started.
  void SetAspiredVersionsCallback(AspiredVersionsCallback callback) override;

 private:
  // Only constructible via Create().
  StaticStoragePathSource() = default;

  // Immutable after Create().
  StaticStoragePathSourceConfig config_;

  TF_DISALLOW_COPY_AND_ASSIGN(StaticStoragePathSource);
};

}  // namespace serving
}  // namespace tensorflow
"""
Module for build utilities to distinguish different tensorflow versions.
"""

load("@org_tensorflow//tensorflow:tensorflow.bzl", "VERSION_MAJOR")

def if_v2(a):
    """Returns `a` when building against TensorFlow 2.x, else the empty list.

    Args:
      a: The value (typically a list of srcs/deps/copts) to select under 2.x.
    """
    if VERSION_MAJOR == "2":
        return a
    else:
        return []

def if_not_v2(a):
    """Returns `a` when NOT building against TensorFlow 2.x, else [].

    Args:
      a: The value (typically a list of srcs/deps/copts) to select when the
        TensorFlow major version is anything other than "2".
    """
    if VERSION_MAJOR == "2":
        return []
    else:
        return a
namespace tensorflow {
namespace serving {
namespace test_util {

// Returns the absolute path of 'relative_path' inside the TensorFlow source
// tree staged under bazel's TEST_SRCDIR runfiles directory.
string TensorflowTestSrcDirPath(const string& relative_path) {
  const string base_path = tensorflow::io::JoinPath(  //
      getenv("TEST_SRCDIR"),                          //
      "tf_serving/external/org_tensorflow/tensorflow/");
  return tensorflow::io::JoinPath(base_path, relative_path);
}

// Returns the absolute path of 'relative_path' inside tensorflow/contrib,
// building on TensorflowTestSrcDirPath().
string ContribTestSrcDirPath(const string& relative_path) {
  const string base_path = TensorflowTestSrcDirPath("contrib/");
  return tensorflow::io::JoinPath(base_path, relative_path);
}

// Returns the absolute path of 'relative_path' inside the tensorflow_serving
// source tree staged under bazel's TEST_SRCDIR runfiles directory.
string TestSrcDirPath(const string& relative_path) {
  const string base_path = tensorflow::io::JoinPath(
      getenv("TEST_SRCDIR"), "tf_serving/tensorflow_serving");
  return tensorflow::io::JoinPath(base_path, relative_path);
}

// Stores the expected textproto string verbatim.
ProtoStringMatcher::ProtoStringMatcher(const string& expected)
    : expected_(expected) {}
// Stores the DebugString() rendering of 'expected' for later comparison.
ProtoStringMatcher::ProtoStringMatcher(const google::protobuf::Message& expected)
    : expected_(expected.DebugString()) {}

}  // namespace test_util
}  // namespace serving
}  // namespace tensorflow
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.devel-gpu basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response. This model will only load on a GPU.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile.devel-gpu \
#       --test_arg=tensorflow/serving:latest-devel-gpu \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=true
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=true
declare -r MODELNAME="saved_model_half_plus_two_gpu"
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.devel-gpu basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response. This model will only load on a GPU.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile.devel-gpu \
#       --test_arg=tensorflow/serving:latest-devel-gpu \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
declare -r USE_NVIDIA_RUNTIME=true
# Fix: every sibling docker test declares IS_MKL_IMAGE; this one left it
# unset, so any `== true` check in docker_test_lib.sh would read an empty
# value (and fail hard under `set -u`). Declare it explicitly as false.
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=true
declare -r MODELNAME="saved_model_half_plus_two_gpu_trt"
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
# TF-TRT requires input to be at minimum two dimensional.
declare -r REQUEST='{"instances": [[1.0],[2.0],[5.0]]}'
declare -r RESPONSE='{"predictions":[[2.5],[3.0],[4.5]]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.devel basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_dockerfile_devel_mkl \
#       --test_arg=tensorflow/serving:latest-devel-mkl \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=false
declare -r IS_MKL_IMAGE=true
declare -r IS_DEVEL_IMAGE=true
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r MODELNAME="saved_model_half_plus_two_mkl"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.devel basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile.devel \
#       --test_arg=tensorflow/serving:latest-devel \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=false
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=true
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r MODELNAME="saved_model_half_plus_two_cpu"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.gpu basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response. This model will only load on a GPU.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile.gpu \
#       --test_arg=tensorflow/serving:latest-gpu \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=true
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=false
declare -r MODELNAME="saved_model_half_plus_two_gpu"
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile.gpu basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response. This model will only load on a GPU.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile.gpu \
#       --test_arg=tensorflow/serving:latest-gpu \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
declare -r USE_NVIDIA_RUNTIME=true
# Fix: every sibling docker test declares IS_MKL_IMAGE; this one left it
# unset, so any `== true` check in docker_test_lib.sh would read an empty
# value (and fail hard under `set -u`). Declare it explicitly as false.
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=false
declare -r MODELNAME="saved_model_half_plus_two_gpu_trt"
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
# TF-TRT requires input to be at minimum two dimensional.
declare -r REQUEST='{"instances": [[1.0],[2.0],[5.0]]}'
declare -r RESPONSE='{"predictions":[[2.5],[3.0],[4.5]]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_dockerfile_mkl --test_arg=tensorflow/serving:latest-mkl \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=false
declare -r IS_MKL_IMAGE=true
declare -r IS_DEVEL_IMAGE=false
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r MODELNAME="saved_model_half_plus_two_mkl"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
#!/bin/bash
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests if a Docker image built from Dockerfile basically functions.
#
# It does this by loading up a half plus two toy model in the Docker image
# and querying it, validating the response.
#
# The image passed to this test must be already available locally.
#
# Ex: $ bazel test :unittest_Dockerfile --test_arg=tensorflow/serving \
#       --test_output=streamed --verbose_failures

declare -r PROJDIR=$(pwd)/tensorflow_serving
source ${PROJDIR}/tools/docker/tests/docker_test_lib.sh || exit 1

# Values to fill in for test
# ------------------------------------------------------------------------------
# NOTE(review): these readonly globals are presumably consumed by
# test_docker_image() in docker_test_lib.sh -- confirm against the lib.
declare -r USE_NVIDIA_RUNTIME=false
declare -r IS_MKL_IMAGE=false
declare -r IS_DEVEL_IMAGE=false
declare -r MODELDIR="${PROJDIR}/servables/tensorflow/testdata"
declare -r MODELNAME="saved_model_half_plus_two_cpu"
declare -r REQUEST='{"instances": [1.0,2.0,5.0]}'
declare -r RESPONSE='{"predictions":[2.5,3.0,4.5]}'
# ------------------------------------------------------------------------------

# Grab the last argument as the image, so we can override the test arg in
# the BUILD file
test_docker_image ${@: -1}
# Description: Tensorflow Serving pip package.

licenses(["notice"])  # Apache 2.0

# Packaging entry point: runs build_pip_package.sh with setup.py and the
# generated *_pb2 Python proto modules staged as runfiles, which the script
# bundles into the tensorflow-serving-api pip package.
sh_binary(
    name = "build_pip_package",
    srcs = ["build_pip_package.sh"],
    data = [
        "setup.py",

        # Python scripts needed for the Python TF Serving API
        "//tensorflow_serving/apis:classification_proto_py_pb2",
        "//tensorflow_serving/apis:get_model_metadata_proto_py_pb2",
        "//tensorflow_serving/apis:get_model_status_proto_py_pb2",
        "//tensorflow_serving/apis:inference_proto_py_pb2",
        "//tensorflow_serving/apis:input_proto_py_pb2",
        "//tensorflow_serving/apis:model_management_proto_py_pb2",
        "//tensorflow_serving/apis:model_proto_py_pb2",
        "//tensorflow_serving/apis:model_service_proto_py_pb2",
        "//tensorflow_serving/apis:prediction_log_proto_py_pb2",
        "//tensorflow_serving/apis:prediction_service_proto_py_pb2",
        "//tensorflow_serving/apis:predict_proto_py_pb2",
        "//tensorflow_serving/apis:regression_proto_py_pb2",
        "//tensorflow_serving/apis:session_service_proto_py_pb2",
        "//tensorflow_serving/config:log_collector_config_proto_py_pb2",
        "//tensorflow_serving/config:logging_config_proto_py_pb2",
        "//tensorflow_serving/config:model_server_config_proto_py_pb2",
        "//tensorflow_serving/sources/storage_path:file_system_storage_path_source_proto_py_pb2",
        "//tensorflow_serving/util:status_proto_py_pb2",
        "//tensorflow_serving/core:logging_proto_py_pb2",
    ],
)
// Proto messages used by class_registration_test.cc.

syntax = "proto3";

import "google/protobuf/any.proto";

package tensorflow.serving;

// Minimal config message; structurally identical to Config2 and
// distinguishable from it only by message type.
message Config1 {
  string string_field = 1;
}

// Second minimal config message with the same shape as Config1.
message Config2 {
  string string_field = 1;
}

// Wraps a google.protobuf.Any so tests can pack/unpack arbitrary configs.
message MessageWithAny {
  google.protobuf.Any any_field = 1;
}
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/class_registration_util.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | Status ParseUrlForAnyType(const string& type_url, 22 | string* const full_type_name) { 23 | std::vector splits = str_util::Split(type_url, '/'); 24 | if (splits.size() < 2 || splits[splits.size() - 1].empty()) { 25 | return errors::InvalidArgument( 26 | "Supplied config's type_url could not be parsed: ", type_url); 27 | } 28 | *full_type_name = splits[splits.size() - 1]; 29 | return Status::OK(); 30 | } 31 | 32 | } // namespace serving 33 | } // namespace tensorflow 34 | -------------------------------------------------------------------------------- /tensorflow_serving/util/class_registration_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
#ifndef TENSORFLOW_SERVING_UTIL_CLASS_REGISTRATION_UTIL_H_
#define TENSORFLOW_SERVING_UTIL_CLASS_REGISTRATION_UTIL_H_

#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"

namespace tensorflow {
namespace serving {

// Parses a url whose final '/' is followed by a proto type name, e.g.
// "type.googleapis.com/some_namespace.some_proto_type_name".
// On success, stores the text after the last '/' in '*full_type_name'.
// Returns Status::OK() iff parsing succeeded; otherwise InvalidArgument and
// '*full_type_name' is left untouched.
Status ParseUrlForAnyType(const string& type_url, string* const full_type_name);

}  // namespace serving
}  // namespace tensorflow

#endif  // TENSORFLOW_SERVING_UTIL_CLASS_REGISTRATION_UTIL_H_
/// An abstract object that can execute closures.
///
/// Implementations of executor must be thread-safe.
class Executor {
 public:
  virtual ~Executor() = default;

  /// Schedule the specified 'fn' for execution in this executor. Depending on
  /// the subclass implementation, this may block in some situations.
  /// Note: no handle is returned, so completion and errors of 'fn' are not
  /// observable through this interface.
  /// NOTE(review): the dump stripped the template argument; reconstructed as
  /// std::function<void()> (a no-arg closure) -- confirm against the repo.
  virtual void Schedule(std::function<void()> fn) = 0;
};
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/file_probing_env.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | Status TensorflowFileProbingEnv::FileExists(const string& fname) { 22 | return env_->FileExists(fname); 23 | } 24 | 25 | Status TensorflowFileProbingEnv::GetChildren(const string& dir, 26 | std::vector* children) { 27 | return env_->GetChildren(dir, children); 28 | } 29 | 30 | Status TensorflowFileProbingEnv::IsDirectory(const string& fname) { 31 | return env_->IsDirectory(fname); 32 | } 33 | 34 | Status TensorflowFileProbingEnv::GetFileSize(const string& fname, 35 | uint64* file_size) { 36 | return env_->GetFileSize(fname, file_size); 37 | } 38 | 39 | } // namespace serving 40 | } // namespace tensorflow 41 | -------------------------------------------------------------------------------- /tensorflow_serving/util/hash.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/hash.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | uint64 HashCombine(const uint64 hash1, const uint64 hash2) { 22 | return hash1 ^ (hash2 + 0x9e3779b97f4a7800 + (hash1 << 10) + (hash1 >> 4)); 23 | } 24 | 25 | } // namespace serving 26 | } // namespace tensorflow 27 | -------------------------------------------------------------------------------- /tensorflow_serving/util/hash.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_HASH_H_ 17 | #define TENSORFLOW_SERVING_UTIL_HASH_H_ 18 | 19 | #include "tensorflow/core/platform/types.h" 20 | 21 | namespace tensorflow { 22 | namespace serving { 23 | 24 | // Combines 2 hashes and returns a 3rd one. 
25 | uint64 HashCombine(uint64 hash1, uint64 hash2); 26 | 27 | } // namespace serving 28 | } // namespace tensorflow 29 | 30 | #endif // TENSORFLOW_SERVING_UTIL_HASH_H_ 31 | -------------------------------------------------------------------------------- /tensorflow_serving/util/inline_executor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/inline_executor.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | InlineExecutor::InlineExecutor() {} 22 | 23 | InlineExecutor::~InlineExecutor() {} 24 | 25 | void InlineExecutor::Schedule(std::function fn) { fn(); } 26 | 27 | } // namespace serving 28 | } // namespace tensorflow 29 | -------------------------------------------------------------------------------- /tensorflow_serving/util/inline_executor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_INLINE_EXECUTOR_H_ 17 | #define TENSORFLOW_SERVING_UTIL_INLINE_EXECUTOR_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/core/platform/macros.h" 22 | #include "tensorflow_serving/util/executor.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | // An InlineExecutor is a trivial executor that immediately executes the closure 28 | // given to it. It's useful as a fake, and in cases where an executor is needed, 29 | // but multi-threadedness is not. 30 | class InlineExecutor : public Executor { 31 | public: 32 | InlineExecutor(); 33 | ~InlineExecutor() override; 34 | void Schedule(std::function fn) override; 35 | }; 36 | 37 | } // namespace serving 38 | } // namespace tensorflow 39 | 40 | #endif // TENSORFLOW_SERVING_UTIL_INLINE_EXECUTOR_H_ 41 | -------------------------------------------------------------------------------- /tensorflow_serving/util/inline_executor_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/inline_executor.h" 17 | 18 | #include 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | namespace { 23 | 24 | TEST(InlineExecutorTest, Executes) { 25 | InlineExecutor inline_executor; 26 | 27 | int total_calls = 0; 28 | inline_executor.Schedule([&]() { ++total_calls; }); 29 | EXPECT_EQ(1, total_calls); 30 | } 31 | 32 | } // namespace 33 | } // namespace serving 34 | } // namespace tensorflow 35 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/README.md: -------------------------------------------------------------------------------- 1 | A (truly) lightweight OSS HTTP Server 2 | ===================================== 3 | 4 | Design and implementation started in April 2018, within the TF serving code base. 5 | 6 | APIs are subject to change. 7 | 8 | Questions? 
9 | ---------- 10 | 11 | If you have any questions, please send them to [web|awk]@google.com 12 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/client/BUILD: -------------------------------------------------------------------------------- 1 | # Description: a lightweight http client 2 | 3 | package( 4 | default_visibility = [ 5 | "//tensorflow_serving/util/net_http:__subpackages__", 6 | ], 7 | ) 8 | 9 | licenses(["notice"]) # Apache 2.0 10 | 11 | cc_library( 12 | name = "evhttp_client", 13 | srcs = [ 14 | "evhttp_connection.cc", 15 | ], 16 | hdrs = [ 17 | "evhttp_connection.h", 18 | ], 19 | deps = [ 20 | "//tensorflow_serving/util/net_http/internal:net_logging", 21 | "//tensorflow_serving/util/net_http/server/public:http_server_api", 22 | "@com_github_libevent_libevent//:libevent", 23 | "@com_google_absl//absl/base", 24 | "@com_google_absl//absl/strings", 25 | "@com_google_absl//absl/synchronization", 26 | ], 27 | ) 28 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/client/README.md: -------------------------------------------------------------------------------- 1 | The client library is still under development, and currently used for writing tests. 2 | 3 | The API specs are yet to be finalized. 
-------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/client/testing/BUILD: -------------------------------------------------------------------------------- 1 | # Description: net_http/client/testing 2 | 3 | package( 4 | default_visibility = ["//visibility:private"], 5 | ) 6 | 7 | licenses(["notice"]) # Apache 2.0 8 | 9 | cc_binary( 10 | name = "evhttp_echo_client", 11 | srcs = ["evhttp_echo_client.cc"], 12 | deps = [ 13 | "//tensorflow_serving/util/net_http/client:evhttp_client", 14 | ], 15 | ) 16 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/client/testing/evhttp_echo_client.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | // A test client to print the response from the evhttp_echo_server 17 | // URI: /print 18 | 19 | #include 20 | 21 | #include "tensorflow_serving/util/net_http/client/evhttp_connection.h" 22 | 23 | namespace { 24 | 25 | using tensorflow::serving::net_http::ClientRequest; 26 | using tensorflow::serving::net_http::ClientResponse; 27 | using tensorflow::serving::net_http::EvHTTPConnection; 28 | 29 | bool SendRequest(const char* url) { 30 | auto connection = EvHTTPConnection::Connect(url); 31 | if (connection == nullptr) { 32 | std::cerr << "Fail to connect to %s" << url; 33 | } 34 | 35 | ClientRequest request = {url, "GET", {}, nullptr}; 36 | ClientResponse response = {}; 37 | 38 | if (!connection->BlockingSendRequest(request, &response)) { 39 | std::cerr << "Request failed."; 40 | return false; 41 | } 42 | 43 | std::cout << "Response received: " << std::endl 44 | << "Status: " << response.status << std::endl; 45 | 46 | for (auto keyval : response.headers) { 47 | std::cout << keyval.first << " : " << keyval.second << std::endl; 48 | } 49 | 50 | std::cout << std::endl << response.body << std::endl; 51 | return true; 52 | } 53 | 54 | } // namespace 55 | 56 | int main(int argc, char** argv) { 57 | if (argc < 2) { 58 | std::cerr << "Usage: http-client " << std::endl; 59 | return 1; 60 | } 61 | 62 | return SendRequest(argv[1]); 63 | } 64 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/compression/BUILD: -------------------------------------------------------------------------------- 1 | # Description: compression support libraries 2 | 3 | package( 4 | default_visibility = [ 5 | "//tensorflow_serving:internal", 6 | "//tensorflow_serving/util/net_http:__subpackages__", 7 | ], 8 | ) 9 | 10 | licenses(["notice"]) # Apache 2.0 11 | 12 | # C++ lib based on zlib for gzip support 13 | cc_library( 14 | name = 
"gzip_zlib", 15 | srcs = [ 16 | "gzip_zlib.cc", 17 | ], 18 | hdrs = [ 19 | "gzip_zlib.h", 20 | ], 21 | deps = [ 22 | "//tensorflow_serving/util/net_http/internal:net_logging", 23 | "@com_google_absl//absl/base", 24 | "@com_google_absl//absl/base:core_headers", 25 | "@com_google_absl//absl/strings", 26 | "@zlib_archive//:zlib", 27 | ], 28 | ) 29 | 30 | cc_test( 31 | name = "gzip_zlib_test", 32 | size = "large", 33 | srcs = ["gzip_zlib_test.cc"], 34 | features = ["-layering_check"], 35 | deps = [ 36 | ":gzip_zlib", 37 | "//tensorflow_serving/core/test_util:test_main", 38 | ], 39 | ) 40 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/compression/README.md: -------------------------------------------------------------------------------- 1 | Compression support 2 | =================== 3 | 4 | This package provides C++ wrappers for compression libraries such as gzip, br. 5 | 6 | APIs are subject to change but usage outside net_http is expected. 7 | 8 | gzip_zlib.h 9 | --------------------- 10 | 11 | Minimum APIs and implementation to support gzip Content-Encoding via zlib. 
12 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/internal/BUILD: -------------------------------------------------------------------------------- 1 | # Description: shared code for net_http 2 | 3 | package( 4 | default_visibility = [ 5 | "//tensorflow_serving/util/net_http:__subpackages__", 6 | ], 7 | ) 8 | 9 | licenses(["notice"]) # Apache 2.0 10 | 11 | cc_library( 12 | name = "fixed_thread_pool", 13 | testonly = 1, 14 | hdrs = ["fixed_thread_pool.h"], 15 | deps = [ 16 | "@com_google_absl//absl/base:core_headers", 17 | "@com_google_absl//absl/synchronization", 18 | ], 19 | ) 20 | 21 | cc_library( 22 | name = "net_logging", 23 | srcs = ["net_logging.cc"], 24 | hdrs = ["net_logging.h"], 25 | deps = [ 26 | "@com_google_absl//absl/base:config", 27 | "@com_google_absl//absl/base:core_headers", 28 | "@com_google_absl//absl/base:log_severity", 29 | ], 30 | ) 31 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/internal/testing/BUILD: -------------------------------------------------------------------------------- 1 | # Description: net_http/internal/testing 2 | 3 | package( 4 | default_visibility = ["//visibility:private"], 5 | ) 6 | 7 | licenses(["notice"]) # Apache 2.0 8 | 9 | cc_binary( 10 | name = "net_logging_example", 11 | srcs = ["net_logging_example.cc"], 12 | deps = [ 13 | "//tensorflow_serving/util/net_http/internal:net_logging", 14 | ], 15 | ) 16 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/internal/testing/net_logging_example.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include 17 | #include 18 | 19 | #include "tensorflow_serving/util/net_http/internal/net_logging.h" 20 | 21 | int main(int argc, char** argv) { 22 | NET_LOG(INFO, "started!"); 23 | 24 | size_t size = 100; 25 | NET_LOG(ERROR, "read less than specified bytes : %zu", size); 26 | 27 | const char* url = "/url"; 28 | NET_LOG(WARNING, "%s: read less than specified bytes : %zu", url, size); 29 | 30 | NET_LOG(FATAL, "aborted!"); 31 | 32 | return 0; // unexpected 33 | } 34 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/server/internal/server_support.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | // This pure interface provides callbacks from a request object to the 17 | // server object so the two are properly decoupled. 18 | // This may turn out to be generally useful with no libevents specifics. 19 | 20 | #ifndef TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_INTERNAL_SERVER_SUPPORT_H_ 21 | #define TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_INTERNAL_SERVER_SUPPORT_H_ 22 | 23 | #include 24 | 25 | #include "tensorflow_serving/util/net_http/server/public/server_request_interface.h" 26 | 27 | namespace tensorflow { 28 | namespace serving { 29 | namespace net_http { 30 | 31 | class ServerSupport { 32 | public: 33 | virtual ~ServerSupport() = default; 34 | 35 | ServerSupport(const ServerSupport& other) = delete; 36 | ServerSupport& operator=(const ServerSupport& other) = delete; 37 | 38 | // book-keeping of active requests 39 | virtual void IncOps() = 0; 40 | virtual void DecOps() = 0; 41 | 42 | // Schedules the callback function to run immediately from the event loop. 43 | // Returns false if any error. 
44 | virtual bool EventLoopSchedule(std::function fn) = 0; 45 | 46 | protected: 47 | ServerSupport() = default; 48 | }; 49 | 50 | } // namespace net_http 51 | } // namespace serving 52 | } // namespace tensorflow 53 | 54 | #endif // TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_INTERNAL_SERVER_SUPPORT_H_ 55 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/server/public/BUILD: -------------------------------------------------------------------------------- 1 | # Description: a lightweight http server and related utils to support Web clients 2 | 3 | package( 4 | default_visibility = [ 5 | ":http_server_clients", 6 | "//tensorflow_serving:internal", 7 | "//tensorflow_serving/util/net_http:__subpackages__", 8 | ], 9 | ) 10 | 11 | package_group(name = "http_server_clients") 12 | 13 | licenses(["notice"]) # Apache 2.0 14 | 15 | cc_library( 16 | name = "http_server_api", 17 | srcs = [ 18 | "header_names.cc", 19 | ], 20 | hdrs = [ 21 | "header_names.h", 22 | "httpserver_interface.h", 23 | "response_code_enum.h", 24 | "server_request_interface.h", 25 | ], 26 | deps = [ 27 | "@com_google_absl//absl/strings", 28 | "@com_google_absl//absl/time", 29 | ], 30 | ) 31 | 32 | cc_library( 33 | name = "http_server", 34 | hdrs = [ 35 | "httpserver.h", 36 | ], 37 | deps = [ 38 | ":http_server_api", 39 | "//tensorflow_serving/util/net_http/server/internal:evhttp_server", 40 | "@com_google_absl//absl/memory", 41 | ], 42 | ) 43 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/server/public/httpserver.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | // The entry point to access different HTTP server implementations. 17 | 18 | #ifndef TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_PUBLIC_HTTPSERVER_H_ 19 | #define TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_PUBLIC_HTTPSERVER_H_ 20 | 21 | #include 22 | 23 | #include "absl/memory/memory.h" 24 | 25 | #include "tensorflow_serving/util/net_http/server/internal/evhttp_server.h" 26 | #include "tensorflow_serving/util/net_http/server/public/httpserver_interface.h" 27 | 28 | namespace tensorflow { 29 | namespace serving { 30 | namespace net_http { 31 | 32 | // Creates a server implemented based on the libevents library. 33 | // Returns nullptr if there is any error. 34 | // 35 | // Must call WaitForTermination() or WaitForTerminationWithTimeout() before 36 | // the server is to be destructed. 
37 | inline std::unique_ptr CreateEvHTTPServer( 38 | std::unique_ptr options) { 39 | auto server = absl::make_unique(std::move(options)); 40 | bool result = server->Initialize(); 41 | if (!result) { 42 | return nullptr; 43 | } 44 | 45 | return std::move(server); 46 | } 47 | 48 | } // namespace net_http 49 | } // namespace serving 50 | } // namespace tensorflow 51 | 52 | #endif // TENSORFLOW_SERVING_UTIL_NET_HTTP_SERVER_PUBLIC_HTTPSERVER_H_ 53 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/server/testing/BUILD: -------------------------------------------------------------------------------- 1 | # Description: net_http/server/testing 2 | 3 | package( 4 | default_visibility = ["//visibility:private"], 5 | ) 6 | 7 | licenses(["notice"]) # Apache 2.0 8 | 9 | cc_binary( 10 | name = "evhttp_echo_server", 11 | srcs = ["evhttp_echo_server.cc"], 12 | linkopts = ["-lm"], 13 | deps = [ 14 | "//tensorflow_serving/util/net_http/server/public:http_server", 15 | "//tensorflow_serving/util/net_http/server/public:http_server_api", 16 | "@com_google_absl//absl/memory", 17 | "@com_google_absl//absl/strings", 18 | ], 19 | ) 20 | -------------------------------------------------------------------------------- /tensorflow_serving/util/net_http/socket/testing/BUILD: -------------------------------------------------------------------------------- 1 | # Description: net_http/socket testing utils 2 | 3 | package( 4 | default_visibility = ["//visibility:private"], 5 | ) 6 | 7 | licenses(["notice"]) # Apache 2.0 8 | 9 | cc_binary( 10 | name = "ev_print_req_server", 11 | srcs = ["ev_print_req_server.cc"], 12 | linkopts = ["-lm"], 13 | deps = [ 14 | "@com_github_libevent_libevent//:libevent", 15 | "@com_google_absl//absl/strings", 16 | ], 17 | ) 18 | 19 | cc_binary( 20 | name = "ev_fetch_client", 21 | srcs = ["ev_fetch_client.cc"], 22 | deps = [ 23 | "@com_github_libevent_libevent//:libevent", 24 | ], 25 | linkopts = ["-lm"], 
26 | ) 27 | -------------------------------------------------------------------------------- /tensorflow_serving/util/optional.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/optional.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | extern const in_place_t in_place{}; 22 | extern const nullopt_t nullopt{{}}; 23 | 24 | } // namespace serving 25 | } // namespace tensorflow 26 | -------------------------------------------------------------------------------- /tensorflow_serving/util/prometheus_exporter.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_PROMETHEUS_EXPORTER_H_ 17 | #define TENSORFLOW_SERVING_UTIL_PROMETHEUS_EXPORTER_H_ 18 | 19 | #include 20 | 21 | #include "tensorflow/core/lib/core/status.h" 22 | #include "tensorflow/core/lib/monitoring/collected_metrics.h" 23 | #include "tensorflow/core/lib/monitoring/collection_registry.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | 28 | // Exports metrics in Prometheus monitoring format. 29 | class PrometheusExporter { 30 | public: 31 | // Default path to expose the metrics. 32 | static const char* const kPrometheusPath; 33 | 34 | PrometheusExporter(); 35 | 36 | // Generates text page in Prometheus format: 37 | // https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-example 38 | // If an error status returned, http_page is unchanged. 39 | Status GeneratePage(string* http_page); 40 | 41 | private: 42 | // The metrics registry. 43 | monitoring::CollectionRegistry* collection_registry_; 44 | }; 45 | 46 | } // namespace serving 47 | } // namespace tensorflow 48 | 49 | #endif // TENSORFLOW_SERVING_UTIL_PROMETHEUS_EXPORTER_H_ 50 | -------------------------------------------------------------------------------- /tensorflow_serving/util/retrier.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/retrier.h" 17 | 18 | #include "tensorflow/core/platform/env.h" 19 | #include "tensorflow/core/platform/logging.h" 20 | 21 | namespace tensorflow { 22 | namespace serving { 23 | 24 | Status Retry(const string& description, const uint32 max_num_retries, 25 | const int64 retry_interval_micros, 26 | const std::function& retried_fn, 27 | const std::function& is_cancelled) { 28 | Status status; 29 | int num_tries = 0; 30 | do { 31 | if (num_tries > 0) { 32 | Env::Default()->SleepForMicroseconds(retry_interval_micros); 33 | LOG(INFO) << "Retrying of " << description << " retry: " << num_tries; 34 | } 35 | status = retried_fn(); 36 | if (!status.ok()) { 37 | LOG(ERROR) << description << " failed: " << status; 38 | } 39 | ++num_tries; 40 | } while (!is_cancelled() && !status.ok() && num_tries < max_num_retries + 1); 41 | 42 | if (is_cancelled()) { 43 | LOG(INFO) << "Retrying of " << description << " was cancelled."; 44 | } 45 | if (num_tries == max_num_retries + 1) { 46 | LOG(INFO) << "Retrying of " << description 47 | << " exhausted max_num_retries: " << max_num_retries; 48 | } 49 | return status; 50 | } 51 | 52 | } // namespace serving 53 | } // namespace tensorflow 54 | -------------------------------------------------------------------------------- /tensorflow_serving/util/retrier.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2017 Google Inc. All Rights Reserved. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_RETRIER_H_ 17 | #define TENSORFLOW_SERVING_UTIL_RETRIER_H_ 18 | 19 | #include 20 | #include 21 | 22 | #include "tensorflow/core/lib/core/status.h" 23 | 24 | namespace tensorflow { 25 | namespace serving { 26 | 27 | // Tries running 'retried_fn' once, and if it doesn't succeed, retries running 28 | // the 'retried_fn' till it returns an ok status or max_num_retries are 29 | // exhausted or cancelled() returns true. Each retry is attempted after an 30 | // interval of 'retry_interval_micros'. The 'description' is useful for logging. 31 | // 32 | // Returns the status returned by the last call to 'retried_fn'. 
33 | Status Retry(const string& description, uint32 max_num_retries, 34 | int64 retry_interval_micros, 35 | const std::function& retried_fn, 36 | const std::function& is_cancelled = [] { return false; }); 37 | 38 | } // namespace serving 39 | } // namespace tensorflow 40 | 41 | #endif // TENSORFLOW_SERVING_UTIL_RETRIER_H_ 42 | -------------------------------------------------------------------------------- /tensorflow_serving/util/status.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option cc_enable_arenas = true; 4 | 5 | import "tensorflow/core/lib/core/error_codes.proto"; 6 | 7 | package tensorflow.serving; 8 | 9 | // Status that corresponds to Status in 10 | // third_party/tensorflow/core/lib/core/status.h. 11 | message StatusProto { 12 | // Error code. 13 | error.Code error_code = 1 [json_name = "error_code"]; 14 | 15 | // Error message. Will only be set if an error was encountered. 16 | string error_message = 2 [json_name = "error_message"]; 17 | } 18 | -------------------------------------------------------------------------------- /tensorflow_serving/util/status_util.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/status_util.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | // Copies the status code into the proto; the message is copied only for 21 | StatusProto ToStatusProto(const Status& status) { 22 | StatusProto status_proto; 23 | status_proto.set_error_code(status.code()); // non-OK statuses, so an OK status yields an empty error_message. 24 | if (!status.ok()) { 25 | status_proto.set_error_message(status.error_message()); 26 | } 27 | return status_proto; 28 | } 29 | // An OK error_code maps to the default (OK) Status; any other code is rebuilt with its message. 30 | Status FromStatusProto(const StatusProto& status_proto) { 31 | return status_proto.error_code() == tensorflow::error::OK 32 | ? Status() 33 | : Status(status_proto.error_code(), status_proto.error_message()); 34 | } 35 | 36 | } // namespace serving 37 | } // namespace tensorflow 38 | -------------------------------------------------------------------------------- /tensorflow_serving/util/status_util.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2018 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | #ifndef TENSORFLOW_SERVING_UTIL_STATUS_UTIL_H_ 16 | #define TENSORFLOW_SERVING_UTIL_STATUS_UTIL_H_ 17 | 18 | #include "tensorflow/core/lib/core/status.h" 19 | #include "tensorflow_serving/util/status.pb.h" 20 | 21 | namespace tensorflow { 22 | namespace serving { 23 | 24 | // Converts from tensorflow Status to StatusProto. // An OK status produces a proto with an empty error_message. 25 | StatusProto ToStatusProto(const Status& status); 26 | 27 | // Converts from StatusProto to tensorflow Status. // Inverse of ToStatusProto. 28 | Status FromStatusProto(const StatusProto& status_proto); 29 | 30 | } // namespace serving 31 | } // namespace tensorflow 32 | 33 | #endif // TENSORFLOW_SERVING_UTIL_STATUS_UTIL_H_ 34 | -------------------------------------------------------------------------------- /tensorflow_serving/util/test_util/BUILD: -------------------------------------------------------------------------------- 1 | # Description: testing utils for Tensorflow Serving utils. 2 | 3 | package( 4 | default_visibility = [ 5 | "//tensorflow_serving:internal", 6 | ], 7 | features = ["-layering_check"], 8 | ) 9 | 10 | licenses(["notice"]) # Apache 2.0 11 | 12 | cc_library( 13 | name = "mock_file_probing_env", 14 | testonly = 1, 15 | hdrs = ["mock_file_probing_env.h"], 16 | deps = [ 17 | "//tensorflow_serving/util:file_probing_env", 18 | "@com_google_googletest//:gtest", 19 | "@org_tensorflow//tensorflow/core:lib", 20 | ], 21 | ) 22 | -------------------------------------------------------------------------------- /tensorflow_serving/util/test_util/mock_file_probing_env.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_TEST_UTIL_MOCK_FILE_PROBING_ENV_H_ 17 | #define TENSORFLOW_SERVING_UTIL_TEST_UTIL_MOCK_FILE_PROBING_ENV_H_ 18 | 19 | #include <gmock/gmock.h> 20 | #include "tensorflow/core/lib/core/status.h" 21 | #include "tensorflow_serving/util/file_probing_env.h" 22 | 23 | namespace tensorflow { 24 | namespace serving { 25 | namespace test_util { 26 | // A gMock-based mock of FileProbingEnv for tests. 27 | class MockFileProbingEnv : public FileProbingEnv { 28 | public: 29 | MOCK_METHOD1(FileExists, Status(const string& fname)); 30 | MOCK_METHOD2(GetChildren, 31 | Status(const string& fname, std::vector<string>* children)); 32 | MOCK_METHOD1(IsDirectory, Status(const string& fname)); 33 | MOCK_METHOD2(GetFileSize, Status(const string& fname, uint64* file_size)); 34 | }; 35 | 36 | } // namespace test_util 37 | } // namespace serving 38 | } // namespace tensorflow 39 | 40 | #endif // TENSORFLOW_SERVING_UTIL_TEST_UTIL_MOCK_FILE_PROBING_ENV_H_ 41 | -------------------------------------------------------------------------------- /tensorflow_serving/util/threadpool_executor.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/threadpool_executor.h" 17 | 18 | namespace tensorflow { 19 | namespace serving { 20 | 21 | ThreadPoolExecutor::ThreadPoolExecutor(Env* const env, const string& name, 22 | int num_threads) 23 | : thread_pool_(env, name, num_threads) {} 24 | // Destructor of thread_pool_ joins the worker threads. 25 | ThreadPoolExecutor::~ThreadPoolExecutor() {} 26 | // Delegates the closure to the underlying ThreadPool. 27 | void ThreadPoolExecutor::Schedule(std::function<void()> fn) { 28 | thread_pool_.Schedule(fn); 29 | } 30 | 31 | } // namespace serving 32 | } // namespace tensorflow 33 | -------------------------------------------------------------------------------- /tensorflow_serving/util/threadpool_executor.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | ==============================================================================*/ 15 | 16 | #ifndef TENSORFLOW_SERVING_UTIL_THREADPOOL_EXECUTOR_H_ 17 | #define TENSORFLOW_SERVING_UTIL_THREADPOOL_EXECUTOR_H_ 18 | 19 | #include <functional> 20 | 21 | #include "tensorflow/core/lib/core/threadpool.h" 22 | #include "tensorflow/core/platform/env.h" 23 | #include "tensorflow_serving/util/executor.h" 24 | 25 | namespace tensorflow { 26 | namespace serving { 27 | 28 | // An executor which uses a pool of threads to execute the scheduled closures. 29 | class ThreadPoolExecutor : public Executor { 30 | public: 31 | // Constructs a threadpool that has 'num_threads' threads with specified 32 | // 'thread_pool_name'. Env is used to start the thread. 33 | // 34 | // REQUIRES: num_threads > 0. 35 | ThreadPoolExecutor(Env* env, const string& thread_pool_name, int num_threads); 36 | 37 | // Waits until all scheduled work has finished and then destroy the set of 38 | // threads. 39 | ~ThreadPoolExecutor() override; 40 | 41 | void Schedule(std::function<void()> fn) override; 42 | 43 | private: 44 | thread::ThreadPool thread_pool_; 45 | 46 | TF_DISALLOW_COPY_AND_ASSIGN(ThreadPoolExecutor); 47 | }; 48 | 49 | } // namespace serving 50 | } // namespace tensorflow 51 | 52 | #endif // TENSORFLOW_SERVING_UTIL_THREADPOOL_EXECUTOR_H_ 53 | -------------------------------------------------------------------------------- /tensorflow_serving/util/threadpool_executor_test.cc: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. All Rights Reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | ==============================================================================*/ 15 | 16 | #include "tensorflow_serving/util/threadpool_executor.h" 17 | 18 | #include <gtest/gtest.h> 19 | 20 | namespace tensorflow { 21 | namespace serving { 22 | namespace { 23 | 24 | constexpr int kNumThreads = 30; 25 | // Constructing and destroying an executor with no scheduled work must not hang. 26 | TEST(ThreadPoolExecutor, Empty) { 27 | for (int num_threads = 1; num_threads < kNumThreads; num_threads++) { 28 | LOG(INFO) << "Testing with " << num_threads << " threads"; 29 | ThreadPoolExecutor pool(Env::Default(), "test", num_threads); 30 | } 31 | } 32 | // All scheduled closures must run before the executor's destructor returns. 33 | TEST(ThreadPoolExecutor, DoWork) { 34 | for (int num_threads = 1; num_threads < kNumThreads; num_threads++) { 35 | LOG(INFO) << "Testing with " << num_threads << " threads"; 36 | const int kWorkItems = 15; 37 | // Not using std::vector<bool> due to its unusual implementation and API - 38 | // http://en.cppreference.com/w/cpp/container/vector_bool 39 | bool work[kWorkItems]; 40 | for (int i = 0; i < kWorkItems; ++i) { 41 | work[i] = false; 42 | } 43 | { 44 | ThreadPoolExecutor executor(Env::Default(), "test", num_threads); 45 | for (int i = 0; i < kWorkItems; i++) { 46 | executor.Schedule([&work, i]() { 47 | ASSERT_FALSE(work[i]); 48 | work[i] = true; 49 | }); 50 | } 51 | } 52 | for (int i = 0; i < kWorkItems; i++) { 53 | ASSERT_TRUE(work[i]); 54 | } 55 | } 56 | } 57 | 58 | } // namespace 59 | } // namespace serving 60 | } // namespace tensorflow 61 | -------------------------------------------------------------------------------- /third_party/apr1/BUILD: 
-------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 2 | -------------------------------------------------------------------------------- /third_party/aprutil1/BUILD: -------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 2 | -------------------------------------------------------------------------------- /third_party/aws_util/BUILD: -------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 2 | -------------------------------------------------------------------------------- /third_party/aws_util/aws_c_cal.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # AWS C CAL 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | cc_library( 11 | name = "aws_c_cal", 12 | srcs = glob([ 13 | "include/aws/cal/**/*.h", 14 | "source/*.c", 15 | "source/unix/*.c", 16 | ]), 17 | includes = [ 18 | "include/", 19 | ], 20 | deps = [ 21 | "@aws_c_common", 22 | ], 23 | ) 24 | -------------------------------------------------------------------------------- /third_party/aws_util/aws_c_common.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # AWS C COMMON 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | load("@org_tensorflow//third_party:common.bzl", "template_rule") 11 | 12 | cc_library( 13 | name = "aws_c_common", 14 | srcs = glob([ 15 | "include/aws/common/**/*.h", 16 | "include/aws/common/**/*.inl", 17 | "source/*.c", 18 | "source/posix/*.c", 19 | "source/arch/intel/cpuid.c", 20 | "source/arch/intel/asm/cpuid.c", 21 | ]), 22 | hdrs = 
[ 23 | "include/aws/common/config.h", 24 | ], 25 | includes = [ 26 | "include/", 27 | ], 28 | defines = [ 29 | "AWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR", 30 | ], 31 | ) 32 | 33 | template_rule( 34 | name = "COMMONConfig_h", 35 | src = "include/aws/common/config.h.in", 36 | out = "include/aws/common/config.h", 37 | substitutions = { 38 | "cmakedefine": "define", 39 | }, 40 | ) 41 | -------------------------------------------------------------------------------- /third_party/aws_util/aws_c_event_stream.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # AWS C EVENT STREAM 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | cc_library( 11 | name = "aws_c_event_stream", 12 | srcs = glob([ 13 | "include/aws/event-stream/*.h", 14 | "source/event_stream.c", 15 | ]), 16 | includes = [ 17 | "include/", 18 | ], 19 | deps = [ 20 | "@aws_c_common", 21 | "@aws_checksums", 22 | "@aws_c_io", 23 | ], 24 | ) 25 | -------------------------------------------------------------------------------- /third_party/aws_util/aws_c_io.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # AWS C IO 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | cc_library( 11 | name = "aws_c_io", 12 | srcs = glob([ 13 | "include/aws/io/**/*.h", 14 | "source/*.c", 15 | "source/pkcs11/v2.40/*.h", 16 | "source/pkcs11_private.h", 17 | "source/posix/*.c", 18 | "source/linux/*.c", 19 | ]), 20 | includes = [ 21 | "include/", 22 | "source/", 23 | ], 24 | deps = [ 25 | "@aws_c_common", 26 | "@aws_c_cal" 27 | ], 28 | defines = [ 29 | "BYO_CRYPTO", 30 | ], 31 | ) 32 | -------------------------------------------------------------------------------- /third_party/aws_util/aws_checksums.BUILD: 
-------------------------------------------------------------------------------- 1 | # Description: 2 | # AWS CHECKSUMS 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | cc_library( 11 | name = "aws_checksums", 12 | srcs = glob([ 13 | "include/aws/checksums/**/*.h", 14 | "source/*.c", 15 | "source/intel/asm/*.c", 16 | ]), 17 | includes = [ 18 | "include/", 19 | ], 20 | deps = [ 21 | "@aws_c_common", 22 | ], 23 | ) 24 | -------------------------------------------------------------------------------- /third_party/expat/BUILD: -------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 2 | -------------------------------------------------------------------------------- /third_party/expat/libexpat.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Expat library 3 | 4 | licenses(["notice"]) 5 | 6 | exports_files(["COPYING"]) 7 | 8 | cc_library( 9 | name = "libexpat", 10 | srcs = [ 11 | "lib/xmlparse.c", 12 | "lib/xmlrole.c", 13 | "lib/xmltok.c", 14 | ], 15 | hdrs = glob([ 16 | "lib/*.h", 17 | ]) + [ 18 | "lib/xmltok_impl.c", 19 | "lib/xmltok_ns.c", 20 | ], 21 | copts = [ 22 | "-DHAVE_MEMMOVE", 23 | "-DXML_POOR_ENTROPY", 24 | ], 25 | includes = [ 26 | "lib", 27 | ], 28 | visibility = ["//visibility:public"], 29 | deps = [], 30 | ) 31 | -------------------------------------------------------------------------------- /third_party/mxml/BUILD: -------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 
2 | -------------------------------------------------------------------------------- /third_party/mxml/mxml.BUILD: -------------------------------------------------------------------------------- 1 | licenses(["notice"]) # Apache 2.0 2 | 3 | cc_library( 4 | name = "mxml", 5 | srcs = [ 6 | "config.h", 7 | "mxml-attr.c", 8 | "mxml-entity.c", 9 | "mxml-file.c", 10 | "mxml-get.c", 11 | "mxml-index.c", 12 | "mxml-node.c", 13 | "mxml-private.c", 14 | "mxml-private.h", 15 | "mxml-search.c", 16 | "mxml-set.c", 17 | "mxml-string.c", 18 | ], 19 | hdrs = [ 20 | "mxml.h", 21 | ], 22 | copts = ["-pthread"], 23 | defines = [ 24 | "_GNU_SOURCE", 25 | "_THREAD_SAFE", 26 | "_REENTRANT", 27 | ], 28 | includes = [ 29 | ".", 30 | ], 31 | linkopts = ["-lpthread"], 32 | visibility = ["//visibility:public"], 33 | ) 34 | -------------------------------------------------------------------------------- /third_party/oss_c_sdk/BUILD: -------------------------------------------------------------------------------- 1 | # Dummy BUILD file to make this directory a package. 
2 | -------------------------------------------------------------------------------- /third_party/oss_c_sdk/oss_c_sdk.BUILD: -------------------------------------------------------------------------------- 1 | # Description: 2 | # aliyun_oss_c_sdk 3 | 4 | package(default_visibility = ["//visibility:public"]) 5 | 6 | licenses(["notice"]) # Apache 2.0 7 | 8 | exports_files(["LICENSE"]) 9 | 10 | cc_library( 11 | name = "aliyun_oss_c_sdk", 12 | srcs = [ 13 | "oss_c_sdk/aos_buf.c", 14 | "oss_c_sdk/aos_buf.h", 15 | "oss_c_sdk/aos_crc64.c", 16 | "oss_c_sdk/aos_crc64.h", 17 | "oss_c_sdk/aos_fstack.c", 18 | "oss_c_sdk/aos_fstack.h", 19 | "oss_c_sdk/aos_http_io.c", 20 | "oss_c_sdk/aos_http_io.h", 21 | "oss_c_sdk/aos_list.h", 22 | "oss_c_sdk/aos_log.c", 23 | "oss_c_sdk/aos_status.c", 24 | "oss_c_sdk/aos_string.c", 25 | "oss_c_sdk/aos_transport.c", 26 | "oss_c_sdk/aos_transport.h", 27 | "oss_c_sdk/aos_util.c", 28 | "oss_c_sdk/oss_auth.c", 29 | "oss_c_sdk/oss_bucket.c", 30 | "oss_c_sdk/oss_define.c", 31 | "oss_c_sdk/oss_define.h", 32 | "oss_c_sdk/oss_live.c", 33 | "oss_c_sdk/oss_multipart.c", 34 | "oss_c_sdk/oss_object.c", 35 | "oss_c_sdk/oss_resumable.c", 36 | "oss_c_sdk/oss_resumable.h", 37 | "oss_c_sdk/oss_util.c", 38 | "oss_c_sdk/oss_xml.c", 39 | "oss_c_sdk/oss_xml.h", 40 | ], 41 | hdrs = [ 42 | "oss_c_sdk/aos_define.h", 43 | "oss_c_sdk/aos_log.h", 44 | "oss_c_sdk/aos_status.h", 45 | "oss_c_sdk/aos_string.h", 46 | "oss_c_sdk/aos_util.h", 47 | "oss_c_sdk/oss_api.h", 48 | "oss_c_sdk/oss_auth.h", 49 | "oss_c_sdk/oss_util.h", 50 | ], 51 | includes = [ 52 | "oss_c_sdk", 53 | ], 54 | deps = [ 55 | "@curl", 56 | "@libapr1", 57 | "@libaprutil1", 58 | "@mxml", 59 | ], 60 | ) 61 | -------------------------------------------------------------------------------- /third_party/rapidjson/BUILD: -------------------------------------------------------------------------------- 1 | # RapidJSON (rapidjson.org) library. 
2 | # from https://github.com/Tencent/rapidjson 3 | 4 | package( 5 | default_visibility = ["//visibility:public"], 6 | ) 7 | 8 | licenses(["notice"]) # BSD/MIT. 9 | 10 | cc_library( 11 | name = "rapidjson", 12 | hdrs = glob(["include/rapidjson/**/*.h"]), 13 | includes = ["include"], 14 | ) 15 | -------------------------------------------------------------------------------- /tools/gen_status_stamp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ============================================================================== 16 | 17 | # This script will be run by the building process to generate key-value 18 | # information that represents the status of the workspace. The output should be 19 | # in the format: 20 | # 21 | # KEY1 VALUE1 22 | # KEY2 VALUE2 23 | # 24 | # If the script exits with non-zero code, it's considered as a failure 25 | # and the output will be discarded. 26 | 27 | # if we're inside a git tree 28 | if [ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1; then 29 | git_rev=$(git rev-parse --short HEAD) 30 | if [[ $? 
!= 0 ]]; 31 | then 32 | exit 1 33 | fi 34 | echo "BUILD_SCM_REVISION ${git_rev}" 35 | else 36 | echo "BUILD_SCM_REVISION no_git" 37 | fi; 38 | 39 | 40 | --------------------------------------------------------------------------------