├── requirements.txt
├── .gitignore
├── .dockerignore
├── model
│   ├── iris_model.pickle
│   └── train.py
├── docs
│   └── docker-architecture.png
├── codegen.py
├── Makefile
├── environment.yml
├── docker-compose.yml
├── iris.proto
├── tox.ini
├── iris_client.py
├── Dockerfile
├── iris_pb2_grpc.py
├── grpc_server.py
├── rest_proxy.py
├── README.md
└── iris_pb2.py

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | grpcio
2 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | __pycache__/
3 | 
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git/
2 | .idea/
3 | __pycache__/
4 | docs/
5 | 
--------------------------------------------------------------------------------
/model/iris_model.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yu-iskw/machine-learning-microservice-python/HEAD/model/iris_model.pickle
--------------------------------------------------------------------------------
/docs/docker-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yu-iskw/machine-learning-microservice-python/HEAD/docs/docker-architecture.png
--------------------------------------------------------------------------------
/codegen.py:
--------------------------------------------------------------------------------
1 | from grpc_tools import protoc  # NOTE: for grpcio-tools >= 1.0 the package is grpc_tools, not grpc.tools
2 | 
3 | 
4 | protoc.main(
5 |     (
6 |         '',
7 |         '-I.',
8 |         '--python_out=.',
9 |         '--grpc_python_out=.',
10 |         './iris.proto',
11 |     )
12 | )
13 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | NAME := iris-predictor
2 | PYTHON_VERSION := 3.5
3 | 
4 | build-docker:
5 | 	docker build . -t $(NAME)
6 | 
7 | run-docker:
8 | 	docker run --rm -d -p 50052:50052 --name $(NAME) $(NAME)
9 | 
10 | kill-docker:
11 | 	docker kill $(NAME)
12 | 
13 | create-conda:
14 | 	conda env create -f environment.yml -n $(NAME)
15 | 
16 | remove-conda:
17 | 	conda env remove -y -n $(NAME)
18 | 
19 | lint:
20 | 	flake8
21 | 
--------------------------------------------------------------------------------
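Note on `codegen.py`: `protoc.main` returns a process-style exit code that the script above ignores, so a failed generation passes silently. A minimal sketch that fails loudly instead (assuming `grpcio-tools` is installed; the file name is hypothetical):

```python
# codegen_checked.py -- hypothetical variant of codegen.py that checks the exit code
from grpc_tools import protoc

exit_code = protoc.main((
    '',
    '-I.',
    '--python_out=.',
    '--grpc_python_out=.',
    './iris.proto',
))
if exit_code != 0:
    raise RuntimeError('protoc failed with exit code {}'.format(exit_code))
```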
/environment.yml:
--------------------------------------------------------------------------------
1 | name: iris-predictor
2 | channels:
3 |   - https://conda.anaconda.org/menpo
4 |   - conda-forge
5 | dependencies:
6 |   - python==3.5
7 |   - protobuf
8 |   - openblas
9 |   - numpy
10 |   - scipy
11 |   - pandas
12 |   - Cython
13 |   - scikit-learn==0.19.1
14 |   - pip:
15 |     - grpcio
16 |     - grpcio-tools
17 |     - flask-restful==0.3.6
18 |     - flake8
19 | 
--------------------------------------------------------------------------------
/model/train.py:
--------------------------------------------------------------------------------
1 | from sklearn import datasets
2 | from sklearn import svm
3 | from sklearn.externals import joblib
4 | 
5 | # load the iris dataset
6 | iris = datasets.load_iris()
7 | X, y = iris.data, iris.target
8 | 
9 | # train the model
10 | clf = svm.LinearSVC()
11 | clf.fit(X, y)
12 | 
13 | # persist the trained model
14 | joblib.dump(clf, 'iris_model.pickle')
15 | 
16 | # test code to check the saved model
17 | # clf = joblib.load('iris_model.pickle')
18 | # result = clf.predict([[5.0, 3.6, 1.3, 0.25]])
19 | # print(result)
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2.3'
2 | services:
3 | 
4 |   grpc-server:
5 |     image: iris-predictor:latest
6 |     container_name: grpc-server
7 |     restart: always
8 |     ports:
9 |       - "50052:50052"
10 |     environment:
11 |       # NOTE: must match the MAX_WORKERS variable read by the Dockerfile CMD.
12 |       - MAX_WORKERS=10000
13 |       - PORT=50052
14 | 
15 |   rest-proxy:
16 |     image: iris-predictor:latest
17 |     container_name: rest-proxy
18 |     restart: always
19 |     depends_on:
20 |       - grpc-server
21 |     ports:
22 |       - "5000:5000"
23 |     command: python -u rest_proxy.py --host 0.0.0.0 --port 5000 --grpc_host "grpc-server" --grpc_port 50052
24 | 
--------------------------------------------------------------------------------
/iris.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | option java_multiple_files = true;
4 | option java_package = "io.grpc.examples.ml";
5 | option java_outer_classname = "IrisProto";
6 | option objc_class_prefix = "HLW";
7 | 
8 | package ml;
9 | 
10 | service IrisPredictor {
11 |   rpc PredictIrisSpecies (IrisPredictRequest) returns (IrisPredictReply) {}
12 | }
13 | 
14 | message IrisPredictRequest {
15 |   double sepal_length = 1;
16 |   double sepal_width = 2;
17 |   double petal_length = 3;
18 |   double petal_width = 4;
19 | }
20 | 
21 | message IrisPredictReply {
22 |   int32 species = 1;
23 | }
24 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude = .git/*,.idea/*,__pycache__/*,docs/*,model/*,*_pb2.py,*_pb2_grpc.py
3 | ignore = E123,E126,E127,E128,E501,E131,E266
4 | application-import-names = codegen,grpc_server,iris_client,iris_pb2,iris_pb2_grpc,rest_proxy
5 | import-order-style = smarkets
6 | 
7 | # E123 = closing bracket does not match indentation of opening bracket's line
8 | # E126 = continuation line over-indented for hanging indent
9 | # E127 = continuation line over-indented for visual indent
10 | # E128 = continuation line under-indented for visual indent
11 | # E131 = continuation line unaligned for hanging indent
12 | # E266 = too many leading '#' for block comment
13 | # E501 = line too long (82 > 79 characters)
14 | 
15 | # Unused
16 | # E302 = expected 2 blank lines, found 0
17 | # F401 = module imported but unused
18 | # I101 = The names in your from import are in wrong order
19 | 
--------------------------------------------------------------------------------
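Note on `model/train.py`: `sklearn.externals.joblib` works with the pinned scikit-learn 0.19.1 but was removed in scikit-learn 0.23. A sketch of the same training script against a newer scikit-learn (assuming the standalone `joblib` package is installed):

```python
# Hypothetical variant of model/train.py for scikit-learn >= 0.23,
# where sklearn.externals.joblib no longer exists.
import joblib
from sklearn import datasets, svm

iris = datasets.load_iris()
clf = svm.LinearSVC()
clf.fit(iris.data, iris.target)
joblib.dump(clf, 'iris_model.pickle')  # same artifact that grpc_server.py loads
```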
/iris_client.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | 
3 | import argparse
4 | 
5 | import grpc
6 | 
7 | import iris_pb2
8 | import iris_pb2_grpc
9 | 
10 | 
11 | def run(host, port):
12 |     channel = grpc.insecure_channel('%s:%d' % (host, port))
13 |     stub = iris_pb2_grpc.IrisPredictorStub(channel)
14 |     request = iris_pb2.IrisPredictRequest(
15 |         sepal_length=5.0,
16 |         sepal_width=3.6,
17 |         petal_length=1.3,
18 |         petal_width=0.25
19 |     )
20 |     response = stub.PredictIrisSpecies(request)
21 |     print("Predicted species number: " + str(response.species))
22 | 
23 | 
24 | if __name__ == '__main__':
25 |     parser = argparse.ArgumentParser()
26 |     parser.add_argument('--host', help='host name', default='localhost', type=str)
27 |     parser.add_argument('--port', help='port number', default=50052, type=int)
28 | 
29 |     args = parser.parse_args()
30 |     run(args.host, args.port)
31 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 | 
3 | WORKDIR /root
4 | 
5 | # Install build and download dependencies
6 | RUN apt-get update \
7 |     && apt-get install -y --no-install-recommends \
8 |     build-essential \
9 |     curl \
10 |     pkg-config \
11 |     rsync \
12 |     software-properties-common \
13 |     unzip \
14 |     git \
15 |     && apt-get clean \
16 |     && rm -rf /var/lib/apt/lists/*
17 | 
18 | # Install miniconda
19 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
20 |     && bash Miniconda-latest-Linux-x86_64.sh -p /miniconda -b \
21 |     && rm Miniconda-latest-Linux-x86_64.sh
22 | ENV PATH /miniconda/bin:$PATH
23 | 
24 | # Create a conda environment
25 | ENV CONDA_ENV_NAME iris-predictor
26 | COPY environment.yml ./environment.yml
27 | RUN conda env create -f environment.yml -n $CONDA_ENV_NAME
28 | ENV PATH /miniconda/envs/${CONDA_ENV_NAME}/bin:$PATH
29 | 
30 | # Clean up tarballs and downloaded package files
31 | RUN conda clean -tp -y \
32 |     && apt-get clean \
33 |     && rm -rf /var/lib/apt/lists/*
34 | 
35 | # gRPC
36 | EXPOSE 50052
37 | # REST
38 | EXPOSE 5000
39 | 
40 | # Environment variables
41 | ENV MAX_WORKERS 1
42 | ENV PORT 50052
43 | 
44 | COPY . /root/
45 | CMD python grpc_server.py --max_workers ${MAX_WORKERS} --port ${PORT}
46 | 
--------------------------------------------------------------------------------
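Note: after `make build-docker` and `make run-docker`, it is worth verifying that the container actually accepts gRPC connections before pointing the client at it. A minimal sketch (the file name `smoke_test.py` and the 10-second timeout are arbitrary):

```python
# smoke_test.py -- hypothetical check that the gRPC port is reachable
import grpc

channel = grpc.insecure_channel('localhost:50052')
# Blocks until the channel is ready; raises grpc.FutureTimeoutError otherwise.
grpc.channel_ready_future(channel).result(timeout=10)
print('gRPC server is reachable')
```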
16 | """ 17 | self.PredictIrisSpecies = channel.unary_unary( 18 | '/ml.IrisPredictor/PredictIrisSpecies', 19 | request_serializer=iris__pb2.IrisPredictRequest.SerializeToString, 20 | response_deserializer=iris__pb2.IrisPredictReply.FromString, 21 | ) 22 | 23 | 24 | class IrisPredictorServicer(object): 25 | # missing associated documentation comment in .proto file 26 | pass 27 | 28 | def PredictIrisSpecies(self, request, context): 29 | # missing associated documentation comment in .proto file 30 | pass 31 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 32 | context.set_details('Method not implemented!') 33 | raise NotImplementedError('Method not implemented!') 34 | 35 | 36 | def add_IrisPredictorServicer_to_server(servicer, server): 37 | rpc_method_handlers = { 38 | 'PredictIrisSpecies': grpc.unary_unary_rpc_method_handler( 39 | servicer.PredictIrisSpecies, 40 | request_deserializer=iris__pb2.IrisPredictRequest.FromString, 41 | response_serializer=iris__pb2.IrisPredictReply.SerializeToString, 42 | ), 43 | } 44 | generic_handler = grpc.method_handlers_generic_handler( 45 | 'ml.IrisPredictor', rpc_method_handlers) 46 | server.add_generic_rpc_handlers((generic_handler,)) 47 | -------------------------------------------------------------------------------- /grpc_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | from concurrent import futures 3 | import time 4 | import argparse 5 | 6 | from sklearn.externals import joblib 7 | 8 | import grpc 9 | 10 | import iris_pb2 11 | import iris_pb2_grpc 12 | 13 | _ONE_DAY_IN_SECONDS = 60 * 60 * 24 14 | 15 | 16 | class IrisPredictor(iris_pb2_grpc.IrisPredictorServicer): 17 | _model = None 18 | 19 | @classmethod 20 | def get_or_create_model(cls): 21 | """ 22 | Get or create iris classification model. 
23 | """ 24 | if cls._model is None: 25 | path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model', 'iris_model.pickle') 26 | cls._model = joblib.load(path) 27 | return cls._model 28 | 29 | def PredictIrisSpecies(self, request, context): 30 | model = self.__class__.get_or_create_model() 31 | sepal_length = request.sepal_length 32 | sepal_width = request.sepal_width 33 | petal_length = request.petal_length 34 | petal_width = request.petal_width 35 | result = model.predict([[sepal_length, sepal_width, petal_length, petal_width]]) 36 | return iris_pb2.IrisPredictReply(species=result[0]) 37 | 38 | 39 | def serve(port, max_workers): 40 | server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers)) 41 | iris_pb2_grpc.add_IrisPredictorServicer_to_server(IrisPredictor(), server) 42 | server.add_insecure_port('[::]:{port}'.format(port=port)) 43 | server.start() 44 | try: 45 | while True: 46 | time.sleep(_ONE_DAY_IN_SECONDS) 47 | except KeyboardInterrupt: 48 | server.stop(0) 49 | 50 | 51 | if __name__ == '__main__': 52 | parser = argparse.ArgumentParser(description='Process some integers.') 53 | parser.add_argument('--port', type=int, help='port number', required=False, default=50052) 54 | parser.add_argument('--max_workers', type=int, help='# max workers', required=False, default=10) 55 | args = parser.parse_args() 56 | 57 | serve(port=args.port, max_workers=args.max_workers) 58 | -------------------------------------------------------------------------------- /rest_proxy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import print_function 3 | 4 | import argparse 5 | import grpc 6 | 7 | from flask import Flask 8 | from flask_restful import reqparse, Api, Resource, fields, marshal 9 | 10 | import iris_pb2 11 | import iris_pb2_grpc 12 | 13 | app = Flask(__name__) 14 | api = Api(app) 15 | 16 | 17 | class HealthCheck(Resource): 18 | """ 19 | This class is used for health check. 20 | """ 21 | def get(self): 22 | return {"status": "alive"}, 200 23 | 24 | 25 | class RestProxy(Resource): 26 | """ 27 | This class is used for a proxy with REST to a gRPC server. 28 | """ 29 | 30 | # Define the inputs. 31 | parser = reqparse.RequestParser() 32 | parser.add_argument('sepal_length', type=float, help='sepal length', required=True) 33 | parser.add_argument('sepal_width', type=float, help='sepal width', required=True) 34 | parser.add_argument('petal_length', type=float, help='petal length', required=True) 35 | parser.add_argument('petal_width', type=float, help='petal width', required=True) 36 | parser.add_argument('threshold', type=float, help='threshold', required=False, default=0.5) 37 | 38 | # Define the outputs. 39 | resource_fields = { 40 | 'species': fields.String, 41 | } 42 | 43 | def __init__(self, host, port): 44 | """ 45 | :param host: host of gRPC server. 46 | :param port: port of gRPC server. 47 | """ 48 | self.host = host 49 | self.port = port 50 | 51 | def post(self): 52 | # Parse arguments by REST request. 53 | args = self.__class__.parser.parse_args() 54 | 55 | # Request to the gRPC server. 
/rest_proxy.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function
3 | 
4 | import argparse
5 | import grpc
6 | 
7 | from flask import Flask
8 | from flask_restful import reqparse, Api, Resource, fields, marshal
9 | 
10 | import iris_pb2
11 | import iris_pb2_grpc
12 | 
13 | app = Flask(__name__)
14 | api = Api(app)
15 | 
16 | 
17 | class HealthCheck(Resource):
18 |     """
19 |     This class is used for health checks.
20 |     """
21 |     def get(self):
22 |         return {"status": "alive"}, 200
23 | 
24 | 
25 | class RestProxy(Resource):
26 |     """
27 |     This class is a REST proxy in front of the gRPC server.
28 |     """
29 | 
30 |     # Define the inputs.
31 |     parser = reqparse.RequestParser()
32 |     parser.add_argument('sepal_length', type=float, help='sepal length', required=True)
33 |     parser.add_argument('sepal_width', type=float, help='sepal width', required=True)
34 |     parser.add_argument('petal_length', type=float, help='petal length', required=True)
35 |     parser.add_argument('petal_width', type=float, help='petal width', required=True)
36 | 
37 |     # Define the outputs.
38 |     resource_fields = {
39 |         'species': fields.String,
40 |     }
41 | 
42 |     def __init__(self, host, port):
43 |         """
44 |         :param host: host of the gRPC server.
45 |         :param port: port of the gRPC server.
46 |         """
47 |         self.host = host
48 |         self.port = port
49 | 
50 |     def post(self):
51 |         # Parse the arguments of the REST request.
52 |         args = self.__class__.parser.parse_args()
53 | 
54 |         # Forward the request to the gRPC server.
55 |         # NOTE: creating a channel per request keeps the example simple;
56 |         # a long-lived service should reuse a single channel.
57 |         channel = grpc.insecure_channel('%s:%s' % (self.host, self.port))
58 |         stub = iris_pb2_grpc.IrisPredictorStub(channel)
59 |         request = iris_pb2.IrisPredictRequest(
60 |             sepal_length=args['sepal_length'],
61 |             sepal_width=args['sepal_width'],
62 |             petal_length=args['petal_length'],
63 |             petal_width=args['petal_width']
64 |         )
65 |         response = stub.PredictIrisSpecies(request)
66 | 
67 |         # Return the results.
68 |         data = {
69 |             'species': response.species,
70 |         }
71 |         return marshal(data, self.__class__.resource_fields)
72 | 
73 | 
74 | if __name__ == '__main__':
75 |     parser = argparse.ArgumentParser()
76 |     parser.add_argument('--host', help='REST server host', default='localhost', type=str)
77 |     parser.add_argument('--port', help='REST server port', default=5000, type=int)
78 |     parser.add_argument('--grpc_host', help='gRPC server host', default='localhost', type=str)
79 |     parser.add_argument('--grpc_port', help='gRPC server port', default=50052, type=int)
80 |     # type=bool would treat any non-empty string as True, so use a flag instead.
81 |     parser.add_argument('--debug', help='debug flag', action='store_true')
82 |     args = parser.parse_args()
83 | 
84 |     resource_class_kwargs = {
85 |         'host': args.grpc_host,
86 |         'port': args.grpc_port,
87 |     }
88 | 
89 |     # Run the flask app.
90 |     api.add_resource(RestProxy, '/', resource_class_kwargs=resource_class_kwargs)
91 |     api.add_resource(HealthCheck, '/healthcheck')
92 |     app.run(host=args.host, port=args.port, debug=args.debug)
93 | 
--------------------------------------------------------------------------------
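Note: besides the `curl` example in the README below, the REST proxy can be exercised from Python with only the standard library (host and port assume the docker-compose setup):

```python
# Hypothetical REST client for rest_proxy.py using only the standard library.
import json
from urllib import parse, request

form = parse.urlencode({
    'sepal_length': 6.8,
    'sepal_width': 3.2,
    'petal_length': 5.9,
    'petal_width': 2.3,
}).encode()
# urlopen sends a POST with form-encoded data, which reqparse reads.
resp = request.urlopen('http://localhost:5000/', data=form)
print(json.loads(resp.read().decode()))  # e.g. {"species": "2"}
```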
/README.md:
--------------------------------------------------------------------------------
1 | # Machine learning as a microservice in Python
2 | 
3 | This is an example of serving a machine learning model as a microservice in Python.
4 | The model predicts the iris species from the given sepal length, sepal width, petal length and petal width.
5 | 
6 | ## Requirements
7 | 
8 | - Docker
9 | - Anaconda
10 | - Make
11 | 
12 | ## Implement the files
13 | 
14 | 1. Train a model on the iris data with `./model/train.py`.
15 |     - As a result, it saves a model that predicts iris species in `iris_model.pickle`.
16 | 2. Define the protocol buffer in `iris.proto`.
17 | 3. Implement a command to generate Python files from `iris.proto` in `codegen.py`.
18 |     - `iris_pb2.py` and `iris_pb2_grpc.py` are generated.
19 | 4. Implement `grpc_server.py`.
20 |     - We predict iris species from the given features in `grpc_server.py`.
21 | 5. Implement `iris_client.py`.
22 |     - The file is just a client that requests an iris species prediction with fixed values for sepal length, sepal width, petal length and petal width.
23 | 
24 | 
25 | ## How to set up an environment on our local machine
26 | The following commands create an Anaconda environment.
27 | We can activate the environment with `source activate iris-predictor`, since the environment name is `iris-predictor`.
28 | ```
29 | # Create an anaconda environment.
30 | conda env create -f environment.yml -n iris-predictor
31 | 
32 | # Verify that the new environment was installed correctly; the active environment is marked with '*'.
33 | conda env list
34 | 
35 | # Remove the anaconda environment.
36 | conda env remove -y -n iris-predictor
37 | ```
38 | 
39 | ## How to run the server and the client on our local machine
40 | Before running the predictor as a docker container, we can run the server and the client on our local machine.
41 | ```
42 | # Run the server.
43 | python grpc_server.py
44 | 
45 | # Run the client.
46 | python iris_client.py
47 | ```
48 | 
49 | ## How to build and run a docker image
50 | We put the Python files and the saved model in the docker image.
51 | The docker image is then used for running `grpc_server.py`.
52 | 
53 | The host name depends on your environment.
54 | If you use `docker-machine`, you can see the IP address with `docker-machine ip YOUR_DOCKER_MACHINE`.
55 | 
56 | The docker image exposes port `50052`, and the gRPC server inside the container listens on `50052` as well.
57 | That's why we put `-p 50052:50052` in the `docker run` command.
58 | ```
59 | # Build a docker image.
60 | docker build . -t iris-predictor
61 | 
62 | # Run a docker container.
63 | docker run --rm -d -p 50052:50052 --name iris-predictor iris-predictor
64 | 
65 | # Kill the docker container.
66 | docker kill iris-predictor
67 | ```
68 | 
69 | Then we check whether the client can reach the server running on docker:
70 | 
71 | ```
72 | # Execute it on your local machine, not in a docker container.
73 | python iris_client.py --host HOST_NAME --port 50052
74 | Predicted species number: 0
75 | ```
76 | 
77 | ## Appendix: HTTP/REST API
78 | Sometimes you need to offer both a gRPC API and a RESTful API.
79 | To avoid duplicated work, we can define the HTTP/REST API as just a proxy to the gRPC API.
80 | Proxying requests internally adds some overhead, but in terms of software development it has the real benefit that we don't have to maintain two separate prediction implementations.
81 | 
82 | The REST proxy is `rest_proxy.py`.
83 | It is simply implemented with [Flask\-RESTful](https://flask-restful.readthedocs.io/en/latest/).
84 | 
85 | The definition to launch both the gRPC API and the RESTful API is in `docker-compose.yml`.
86 | 
87 | ![docker architecture](./docs/docker-architecture.png)
88 | 
89 | ```
90 | # Launch the gRPC server and the REST server on docker.
91 | docker-compose up -d
92 | 
93 | # Request to the REST API.
94 | DOCKER_HOST="..."
95 | curl http://${DOCKER_HOST}:5000/ -X POST \
96 |     -d "sepal_length=6.8" \
97 |     -d "sepal_width=3.2" \
98 |     -d "petal_length=5.9" \
99 |     -d "petal_width=2.3"
100 | 
101 | {"species": "2"}
102 | ```
103 | 
--------------------------------------------------------------------------------
/iris_pb2.py:
--------------------------------------------------------------------------------
1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
2 | # source: iris.proto
3 | 
4 | import sys
5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | from google.protobuf import descriptor_pb2
11 | # @@protoc_insertion_point(imports)
12 | 
13 | _sym_db = _symbol_database.Default()
14 | 
15 | 
16 | 
17 | 
18 | DESCRIPTOR = _descriptor.FileDescriptor(
19 |   name='iris.proto',
20 |   package='ml',
21 |   syntax='proto3',
22 |   serialized_pb=_b('\n\niris.proto\x12\x02ml\"j\n\x12IrisPredictRequest\x12\x14\n\x0csepal_length\x18\x01 \x01(\x01\x12\x13\n\x0bsepal_width\x18\x02 \x01(\x01\x12\x14\n\x0cpetal_length\x18\x03 \x01(\x01\x12\x13\n\x0bpetal_width\x18\x04 \x01(\x01\"#\n\x10IrisPredictReply\x12\x0f\n\x07species\x18\x01 \x01(\x05\x32U\n\rIrisPredictor\x12\x44\n\x12PredictIrisSpecies\x12\x16.ml.IrisPredictRequest\x1a\x14.ml.IrisPredictReply\"\x00\x42(\n\x13io.grpc.examples.mlB\tIrisProtoP\x01\xa2\x02\x03HLWb\x06proto3')
23 | )
24 | 
25 | 
26 | 
27 | 
28 | _IRISPREDICTREQUEST = _descriptor.Descriptor(
29 |   name='IrisPredictRequest',
30 |   full_name='ml.IrisPredictRequest',
31 |   filename=None,
32 |   file=DESCRIPTOR,
33 |   containing_type=None,
34 |   fields=[
35 |     _descriptor.FieldDescriptor(
36 |       name='sepal_length', full_name='ml.IrisPredictRequest.sepal_length', index=0,
37 |       number=1, type=1, cpp_type=5, label=1,
38 |       has_default_value=False, default_value=float(0),
39 |       message_type=None, enum_type=None, containing_type=None,
40 |       is_extension=False, extension_scope=None,
41 |       options=None),
42 |     _descriptor.FieldDescriptor(
43 |       name='sepal_width', full_name='ml.IrisPredictRequest.sepal_width', index=1,
44 |       number=2, type=1, cpp_type=5, label=1,
45 |       has_default_value=False, default_value=float(0),
46 |       message_type=None, enum_type=None, containing_type=None,
47 |       is_extension=False, extension_scope=None,
48 |       options=None),
49 |     _descriptor.FieldDescriptor(
50 |       name='petal_length', full_name='ml.IrisPredictRequest.petal_length', index=2,
51 |       number=3, type=1, cpp_type=5, label=1,
52 |       has_default_value=False, default_value=float(0),
53 |       message_type=None, enum_type=None, containing_type=None,
54 |       is_extension=False, extension_scope=None,
55 |       options=None),
56 |     _descriptor.FieldDescriptor(
57 |       name='petal_width', full_name='ml.IrisPredictRequest.petal_width', index=3,
58 |       number=4, type=1, cpp_type=5, label=1,
59 |       has_default_value=False, default_value=float(0),
60 |       message_type=None, enum_type=None, containing_type=None,
61 |       is_extension=False, extension_scope=None,
62 |       options=None),
63 |   ],
64 |   extensions=[
65 |   ],
66 |   nested_types=[],
67 |   enum_types=[
68 |   ],
69 |   options=None,
70 |   is_extendable=False,
71 |   syntax='proto3',
72 |   extension_ranges=[],
73 |   oneofs=[
74 |   ],
75 |   serialized_start=18,
76 |   serialized_end=124,
77 | )
78 | 
79 | 
80 | _IRISPREDICTREPLY = _descriptor.Descriptor(
81 |   name='IrisPredictReply',
82 |   full_name='ml.IrisPredictReply',
83 |   filename=None,
84 |   file=DESCRIPTOR,
85 |   containing_type=None,
86 |   fields=[
87 |     _descriptor.FieldDescriptor(
88 |       name='species', full_name='ml.IrisPredictReply.species', index=0,
89 |       number=1, type=5, cpp_type=1, label=1,
90 |       has_default_value=False, default_value=0,
91 |       message_type=None, enum_type=None, containing_type=None,
92 |       is_extension=False, extension_scope=None,
93 |       options=None),
94 |   ],
95 |   extensions=[
96 |   ],
97 |   nested_types=[],
98 |   enum_types=[
99 |   ],
100 |   options=None,
101 |   is_extendable=False,
102 |   syntax='proto3',
103 |   extension_ranges=[],
104 |   oneofs=[
105 |   ],
106 |   serialized_start=126,
107 |   serialized_end=161,
108 | )
109 | 
110 | DESCRIPTOR.message_types_by_name['IrisPredictRequest'] = _IRISPREDICTREQUEST
111 | DESCRIPTOR.message_types_by_name['IrisPredictReply'] = _IRISPREDICTREPLY
112 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
113 | 
114 | IrisPredictRequest = _reflection.GeneratedProtocolMessageType('IrisPredictRequest', (_message.Message,), dict(
115 |   DESCRIPTOR = _IRISPREDICTREQUEST,
116 |   __module__ = 'iris_pb2'
117 |   # @@protoc_insertion_point(class_scope:ml.IrisPredictRequest)
118 |   ))
119 | _sym_db.RegisterMessage(IrisPredictRequest)
120 | 
121 | IrisPredictReply = _reflection.GeneratedProtocolMessageType('IrisPredictReply', (_message.Message,), dict(
122 |   DESCRIPTOR = _IRISPREDICTREPLY,
123 |   __module__ = 'iris_pb2'
124 |   # @@protoc_insertion_point(class_scope:ml.IrisPredictReply)
125 |   ))
126 | _sym_db.RegisterMessage(IrisPredictReply)
127 | 
128 | 
129 | DESCRIPTOR.has_options = True
130 | DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023io.grpc.examples.mlB\tIrisProtoP\001\242\002\003HLW'))
131 | 
132 | _IRISPREDICTOR = _descriptor.ServiceDescriptor(
133 |   name='IrisPredictor',
134 |   full_name='ml.IrisPredictor',
135 |   file=DESCRIPTOR,
136 |   index=0,
137 |   options=None,
138 |   serialized_start=163,
139 |   serialized_end=248,
140 |   methods=[
141 |   _descriptor.MethodDescriptor(
142 |     name='PredictIrisSpecies',
143 |     full_name='ml.IrisPredictor.PredictIrisSpecies',
144 |     index=0,
145 |     containing_service=None,
146 |     input_type=_IRISPREDICTREQUEST,
147 |     output_type=_IRISPREDICTREPLY,
148 |     options=None,
149 |   ),
150 | ])
151 | _sym_db.RegisterServiceDescriptor(_IRISPREDICTOR)
152 | 
153 | DESCRIPTOR.services_by_name['IrisPredictor'] = _IRISPREDICTOR
154 | 
155 | # @@protoc_insertion_point(module_scope)
156 | 
--------------------------------------------------------------------------------