├── .gitignore
├── .travis.yml
├── README.md
├── benchmark
├── client.py
├── client_worker.py
├── config.py
├── jobset.py
├── server.py
├── server_pb2.py
├── touch_qps_worker.py
└── touch_test.py
├── examples
├── __init__.py
├── client.py
├── gencode.sh
├── server.proto
├── server.py
└── server_pb2.py
├── grma
├── __init__.py
├── app.py
├── config.py
├── mayue.py
├── pidfile.py
├── server
│ ├── __init__.py
│ └── base.py
├── utils.py
└── worker.py
├── images
└── logo.png
├── requirements_test.txt
├── setup.py
├── tests
├── client.py
├── server_pb2.py
└── test_touch.py
└── tox.ini
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | *.pyc
3 | *.swp
4 | *.iml
5 | *.egg-info
6 | *.egg
7 | *.patch
8 | .#*
9 | dist
10 | build
11 | tests/.cache/*
12 | .cache
13 | venv
14 | .DS_Store
15 | .tox/*
16 | *.pid
17 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | dist: trusty
3 | language: python
4 | python: 2.7
5 | env:
6 | - TOXENV=py27
7 | install: pip install tox
8 | script: tox
9 | cache:
10 | directories:
11 | - .tox
12 | - $HOME/.cache/pip
13 | before_install:
14 | - echo "deb http://http.debian.net/debian jessie-backports main" | sudo tee -a /etc/apt/sources.list
15 | - sudo apt-get update -y
16 | - sudo apt-get install libgrpc-dev -y --force-yes
17 |     - pip install grpcio==0.14.0 protobuf==3.0.0b3
18 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # Overview
6 |
7 | A simple gunicorn like gRPC management server.
8 |
9 | [![Build Status](https://travis-ci.org/qiajigou/grma.svg?branch=master)](https://travis-ci.org/qiajigou/grma) [![Gitter](https://badges.gitter.im/qiajigou/grma.svg)](https://gitter.im/qiajigou/grma?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
10 |
11 | # Example
12 |
13 | See examples folder.
14 |
15 | grma --cls=server:app --num=8 --port=60061
16 |
17 | Then use python
18 |
19 | ```python
20 | from client import get_client
21 | c = get_client()
22 | c.hello('hello world')
23 | ```
24 |
25 | # How to use
26 |
27 | Inherit `ServerBase` to create your own `JediServer` class:
28 |
29 | ```python
30 | from grma.server.base import ServerBase
31 |
32 | class JediServer(ServerBase):
33 | """Your gRPC server class"""
34 |
35 | def start(self):
36 | pass
37 |
38 | def bind(self, host, port, private_key_path='', certificate_chain_path=''):
39 | pass
40 |
41 | def stop(self, grace=3):
42 | pass
43 |
44 | app = JediServer()
45 | ```
46 |
47 | Launching should be simple:
48 |
49 | run grma --port=50051 --cls=app:app --num=8 --daemon=1
50 |
51 |
52 | Get more from help
53 |
54 |
55 | ```
56 | usage: grma [-h] [--host HOST] [--port PORT] [--private PRIVATE]
57 | [--certificate CERTIFICATE] --cls CLS [--num NUM] [--pid PID]
58 | [--daemon DAEMON]
59 |
60 | A simple gunicorn like gRPC server management tool
61 |
62 | optional arguments:
63 | -h, --help show this help message and exit
64 | --host HOST an string for gRPC Server host
65 | --port PORT an integer for gRPC Server port
66 | --private PRIVATE a string of private key path
67 | --certificate CERTIFICATE
68 | a string of private certificate key path
69 | --cls CLS a string of gRPC server module [app:server]
70 | --num NUM a int of worker number
71 | --pid PID pid file for grma
72 | --daemon DAEMON run as daemon
73 | ```
74 |
75 | # TODO
76 |
77 | Lots to do...
78 |
--------------------------------------------------------------------------------
/benchmark/client.py:
--------------------------------------------------------------------------------
1 | import server_pb2
2 |
3 | import atexit
4 |
5 | from grpc.beta import implementations
6 | from random import randint
7 | from grpc._adapter._types import ConnectivityState
8 |
# Registry of live ClientImpl instances, keyed by str(id(client)), so
# exit_handler can shut them all down at interpreter exit.
# NOTE: the original wrote `global _pool` here; `global` at module scope
# is a no-op, so the bare assignment is all that is needed.
_pool = dict()
11 |
12 |
class ChannelPool(object):
    """A fixed-size pool of gRPC channels and their matching stubs.

    Invariant: channels[i] is the channel backing stubs[i].  get_stub()
    picks a random stub and records its index so flush_channels() can
    later check the connectivity of channels that were actually used.
    """

    def __init__(self, host, port, pool_size):
        self.host = host
        self.port = port
        self.pool_size = pool_size
        self.channels = []
        self.stubs = []
        # only index, no ref!
        # and this is a stub rank!
        self.working_channel_indexs = set()
        self.connect()

    def flush_channels(self):
        # call this method to check all the channels status
        # if channel connection is failed or idle
        # we could try to reconnect sometime
        channels = [self.channels[i] for i in self.working_channel_indexs]
        for channel in channels:
            # Pre-bind state so on_exception() gets a defined value even
            # when check_connectivity_state() itself raises (the original
            # could hit an UnboundLocalError in that case).
            state = None
            try:
                state = channel._low_channel.check_connectivity_state(True)
                if state == ConnectivityState.CONNECTING:
                    self.on_channel_connection(channel, state)
                elif state == ConnectivityState.TRANSIENT_FAILURE:
                    self.on_transient_failure(channel, state)
                elif state == ConnectivityState.FATAL_FAILURE:
                    self.on_fatal_failure(channel, state)
                else:
                    self.on_success(channel, state)
            except Exception as e:  # py2's `except Exception, e` is a py3 SyntaxError
                self.on_exception(channel, state, e)

    # The on_* hooks below are intentionally empty; subclasses override
    # them to react to connectivity transitions.
    def on_channel_connection(self, channel, state):
        pass

    def on_transient_failure(self, channel, state):
        pass

    def on_fatal_failure(self, channel, state):
        pass

    def on_success(self, channel, state):
        pass

    def on_exception(self, channel, state, e):
        pass

    def connect(self):
        """Open pool_size channels and build one stub per channel."""
        for _ in range(self.pool_size):
            channel = implementations.insecure_channel(self.host, self.port)
            stub = server_pb2.beta_create_SimpleService_stub(channel)
            # we need to make channels[i] == stubs[i]->channel
            self.channels.append(channel)
            self.stubs.append(stub)

    def shutdown(self):
        """Drop all channel/stub references so gRPC can tear them down.

        The original looped `del channel` / `del stub`, which only unbinds
        the loop variable; rebinding the lists is what actually releases
        the references.  The used-index set is cleared too so stale
        indexes cannot outlive the stubs they pointed at.
        """
        self.channels = []
        self.stubs = []
        self.working_channel_indexs = set()

    def get_stub(self):
        """Return a random stub and remember which index was handed out."""
        index = randint(0, self.pool_size - 1)
        self.working_channel_indexs.add(index)
        return self.stubs[index]

    def __del__(self):
        self.shutdown()
86 |
class ClientImpl(object):
    """Client facade over a ChannelPool.

    Each instance registers itself in the module-level _pool so
    exit_handler() can shut every client down at interpreter exit.
    """

    def __init__(self, host='0.0.0.0', port=60061, size=1):
        # ChannelPool.__init__ already calls connect(); the original called
        # pool.connect() a second time here, doubling the channel count
        # while get_stub() only ever indexed the first pool_size entries.
        self.pool = ChannelPool(host, port, size)
        self.register()

    def register(self):
        """Record this instance in the module-level registry."""
        # The original's `del old_obj` only dropped a local name, so a
        # plain overwrite is equivalent.
        _pool[str(id(self))] = self

    def shutdown(self):
        self.pool.shutdown()

    @property
    def stub(self):
        # A fresh random stub on every access.
        return self.pool.get_stub()

    def hello(self, words, with_call=False):
        """Invoke the Hello RPC with a 3 second timeout."""
        request = server_pb2.HelloRequest(say=words)
        return self.stub.Hello(request, 3, with_call=with_call)

    Hello = hello
113 |
114 |
def get_client():
    """Return an already-registered client if one exists, else create one."""
    if _pool:
        # dict.keys() is a non-indexable view on Python 3; next(iter(...))
        # grabs an arbitrary key and works on both 2 and 3.
        return _pool[next(iter(_pool))]
    return ClientImpl()
121 |
122 |
def exit_handler():
    # this is a gRPC python bug
    # so we need to end everything
    # when app close
    # Keys are unused, so iterate values() directly.
    for obj in _pool.values():
        obj.shutdown()

atexit.register(exit_handler)
131 |
--------------------------------------------------------------------------------
/benchmark/client_worker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import signal
4 | import jobset
5 | import subprocess
6 |
7 | from time import sleep
8 |
9 | PATH = os.path.abspath(os.path.dirname(__file__))
10 |
11 |
class ClientWorker(object):
    """Registers groups of subprocess command lines and spawns them."""

    def __init__(self):
        # Each entry: dict(id=..., args=..., process_num=..., proc_list=[...]).
        self.groups = []

    def register(self, args, process_num=1):
        """Queue `args` to be launched `process_num` times by spawn()."""
        jobset.message('START', '[%s] register %s' % (process_num, ' '.join(args)))
        group_id = len(self.groups) + 1  # renamed: `id` shadowed the builtin
        info = dict(id=group_id, args=args, process_num=process_num)
        self.groups.append(info)
        jobset.message('SUCCESS', 'registered %s' % args)

    def spawn(self):
        """Launch every registered group; abort on the first failure."""
        for group in self.groups:
            try:
                process_num = group.get('process_num')
                args = group.get('args')
                procs = []
                for _ in range(process_num):
                    # os.setsid puts each child in its own process group.
                    proc = subprocess.Popen(args, preexec_fn=os.setsid)
                    procs.append(proc)
                group.update(proc_list=procs)
            except Exception as e:
                jobset.message('FAIL', str(e))
                return

    def run(self):
        """Spawn all groups, then block forever (a signal handler exits)."""
        jobset.message('START', 'Start spawn subprocesses')
        self.spawn()
        while 1:
            sleep(1)
42 |
43 |
def signal_handler(signum, frame):
    """Exit the worker cleanly when a termination signal arrives."""
    sys.exit(0)

# Register one handler for every signal that should stop the worker.
for _sig in (signal.SIGINT, signal.SIGQUIT, signal.SIGTERM, signal.SIGCHLD):
    signal.signal(_sig, signal_handler)
51 |
if __name__ == '__main__':
    # Fix: "runnling" typo in the log message; py3-compatible except/print.
    jobset.message('START', 'start running client worker')
    try:
        worker = ClientWorker()
        worker.register(
            [sys.executable, PATH + '/touch_test.py'], 1
        )
        worker.run()
    except Exception as e:
        jobset.message('FAIL', str(e))
        print(e)
    finally:
        jobset.message('SUCCESS', 'Done')
65 |
--------------------------------------------------------------------------------
/benchmark/config.py:
--------------------------------------------------------------------------------
# Shared benchmark settings, imported by the benchmark client/server scripts.
host = '0.0.0.0'  # address the benchmark gRPC server binds to
port = 60061  # port the benchmark gRPC server listens on
cpu_count = 8  # presumably the worker/process parallelism -- verify against touch_qps_worker.py
total = 1000  # presumably requests per benchmark run -- verify against touch_test.py
5 |
--------------------------------------------------------------------------------
/benchmark/jobset.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015, Google Inc.
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are
6 | # met:
7 | #
8 | # * Redistributions of source code must retain the above copyright
9 | # notice, this list of conditions and the following disclaimer.
10 | # * Redistributions in binary form must reproduce the above
11 | # copyright notice, this list of conditions and the following disclaimer
12 | # in the documentation and/or other materials provided with the
13 | # distribution.
14 | # * Neither the name of Google Inc. nor the names of its
15 | # contributors may be used to endorse or promote products derived from
16 | # this software without specific prior written permission.
17 | #
18 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 | # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 | # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 | # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 | # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 | # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 | # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 | # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 | # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
30 | """Run a group of subprocesses and then finish."""
31 |
32 | import hashlib
33 | import multiprocessing
34 | import os
35 | import platform
36 | import re
37 | import signal
38 | import subprocess
39 | import sys
40 | import tempfile
41 | import time
42 |
43 |
44 | # cpu cost measurement
45 | measure_cpu_costs = False
46 |
47 |
48 | _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
49 | _MAX_RESULT_SIZE = 8192
50 |
def platform_string():
  """Map platform.system() onto one of: windows, mac, linux, posix."""
  system = platform.system()
  if system == 'Windows' or system.startswith('MSYS_NT'):
    return 'windows'
  if system == 'Darwin':
    return 'mac'
  if system == 'Linux':
    return 'linux'
  return 'posix'
62 |
63 |
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
  pass
else:
  # have_alarm tracks whether a SIGALRM is pending; Jobset.reap() sets it
  # and calls signal.alarm(10) before signal.pause(), so the wait wakes up
  # at least every 10 seconds even if no child exits.
  have_alarm = False
  def alarm_handler(unused_signum, unused_frame):
    # Clear the flag so reap() re-arms the alarm on its next pass.
    global have_alarm
    have_alarm = False

  # The SIGCHLD handler is a no-op, but merely delivering the signal is
  # enough to interrupt signal.pause() in reap().
  signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
  signal.signal(signal.SIGALRM, alarm_handler)
77 |
78 |
79 | _SUCCESS = object()
80 | _FAILURE = object()
81 | _RUNNING = object()
82 | _KILLED = object()
83 |
84 |
85 | _COLORS = {
86 | 'red': [ 31, 0 ],
87 | 'green': [ 32, 0 ],
88 | 'yellow': [ 33, 0 ],
89 | 'lightgray': [ 37, 0],
90 | 'gray': [ 30, 1 ],
91 | 'purple': [ 35, 0 ],
92 | }
93 |
94 |
95 | _BEGINNING_OF_LINE = '\x1b[0G'
96 | _CLEAR_LINE = '\x1b[2K'
97 |
98 |
99 | _TAG_COLOR = {
100 | 'FAILED': 'red',
101 | 'FLAKE': 'purple',
102 | 'TIMEOUT_FLAKE': 'purple',
103 | 'WARNING': 'yellow',
104 | 'TIMEOUT': 'red',
105 | 'PASSED': 'green',
106 | 'START': 'gray',
107 | 'WAITING': 'yellow',
108 | 'SUCCESS': 'green',
109 | 'IDLE': 'gray',
110 | }
111 |
112 |
def message(tag, msg, explanatory_text=None, do_newline=False):
  """Print a colored status line, skipping consecutive duplicates.

  Arguments:
    tag: one of the _TAG_COLOR keys (START, PASSED, FAILED, ...)
    msg: the status text
    explanatory_text: optional multi-line detail printed before the tag line
    do_newline: end with a newline instead of leaving the cursor on the line
  """
  if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
    return
  message.old_tag = tag
  message.old_msg = msg
  try:
    # Plain output when not attached to an ANSI-capable terminal.
    if platform_string() == 'windows' or not sys.stdout.isatty():
      if explanatory_text:
        print(explanatory_text)
      print('%s: %s' % (tag, msg))
      return
    sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
        _BEGINNING_OF_LINE,
        _CLEAR_LINE,
        '\n%s' % explanatory_text if explanatory_text is not None else '',
        _COLORS[_TAG_COLOR[tag]][1],
        _COLORS[_TAG_COLOR[tag]][0],
        tag,
        msg,
        '\n' if do_newline or explanatory_text is not None else ''))
    sys.stdout.flush()
  except Exception:
    # Output failures (e.g. a closed pipe) must never kill the runner;
    # narrowed from a bare except so KeyboardInterrupt still propagates.
    pass

message.old_tag = ''
message.old_msg = ''
139 |
def which(filename):
  """Return `filename` unchanged if it contains a path separator,
  otherwise locate it on $PATH; raise if it cannot be found."""
  if '/' in filename:
    return filename
  for directory in os.environ['PATH'].split(os.pathsep):
    candidate = os.path.join(directory, filename)
    if os.path.exists(candidate):
      return candidate
  raise Exception('%s not found' % filename)
147 |
148 |
class JobSpec(object):
  """Specifies what to run for a job."""

  def __init__(self, cmdline, shortname=None, environ=None, hash_targets=None,
               cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
               timeout_retries=0, kill_handler=None, cpu_cost=1.0,
               verbose_success=False):
    """
    Arguments:
      cmdline: a list of arguments to pass as the command line
      environ: a dictionary of environment variables to set in the child process
      hash_targets: which files to include in the hash representing the jobs version
        (or empty, indicating the job should not be hashed)
      kill_handler: a handler that will be called whenever job.kill() is invoked
      cpu_cost: number of cores per second this job needs
    """
    if environ is None:
      environ = {}
    if hash_targets is None:
      hash_targets = []
    self.cmdline = cmdline
    self.environ = environ
    self.shortname = cmdline[0] if shortname is None else shortname
    self.hash_targets = hash_targets or []
    self.cwd = cwd
    self.shell = shell
    self.timeout_seconds = timeout_seconds
    self.flake_retries = flake_retries
    self.timeout_retries = timeout_retries
    self.kill_handler = kill_handler
    self.cpu_cost = cpu_cost
    self.verbose_success = verbose_success

  def identity(self):
    """Stable string identifying this spec; used for hashing and caching."""
    return '%r %r %r' % (self.cmdline, self.environ, self.hash_targets)

  def __hash__(self):
    return hash(self.identity())

  def __eq__(self, other):
    # The original defined __cmp__ returning a bool: under Python 2's cmp
    # protocol `a == b` tests `cmp(a, b) == 0`, so equal specs compared
    # UNEQUAL; under Python 3 __cmp__ is ignored entirely.  A real __eq__
    # (paired with the explicit __hash__ above) fixes both.
    return self.identity() == other.identity()

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
193 |
194 |
class JobResult(object):
  """Mutable record of a single job's outcome."""

  def __init__(self):
    # Defaults describe a job that has not produced any result yet.
    self.state = 'UNKNOWN'
    self.message = ''
    self.returncode = -1
    self.elapsed_time = 0
    self.num_failures = 0
    self.retries = 0
203 |
204 |
class Job(object):
  """Manages one job: spawns its subprocess, polls it, and applies the
  flake/timeout retry policy from its JobSpec.

  Lifecycle: start() -> repeated state() polls -> one of the terminal
  states _SUCCESS / _FAILURE / _KILLED.
  """

  def __init__(self, spec, bin_hash, newline_on_success, travis, add_env):
    # spec: JobSpec describing the command line and retry policy.
    # bin_hash: binaries hash (or None when unhashed); reported to
    #   update_cache.finished() on success.
    # add_env: extra env vars layered over os.environ and spec.environ;
    #   copied so later caller mutation has no effect.
    self._spec = spec
    self._bin_hash = bin_hash
    self._newline_on_success = newline_on_success
    self._travis = travis
    self._add_env = add_env.copy()
    self._retries = 0
    self._timeout_retries = 0
    self._suppress_failure_message = False
    message('START', spec.shortname, do_newline=self._travis)
    self.result = JobResult()
    self.start()

  def GetSpec(self):
    """Return the JobSpec this job was built from."""
    return self._spec

  def start(self):
    """(Re)spawn the subprocess; also invoked by the retry paths."""
    # stdout+stderr are captured into a temp file and read back by state().
    self._tempfile = tempfile.TemporaryFile()
    env = dict(os.environ)
    env.update(self._spec.environ)
    env.update(self._add_env)
    self._start = time.time()
    cmdline = self._spec.cmdline
    if measure_cpu_costs:
      # Prefix with GNU time so state() can parse real/user/sys later.
      cmdline = ['time', '--portability'] + cmdline
    try_start = lambda: subprocess.Popen(args=cmdline,
                                         stderr=subprocess.STDOUT,
                                         stdout=self._tempfile,
                                         cwd=self._spec.cwd,
                                         shell=self._spec.shell,
                                         env=env)
    delay = 0.3
    # Retry OSError launches (e.g. transient fork failures) with
    # exponential backoff; after four attempts the final bare try_start()
    # lets the error propagate.
    for i in range(0, 4):
      try:
        self._process = try_start()
        break
      except OSError:
        message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
        time.sleep(delay)
        delay *= 2
    else:
      self._process = try_start()
    self._state = _RUNNING

  def state(self, update_cache):
    """Poll current state of the job. Prints messages at completion."""
    def stdout(self=self):
      # Read back everything the child wrote; only the tail is kept in
      # result.message.
      self._tempfile.seek(0)
      stdout = self._tempfile.read()
      self.result.message = stdout[-_MAX_RESULT_SIZE:]
      return stdout
    if self._state == _RUNNING and self._process.poll() is not None:
      elapsed = time.time() - self._start
      self.result.elapsed_time = elapsed
      if self._process.returncode != 0:
        if self._retries < self._spec.flake_retries:
          # Non-zero exit with retries remaining: count the failure but
          # restart the job as a flake.
          message('FLAKE', '%s [ret=%d, pid=%d]' % (
              self._spec.shortname, self._process.returncode, self._process.pid),
              stdout(), do_newline=True)
          self._retries += 1
          self.result.num_failures += 1
          self.result.retries = self._timeout_retries + self._retries
          self.start()
        else:
          self._state = _FAILURE
          if not self._suppress_failure_message:
            message('FAILED', '%s [ret=%d, pid=%d]' % (
                self._spec.shortname, self._process.returncode, self._process.pid),
                stdout(), do_newline=True)
          self.result.state = 'FAILED'
          self.result.num_failures += 1
          self.result.returncode = self._process.returncode
      else:
        self._state = _SUCCESS
        measurement = ''
        if measure_cpu_costs:
          # Parse the GNU `time --portability` summary appended to stdout.
          m = re.search(r'real ([0-9.]+)\nuser ([0-9.]+)\nsys ([0-9.]+)', stdout())
          real = float(m.group(1))
          user = float(m.group(2))
          # NOTE(review): `sys` here shadows the sys module within this
          # branch only.
          sys = float(m.group(3))
          if real > 0.5:
            cores = (user + sys) / real
            measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
        message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
            self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
            stdout() if self._spec.verbose_success else None,
            do_newline=self._newline_on_success or self._travis)
        self.result.state = 'PASSED'
        if self._bin_hash:
          update_cache.finished(self._spec.identity(), self._bin_hash)
    elif (self._state == _RUNNING and
          self._spec.timeout_seconds is not None and
          time.time() - self._start > self._spec.timeout_seconds):
      # Still running but past its deadline.
      if self._timeout_retries < self._spec.timeout_retries:
        message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
        self._timeout_retries += 1
        self.result.num_failures += 1
        self.result.retries = self._timeout_retries + self._retries
        if self._spec.kill_handler:
          self._spec.kill_handler(self)
        self._process.terminate()
        self.start()
      else:
        message('TIMEOUT', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
        self.kill()
        self.result.state = 'TIMEOUT'
        self.result.num_failures += 1
    return self._state

  def kill(self):
    """Terminate the process if still running; state becomes _KILLED."""
    if self._state == _RUNNING:
      self._state = _KILLED
      if self._spec.kill_handler:
        self._spec.kill_handler(self)
      self._process.terminate()

  def suppress_failure_message(self):
    """Skip the FAILED message for this job (used during run teardown)."""
    self._suppress_failure_message = True
326 |
327 |
class Jobset(object):
  """Manages one run of jobs: admission control by cpu cost, reaping of
  finished jobs, cancellation, and collection of results into resultset."""

  def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
               stop_on_failure, add_env, cache):
    # check_cancelled: zero-arg callable polled for external cancellation.
    # maxjobs: cap on the summed cpu_cost of concurrently running jobs.
    # cache: object with should_run()/finished() (see NoCache).
    self._running = set()
    self._check_cancelled = check_cancelled
    self._cancelled = False
    self._failures = 0
    self._completed = 0
    self._maxjobs = maxjobs
    self._newline_on_success = newline_on_success
    self._travis = travis
    self._cache = cache
    self._stop_on_failure = stop_on_failure
    self._hashes = {}
    self._add_env = add_env
    self.resultset = {}
    self._remaining = None

  def set_remaining(self, remaining):
    """Record how many jobs are still queued (for the WAITING message)."""
    self._remaining = remaining

  def get_num_failures(self):
    return self._failures

  def cpu_cost(self):
    """Summed cpu_cost of all currently running jobs."""
    c = 0
    for job in self._running:
      c += job._spec.cpu_cost
    return c

  def start(self, spec):
    """Start a job. Return True on success, False on failure."""
    # Wait (reaping as we go) until the new job's cpu cost fits.
    while True:
      if self.cancelled(): return False
      current_cpu_cost = self.cpu_cost()
      if current_cpu_cost == 0: break
      if current_cpu_cost + spec.cpu_cost <= self._maxjobs: break
      self.reap()
    if self.cancelled(): return False
    if spec.hash_targets:
      # Hash the target binaries once per identity and consult the cache.
      if spec.identity() in self._hashes:
        bin_hash = self._hashes[spec.identity()]
      else:
        bin_hash = hashlib.sha1()
        for fn in spec.hash_targets:
          with open(which(fn)) as f:
            bin_hash.update(f.read())
        bin_hash = bin_hash.hexdigest()
        self._hashes[spec.identity()] = bin_hash
      should_run = self._cache.should_run(spec.identity(), bin_hash)
    else:
      bin_hash = None
      should_run = True
    if should_run:
      job = Job(spec,
                bin_hash,
                self._newline_on_success,
                self._travis,
                self._add_env)
      self._running.add(job)
      # dict.has_key() was removed in Python 3; `in` works on both.
      if job.GetSpec().shortname not in self.resultset:
        self.resultset[job.GetSpec().shortname] = []
    return True

  def reap(self):
    """Collect the dead jobs."""
    while self._running:
      dead = set()
      for job in self._running:
        st = job.state(self._cache)
        if st == _RUNNING: continue
        if st == _FAILURE or st == _KILLED:
          self._failures += 1
          if self._stop_on_failure:
            self._cancelled = True
            for job in self._running:
              job.kill()
        dead.add(job)
        break
      for job in dead:
        self._completed += 1
        self.resultset[job.GetSpec().shortname].append(job.result)
        self._running.remove(job)
      if dead: return
      if (not self._travis):
        rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
        message('WAITING', '%s%d jobs running, %d complete, %d failed' % (
            rstr, len(self._running), self._completed, self._failures))
      if platform_string() == 'windows':
        time.sleep(0.1)
      else:
        # Sleep via signal.pause(): SIGCHLD or the 10s SIGALRM wakes us.
        global have_alarm
        if not have_alarm:
          have_alarm = True
          signal.alarm(10)
        signal.pause()

  def cancelled(self):
    """Poll for cancellation; kills all running jobs on first detection."""
    if self._cancelled: return True
    if not self._check_cancelled(): return False
    for job in self._running:
      job.kill()
    self._cancelled = True
    return True

  def finish(self):
    """Block until every job has completed; True iff all succeeded."""
    while self._running:
      if self.cancelled(): pass  # poll cancellation
      self.reap()
    return not self.cancelled() and self._failures == 0
441 |
442 |
def _never_cancelled():
  """check_cancelled predicate for runs that can never be cancelled."""
  return False
445 |
446 |
# cache class that caches nothing
class NoCache(object):
  """Null cache: every job is considered worth running and no finished
  result is remembered."""

  def should_run(self, cmdline, bin_hash):
    # Nothing is ever cached, so everything runs.
    return True

  def finished(self, cmdline, bin_hash):
    # Intentionally a no-op.
    pass
454 |
455 |
def tag_remaining(xs):
  """Yield (item, remaining_count_or_None) pairs for a job queue.

  Items are buffered; while more than 1000 are buffered the number of
  items that will follow the oldest one is unknown, so it is tagged with
  None.  Once the input is exhausted, each buffered item is tagged with
  exactly how many items come after it.
  """
  staging = []
  for item in xs:
    staging.append(item)
    if len(staging) > 1000:
      yield (staging.pop(0), None)
  total = len(staging)
  for offset, item in enumerate(staging):
    yield (item, total - offset - 1)
465 |
466 |
def run(cmdlines,
        check_cancelled=_never_cancelled,
        maxjobs=None,
        newline_on_success=False,
        travis=False,
        infinite_runs=False,
        stop_on_failure=False,
        cache=None,
        add_env=None):
  """Run each JobSpec in `cmdlines` to completion.

  Returns (number_of_failures, resultset).
  """
  # add_env previously defaulted to a shared mutable {}; Job copies it,
  # but a None sentinel is the safe idiom regardless.
  if add_env is None:
    add_env = {}
  js = Jobset(check_cancelled,
              maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
              newline_on_success, travis, stop_on_failure, add_env,
              cache if cache is not None else NoCache())
  for cmdline, remaining in tag_remaining(cmdlines):
    if not js.start(cmdline):
      break
    if remaining is not None:
      js.set_remaining(remaining)
  js.finish()
  return js.get_num_failures(), js.resultset
487 |
--------------------------------------------------------------------------------
/benchmark/server.py:
--------------------------------------------------------------------------------
1 | import time
2 | import server_pb2
3 |
4 | from grma.server.base import ServerBase
5 |
6 | ONE_DAY_IN_SECONDS = 60 * 60 * 24
7 |
8 |
class ServiceImpl(server_pb2.BetaSimpleServiceServicer):
    """Servicer implementing the SimpleService Hello RPC."""

    def hello(self, request, context):
        """Echo the request's `say` field back to the caller."""
        return server_pb2.HelloResponse(reply='you said: %s' % request.say)

    Hello = hello
15 |
16 |
class Server(ServerBase):
    """grma-managed wrapper around the generated SimpleService server."""

    def __init__(self):
        """Init a benchmark server instance."""
        server = server_pb2.beta_create_SimpleService_server(
            ServiceImpl()
        )
        self.server = server
        self.started = False

    def bind(self, host, port, private_key_path='', certificate_chain_path=''):
        """Bind an insecure port; the TLS arguments are accepted but unused."""
        return self.server.add_insecure_port('%s:%s' % (host, port))

    def start(self):
        """Start serving and block until interrupted or stop() is called."""
        self.server.start()
        self.started = True
        try:
            while self.started:
                time.sleep(ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            self.stop()

    def stop(self, grace=0):
        """Stop the server.

        Fix: the original always called self.server.stop(0), silently
        ignoring the `grace` argument; it is now passed through (the
        default of 0 preserves the old behavior for no-arg callers).
        """
        self.server.stop(grace)
        self.started = False


# entry point of grma
app = Server()
47 |
--------------------------------------------------------------------------------
/benchmark/server_pb2.py:
--------------------------------------------------------------------------------
1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
2 | # source: server.proto
3 |
4 | import sys
5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | from google.protobuf import descriptor_pb2
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 | from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
17 |
18 |
19 | DESCRIPTOR = _descriptor.FileDescriptor(
20 | name='server.proto',
21 | package='',
22 | syntax='proto3',
23 | serialized_pb=_b('\n\x0cserver.proto\x1a google/protobuf/descriptor.proto\"\x14\n\x02MO\x12\x0e\n\x06method\x18\x01 \x01(\t\"\x1b\n\x0cHelloRequest\x12\x0b\n\x03say\x18\x01 \x01(\t\"\x1e\n\rHelloResponse\x12\r\n\x05reply\x18\x01 \x01(\t2B\n\rSimpleService\x12\x31\n\x05Hello\x12\r.HelloRequest\x1a\x0e.HelloResponse\"\t\xb2\xb5\x18\x05\n\x03GET:1\n\x02mo\x12\x1e.google.protobuf.MethodOptions\x18\xd6\x86\x03 \x01(\x0b\x32\x03.MOb\x06proto3')
24 | ,
25 | dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
26 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
27 |
28 |
29 | MO_FIELD_NUMBER = 50006
30 | mo = _descriptor.FieldDescriptor(
31 | name='mo', full_name='mo', index=0,
32 | number=50006, type=11, cpp_type=10, label=1,
33 | has_default_value=False, default_value=None,
34 | message_type=None, enum_type=None, containing_type=None,
35 | is_extension=True, extension_scope=None,
36 | options=None)
37 |
38 |
39 | _MO = _descriptor.Descriptor(
40 | name='MO',
41 | full_name='MO',
42 | filename=None,
43 | file=DESCRIPTOR,
44 | containing_type=None,
45 | fields=[
46 | _descriptor.FieldDescriptor(
47 | name='method', full_name='MO.method', index=0,
48 | number=1, type=9, cpp_type=9, label=1,
49 | has_default_value=False, default_value=_b("").decode('utf-8'),
50 | message_type=None, enum_type=None, containing_type=None,
51 | is_extension=False, extension_scope=None,
52 | options=None),
53 | ],
54 | extensions=[
55 | ],
56 | nested_types=[],
57 | enum_types=[
58 | ],
59 | options=None,
60 | is_extendable=False,
61 | syntax='proto3',
62 | extension_ranges=[],
63 | oneofs=[
64 | ],
65 | serialized_start=50,
66 | serialized_end=70,
67 | )
68 |
69 |
70 | _HELLOREQUEST = _descriptor.Descriptor(
71 | name='HelloRequest',
72 | full_name='HelloRequest',
73 | filename=None,
74 | file=DESCRIPTOR,
75 | containing_type=None,
76 | fields=[
77 | _descriptor.FieldDescriptor(
78 | name='say', full_name='HelloRequest.say', index=0,
79 | number=1, type=9, cpp_type=9, label=1,
80 | has_default_value=False, default_value=_b("").decode('utf-8'),
81 | message_type=None, enum_type=None, containing_type=None,
82 | is_extension=False, extension_scope=None,
83 | options=None),
84 | ],
85 | extensions=[
86 | ],
87 | nested_types=[],
88 | enum_types=[
89 | ],
90 | options=None,
91 | is_extendable=False,
92 | syntax='proto3',
93 | extension_ranges=[],
94 | oneofs=[
95 | ],
96 | serialized_start=72,
97 | serialized_end=99,
98 | )
99 |
100 |
# Descriptor for the HelloResponse message (generated by protoc from
# server.proto). serialized_start/serialized_end index into
# DESCRIPTOR.serialized_pb -- regenerate with gencode.sh, do not hand-edit.
_HELLORESPONSE = _descriptor.Descriptor(
  name='HelloResponse',
  full_name='HelloResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='reply', full_name='HelloResponse.reply', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=101,
  serialized_end=131,
)


# Wire the message descriptors into the file descriptor and register the
# custom `mo` extension, then build the concrete message classes.
DESCRIPTOR.message_types_by_name['MO'] = _MO
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloResponse'] = _HELLORESPONSE
DESCRIPTOR.extensions_by_name['mo'] = mo

MO = _reflection.GeneratedProtocolMessageType('MO', (_message.Message,), dict(
  DESCRIPTOR = _MO,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:MO)
  ))
_sym_db.RegisterMessage(MO)

HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
  DESCRIPTOR = _HELLOREQUEST,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:HelloRequest)
  ))
_sym_db.RegisterMessage(HelloRequest)

HelloResponse = _reflection.GeneratedProtocolMessageType('HelloResponse', (_message.Message,), dict(
  DESCRIPTOR = _HELLORESPONSE,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:HelloResponse)
  ))
_sym_db.RegisterMessage(HelloResponse)

# Attach the MO message type to the MethodOptions extension declared above.
mo.message_type = _MO
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(mo)
159 |
160 | import abc
161 | from grpc.beta import implementations as beta_implementations
162 | from grpc.framework.common import cardinality
163 | from grpc.framework.interfaces.face import utilities as face_utilities
164 |
class BetaSimpleServiceServicer(object):
  """Generated service-side interface for SimpleService (beta gRPC API).

  Subclass and implement Hello to serve the RPC.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, context):
    # Handle one unary HelloRequest; must return a HelloResponse.
    raise NotImplementedError()
171 |
class BetaSimpleServiceStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, timeout):
    # Unary-unary call: blocks for up to `timeout` seconds.
    raise NotImplementedError()
  Hello.future = None
179 |
def beta_create_SimpleService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Build a beta-API gRPC server dispatching SimpleService.Hello to *servicer*.

  Pool/timeout keyword arguments are forwarded to
  beta_implementations.server_options unchanged.
  """
  # The code generator emitted `import server_pb2` twice; one copy suffices.
  import server_pb2
  request_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.FromString,
  }
  response_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.SerializeToString,
  }
  method_implementations = {
    ('SimpleService', 'Hello'): face_utilities.unary_unary_inline(servicer.Hello),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
194 |
def beta_create_SimpleService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Build a beta-API dynamic stub for SimpleService over *channel*."""
  # The code generator emitted `import server_pb2` twice; one copy suffices.
  import server_pb2
  request_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.SerializeToString,
  }
  response_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.FromString,
  }
  cardinalities = {
    'Hello': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'SimpleService', cardinalities, options=stub_options)
209 | # @@protoc_insertion_point(module_scope)
210 |
--------------------------------------------------------------------------------
/benchmark/touch_qps_worker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import jobset
4 | import signal
5 | import subprocess
6 |
7 | from time import sleep
8 | from config import port, cpu_count
9 |
10 | CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
11 | JEDI_PATH = os.path.dirname(CURRENT_DIR)
12 | sys.path.insert(0, JEDI_PATH)
13 |
14 |
class TouchQPSWorker(object):
    """Spawn one touch_test.py benchmark subprocess per CPU core.

    Each subprocess is started on its own port (base port + core index)
    and tracked so it can be terminated later.
    """

    def __init__(self):
        self.cpu_count = cpu_count
        # initialised here so stop() is safe even if start() never ran
        self.processes = []
        self.running = False

    def start(self):
        """Launch the benchmark subprocesses and mark the worker running."""
        jobset.message('START', 'Start running worker background')
        exc = '%s/touch_test.py' % CURRENT_DIR
        processes = []
        # The original wrapped each command in a loop-local lambda that was
        # called immediately -- needless indirection and a late-binding
        # hazard; call Popen directly instead.
        for offset in range(self.cpu_count):
            cmd = [sys.executable, exc, '-P', str(port + offset)]
            processes.append(subprocess.Popen(cmd))
        self.processes = processes
        jobset.message('SUCCESS', 'Running Worker [cores=%s]' % self.cpu_count)
        self.running = True

    def stop(self):
        """Terminate every subprocess (best effort) and mark not running."""
        jobset.message('START', 'Start shutdown subprocess')
        for p in self.processes:
            try:
                p.terminate()
                p.wait()
                try:
                    # also signal the process group, in case the child forked
                    os.killpg(p.pid, signal.SIGTERM)
                except OSError:
                    pass
            except OSError:
                # bare `except` narrowed: only ignore OS-level kill errors
                pass
        self.running = False
        jobset.message('SUCCESS', 'Done', do_newline=True)
54 |
55 |
worker = TouchQPSWorker()


def signal_handler(signum, frame):
    """Stop the worker when a termination signal arrives.

    The first parameter was renamed from ``signal`` -- it shadowed the
    imported ``signal`` module inside the handler.
    """
    worker.stop()

# signal.signal(signal.SIGINT, signal_handler)
# signal.signal(signal.SIGQUIT, signal_handler)
# signal.signal(signal.SIGTERM, signal_handler)
# signal.signal(signal.SIGCHLD, signal_handler)


if __name__ == '__main__':
    worker.start()
    while worker.running:
        try:
            sleep(1)
        except KeyboardInterrupt:
            sys.exit(1)
75 |
--------------------------------------------------------------------------------
/benchmark/touch_test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import jobset
3 | import time
4 | import os
5 |
6 | from config import cpu_count, total
7 |
8 | from client import get_client
9 |
10 |
def run():
    """Benchmark driver: issue this worker's share of hello RPCs, report QPS.

    The target port is taken from the -P command line flag; if -P is
    missing the function silently does nothing.
    """
    pid = os.getpid()
    msg = str(pid)  # the payload doubles as a process marker for the assert
    parser = argparse.ArgumentParser(description='Run Server on PORT')
    parser.add_argument('-P', metavar='P', type=int, nargs='+',
                        help='an integer for gRPC Server port')
    args = parser.parse_args()
    if args and args.P:
        port = args.P[-1]  # last -P value wins
        jobset.message('START', 'Run hello on port %s' % port, do_newline=True)
        c = get_client()
        start = time.time()
        # this process only runs total/cpu_count requests
        tt = int(total / cpu_count)
        for i in range(tt):
            r = c.hello(msg)
            assert msg in str(r)
        end = time.time()
        diff = end - start
        # NOTE(review): qps is computed from `total`, although this process
        # issued only `tt` requests -- presumably an aggregate estimate
        # across all workers; confirm intent.
        qps = total / diff
        jobset.message('SUCCESS', 'Done hello total=%s, '
                       'time diff=%s, qps=%s' % (
                           total, diff, qps),
                       do_newline=True)

if __name__ == '__main__':
    run()
37 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qiajigou/grma/a895172c626197692dcbf0149c7bbda434f72e48/examples/__init__.py
--------------------------------------------------------------------------------
/examples/client.py:
--------------------------------------------------------------------------------
1 | import server_pb2
2 |
3 | import atexit
4 |
5 | from grpc.beta import implementations
6 | from random import randint
7 | from grpc._adapter._types import ConnectivityState
8 |
9 | global _pool
10 | _pool = dict()
11 |
12 |
class ChannelPool(object):
    """A fixed-size pool of gRPC channels/stubs with random stub selection."""

    def __init__(self, host, port, pool_size):
        self.host = host
        self.port = port
        self.pool_size = pool_size
        self.channels = []
        self.stubs = []
        # only index, no ref!
        # and this is a stub rank!
        self.working_channel_indexs = set()
        self.connect()

    def flush_channels(self):
        # call this method to check all the channels status
        # if channel connection is failed or idle
        # we could try to reconnect sometime
        channels = [self.channels[i] for i in self.working_channel_indexs]
        for channel in channels:
            # keep `state` defined so on_exception never sees an unbound
            # local when the connectivity check itself raises
            state = None
            try:
                state = channel._low_channel.check_connectivity_state(True)
                if state == ConnectivityState.CONNECTING:
                    self.on_channel_connection(channel, state)
                elif state == ConnectivityState.TRANSIENT_FAILURE:
                    self.on_transient_failure(channel, state)
                elif state == ConnectivityState.FATAL_FAILURE:
                    self.on_fatal_failure(channel, state)
                else:
                    self.on_success(channel, state)
            except Exception as e:  # `except X, e` is Python-2-only syntax
                self.on_exception(channel, state, e)

    def on_channel_connection(self, channel, state):
        pass

    def on_transient_failure(self, channel, state):
        pass

    def on_fatal_failure(self, channel, state):
        pass

    def on_success(self, channel, state):
        pass

    def on_exception(self, channel, state, e):
        pass

    def connect(self):
        """Open pool_size insecure channels and build a stub for each."""
        for i in range(self.pool_size):
            channel = implementations.insecure_channel(self.host, self.port)
            stub = server_pb2.beta_create_SimpleService_stub(channel)
            # we need to make channels[i] == stubs[i]->channel
            self.channels.append(channel)
            self.stubs.append(stub)

    def shutdown(self):
        """Drop all channel/stub references.

        Rebinding the lists releases every reference; the old per-item
        ``del`` statements only unbound loop-local names and did nothing.
        """
        self.channels = []
        self.stubs = []

    def get_stub(self):
        """Pick a random stub and remember its index as in-use."""
        index = randint(0, self.pool_size - 1)
        self.working_channel_indexs.add(index)
        return self.stubs[index]

    def __del__(self):
        self.shutdown()
85 |
86 |
class ClientImpl(object):
    """High-level client: owns a ChannelPool and exposes the Hello RPC."""

    def __init__(self, host='0.0.0.0', port=60061, size=1):
        # ChannelPool.__init__ already calls connect(); the previous extra
        # self.pool.connect() doubled the channel count while get_stub()
        # only ever picked from the first `size` stubs.
        self.pool = ChannelPool(host, port, size)
        self.register()

    def register(self):
        """Track this client in the module registry for exit cleanup."""
        # the old `del old_obj` only removed a local name, so it was dropped
        _pool[str(id(self))] = self

    def shutdown(self):
        self.pool.shutdown()

    @property
    def stub(self):
        return self.pool.get_stub()

    def hello(self, words, with_call=False):
        """Call SimpleService.Hello with *words* and a 3 second timeout."""
        request = server_pb2.HelloRequest(say=words)
        return self.stub.Hello(request, 3, with_call=with_call)

    Hello = hello
113 |
114 |
def get_client():
    """Return an already-registered client if any exist, else build one.

    New clients add themselves to the module registry in their __init__.
    """
    if _pool:
        # next(iter(...)) works on Python 2 and 3; dict.keys()[0] raises
        # TypeError on Python 3 because dict_keys is not subscriptable
        return _pool[next(iter(_pool))]
    return ClientImpl()
121 |
122 |
def exit_handler():
    """Shut down every registered client when the interpreter exits."""
    # this is a gRPC python bug
    # so we need to end everything
    # when app close
    for _, obj in _pool.items():
        obj.shutdown()

atexit.register(exit_handler)
131 |
--------------------------------------------------------------------------------
/examples/gencode.sh:
--------------------------------------------------------------------------------
1 | protoc -I ./ --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`which grpc_python_plugin` ./*.proto
2 |
--------------------------------------------------------------------------------
/examples/server.proto:
--------------------------------------------------------------------------------
syntax = "proto3";

import "google/protobuf/descriptor.proto";

// Custom method option carrying per-RPC metadata (see MO below).
extend google.protobuf.MethodOptions {
  MO mo = 50006;
}

// Payload for the `mo` method option, e.g. an HTTP verb for the RPC.
message MO {
  string method = 1;
}

// Request: the text the client wants to say.
message HelloRequest {
  string say = 1;
}

// Response: the server's echo reply.
message HelloResponse {
  string reply = 1;
}

service SimpleService {

  // this is http://c3po/service/SimpleService/call/Hello
  rpc Hello(HelloRequest) returns (HelloResponse) {
    option (mo).method = "GET";
  };

}
--------------------------------------------------------------------------------
/examples/server.py:
--------------------------------------------------------------------------------
1 | import time
2 | import server_pb2
3 |
4 | from grma.server.base import ServerBase
5 |
6 | ONE_DAY_IN_SECONDS = 60 * 60 * 24
7 |
8 |
class ServiceImpl(server_pb2.BetaSimpleServiceServicer):
    """Implements SimpleService: echoes back whatever the client said."""

    def hello(self, request, context):
        say = request.say
        return server_pb2.HelloResponse(reply='you said: %s' % say)

    # gRPC dispatches on the proto method name `Hello`.
    Hello = hello
15 |
16 |
class Server(ServerBase):
    """grma-managed wrapper around the generated SimpleService server."""

    def __init__(self):
        server = server_pb2.beta_create_SimpleService_server(
            ServiceImpl()
        )
        self.server = server
        self.started = False

    def bind(self, host, port, private_key_path='', certificate_chain_path=''):
        """Bind an insecure port; returns 0 when the port cannot be bound."""
        r = self.server.add_insecure_port('%s:%s' % (host, port))
        return r

    def start(self):
        """Start the server and block until stopped."""
        self.server.start()
        self.started = True
        try:
            while self.started:
                time.sleep(ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            self.stop()

    def stop(self, grace=0):
        """Stop the server.

        Fixed: the ``grace`` argument was previously ignored -- the code
        always called ``self.server.stop(0)``.
        """
        self.server.stop(grace)
        self.started = False
43 |
44 |
45 | # entry point of grma
46 | app = Server()
47 |
--------------------------------------------------------------------------------
/examples/server_pb2.py:
--------------------------------------------------------------------------------
1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
2 | # source: server.proto
3 |
4 | import sys
5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | from google.protobuf import descriptor_pb2
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 | from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
17 |
18 |
19 | DESCRIPTOR = _descriptor.FileDescriptor(
20 | name='server.proto',
21 | package='',
22 | syntax='proto3',
23 | serialized_pb=_b('\n\x0cserver.proto\x1a google/protobuf/descriptor.proto\"\x14\n\x02MO\x12\x0e\n\x06method\x18\x01 \x01(\t\"\x1b\n\x0cHelloRequest\x12\x0b\n\x03say\x18\x01 \x01(\t\"\x1e\n\rHelloResponse\x12\r\n\x05reply\x18\x01 \x01(\t2B\n\rSimpleService\x12\x31\n\x05Hello\x12\r.HelloRequest\x1a\x0e.HelloResponse\"\t\xb2\xb5\x18\x05\n\x03GET:1\n\x02mo\x12\x1e.google.protobuf.MethodOptions\x18\xd6\x86\x03 \x01(\x0b\x32\x03.MOb\x06proto3')
24 | ,
25 | dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
26 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
27 |
28 |
29 | MO_FIELD_NUMBER = 50006
30 | mo = _descriptor.FieldDescriptor(
31 | name='mo', full_name='mo', index=0,
32 | number=50006, type=11, cpp_type=10, label=1,
33 | has_default_value=False, default_value=None,
34 | message_type=None, enum_type=None, containing_type=None,
35 | is_extension=True, extension_scope=None,
36 | options=None)
37 |
38 |
# Generated message descriptors (protoc output from server.proto).
# serialized_start/serialized_end index into DESCRIPTOR.serialized_pb --
# regenerate with gencode.sh instead of editing by hand.
_MO = _descriptor.Descriptor(
  name='MO',
  full_name='MO',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='method', full_name='MO.method', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=50,
  serialized_end=70,
)


_HELLOREQUEST = _descriptor.Descriptor(
  name='HelloRequest',
  full_name='HelloRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='say', full_name='HelloRequest.say', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=72,
  serialized_end=99,
)


_HELLORESPONSE = _descriptor.Descriptor(
  name='HelloResponse',
  full_name='HelloResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='reply', full_name='HelloResponse.reply', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=101,
  serialized_end=131,
)
130 |
# Wire the message descriptors into the file descriptor and register the
# custom `mo` extension, then build the concrete message classes.
DESCRIPTOR.message_types_by_name['MO'] = _MO
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloResponse'] = _HELLORESPONSE
DESCRIPTOR.extensions_by_name['mo'] = mo

MO = _reflection.GeneratedProtocolMessageType('MO', (_message.Message,), dict(
  DESCRIPTOR = _MO,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:MO)
  ))
_sym_db.RegisterMessage(MO)

HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
  DESCRIPTOR = _HELLOREQUEST,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:HelloRequest)
  ))
_sym_db.RegisterMessage(HelloRequest)

HelloResponse = _reflection.GeneratedProtocolMessageType('HelloResponse', (_message.Message,), dict(
  DESCRIPTOR = _HELLORESPONSE,
  __module__ = 'server_pb2'
  # @@protoc_insertion_point(class_scope:HelloResponse)
  ))
_sym_db.RegisterMessage(HelloResponse)

# Attach the MO message type to the MethodOptions extension declared above.
mo.message_type = _MO
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(mo)
159 |
160 | import abc
161 | from grpc.beta import implementations as beta_implementations
162 | from grpc.framework.common import cardinality
163 | from grpc.framework.interfaces.face import utilities as face_utilities
164 |
class BetaSimpleServiceServicer(object):
  """Generated service-side interface for SimpleService (beta gRPC API).

  Subclass and implement Hello to serve the RPC.
  """
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, context):
    # Handle one unary HelloRequest; must return a HelloResponse.
    raise NotImplementedError()
171 |
class BetaSimpleServiceStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, timeout):
    # Unary-unary call: blocks for up to `timeout` seconds.
    raise NotImplementedError()
  Hello.future = None
179 |
def beta_create_SimpleService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Build a beta-API gRPC server dispatching SimpleService.Hello to *servicer*.

  Pool/timeout keyword arguments are forwarded to
  beta_implementations.server_options unchanged.
  """
  # The code generator emitted `import server_pb2` twice; one copy suffices.
  import server_pb2
  request_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.FromString,
  }
  response_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.SerializeToString,
  }
  method_implementations = {
    ('SimpleService', 'Hello'): face_utilities.unary_unary_inline(servicer.Hello),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
194 |
def beta_create_SimpleService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Build a beta-API dynamic stub for SimpleService over *channel*."""
  # The code generator emitted `import server_pb2` twice; one copy suffices.
  import server_pb2
  request_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.SerializeToString,
  }
  response_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.FromString,
  }
  cardinalities = {
    'Hello': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'SimpleService', cardinalities, options=stub_options)
209 | # @@protoc_insertion_point(module_scope)
210 |
--------------------------------------------------------------------------------
/grma/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -
2 |
3 | version_info = (0, 0, 5)
4 | __version__ = '.'.join([str(i) for i in version_info])
5 | __logo__ = '''
6 | ************************************
7 |
8 | ██████╗ ██████╗ ███╗ ███╗ █████╗
9 | ██╔════╝ ██╔══██╗████╗ ████║██╔══██╗
10 | ██║ ███╗██████╔╝██╔████╔██║███████║
11 | ██║ ██║██╔══██╗██║╚██╔╝██║██╔══██║
12 | ╚██████╔╝██║ ██║██║ ╚═╝ ██║██║ ██║
13 | ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝
14 |
15 | fire in the hole
16 |
17 | ************************************
18 | '''
19 |
--------------------------------------------------------------------------------
/grma/app.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import utils
3 | import importlib
4 |
5 | from config import Config
6 | from mayue import Mayue
7 |
8 |
class Application(object):
    """Entry object for the grma CLI: parses config and launches Mayue.

    NOTE(review): this module uses Python 2 syntax (print statements,
    ``except Exception, e``), so it only runs under Python 2.
    """

    def __init__(self):
        self.cfg = None     # Config instance
        self.args = None    # parsed argparse namespace
        self.server = None  # user server object resolved from --cls
        self.init_path()
        self.init_config()
        self.load_config()
        self.load_class()

    def __repr__(self):
        return ''

    def run(self):
        # only start the master loop when --cls resolved to a usable server
        if self.server:
            Mayue(self).run()

    def init_path(self):
        # make the invocation directory importable for --cls=module:var
        path = utils.getcwd()
        sys.path.insert(0, path)

    def init_config(self):
        self.cfg = Config()

    def load_config(self):
        parser = self.cfg.parser()
        args = parser.parse_args()
        self.args = args

    def load_class(self):
        """Import the --cls=module:var target and validate its interface."""
        try:
            kls = self.args.cls
            module, var = kls.split(':')
            i = importlib.import_module(module)
            c = i.__dict__.get(var)
            if c:
                try:
                    # NOTE(review): getattr without a default raises
                    # AttributeError (caught below) when a method is
                    # missing, rather than evaluating to False.
                    if getattr(c, 'start') and getattr(c, 'stop'):
                        self.server = c
                except AttributeError:
                    msg = '''--cls={cls} have no [start] or [stop] method:

exp:

class App(object):
    def __init__(self):
        pass

    def start(self):
        # start the gRPC server

    def stop(self):
        # stop the gRPC server
'''
                    print msg
                    return False
            else:
                return False
        except Exception, e:
            print e
            return False
70 |
71 |
def run():
    """Console entry point: build the Application and run it."""
    Application().run()

if __name__ == '__main__':
    run()
77 |
--------------------------------------------------------------------------------
/grma/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 |
4 |
5 | logging.basicConfig(level=logging.INFO)
6 |
7 |
class Config(object):
    """Builds the command-line argument parser for the grma CLI."""

    def parser(self):
        """Return an argparse.ArgumentParser with all grma options.

        Help-text grammar was corrected ("an string" -> "a string",
        "a int" -> "an int"); option names, types and defaults are
        unchanged.
        """
        parser = argparse.ArgumentParser(description='A simple gunicorn like '
                                         'gRPC server management tool')
        parser.add_argument('--host', type=str,
                            default='0.0.0.0',
                            help='a string for gRPC Server host')
        parser.add_argument('--port', type=int,
                            default=60051,
                            help='an integer for gRPC Server port')
        parser.add_argument('--private', type=str, default='',
                            help='a string of private key path')
        parser.add_argument('--certificate', type=str, default='',
                            help='a string of private certificate key path')
        parser.add_argument('--cls', type=str, required=True,
                            help='a string of gRPC server module '
                                 '[app:server]')
        parser.add_argument('--num', type=int, default=1,
                            help='an int of worker number')
        parser.add_argument('--pid', type=str,
                            help='pid file for grma')
        parser.add_argument('--daemon', type=int, default=0,
                            help='run as daemon')
        parser.add_argument('--grace', type=int, default=3,
                            help='timeout for graceful shutdown')
        return parser
34 |
--------------------------------------------------------------------------------
/grma/mayue.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import utils
4 | import signal
5 | import errno
6 | import logging
7 |
8 | from time import sleep
9 |
10 | from grma import __version__, __logo__
11 | from worker import Worker
12 | from pidfile import Pidfile
13 |
14 | _sigs = [getattr(signal, "SIG%s" % x)
15 | for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
16 |
17 |
18 | class Mayue(object):
19 | ctx = dict()
20 | workers = dict()
21 |
22 | logger = logging.getLogger(__name__)
23 | signal_list = list()
24 |
25 | def __init__(self, app):
26 | self.app = app
27 | self.pid = None
28 | self.pidfile = None
29 | self.try_to_stop = False
30 |
31 | args = sys.argv[:]
32 | args.insert(0, sys.executable)
33 | cwd = utils.getcwd()
34 |
35 | self.ctx = dict(args=args, cwd=cwd, exectable=sys.executable)
36 |
37 | @property
38 | def num_workers(self):
39 | return self.app.args.num
40 |
41 | @num_workers.setter
42 | def num_workers(self, value):
43 | self.app.args.num = value
44 |
45 | def spawn_worker(self):
46 | sleep(0.1)
47 | worker = Worker(self.pid, self.app.server, self.app.args)
48 |
49 | pid = os.fork()
50 |
51 | if pid != 0:
52 | # parent process
53 | self.workers[pid] = worker
54 | return pid
55 |
56 | # child process
57 | try:
58 | worker.init_worker()
59 | worker.run()
60 | sys.exit(0)
61 | except Exception as e:
62 | self.logger.exception('Exception: %s', e)
63 | finally:
64 | worker.stop()
65 |
66 | def spawn_workers(self):
67 | for i in range(self.num_workers):
68 | self.spawn_worker()
69 | self.init_signals()
70 |
71 | def stop_workers(self):
72 | for pid, worker in self.workers.items():
73 | worker.stop()
74 | del self.workers[pid]
75 | self.kill_worker(pid, signal.SIGKILL)
76 |
77 | def kill_worker(self, pid, sig):
78 | try:
79 | os.kill(pid, sig)
80 | except OSError:
81 | pass
82 |
83 | def clean(self):
84 | self.stop_workers()
85 | if self.pidfile is not None:
86 | self.pidfile.unlink()
87 |
88 | def run(self):
89 | host = self.app.args.host
90 | port = self.app.args.port
91 |
92 | print __logo__
93 |
94 | print '[OK] Running grma {version}'.format(version=__version__)
95 |
96 | print '-' * 10 + ' CONFIG ' + '-' * 10
97 |
98 | cf = dict()
99 | for arg in vars(self.app.args):
100 | cf[arg] = getattr(self.app.args, arg)
101 |
102 | for k, v in cf.items():
103 | msg = '{key}\t{value}'.format(key=k, value=v)
104 | print msg
105 |
106 | print '-' * 28
107 |
108 | if self.app.args.daemon:
109 | utils.daemonize()
110 |
111 | self.pid = os.getpid()
112 |
113 | binded = self.app.server.bind(
114 | host, port,
115 | self.app.args.private, self.app.args.certificate
116 | )
117 |
118 | if not binded:
119 | logging.info('[FAILED] Master cannot bind {host}:{port}, '
120 | 'or maybe bind function return None?'.format(
121 | host=host, port=port))
122 | sys.exit(1)
123 |
124 | logging.info('[OK] Master running pid: {pid}'.format(pid=self.pid))
125 | utils.setproctitle('grma master pid={pid}'.format(pid=self.pid))
126 |
127 | self.init_signals()
128 |
129 | if self.app.args.pid:
130 | try:
131 | self.pidfile = Pidfile(self.app.args.pid)
132 | self.pidfile.create(self.pid)
133 | except:
134 | self.clean()
135 | sys.exit(1)
136 |
137 | self.manage_workers()
138 | while True:
139 | try:
140 | if self.try_to_stop:
141 | break
142 |
143 | sig = self.signal_list.pop(0) if self.signal_list else None
144 |
145 | if sig is None:
146 | self.manage_workers()
147 | continue
148 |
149 | self.process_signal(sig)
150 | except KeyboardInterrupt:
151 | self.clean()
152 | break
153 | except Exception as e:
154 | self.logger.exception(e)
155 | self.clean()
156 | break
157 | # gRPC master server should close first
158 | self.kill_worker(self.pid, signal.SIGKILL)
159 |
160 | def process_signal(self, sig):
161 | if not sig:
162 | return
163 | signame = utils.SIG_NAMES.get(sig)
164 | self.logger.info('Handing signal {signame}'.format(
165 | signame=signame.upper()))
166 | handler = getattr(self, "_handle_%s" % signame, None)
167 | if not handler:
168 | self.logger.error('no such a hander for sig {signame}'.format(
169 | signame=signame
170 | ))
171 | else:
172 | handler()
173 |
174 | def init_signals(self):
175 | [signal.signal(s, self.handle_signal) for s in _sigs]
176 | signal.signal(signal.SIGCHLD, self._handle_chld)
177 |
178 | def handle_signal(self, sig, frame):
179 | if len(self.signal_list) < 10:
180 | self.signal_list.append(sig)
181 |
182 | def stop(self):
183 | self.clean()
184 | self.try_to_stop = True
185 |
186 | def _handle_int(self):
187 | self.stop()
188 |
189 | def _handle_quite(self):
190 | self.stop()
191 |
192 | def _handle_term(self):
193 | self.stop()
194 |
195 | def _handle_chld(self, sig, frame):
196 | self.process_workers()
197 |
198 | def _handle_ttin(self):
199 | """SIGTTIN handling.
200 | Increases the number of workers by one.
201 | """
202 | self.num_workers = self.num_workers + 1
203 | self.logger.info('Create a new worker. '
204 | 'Now you have {num} workers '
205 | 'work for you'.format(num=self.num_workers))
206 | self.manage_workers()
207 |
208 | def _handle_ttou(self):
209 | """SIGTTOU handling.
210 | Decreases the number of workers by one.
211 | """
212 | if self.num_workers <= 1:
213 | self.logger.error('You cannot kill the only worker you have.')
214 | return
215 | self.num_workers = self.num_workers - 1
216 | self.logger.info('Kill a worker. Now you have {num} workers '
217 | 'work for you'.format(num=self.num_workers))
218 | self.manage_workers()
219 |
220 | def process_workers(self):
221 | try:
222 | while True:
223 | wpid, status = os.waitpid(-1, os.WNOHANG)
224 | if not wpid:
225 | break
226 | else:
227 | worker = self.workers.pop(wpid, None)
228 | if not worker:
229 | continue
230 | worker.stop()
231 | except OSError as e:
232 | if e.errno != errno.ECHILD:
233 | raise
234 |
235 | def manage_workers(self):
236 | diff = self.num_workers - len(self.workers)
237 | if diff > 0:
238 | for i in range(diff):
239 | self.spawn_worker()
240 |
241 | workers = self.workers.items()
242 | while len(workers) > self.num_workers:
243 | (pid, _) = workers.pop(0)
244 | self.kill_worker(pid, signal.SIGKILL)
245 |
--------------------------------------------------------------------------------
/grma/pidfile.py:
--------------------------------------------------------------------------------
1 | # original code from gunicorn pidfile
2 | # has some little change
3 |
4 | import errno
5 | import os
6 | import tempfile
7 |
8 |
class Pidfile(object):
    """Manage a pidfile: create atomically, detect staleness, remove.

    Originally adapted from gunicorn's pidfile implementation.
    """

    def __init__(self, fname):
        self.fname = fname
        self.pid = None

    def create(self, pid):
        """Write *pid* to the pidfile.

        Raises RuntimeError if another live process already owns the file
        or the target directory does not exist. (An unreachable duplicate
        ``raise RuntimeError(msg % ...)`` left over from an old
        %-formatting version was removed.)
        """
        oldpid = self.validate()
        if oldpid:
            if oldpid == os.getpid():
                return
            msg = ('Already running on PID {oldpid} '
                   '(or pid file {fname} is stale)')
            raise RuntimeError(msg.format(oldpid=oldpid, fname=self.fname))

        self.pid = pid

        # Write the pidfile atomically: temp file first, then rename over.
        fdir = os.path.dirname(self.fname)
        if fdir and not os.path.isdir(fdir):
            msg = '{fdir} does not exist, cannot create pidfile'.format(
                fdir=fdir)
            raise RuntimeError(msg)
        fd, fname = tempfile.mkstemp(dir=fdir)
        os.write(fd, ('{pid}\n'.format(pid=self.pid)).encode('utf-8'))
        if self.fname:
            os.rename(fname, self.fname)
        else:
            self.fname = fname
        os.close(fd)
        os.chmod(self.fname, 420)  # 420 == 0o644

    def rename(self, path):
        """Move the pidfile to *path*, preserving the recorded pid."""
        self.unlink()
        self.fname = path
        self.create(self.pid)

    def unlink(self):
        """Best-effort removal; only deletes the file if it holds our pid."""
        try:
            with open(self.fname, 'r') as f:
                pid1 = int(f.read() or 0)

            if pid1 == self.pid:
                os.unlink(self.fname)
        except (IOError, OSError, ValueError):
            # narrowed from a bare except: file missing, unreadable,
            # or holding a non-integer -- all mean "nothing to remove"
            pass

    def validate(self):
        """Return the pid of a live owning process, or None if stale/absent."""
        if not self.fname:
            return
        try:
            with open(self.fname, 'r') as f:
                try:
                    wpid = int(f.read())
                except ValueError:
                    return

                try:
                    # signal 0 only checks for process existence
                    os.kill(wpid, 0)
                    return wpid
                except OSError as e:
                    if e.args[0] == errno.ESRCH:
                        return
                    raise
        except IOError as e:
            if e.args[0] == errno.ENOENT:
                return
            raise
77 |
--------------------------------------------------------------------------------
/grma/server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qiajigou/grma/a895172c626197692dcbf0149c7bbda434f72e48/grma/server/__init__.py
--------------------------------------------------------------------------------
/grma/server/base.py:
--------------------------------------------------------------------------------
class ServerBase(object):
    """Interface every gRPC server managed by grma must implement."""

    def __repr__(self):
        return '<{0}>'.format(self.__class__.__name__)

    def start(self):
        """Begin serving requests; subclasses must override."""
        raise NotImplementedError()

    def bind(self, host, port, private_key_path='', certificate_chain_path=''):
        """Bind to host/port, optionally with TLS credentials; subclasses must override."""
        raise NotImplementedError()

    def stop(self, grace=0):
        """Shut the server down within *grace* seconds; subclasses must override."""
        raise NotImplementedError()
15 |
--------------------------------------------------------------------------------
/grma/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import signal
3 | import fcntl
4 |
5 |
try:
    from os import closerange
except ImportError:
    # Fallback for platforms whose os module lacks closerange():
    # close every fd in [fd_low, fd_high), ignoring bad descriptors.
    def closerange(fd_low, fd_high):
        for descriptor in range(fd_low, fd_high):
            try:
                os.close(descriptor)
            except OSError:
                pass
15 |
try:
    from setproctitle import setproctitle
except ImportError:
    # The setproctitle package is optional; degrade to a no-op when it
    # is not installed.
    def setproctitle(title):
        return
21 |
22 |
# Map signal numbers to short lowercase names, e.g. SIGTERM -> 'term'.
# Pseudo-constants such as SIG_DFL/SIG_IGN are excluded by the
# underscore check on the fourth character.
SIG_NAMES = {
    getattr(signal, attr): attr[3:].lower()
    for attr in dir(signal)
    if attr[:3] == "SIG" and attr[3] != "_"
}
27 |
28 |
def getcwd():
    """Return the current working directory, preferring $PWD.

    $PWD may hold the symlinked path the user actually typed, which
    os.getcwd() would resolve; keep $PWD as long as it refers to the
    same directory (same inode and device).
    """
    try:
        pwd_stat = os.stat(os.environ['PWD'])
        cwd_stat = os.stat(os.getcwd())
        if (pwd_stat.st_ino == cwd_stat.st_ino and
                pwd_stat.st_dev == cwd_stat.st_dev):
            return os.environ['PWD']
    except (KeyError, OSError):
        # $PWD unset or not stat-able: fall back to the real cwd.
        # (The original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit.)
        pass
    return os.getcwd()
40 |
41 |
def daemonize():
    # Detach from the controlling terminal and run in the background via
    # the classic double-fork daemonization sequence.

    # First fork: parent exits so the child is not a process-group leader.
    if os.fork():
        os._exit(0)
    # Become session leader, detaching from the controlling terminal.
    os.setsid()

    # Second fork: guarantee we can never reacquire a controlling terminal.
    if os.fork():
        os._exit(0)

    # New files default to rw-r--r-- (umask 022).
    os.umask(0o22)

    # Close stdin/stdout/stderr (fds 0, 1, 2).
    closerange(0, 3)

    redir = getattr(os, 'devnull', '/dev/null')
    fd_null = os.open(redir, os.O_RDWR)

    # Re-point fds 0-2 at /dev/null so stray reads/writes are harmless.
    if fd_null != 0:
        os.dup2(fd_null, 0)

    os.dup2(fd_null, 1)
    os.dup2(fd_null, 2)
62 |
63 |
def set_non_blocking(fd):
    """Switch *fd* into non-blocking mode via its status flags."""
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current | os.O_NONBLOCK)
67 |
68 |
def close_on_exec(fd):
    """Mark *fd* so it is closed automatically across exec()."""
    fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
73 |
--------------------------------------------------------------------------------
/grma/worker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import utils
3 | import signal
4 | import logging
5 |
6 |
class Worker(object):
    """One grma worker process wrapping a single gRPC server instance."""

    logger = logging.getLogger(__name__)

    def __init__(self, pid, server, args):
        # *pid* is the master process id; *server* implements the
        # ServerBase interface; *args* carries CLI options (e.g. grace).
        self.server = server
        self.args = args
        self.master_pid = pid

    def init_worker(self):
        """Prepare the worker before serving (installs signal handlers)."""
        self.init_signals()

    def run(self):
        """Log, rename the process title and start the gRPC server."""
        worker_pid = os.getpid()
        self.logger.info('[OK] Worker running with pid: {pid}'.format(pid=worker_pid))
        utils.setproctitle('grma worker pid={pid}'.format(pid=worker_pid))
        self.server.start()

    def stop(self):
        """Stop the server, allowing args.grace seconds to drain."""
        self.server.stop(self.args.grace)

    def init_signals(self):
        # SIGQUIT, SIGTERM and SIGINT all shut the server down.
        for signum, handler in (
                (signal.SIGQUIT, self.handle_quit),
                (signal.SIGTERM, self.handle_exit),
                (signal.SIGINT, self.handle_quit)):
            signal.signal(signum, handler)

    def _stop(self):
        self.stop()

    def handle_quit(self, sig, frame):
        self._stop()

    def handle_exit(self, sig, frame):
        self._stop()
40 |
--------------------------------------------------------------------------------
/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qiajigou/grma/a895172c626197692dcbf0149c7bbda434f72e48/images/logo.png
--------------------------------------------------------------------------------
/requirements_test.txt:
--------------------------------------------------------------------------------
1 | grpcio==0.14.0
2 | protobuf==3.0.0b3
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# Packaging script for grma (a gunicorn-like gRPC process manager).
# Installs the `grma` console entry point via setuptools.
from grma import __version__

from setuptools import setup, find_packages

setup(
    name='grma',
    version=__version__,

    description='Simple gRPC Python manager',
    author='GuoJing',
    author_email='soundbbg@gmail.com',
    license='MIT',
    url='https://github.com/qiajigou/grma',
    zip_safe=False,
    # Ship only the grma package; examples and tests stay out of the
    # distribution.
    packages=find_packages(exclude=['examples', 'tests']),
    include_package_data=True,
    entry_points="""
    [console_scripts]
    grma=grma.app:run
    """,
    # setproctitle lets workers rename themselves in `ps` output.
    install_requires=[
        'setproctitle==1.1.10'
    ]
)
25 |
--------------------------------------------------------------------------------
/tests/client.py:
--------------------------------------------------------------------------------
1 | import server_pb2
2 |
3 | import atexit
4 |
5 | from grpc.beta import implementations
6 | from random import randint
7 | from grpc._adapter._types import ConnectivityState
8 |
9 | global _pool
10 | _pool = dict()
11 |
12 |
class ChannelPool(object):
    """A fixed-size pool of gRPC beta channels/stubs for one host:port."""

    def __init__(self, host, port, pool_size):
        self.host = host
        self.port = port
        self.pool_size = pool_size
        self.channels = []
        self.stubs = []
        # only index, no ref!
        # and this is a stub rank!
        self.working_channel_indexs = set()
        self.connect()

    def flush_channels(self):
        # call this method to check all the channels status
        # if channel connection is failed or idle
        # we could try to reconnect sometime
        channels = [self.channels[i] for i in self.working_channel_indexs]
        for channel in channels:
            # Initialize before the try block so the except handler can
            # always reference `state`, even when
            # check_connectivity_state() itself raises (previously this
            # caused an UnboundLocalError inside the handler).
            state = None
            try:
                state = channel._low_channel.check_connectivity_state(True)
                if state == ConnectivityState.CONNECTING:
                    self.on_channel_connection(channel, state)
                elif state == ConnectivityState.TRANSIENT_FAILURE:
                    self.on_transient_failure(channel, state)
                elif state == ConnectivityState.FATAL_FAILURE:
                    self.on_fatal_failure(channel, state)
                else:
                    self.on_success(channel, state)
            except Exception as e:
                # `except Exception as e` replaces the py2-only
                # `except Exception, e` syntax; behavior is unchanged.
                self.on_exception(channel, state, e)

    # Subclass hooks: override to react to connectivity transitions.
    def on_channel_connection(self, channel, state):
        pass

    def on_transient_failure(self, channel, state):
        pass

    def on_fatal_failure(self, channel, state):
        pass

    def on_success(self, channel, state):
        pass

    def on_exception(self, channel, state, e):
        pass

    def connect(self):
        """(Re)build pool_size channel/stub pairs."""
        for i in range(self.pool_size):
            channel = implementations.insecure_channel(self.host, self.port)
            stub = server_pb2.beta_create_SimpleService_stub(channel)
            # we need to make channels[i] == stubs[i]->channel
            self.channels.append(channel)
            self.stubs.append(stub)

    def shutdown(self):
        """Drop every channel/stub reference and reset the pool."""
        # NOTE(review): `del channel` only unbinds the loop variable; the
        # list rebinds below are what actually release the references.
        for channel in self.channels:
            del channel
        del self.channels
        for stub in self.stubs:
            del stub
        del self.stubs
        self.channels = []
        self.stubs = []

    def get_stub(self):
        """Return a random stub and record its index as in-use."""
        index = randint(0, self.pool_size - 1)
        self.working_channel_indexs.add(index)
        return self.stubs[index]

    def __del__(self):
        self.shutdown()
85 |
86 |
class ClientImpl(object):
    # Thin convenience wrapper over ChannelPool exposing SimpleService
    # RPCs; instances register themselves for shutdown at exit.

    def __init__(self, host='0.0.0.0', port=60051, size=1):
        self.pool = ChannelPool(host, port, size)
        # NOTE(review): ChannelPool.__init__ already calls connect(), so
        # this second call doubles the channels created — confirm intended.
        self.pool.connect()
        self.register()

    def register(self):
        # Record this client in the module-level _pool so exit_handler()
        # can shut it down at interpreter exit.
        key = str(id(self))
        value = self
        if _pool.get(key):
            old_obj = _pool.get(key)
            # NOTE(review): `del old_obj` only removes the local name; it
            # does not shut down or unregister the previous client.
            del old_obj
        _pool[key] = value

    def shutdown(self):
        # Release all pooled channels/stubs.
        self.pool.shutdown()

    @property
    def stub(self):
        # A randomly chosen stub on every access.
        return self.pool.get_stub()

    def hello(self, words, with_call=False):
        # Unary Hello RPC with a 3 second timeout.
        request = server_pb2.HelloRequest(say=words)
        return self.stub.Hello(request, 3, with_call=with_call)

    # Alias matching the RPC's original casing.
    Hello = hello
113 |
114 |
def get_client():
    """Return an already-registered client, or create a new one.

    An arbitrary registered client is reused; a newly created
    ClientImpl registers itself in _pool as a side effect.
    """
    if _pool:
        # next(iter(...)) works on both py2 and py3; the previous
        # `_pool.keys()[0]` fails on py3 where dict views are not
        # indexable.
        key = next(iter(_pool))
        return _pool[key]
    client = ClientImpl()
    return client
121 |
122 |
def exit_handler():
    """Shut down every registered client at interpreter exit.

    gRPC's Python runtime can misbehave at shutdown when channels are
    left open, so close everything explicitly when the app closes.
    """
    # Iterate values directly; the previous `.items()` loop discarded
    # the key.
    for client in _pool.values():
        client.shutdown()
129 |
130 | atexit.register(exit_handler)
131 |
--------------------------------------------------------------------------------
/tests/server_pb2.py:
--------------------------------------------------------------------------------
1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
2 | # source: server.proto
3 |
4 | import sys
5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | from google.protobuf import descriptor_pb2
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 | from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
17 |
18 |
19 | DESCRIPTOR = _descriptor.FileDescriptor(
20 | name='server.proto',
21 | package='',
22 | syntax='proto3',
23 | serialized_pb=_b('\n\x0cserver.proto\x1a google/protobuf/descriptor.proto\"\x14\n\x02MO\x12\x0e\n\x06method\x18\x01 \x01(\t\"\x1b\n\x0cHelloRequest\x12\x0b\n\x03say\x18\x01 \x01(\t\"\x1e\n\rHelloResponse\x12\r\n\x05reply\x18\x01 \x01(\t2B\n\rSimpleService\x12\x31\n\x05Hello\x12\r.HelloRequest\x1a\x0e.HelloResponse\"\t\xb2\xb5\x18\x05\n\x03GET:1\n\x02mo\x12\x1e.google.protobuf.MethodOptions\x18\xd6\x86\x03 \x01(\x0b\x32\x03.MOb\x06proto3')
24 | ,
25 | dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
26 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
27 |
28 |
29 | MO_FIELD_NUMBER = 50006
30 | mo = _descriptor.FieldDescriptor(
31 | name='mo', full_name='mo', index=0,
32 | number=50006, type=11, cpp_type=10, label=1,
33 | has_default_value=False, default_value=None,
34 | message_type=None, enum_type=None, containing_type=None,
35 | is_extension=True, extension_scope=None,
36 | options=None)
37 |
38 |
39 | _MO = _descriptor.Descriptor(
40 | name='MO',
41 | full_name='MO',
42 | filename=None,
43 | file=DESCRIPTOR,
44 | containing_type=None,
45 | fields=[
46 | _descriptor.FieldDescriptor(
47 | name='method', full_name='MO.method', index=0,
48 | number=1, type=9, cpp_type=9, label=1,
49 | has_default_value=False, default_value=_b("").decode('utf-8'),
50 | message_type=None, enum_type=None, containing_type=None,
51 | is_extension=False, extension_scope=None,
52 | options=None),
53 | ],
54 | extensions=[
55 | ],
56 | nested_types=[],
57 | enum_types=[
58 | ],
59 | options=None,
60 | is_extendable=False,
61 | syntax='proto3',
62 | extension_ranges=[],
63 | oneofs=[
64 | ],
65 | serialized_start=50,
66 | serialized_end=70,
67 | )
68 |
69 |
70 | _HELLOREQUEST = _descriptor.Descriptor(
71 | name='HelloRequest',
72 | full_name='HelloRequest',
73 | filename=None,
74 | file=DESCRIPTOR,
75 | containing_type=None,
76 | fields=[
77 | _descriptor.FieldDescriptor(
78 | name='say', full_name='HelloRequest.say', index=0,
79 | number=1, type=9, cpp_type=9, label=1,
80 | has_default_value=False, default_value=_b("").decode('utf-8'),
81 | message_type=None, enum_type=None, containing_type=None,
82 | is_extension=False, extension_scope=None,
83 | options=None),
84 | ],
85 | extensions=[
86 | ],
87 | nested_types=[],
88 | enum_types=[
89 | ],
90 | options=None,
91 | is_extendable=False,
92 | syntax='proto3',
93 | extension_ranges=[],
94 | oneofs=[
95 | ],
96 | serialized_start=72,
97 | serialized_end=99,
98 | )
99 |
100 |
101 | _HELLORESPONSE = _descriptor.Descriptor(
102 | name='HelloResponse',
103 | full_name='HelloResponse',
104 | filename=None,
105 | file=DESCRIPTOR,
106 | containing_type=None,
107 | fields=[
108 | _descriptor.FieldDescriptor(
109 | name='reply', full_name='HelloResponse.reply', index=0,
110 | number=1, type=9, cpp_type=9, label=1,
111 | has_default_value=False, default_value=_b("").decode('utf-8'),
112 | message_type=None, enum_type=None, containing_type=None,
113 | is_extension=False, extension_scope=None,
114 | options=None),
115 | ],
116 | extensions=[
117 | ],
118 | nested_types=[],
119 | enum_types=[
120 | ],
121 | options=None,
122 | is_extendable=False,
123 | syntax='proto3',
124 | extension_ranges=[],
125 | oneofs=[
126 | ],
127 | serialized_start=101,
128 | serialized_end=131,
129 | )
130 |
131 | DESCRIPTOR.message_types_by_name['MO'] = _MO
132 | DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
133 | DESCRIPTOR.message_types_by_name['HelloResponse'] = _HELLORESPONSE
134 | DESCRIPTOR.extensions_by_name['mo'] = mo
135 |
136 | MO = _reflection.GeneratedProtocolMessageType('MO', (_message.Message,), dict(
137 | DESCRIPTOR = _MO,
138 | __module__ = 'server_pb2'
139 | # @@protoc_insertion_point(class_scope:MO)
140 | ))
141 | _sym_db.RegisterMessage(MO)
142 |
143 | HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
144 | DESCRIPTOR = _HELLOREQUEST,
145 | __module__ = 'server_pb2'
146 | # @@protoc_insertion_point(class_scope:HelloRequest)
147 | ))
148 | _sym_db.RegisterMessage(HelloRequest)
149 |
150 | HelloResponse = _reflection.GeneratedProtocolMessageType('HelloResponse', (_message.Message,), dict(
151 | DESCRIPTOR = _HELLORESPONSE,
152 | __module__ = 'server_pb2'
153 | # @@protoc_insertion_point(class_scope:HelloResponse)
154 | ))
155 | _sym_db.RegisterMessage(HelloResponse)
156 |
157 | mo.message_type = _MO
158 | google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(mo)
159 |
160 | import abc
161 | from grpc.beta import implementations as beta_implementations
162 | from grpc.framework.common import cardinality
163 | from grpc.framework.interfaces.face import utilities as face_utilities
164 |
class BetaSimpleServiceServicer(object):
  """Generated abstract servicer for the SimpleService RPC service."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, context):
    raise NotImplementedError()
171 |
class BetaSimpleServiceStub(object):
  """The interface to which stubs will conform."""
  __metaclass__ = abc.ABCMeta
  @abc.abstractmethod
  def Hello(self, request, timeout):
    raise NotImplementedError()
  # Placeholder: the dynamic stub provides the real Hello.future
  # (asynchronous) implementation at runtime.
  Hello.future = None
179 |
def beta_create_SimpleService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Create a beta-API gRPC server serving *servicer*'s Hello RPC."""
  # NOTE: generated code — the duplicated import below is emitted by the
  # protoc plugin and deliberately left untouched.
  import server_pb2
  import server_pb2
  request_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.FromString,
  }
  response_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.SerializeToString,
  }
  method_implementations = {
    ('SimpleService', 'Hello'): face_utilities.unary_unary_inline(servicer.Hello),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
194 |
def beta_create_SimpleService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Create a beta-API dynamic stub for SimpleService over *channel*."""
  # NOTE: generated code — the duplicated import below is emitted by the
  # protoc plugin and deliberately left untouched.
  import server_pb2
  import server_pb2
  request_serializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloRequest.SerializeToString,
  }
  response_deserializers = {
    ('SimpleService', 'Hello'): server_pb2.HelloResponse.FromString,
  }
  cardinalities = {
    'Hello': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'SimpleService', cardinalities, options=stub_options)
209 | # @@protoc_insertion_point(module_scope)
210 |
--------------------------------------------------------------------------------
/tests/test_touch.py:
--------------------------------------------------------------------------------
def test_touch():
    """End-to-end smoke test: the Hello RPC echoes our greeting back.

    Requires a grma-managed server to be running (tox starts one via
    `grma --cls=examples.server:app` before pytest).
    """
    from client import get_client
    client = get_client()
    resp = client.hello('hello')
    assert 'hello' in str(resp)
6 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py27
3 | skipsdist = True
4 |
5 | [testenv]
6 | usedevelop = True
7 | whitelist_externals=*
8 | commands =
9 | grma --cls=examples.server:app --num=2 --daemon=1 --pid=grma.pid
10 | py.test {posargs:tests/} --assert=plain
11 | deps =
12 | -rrequirements_test.txt
13 | py27: mock
14 |
--------------------------------------------------------------------------------