├── AUTHORS ├── LICENSE ├── README.md ├── arcus.py ├── arcus_cmd.py ├── arcus_mc_node.py ├── arcus_util.py ├── arcus_zk_cmd.py ├── test.py ├── zk_sync.py └── zk_util.py /AUTHORS: -------------------------------------------------------------------------------- 1 | KiYeul Lee ; 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | 204 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ## arcus-python-client : Arcus Python Client 3 | 4 | This is a python3 client driver for Arcus cloud. 5 | 6 | ## Requirement 7 | 8 | This driver is made by python 3.0 9 | and it uses kazoo module to handle zookeeper. Install kazoo first: https://kazoo.readthedocs.org 10 | 11 | ## Use 12 | 13 | Just import arcus.py and arcus_mc_node.py. 14 | test.py is basic functional test for this driver and you can get detail information about that. 15 | 16 | Visit arcus cache cloud project at github to get more detail information. 
17 | https://github.com/naver/arcus 18 | 19 | 20 | ## License 21 | 22 | Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /arcus.py: -------------------------------------------------------------------------------- 1 | # 2 | # arcus-python-client - Arcus python client drvier 3 | # Copyright 2014 NAVER Corp. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
#

# kazoo is a third-party zookeeper client; it is only required once
# ArcusLocator.connect() is actually called, so tolerate its absence and
# let the transcoder / exception classes be used standalone.
try:
    from kazoo.client import KazooClient
except ImportError:  # pragma: no cover - only hit when kazoo is not installed
    KazooClient = None

from threading import Lock
import hashlib, bisect, re
import struct, datetime, time
import queue
import zlib
import pickle
from io import BytesIO


g_log = False

def enable_log(flag = True):
    """Globally enable (or disable) the driver's debug logging."""
    global g_log
    g_log = flag


def arcuslog(caller, *param):
    """Print a debug line: timestamp + caller class/id, then repr of each param.

    caller may be None for module-level messages.  No-op unless enable_log()
    was called.
    """
    global g_log

    if g_log:
        msg = ''
        if caller:
            msg = '[%s - %s(%s)] ' % (datetime.datetime.now(),
                                      caller.__class__.__name__, hex(id(caller)))
        for p in param:
            msg += repr(p)

        print(msg)


class ArcusException(Exception):
    """Base class of every exception raised by this driver."""
    def __init__(self, msg):
        self.msg = msg


class ArcusProtocolException(ArcusException):
    """Malformed or unexpected response from a cache node."""
    def __init__(self, msg):
        self.msg = msg


class ArcusNodeException(ArcusException):
    """Node-level failure."""
    def __init__(self, msg):
        self.msg = msg

class ArcusNodeSocketException(ArcusNodeException):
    """Socket-level I/O failure against a cache node."""
    def __init__(self, msg):
        self.msg = msg

class ArcusNodeConnectionException(ArcusNodeException):
    """Connection to a cache node could not be made or was lost."""
    def __init__(self, msg):
        self.msg = msg


class ArcusListException(ArcusException):
    """Failure in the high-level ArcusList wrapper."""
    def __init__(self, msg):
        self.msg = msg


class CollectionException(ArcusException):
    """Base class for collection (lop/sop/bop) errors."""
    def __init__(self, msg):
        self.msg = msg

class CollectionType(CollectionException):
    def __init__(self, msg='collection type mismatch'):
        self.msg = msg

class CollectionExist(CollectionException):
    # BUG FIX: default message typo 'exits' -> 'exists'
    def __init__(self, msg='collection already exists'):
        self.msg = msg

class CollectionIndex(CollectionException):
    def __init__(self, msg='invalid index or range'):
        self.msg = msg

class CollectionOverflow(CollectionException):
    def __init__(self, msg='collection overflow'):
        self.msg = msg

class CollectionUnreadable(CollectionException):
    def __init__(self, msg='collection is unreadable'):
        self.msg = msg

class CollectionHexFormat(CollectionException):
    def __init__(self, msg='invalid hex string format'):
        self.msg = msg

class FilterInvalid(CollectionException):
    # BUG FIX: default message typo 'fiter' -> 'filter'
    def __init__(self, msg='invalid filter expression'):
        self.msg = msg


class ArcusTranscoder:
    """Converts python values to/from the (flags, length, payload) wire form.

    The flag layout mirrors the java arcus client so both clients can read
    each other's values.
    """

    # primitive type (high byte of flags)
    FLAG_MASK=0xff00
    FLAG_STRING=0
    FLAG_BOOLEAN=(1<<8)
    FLAG_INTEGER=(2<<8)    # decode only (for other client)
    FLAG_LONG=(3<<8)
    FLAG_DATE=(4<<8)
    FLAG_BYTE=(5<<8)       # decode only (for other client)
    FLAG_FLOAT=(6<<8)      # decode only (for other client)
    FLAG_DOUBLE=(7<<8)
    FLAG_BYTEARRAY=(8<<8)

    # general case (low byte of flags)
    FLAG_SERIALIZED = 1    # used at java
    FLAG_COMPRESSED = 2
    FLAG_PICKLE = 4

    def __init__(self):
        # payloads longer than this are compressed; 0 disables compression
        self.min_compress_len = 0

    def encode(self, val):
        """Serialize val; returns (flags, payload_length, payload_bytes)."""
        flags = 0
        if isinstance(val, str):
            ret = bytes(val, 'utf-8')
        elif isinstance(val, bool):
            # NOTE: bool must be tested before int (bool is an int subclass)
            flags |= self.FLAG_BOOLEAN
            ret = struct.pack('>b', val)
        elif isinstance(val, int):
            flags |= self.FLAG_LONG
            ret = struct.pack('>q', val)
        elif isinstance(val, float):
            flags |= self.FLAG_DOUBLE
            ret = struct.pack('>d', val)
        elif isinstance(val, datetime.datetime):
            flags |= self.FLAG_DATE
            # milliseconds since the epoch (java Date representation)
            ret = int((time.mktime(val.timetuple()) + val.microsecond/1000000.0)*1000)
            ret = struct.pack('>q', ret)
        elif isinstance(val, bytes):
            flags |= self.FLAG_BYTEARRAY
            ret = val
        else:
            # arbitrary object: pickle with protocol 0 as before.
            # BUG FIX: the original wrote pickle output through a text
            # StringIO (undefined here, and unable to hold binary pickle
            # data); BytesIO is required.
            flags |= self.FLAG_PICKLE
            file = BytesIO()
            pickler = pickle.Pickler(file, 0)
            pickler.dump(val)
            ret = file.getvalue()

        lv = len(ret)
        if self.min_compress_len and lv > self.min_compress_len:
            # BUG FIX: the original called an undefined compress(); emit a
            # gzip-wrapped stream so that decode()'s
            # zlib.decompress(buf, 16 + zlib.MAX_WBITS) can read it back.
            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,
                                  16 + zlib.MAX_WBITS)
            comp_val = co.compress(ret) + co.flush()
            if len(comp_val) < lv:
                flags |= self.FLAG_COMPRESSED
                ret = comp_val

        return (flags, len(ret), ret)

    def decode(self, flags, buf):
        """Inverse of encode(); returns None for undecodable/unknown payloads."""
        if flags & self.FLAG_COMPRESSED != 0:
            buf = zlib.decompress(buf, 16+zlib.MAX_WBITS)

        if flags & self.FLAG_PICKLE != 0:
            # BUG FIX: FLAG_PICKLE lives in the low byte, so it must be
            # tested BEFORE masking with FLAG_MASK (high byte) wipes it;
            # the original also referenced an undefined `Client` and tried
            # to utf-8 encode a bytes payload.
            try:
                return pickle.Unpickler(BytesIO(buf)).load()
            except Exception as e:
                arcuslog(self, 'Pickle error: %s\n' % e)
                return None

        flags = flags & self.FLAG_MASK

        if flags == 0:
            val = buf.decode('utf-8')
        elif flags == self.FLAG_BOOLEAN:
            val = struct.unpack('>b', buf)[0] == 1
        elif flags == self.FLAG_INTEGER or flags == self.FLAG_LONG or flags == self.FLAG_BYTE:
            # unsigned big-endian integer of arbitrary length
            val = int.from_bytes(buf, 'big')
        elif flags == self.FLAG_DATE:
            # milliseconds since the epoch -> datetime
            val = int.from_bytes(buf, 'big')
            val = datetime.datetime.fromtimestamp(val/1000.0)
        elif flags == self.FLAG_FLOAT:
            val = struct.unpack('>f', buf)[0]
        elif flags == self.FLAG_DOUBLE:
            val = struct.unpack('>d', buf)[0]
        elif flags == self.FLAG_BYTEARRAY:
            val = buf
        else:
            # BUG FIX: the original fell through here with `val` unbound,
            # raising UnboundLocalError instead of returning a sentinel.
            arcuslog(self, "unknown flags on get: %x\n" % flags)
            val = None

        return val


class ArcusKetemaHash:
    """Ketama consistent hashing: expands one node address to 160 hash points."""

    def __init__(self):
        # config: 40 md5 rounds x 4 points per digest = 160 points per node
        self.per_node = 40
        self.per_hash = 4

    def hash(self, addr):
        """Return the full point list for `addr` (per_node * per_hash ints)."""
        ret = []
        for i in range(0, self.per_node):
            ret = ret + self.__hash('%s-%d' % (addr, i))
        return ret

    def __hash(self, input):
        # carve 4 little-endian 32-bit points out of one md5 digest
        r = hashlib.md5(bytes(input, 'utf-8')).digest()
        ret = []
        for i in range(0, self.per_hash):
            point = ((r[3 + i*4] << 24) | (r[2 + i*4] << 16) |
                     (r[1 + i*4] << 8) | r[0 + i*4])
            ret.append(point)
        return ret


class ArcusPoint:
    """One (hash, node) point on the ketama continuum; ordered by hash."""

    def __init__(self, hash, node):
        self.hash = hash
        self.node = node

    def __lt__(self, rhs):
        return self.hash < rhs.hash

    def __le__(self, rhs):
        return self.hash <= rhs.hash

    def __eq__(self, rhs):
        return self.hash == rhs.hash

    def __ne__(self, rhs):
        return self.hash != rhs.hash

    def __gt__(self, rhs):
        return self.hash > rhs.hash

    def __ge__(self, rhs):
        return self.hash >= rhs.hash

    def __repr__(self):
        return '(%d:%s)' % (self.hash, self.node)


class ArcusLocator:
    """Maps keys to cache nodes via a ketama continuum kept in sync with
    the zookeeper /arcus/cache_list/<code> child list."""

    def __init__(self, node_allocator):
        # config
        self.hash_method = ArcusKetemaHash()

        # init
        self.lock = Lock()                 # guards node_list rebuilds/lookups
        self.node_list = []                # sorted ArcusPoint continuum
        self.addr_node_map = {}            # addr -> node object
        self.node_allocator = node_allocator

    def connect(self, addr, code):
        """Start the zookeeper session at `addr` and load cloud `code`'s nodes."""
        arcuslog(self, 'zoo keeper init')
        self.zk = KazooClient(hosts=addr)
        self.zk.start()

        self.zoo_path = '/arcus/cache_list/' + code
        arcuslog(self, 'zoo keeper get path: ' + self.zoo_path)
        data, stat = self.zk.get(self.zoo_path)
        arcuslog(self, 'zoo keeper node info with stat: ', data, stat)

        children = self.zk.get_children(self.zoo_path, watch=self.watch_children)
        self.hash_nodes(children)

    def disconnect(self):
        """Close every node connection, the zookeeper session and the allocator."""
        for node in self.addr_node_map.values():
            node.disconnect_all()

        self.addr_node_map = {}
        self.node_list = []
        self.zk.stop()
        self.node_allocator.join()

    def hash_nodes(self, children):
        """Rebuild the continuum from the current zookeeper child list.

        Children look like '<addr>-<name>...'.  Nodes no longer listed are
        disconnected and dropped.
        """
        arcuslog(self, 'hash_nodes with children: ', children)

        # `with` guarantees the lock is released even if a node ctor raises
        with self.lock:
            # clear first
            self.node_list = []
            for node in self.addr_node_map.values():
                node.in_use = False

            # update live nodes
            for child in children:
                lst = child.split('-')
                addr, name = lst[:2]

                if addr in self.addr_node_map:
                    node = self.addr_node_map[addr]
                    node.in_use = True
                else:
                    # new node joined the cloud
                    node = self.node_allocator.alloc(addr, name)
                    self.addr_node_map[addr] = node
                    node.in_use = True

                hash_list = self.hash_method.hash(node.addr)
                arcuslog(self, 'hash_lists of node(%s): %s' % (node.addr, hash_list))

                for hval in hash_list:
                    self.node_list.append(ArcusPoint(hval, node))

            # sort list
            self.node_list.sort()
            arcuslog(self, 'sorted node list', self.node_list)

            # disconnect nodes that disappeared from zookeeper
            dead_list = [node for node in self.addr_node_map.values()
                         if node.in_use == False]
            for node in dead_list:
                arcuslog(self, 'disconnect node(%s)' % node.addr)
                node.disconnect()
                del self.addr_node_map[node.addr]

    def watch_children(self, event):
        """Zookeeper watch callback: re-read children and rehash."""
        arcuslog(self, 'watch children called: ', event)

        # rehashing (re-arm the watch at the same time)
        children = self.zk.get_children(event.path, watch=self.watch_children)
        self.hash_nodes(children)

    def get_node(self, key):
        """Return the node owning `key` (ketama lookup on the continuum)."""
        hval = self.__hash_key(key)

        with self.lock:
            idx = bisect.bisect(self.node_list, ArcusPoint(hval, None))

            # roll over the ring
            if idx >= len(self.node_list):
                idx = 0

            point = self.node_list[idx]

        return point.node

    def __hash_key(self, key):
        # first 4 md5 digest bytes, little-endian, as an unsigned 32-bit point
        r = hashlib.md5(bytes(key, 'utf-8')).digest()
        return r[3] << 24 | r[2] << 16 | r[1] << 8 | r[0]


class Arcus:
    """Arcus cloud front-end: each command routes its key through the locator
    to the owning node and delegates."""

    def __init__(self, locator):
        self.locator = locator
def connect(self, addr, code): 399 | self.locator.connect(addr, code) 400 | 401 | def disconnect(self): 402 | self.locator.disconnect() 403 | 404 | def set(self, key, val, exptime=0): 405 | node = self.locator.get_node(key) 406 | return node.set(key, val, exptime) 407 | 408 | def get(self, key): 409 | node = self.locator.get_node(key) 410 | return node.get(key) 411 | 412 | def gets(self, key): 413 | node = self.locator.get_node(key) 414 | return node.gets(key) 415 | 416 | def incr(self, key, val=1): 417 | node = self.locator.get_node(key) 418 | return node.incr(key, val) 419 | 420 | def decr(self, key, val=1): 421 | node = self.locator.get_node(key) 422 | return node.decr(key, val) 423 | 424 | def delete(self, key): 425 | node = self.locator.get_node(key) 426 | return node.delete(key) 427 | 428 | def add(self, key, val, exptime=0): 429 | node = self.locator.get_node(key) 430 | return node.add(key, val, exptime) 431 | 432 | def append(self, key, val, exptime=0): 433 | node = self.locator.get_node(key) 434 | return node.append(key, val, exptime) 435 | 436 | def prepend(self, key, val, exptime=0): 437 | node = self.locator.get_node(key) 438 | return node.prepend(key, val, exptime) 439 | 440 | def replace(self, key, val, exptime=0): 441 | node = self.locator.get_node(key) 442 | return node.replace(key, val, exptime) 443 | 444 | def cas(self, key, val, cas_id, exptime=0): 445 | node = self.locator.get_node(key) 446 | return node.cas(key, val, cas_id, time) 447 | 448 | def lop_create(self, key, flags, exptime=0, noreply=False, attr_map=None): 449 | node = self.locator.get_node(key) 450 | return node.lop_create(key, flags, exptime, noreply, attr_map) 451 | 452 | def lop_insert(self, key, index, value, noreply=False, pipe=False, attr_map=None): 453 | node = self.locator.get_node(key) 454 | return node.lop_insert(key, index, value, noreply, pipe, attr_map) 455 | 456 | def lop_get(self, key, range, delete=False, drop=False): 457 | node = self.locator.get_node(key) 458 | 
return node.lop_get(key, range, delete, drop) 459 | 460 | def lop_delete(self, key, range, drop=False, noreply=False, pipe=False): 461 | node = self.locator.get_node(key) 462 | return node.lop_delete(key, range, drop, noreply, pipe) 463 | 464 | def sop_create(self, key, flags, exptime=0, noreply=False, attr_map=None): 465 | node = self.locator.get_node(key) 466 | return node.sop_create(key, flags, exptime, noreply, attr_map) 467 | 468 | def sop_insert(self, key, value, noreply=False, pipe=False, attr_map=None): 469 | node = self.locator.get_node(key) 470 | return node.sop_insert(key, value, noreply, pipe, attr_map) 471 | 472 | def sop_get(self, key, count=0, delete=False, drop=False): 473 | node = self.locator.get_node(key) 474 | return node.sop_get(key, count, delete, drop) 475 | 476 | def sop_delete(self, key, value, drop=False, noreply=False, pipe=False): 477 | node = self.locator.get_node(key) 478 | return node.sop_delete(key, value, drop, noreply, pipe) 479 | 480 | def sop_exist(self, key, value, pipe=False): 481 | node = self.locator.get_node(key) 482 | return node.sop_exist(key, value, pipe) 483 | 484 | def bop_create(self, key, flags, exptime=0, noreply=False, attr_map=None): 485 | node = self.locator.get_node(key) 486 | return node.bop_create(key, flags, exptime, noreply, attr_map) 487 | 488 | def bop_insert(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr_map=None): 489 | node = self.locator.get_node(key) 490 | return node.bop_insert(key, bkey, value, eflag, noreply, pipe, attr_map) 491 | 492 | def bop_upsert(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr_map=None): 493 | node = self.locator.get_node(key) 494 | return node.bop_upsert(key, bkey, value, eflag, noreply, pipe, attr_map) 495 | 496 | def bop_update(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr_map=None): 497 | node = self.locator.get_node(key) 498 | return node.bop_update(key, bkey, value, eflag, noreply, pipe, attr_map) 499 | 500 | 
def bop_get(self, key, range, filter=None, delete=False, drop=False): 501 | node = self.locator.get_node(key) 502 | return node.bop_get(key, range, filter, delete, drop) 503 | 504 | def bop_delete(self, key, range, filter=None, count=None, drop=False, noreply=False, pipe=False): 505 | node = self.locator.get_node(key) 506 | return node.bop_delete(key, range, filter, count, drop, noreply, pipe) 507 | 508 | def bop_count(self, key, range, filter=None): 509 | node = self.locator.get_node(key) 510 | return node.bop_count(key, range, filter) 511 | 512 | def bop_incr(self, key, bkey, value, noreply=False, pipe=False): 513 | node = self.locator.get_node(key) 514 | return node.bop_incr(key, bkey, value, noreply, pipe) 515 | 516 | def bop_decr(self, key, bkey, value, noreply=False, pipe=False): 517 | node = self.locator.get_node(key) 518 | return node.bop_incr(key, bkey, value, noreply, pipe) 519 | 520 | def bop_mget(self, key_list, range, filter=None, offset=None, count=50): 521 | nodes = {} 522 | 523 | for key in key_list: 524 | node = self.locator.get_node(key) 525 | if node not in nodes: 526 | nodes[node] = [key] 527 | else: 528 | nodes[node].append(key) 529 | 530 | op_list = ArcusOperationList('bop mget') 531 | for node in nodes: 532 | op = node.bop_mget(nodes[node], range, filter, offset, count) 533 | op_list.add_op(op) 534 | 535 | return op_list 536 | 537 | 538 | def bop_smget(self, key_list, range, filter=None, offset=None, count=2000): 539 | nodes = {} 540 | 541 | for key in key_list: 542 | node = self.locator.get_node(key) 543 | if node not in nodes: 544 | nodes[node] = [key] 545 | else: 546 | nodes[node].append(key) 547 | 548 | op_list = ArcusOperationList('bop smget') 549 | for node in nodes: 550 | op = node.bop_smget(nodes[node], range, filter, offset, count) 551 | op_list.add_op(op) 552 | 553 | return op_list 554 | 555 | def list_alloc(self, key, flags, exptime=0, cache_time=0): 556 | self.lop_create(key, flags, exptime) 557 | return self.list_get(key, 
cache_time) 558 | 559 | def list_get(self, key, cache_time=0): 560 | return ArcusList(self, key, cache_time) 561 | 562 | def set_alloc(self, key, flags, exptime=0, cache_time=0): 563 | self.sop_create(key, flags, exptime) 564 | return self.set_get(key, cache_time) 565 | 566 | def set_get(self, key, cache_time=0): 567 | return ArcusSet(self, key, cache_time) 568 | 569 | 570 | 571 | class ArcusOperation: 572 | def __init__(self, node, request, callback): 573 | self.node = node 574 | self.request = request 575 | self.callback = callback 576 | self.q = queue.Queue(1) 577 | self.result = self # self.result == self : not received, self.result == None : receive None 578 | self.invalid = False 579 | 580 | self.noreply = False 581 | self.pipe = False 582 | 583 | def __repr__(self): 584 | return '' % (hex(id(self)), repr(self.get_result())) 585 | 586 | def has_result(self): 587 | return self.result != None or self.q.empty() == False 588 | 589 | def set_result(self, result): 590 | self.q.put(result) 591 | 592 | def set_invalid(self): 593 | if self.has_result(): 594 | return False 595 | 596 | self.invalid = True 597 | self.q.put(None) # wake up blocked callers. 
598 | return True 599 | 600 | def get_result(self, timeout=0): 601 | if self.result != self: 602 | return self.result 603 | 604 | if timeout > 0: 605 | result = self.q.get(False, timeout) 606 | else: 607 | result = self.q.get() 608 | 609 | if result == self and self.invalid == True: 610 | raise ArcusNodeConnectionException('current async result is unavailable because Arcus node is disconnected now') 611 | 612 | if isinstance(result, Exception): 613 | raise result 614 | 615 | self.result = result 616 | return result 617 | 618 | 619 | 620 | 621 | class ArcusOperationList: 622 | def __init__(self, cmd): 623 | self.ops = [] 624 | self.cmd = cmd 625 | self.result = None 626 | self.missed_key = None 627 | self.invalid = False 628 | 629 | self.noreply = False 630 | self.pipe = False 631 | 632 | def __repr__(self): 633 | return '' % (hex(id(self)), repr(self.get_result())) 634 | 635 | def add_op(self, op): 636 | self.ops.append(op) 637 | 638 | def has_result(self): 639 | if self.result != None: 640 | return True 641 | 642 | for a in ops: 643 | if a.has_result() == False: 644 | return False 645 | 646 | return True 647 | 648 | def set_result(self, result): 649 | assert False 650 | pass 651 | 652 | def set_invalidate(self): 653 | if self.has_result(): 654 | return False # already done 655 | 656 | self.invalid = True 657 | 658 | # invalidate all ops and wake up blockers. 
659 | for a in ops: 660 | a.set_invalidate() 661 | 662 | return True 663 | 664 | def get_missed_key(self, timeout=0): 665 | if self.missed_key != None: 666 | return self.missed_key 667 | 668 | self.get_result(timeout) 669 | return self.missed_key 670 | 671 | 672 | def get_result(self, timeout=0): 673 | if self.result != None: 674 | return self.result 675 | 676 | tmp_result = [] 677 | missed_key = [] 678 | if (timeout > 0): 679 | start_time = time.time() 680 | end_time = start_tume + timeout 681 | 682 | for a in self.ops: 683 | curr_time = time.time() 684 | remain_time = end_time - curr_time 685 | if remain_time < 0: 686 | raise Queue.Empty() 687 | 688 | ret, miss = a.get_result(remain_time) 689 | tmp_result.append(ret) 690 | missed_key += miss 691 | else: 692 | for a in self.ops: 693 | ret, miss = a.get_result() 694 | tmp_result.append(ret) 695 | missed_key += miss 696 | 697 | if self.cmd == 'bop mget': 698 | result = {} 699 | for a in tmp_result: 700 | result.update(a) 701 | 702 | else: # bop smget 703 | length = len(tmp_result) 704 | 705 | # empty 706 | if length <= 0: 707 | return [] 708 | 709 | # merge sort 710 | result = [] 711 | while True: 712 | # remove empty list 713 | while len(tmp_result[0]) == 0: 714 | tmp_result.pop(0) 715 | if len(tmp_result) == 0: # all done 716 | if self.result == None and self.invalid == True: 717 | raise ArcusNodeConnectionException('current async result is unavailable because Arcus node is disconnected now') 718 | missed_key.sort() 719 | self.result = result 720 | self.missed_key = missed_key; 721 | return self.result 722 | 723 | min = tmp_result[0][0] 724 | idx = 0 725 | for i in range(0, len(tmp_result)): 726 | if len(tmp_result[i]) and tmp_result[i][0] < min: 727 | min = tmp_result[i][0] 728 | idx = i 729 | 730 | result.append(tmp_result[idx].pop(0)) 731 | 732 | 733 | if self.result == None and self.invalid == True: 734 | raise ArcusNodeConnectionException('current async result is unavailable because Arcus node is disconnected 
now') 735 | missed_key.sort() 736 | self.result = result 737 | self.missed_key = missed_key 738 | return self.result 739 | 740 | 741 | class ArcusList: 742 | def __init__(self, arcus, key, cache_time=0): 743 | self.arcus = arcus 744 | self.key = key 745 | self.cache_time = cache_time 746 | 747 | if cache_time > 0: 748 | try: 749 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 750 | except Exception: 751 | self.cache = [] 752 | else: 753 | self.cache = None 754 | 755 | self.next_refresh = time.time() + cache_time 756 | 757 | def __len__(self): 758 | if self.cache != None: 759 | if time.time() >= self.next_refresh: 760 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 761 | self.next_refresh = time.time() + self.cache_time 762 | return len(self.cache) 763 | else: 764 | return len(self.arcus.lop_get(self.key, (0, -1)).get_result()) 765 | 766 | def __iter__(self): 767 | if self.cache != None: 768 | if time.time() >= self.next_refresh: 769 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 770 | self.next_refresh = time.time() + self.cache_time 771 | return iter(self.cache) 772 | else: 773 | return iter(self.arcus.lop_get(self.key, (0, -1)).get_result()) 774 | 775 | def __eq__(self, rhs): 776 | if self.cache != None: 777 | if time.time() >= self.next_refresh: 778 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 779 | self.next_refresh = time.time() + self.cache_time 780 | return self.cache == rhs 781 | else: 782 | return self.arcus.lop_get(self.key, (0, -1)).get_result() == rhs 783 | 784 | def __ne__(self, rhs): 785 | if self.cache != None: 786 | if time.time() >= self.next_refresh: 787 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 788 | self.next_refresh = time.time() + self.cache_time 789 | return self.cache != rhs 790 | else: 791 | return self.arcus.lop_get(self.key, (0, -1)).get_result() != rhs 792 | 793 | def __le__(self, rhs): 794 | if self.cache != None: 795 | if time.time() >= 
self.next_refresh: 796 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 797 | self.next_refresh = time.time() + self.cache_time 798 | return self.cache <= rhs 799 | else: 800 | return self.arcus.lop_get(self.key, (0, -1)).get_result() <= rhs 801 | 802 | def __lt__(self, rhs): 803 | if self.cache != None: 804 | if time.time() >= self.next_refresh: 805 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 806 | self.next_refresh = time.time() + self.cache_time 807 | return self.cache < rhs 808 | else: 809 | return self.arcus.lop_get(self.key, (0, -1)).get_result() < rhs 810 | 811 | def __ge__(self, rhs): 812 | if self.cache != None: 813 | if time.time() >= self.next_refresh: 814 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 815 | self.next_refresh = time.time() + self.cache_time 816 | return self.cache >= rhs 817 | else: 818 | return self.arcus.lop_get(self.key, (0, -1)).get_result() >= rhs 819 | 820 | def __gt__(self, rhs): 821 | if self.cache != None: 822 | if time.time() >= self.next_refresh: 823 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 824 | self.next_refresh = time.time() + self.cache_time 825 | return self.cache > rhs 826 | else: 827 | return self.arcus.lop_get(self.key, (0, -1)).get_result() > rhs 828 | 829 | def __getitem__(self, index): 830 | if self.cache != None: 831 | if time.time() >= self.next_refresh: 832 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 833 | self.next_refresh = time.time() + self.cache_time 834 | return self.cache[index] 835 | else: 836 | if isinstance(index, slice): 837 | start = index.start 838 | stop = index.stop 839 | if stop != None: 840 | stop -= 1 841 | 842 | if start == None: 843 | start = 0 844 | if stop == None: 845 | stop = -1 846 | 847 | try: 848 | return self.arcus.lop_get(self.key, (start, stop)).get_result() 849 | except Exception: 850 | return [] 851 | else: 852 | ret = self.arcus.lop_get(self.key, index).get_result() 853 | if 
len(ret) == 0: 854 | raise IndexError('lop index out of range') 855 | 856 | return ret[0] 857 | 858 | 859 | def __setitem__(self, index, value): 860 | raise ArcusListException('list set is not possible') 861 | 862 | def __delitem__(self, index): 863 | if self.cache != None: 864 | del self.cache[index] 865 | 866 | if isinstance(index, slice): 867 | start = index.start 868 | stop = index.stop 869 | if stop != None: 870 | stop -= 1 871 | 872 | if start == None: 873 | start = 0 874 | if stop == None: 875 | stop = -1 876 | return self.arcus.lop_delete(self.key, (start, stop)).get_result() 877 | else: 878 | return self.arcus.lop_delete(self.key, index).get_result() 879 | 880 | def insert(self, index, value): 881 | if self.cache != None: 882 | self.cache.insert(index, value) 883 | 884 | return self.arcus.lop_insert(self.key, index, value).get_result() 885 | 886 | def append(self, value): 887 | if self.cache != None: 888 | self.cache.append(value) 889 | 890 | return self.arcus.lop_insert(self.key, -1, value).get_result() 891 | 892 | def invalidate(self): 893 | if self.cache != None: 894 | try: 895 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 896 | except Exception: 897 | self.cache = [] 898 | 899 | self.next_refresh = time.time() + self.cache_time 900 | 901 | def __repr__(self): 902 | if self.cache != None: 903 | if time.time() >= self.next_refresh: 904 | self.cache = self.arcus.lop_get(self.key, (0, -1)).get_result() 905 | self.next_refresh = time.time() + self.cache_time 906 | return repr(self.cache) 907 | 908 | try: 909 | ret = self.arcus.lop_get(self.key, (0, -1)).get_result() 910 | except Exception: 911 | ret = [] # not found? 
912 | 913 | return repr(ret) 914 | 915 | 916 | 917 | 918 | class ArcusSet: 919 | def __init__(self, arcus, key, cache_time=0): 920 | self.arcus = arcus 921 | self.key = key 922 | self.cache_time = cache_time 923 | 924 | if cache_time > 0: 925 | try: 926 | self.cache = self.arcus.sop_get(self.key).get_result() 927 | except Exception: 928 | self.cache = set() 929 | else: 930 | self.cache = None 931 | 932 | self.next_refresh = time.time() + cache_time 933 | 934 | def __len__(self): 935 | if self.cache != None: 936 | if time.time() >= self.next_refresh: 937 | self.cache = self.arcus.sop_get(self.key).get_result() 938 | self.next_refresh = time.time() + self.cache_time 939 | return len(self.cache) 940 | else: 941 | return len(self.arcus.sop_get(self.key).get_result()) 942 | 943 | def __contains__(self, value): 944 | if self.cache != None and time.time() < self.next_refresh: 945 | return value in self.cache # do not fetch all for cache when time over 946 | 947 | return self.arcus.sop_exist(self.key, value).get_result() 948 | 949 | def __iter__(self): 950 | if self.cache != None: 951 | if time.time() >= self.next_refresh: 952 | self.cache = self.arcus.sop_get(self.key).get_result() 953 | self.next_refresh = time.time() + self.cache_time 954 | return iter(self.cache) 955 | else: 956 | return iter(self.arcus.sop_get(self.key).get_result()) 957 | 958 | def add(self, value): 959 | if self.cache != None: 960 | self.cache[value] = True 961 | 962 | return self.arcus.sop_insert(self.key, value).get_result() 963 | 964 | def invalidate(self): 965 | if self.cache != None: 966 | try: 967 | self.cache = self.arcus.sop_get(self.key).get_result() 968 | except Exception: 969 | self.cache = set() 970 | 971 | self.next_refresh = time.time() + self.cache_time 972 | 973 | def __repr__(self): 974 | if self.cache != None: 975 | if time.time() >= self.next_refresh: 976 | self.cache = self.arcus.sop_get(self.key).get_result() 977 | self.next_refresh = time.time() + self.cache_time 978 | return 
repr(self.cache) 979 | 980 | try: 981 | ret = self.arcus.sop_get(self.key).get_result() 982 | except Exception: 983 | ret = set() 984 | 985 | return repr(ret) 986 | 987 | 988 | 989 | 990 | -------------------------------------------------------------------------------- /arcus_cmd.py: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/python3 2 | 3 | # 4 | # arcus-python-client - Arcus python client drvier 5 | # Copyright 2014 NAVER Corp. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License") 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 
#

# Administration CLI for Arcus clusters: discovers cache nodes via ZooKeeper,
# then runs arcus/ssh commands, reports memory/connection stats, dumps start
# scripts, or shows prefix stats depending on the options given.

import sys,os,socket,re

from optparse import OptionParser
from collections import OrderedDict

import paramiko

from arcus_util import zookeeper
from arcus_util import arcus_node

from kazoo.client import KazooClient
import kazoo


# set below environment for dump_script
HOME_DIR=''
USER=''



def do_ssh_command(addr, command, tmout):
    """Run *command* on host *addr* over ssh and echo its stdout locally."""
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(addr, timeout=tmout)

    stdin, stdout, stderr = ssh.exec_command(command)
    for line in stdout.readlines():
        sys.stdout.write(line)
    ssh.close()


if __name__ == '__main__':
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage, version="%prog 1.0")
    parser.add_option('-f', '--file', dest='file', default='', help='zookeeper address lists file')
    parser.add_option('-a', '--address', dest='address', default='', help='zookeeper address')
    parser.add_option('-s', '--service', dest='service', default='', help='service code')
    parser.add_option('-c', '--command', dest='command', default='', help='arcus command')
    parser.add_option('-n', '--node', dest='node', default='', help='node address or ip')
    parser.add_option('-x', '--ssh_command', dest='ssh_command', default='', help='ssh command execution')
    parser.add_option('', '--ssh_command_file', dest='ssh_command_file', default='', help='ssh command execution from file')
    parser.add_option('-i', '--i', dest='info', default=False, help='memory, maxconns info', action='store_true')
    parser.add_option('', '--dump_script', dest='dump_script', default=False, help='dump start script', action='store_true')
    parser.add_option('', '--vpn_remap', dest='vpn_remap', default='', help='read ip remap file for vpn network')
    parser.add_option('', '--all_node', dest='all_node', default=False, help='select all node', action='store_true')
    parser.add_option('', '--all_server', dest='all_server', default=False, help='select all server', action='store_true')
    parser.add_option('-t', '--timeout', dest='timeout', default='200', help='arcus command timeout (msec)')
    parser.add_option('-p', '--prefix_stats', dest='prefix', help='show arcus prefix stats')

    (options, args) = parser.parse_args()

    # msec option -> float seconds for socket/ssh timeouts
    timeout = int(options.timeout) / 1000

    if options.file:
        fh = open(options.file)
        addresses = fh.readlines()
    else:
        addresses = [options.address]

    # optional ip -> ip translation table used when reaching nodes over a vpn
    remap = {}
    if options.vpn_remap:
        fh = open(options.vpn_remap)
        lines = fh.readlines()
        for line in lines:
            if line.strip() == '' or line[0] == '#':
                continue

            fr, to = line.split()
            remap[fr] = to

    lists = [] # nodes
    zoo_caches = [] # caches (for cloud brief report)

    for address in addresses:
        if address.strip() == '' or address[0] == '#':
            continue


        try:
            if len(remap) > 0:
                addr, port = address.split(':')
                ip = socket.gethostbyname(addr)
                if ip in remap:
                    print('## zookeeper ip remap %s -> %s for address %s' % (ip, remap[ip], addr))
                    address = '%s:%s' % (remap[ip], port)

            #print(address)
            zoo = zookeeper(address)

            # NOTE(review): `list` shadows the builtin; rename when convenient.
            list = []
            if options.service:
                list = zoo.get_arcus_node_of_code(options.service, options.node)
                if len(list) > 0:
                    print ('\n\n## Zookeeper address %s' % address)
            elif options.node:
                list = zoo.get_arcus_node_of_server(options.node)
                if len(list) > 0:
                    print ('\n\n## Zookeeper address %s' % address)
            elif options.all_node:
                cache_list = zoo.get_arcus_cache_list()
                for cache in cache_list:
                    list += zoo.get_arcus_node_of_code(cache, options.node)
            elif options.all_server:
                # collect one pseudo-node per distinct server ip
                cache_list = zoo.get_arcus_cache_list()
                ip_map = {}
                for cache in cache_list:
                    tmp = zoo.get_arcus_node_of_code(cache, options.node)
                    for t in tmp:
                        ip_map[t.ip] = True

                for k in ip_map:
                    list.append(arcus_node(k, '*'))

            else:
                # no selector: just list the service codes of this ensemble
                print ('\n\n## Zookeeper address %s' % address)
                cache = zoo.get_arcus_cache_list()
                print (cache)
                zoo_caches.append((zoo, cache))

            if options.dump_script: # record zookeeper address
                for node in list:
                    node.zk_addr = address


        except kazoo.exceptions.NoNodeError:
            # not found
            continue

        lists = lists + list


    lists.sort(key = lambda x: x.ip + ":" + x.port)
    for node in lists:
        if node.ip in remap:
            print('## vpn remap %s -> %s' % (node.ip, remap[node.ip]))
            node.ip = remap[node.ip]

        print(node)

    if options.ssh_command_file:
        fh = open(options.ssh_command_file)
        options.ssh_command = fh.read()

    if options.ssh_command:
        prev_ip = ''
        for node in lists:
            if prev_ip != node.ip: # run once per machine
                print ('## run ssh command, [%s] %s' % (node.ip, options.ssh_command))
                do_ssh_command(node.ip, options.ssh_command, timeout)
                prev_ip = node.ip

    if options.command:
        # run one arcus command against every selected node
        for node in lists:

            try:
                result = node.do_arcus_command(options.command, timeout)
                print ('%s\t\t%s - %s' % (node, options.command, result))
            except Exception as e:
                print ('%s\t\tFAILED!!' % (node))
                print(e)

    if options.info and (options.service or options.node):
        # per-node memory / connection report for one service or one server
        if options.node:
            print('===================================================================================')
            print ('[%s] system memory' % lists[0].ip)
            do_ssh_command(lists[0].ip, 'free', timeout) # run once
            print('-----------------------------------------------------------------------------------')


        re_limit = re.compile("STAT limit_maxbytes ([0-9]+)")
        re_bytes = re.compile("STAT bytes ([0-9]+)")
        re_curr_conn = re.compile("STAT curr_connections ([0-9]+)")
        re_maxconns = re.compile("maxconns ([0-9]+)")

        last_node = None

        grand_total_used = 0
        grand_total_limit = 0

        total_used = 0
        total_limit = 0
        for node in lists:
            try:
                if options.service and last_node != node.ip:
                    # server boundary: flush the per-server subtotal
                    if last_node != None:
                        print ('TOTAL MEM: (%d/%d) %f%%' % (total_used, total_limit, total_used/total_limit*100))
                        total_used = total_limit = 0

                    print('===================================================================================')
                    print ('[%s] system memory' % node.ip)
                    do_ssh_command(node.ip, 'free', timeout) # run every server
                    last_node = node.ip
                    print('-----------------------------------------------------------------------------------')

                result = node.do_arcus_command('stats', timeout)
                m_limit = re_limit.search(result)
                m_bytes = re_bytes.search(result)
                m_curr_conn = re_curr_conn.search(result)

                result = node.do_arcus_command('config maxconns', timeout)
                m_maxconns = re_maxconns.search(result)

                #if m_limit == None or m_bytes == None or m_maxconns == None or m_curr_conn == None: # 1.6 not support maxconns
                if m_limit == None or m_bytes == None or m_curr_conn == None:
                    print ('%s\t\tstats failed!!' % (node))
                    continue

                # bytes -> MB
                limit = int(m_limit.groups()[0]) / 1024 / 1024
                used = int(m_bytes.groups()[0]) / 1024 / 1024
                curr_conn = int(m_curr_conn.groups()[0])

                if m_maxconns == None:
                    maxconns = 10000
                else:
                    maxconns = int(m_maxconns.groups()[0])

                print ('%s\t\tMEM: (%d/%d) %f%%, CONN: (%d/%d)' % (node, used, limit, used/limit*100, curr_conn, maxconns))
                total_used += used
                total_limit += limit

                grand_total_used += used
                grand_total_limit += limit


            except Exception as e:
                print ('%s\t\tFAILED!!' % (node))
                print(e)
                continue


        print ('TOTAL MEM: (%d/%d) %f%%' % (total_used, total_limit, total_used/total_limit*100))

        if options.service:
            print ('GRAND TOTAL MEM: (%d/%d) %f%%' % (grand_total_used, grand_total_limit, grand_total_used/grand_total_limit*100))


    if options.info and not options.service and not options.node: # brief report per cloud
        grand_total_used = 0
        grand_total_limit = 0
        grand_total_instances = 0

        for item in zoo_caches:
            zoo = item[0]
            caches = item[1]

            print('===================================================================================')
            print('## ' + zoo.address)
            for cache in caches:

                lists = zoo.get_arcus_node_of_code(cache, '')

                re_limit = re.compile("STAT limit_maxbytes ([0-9]+)")
                re_bytes = re.compile("STAT bytes ([0-9]+)")
                re_curr_conn = re.compile("STAT curr_connections ([0-9]+)")
                re_maxconns = re.compile("maxconns ([0-9]+)")

                total_used = 0
                total_limit = 0
                for node in lists:
                    try:
                        if node.ip in remap:
                            print('## vpn remap %s -> %s' % (node.ip, remap[node.ip]))
                            node.ip = remap[node.ip]

                        result = node.do_arcus_command('stats', timeout)
                        m_limit = re_limit.search(result)
                        m_bytes = re_bytes.search(result)
                        m_curr_conn = re_curr_conn.search(result)

                        result = node.do_arcus_command('config maxconns', timeout)
                        m_maxconns = re_maxconns.search(result)

                        #if m_limit == None or m_bytes == None or m_maxconns == None or m_curr_conn == None: # 1.6 not support maxconns
                        if m_limit == None or m_bytes == None or m_curr_conn == None:
                            print ('%s\t\tstats failed!!' % (node))
                            continue

                        limit = int(m_limit.groups()[0]) / 1024 / 1024
                        used = int(m_bytes.groups()[0]) / 1024 / 1024
                        curr_conn = int(m_curr_conn.groups()[0])

                        if m_maxconns == None:
                            maxconns = 10000
                        else:
                            maxconns = int(m_maxconns.groups()[0])

                        total_used = total_used + used;
                        total_limit = total_limit + limit;

                    except Exception as e:
                        print ('%s\t\tFAILED!!' % (node))
                        print(e)
                        continue


                #print ('[%s] %d instances, (%d/%d) %f%%' % (cache, len(lists), total_used, total_limit, total_used/total_limit*100))
                print ('[%s] %d instances, %dM memory' % (cache, len(lists), total_limit))
                grand_total_limit += total_limit
                grand_total_used += total_used
                grand_total_instances += len(lists)


        #print ('TOTAL %d instances, (%d/%d)M memory' % (grand_total_instances, grand_total_used, grand_total_limit grand_total_used/grand_total_limit*100))
        print ('TOTAL %d instances, %dM memory' % (grand_total_instances, grand_total_limit))



    if options.dump_script:
        # write a start_mem_<code>.sh script reproducing each node's launch line
        re_limit = re.compile("STAT limit_maxbytes ([0-9]+)")
        re_bytes = re.compile("STAT bytes ([0-9]+)")
        re_curr_conn = re.compile("STAT curr_connections ([0-9]+)")
        re_maxconns = re.compile("maxconns ([0-9]+)")

        last_node = None

        total_used = 0
        total_limit = 0
        for node in lists:
            try:
                result = node.do_arcus_command('stats', timeout)
                m_limit = re_limit.search(result)
                m_bytes = re_bytes.search(result)
                m_curr_conn = re_curr_conn.search(result)

                result = node.do_arcus_command('config maxconns', timeout)
                m_maxconns = re_maxconns.search(result)

                #if m_limit == None or m_bytes == None or m_maxconns == None or m_curr_conn == None: # 1.6 not support maxconns
                if m_limit == None or m_bytes == None or m_curr_conn == None:
                    print ('%s\t\tstats failed!!' % (node))
                    continue

                limit = int(m_limit.groups()[0]) / 1024 / 1024
                used = int(m_bytes.groups()[0]) / 1024 / 1024
                curr_conn = int(m_curr_conn.groups()[0])

                if m_maxconns == None:
                    maxconns = 10000
                else:
                    maxconns = int(m_maxconns.groups()[0])

                total_used += used
                total_limit += limit

            except Exception as e:
                print ('%s\t\tFAILED!!' % (node))
                print(e)
                continue

            file_name = 'start_mem_%s.sh' % node.code
            # NOTE(review): open(..., 'w') truncates the file first, so
            # getsize() below is always 0 (shebang rewritten every time) and a
            # later node with the same code overwrites earlier lines — looks
            # like 'a' (append) was intended; confirm before changing.
            script_fh = open(file_name, 'w')
            if os.path.getsize(file_name) == 0:
                script_fh.write('#!/bin/bash\n')
                os.chmod(file_name, 0o755)

            start_script = '%s/bin/memcached -v -o 60 -r -m%d -R5 -p %s -d -c %d -U 0 -D: -b 8192 -u %s -t 6 -E %s/lib/default_engine.so -X %s/lib/syslog_logger.so -X %s/lib/ascii_scrub.so -z %s\n' % (HOME_DIR, limit, node.port, maxconns, USER, HOME_DIR, HOME_DIR, HOME_DIR, node.zk_addr)

            script_fh.write(start_script)

    if options.prefix:

        def print_stats(prefix, stats, is_total=False):
            # Print the PREFIX lines matching *prefix* ('all' = every prefix,
            # 'null' = the anonymous prefix); is_total suppresses 'time'.
            # NOTE(review): collect_stats() may return None on failure, which
            # would crash stats.items() here — confirm intended.
            result = ""
            printed_count = 0
            for current_prefix, prefix_stats in stats.items():
                if prefix == 'all' or current_prefix.startswith(prefix) or (current_prefix=='' and prefix=='null'):
                    printed_count += 1
                    heading = "PREFIX %-10s " % (current_prefix)
                    result += heading
                    for key, value in prefix_stats.items():
                        if key == 'time' and is_total:
                            continue
                        if key in ['tsz', 'lcs', 'scs', 'bcs', 'bps', 'pfs', 'gps']:
                            result += "\n" + " "*(len(heading))
                        if key in ['bps', 'pfs']:
                            result += " "*12
                        try:
                            result += "%s %7d "% (key, value)
                        except KeyError:
                            # Some version of arcus-memcached does not print 'inc' and 'dec'
                            pass
                    result += "\n"
            if printed_count > 0:
                print(result)
            else:
                print("(no result)")


        def merge_stats(total_stats, node_stats):
            # Accumulate one node's per-prefix counters into the running total.
            for prefix, stats in node_stats.items():
                if prefix not in total_stats:
                    total_stats[prefix] = OrderedDict()
                for key, value in node_stats[prefix].items():
                    total_stats[prefix][key] = total_stats[prefix].get(key, 0) + value

        def collect_stats(node, command):
            """
            Returns:
                { 'prefix1': OrderedDict[('key1', value1), ('key2', value2)...], ... }
            """
            try:
                result = node.do_arcus_command(command, timeout)
            except Exception as e:
                print('%s\t\tFAILED!!' % (node))
                print(e)
                return None

            node_stats = {}

            for line in result.splitlines():
                if not line.startswith("PREFIX"):
                    continue

                tokens = line.split()
                current_prefix = tokens[1]

                prefix_stats = OrderedDict()

                # PREFIX lines are "PREFIX <name> key value key value ..."
                for i in range(2, len(tokens), 2):
                    key = tokens[i]
                    value = int(tokens[i+1])
                    prefix_stats[key] = value

                node_stats[current_prefix] = prefix_stats

            return node_stats

        prefixes_total = {}
        detail_total = {}

        for node in lists:
            print(node)

            node_stats = collect_stats(node, 'stats prefixes')
            print_stats(options.prefix, node_stats)
            merge_stats(prefixes_total, node_stats)

            node_stats = collect_stats(node, 'stats detail dump')
            print_stats(options.prefix, node_stats)
            merge_stats(detail_total, node_stats)

        print('[Total]')
        print_stats(options.prefix, prefixes_total, is_total=True)
        print_stats(options.prefix, detail_total, is_total=True)
#
# arcus-python-client - Arcus python client driver
# Copyright 2014 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import sys
import socket
import time
import os
import re
import threading
from threading import Lock
import select
import queue  # used by ArcusMCWorker; previously only in scope if 'from arcus import *' leaked it

from arcus import *


# Some parts of Connection and ArcusMCNode came from the python memcache module
class Connection(object):
    """A blocking TCP connection to a single arcus/memcached node.

    Keeps an internal receive buffer so line-oriented reads (readline)
    and fixed-length reads (recv) can be mixed on one socket.
    """

    def __init__(self, host):
        # host is an 'ip:port' string
        ip, port = host.split(':')
        self.ip = ip
        self.port = int(port)
        self.address = (self.ip, self.port)

        self.socket = None
        self.buffer = b''

        self.connect()

    def connect(self):
        """(Re)connect to the node; returns the socket, or None on failure."""
        if self.socket:
            # was a bare 'disconnect()' -> NameError on reconnect
            self.disconnect()

        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.socket.connect(self.address)
        except socket.timeout:
            self.disconnect()
        except socket.error:
            self.disconnect()

        self.buffer = b''
        return self.socket

    def disconnect(self):
        if self.socket:
            self.socket.close()
            self.socket = None

    def disconnected(self):
        return self.socket is None

    def send_request(self, request):
        """Send one request (bytes, CRLF appended here)."""
        arcuslog(self, 'send_request: ', request + b'\r\n')
        self.socket.sendall(request + b'\r\n')

    def hasline(self):
        """True if a complete CRLF-terminated line is already buffered."""
        return self.buffer.find(b'\r\n') >= 0

    def readline(self):
        """Read one CRLF-terminated line; returns it without the CRLF."""
        buf = self.buffer

        while True:
            index = buf.find(b'\r\n')
            if index >= 0:
                break

            data = self.socket.recv(4096)
            arcuslog(self, 'sock recv: (%d): "' % len(data), data)

            # socket.recv returns b'' (never None) when the peer closes;
            # the old '== None' test could not detect a lost connection
            if not data:
                self.disconnect()
                raise ArcusNodeConnectionException('connection lost')

            buf += data

        self.buffer = buf[index + 2:]

        arcuslog(self, 'readline: ', buf[:index])
        return buf[:index]

    def recv(self, rlen):
        """Read exactly rlen bytes (caller accounts for any trailing CRLF)."""
        buf = self.buffer
        while len(buf) < rlen:
            foo = self.socket.recv(max(rlen - len(buf), 4096))

            # b'' means EOF; the old '== None' test never fired, so a dead
            # connection made this loop spin forever
            if not foo:
                raise ArcusNodeSocketException(
                    'Read %d bytes, expecting %d, read returned 0 length bytes'
                    % (len(buf), rlen))

            buf += foo
            arcuslog(self, 'sock recv: (%d): ' % len(foo), foo)

        self.buffer = buf[rlen:]
        arcuslog(self, 'recv: ', buf[:rlen])
        return buf[:rlen]


class ArcusMCNode:
    """One arcus node: owns a Connection plus a FIFO of in-flight operations."""

    worker = None
    shutdown = False

    def __init__(self, addr, name, transcoder, node_allocator):
        # mandatory fields
        self.addr = addr
        self.name = name
        self.in_use = False
        self.transcoder = transcoder

        self.handle = Connection(addr)
        self.ops = []        # operations awaiting a response, in send order
        self.lock = Lock()   # for ordering worker.q and ops

        self.node_allocator = node_allocator

    def __repr__(self):
        return '%s-%s' % (self.addr, self.name)

    def get_fileno(self):
        return self.handle.socket.fileno()

    def disconnect(self):
        # disconnect socket
        self.handle.disconnect()

        # clear existing operations; waiters observe an invalid result
        for op in self.ops:
            op.set_invalid()

        self.ops = []

    def disconnect_all(self):  # shutdown
        self.node_allocator.shutdown = True
        self.disconnect()

        # wake the worker thread so it can observe the shutdown flag
        self.node_allocator.worker.q.put(None)

    def process_request(self, request):
        if self.handle.disconnected():
            ret = self.handle.connect()
            if ret is not None:
                # re-register if node connection is available
                self.node_allocator.worker.register_node(self)

        self.handle.send_request(request)

    ##########################################################################################
    ### commands
    ##########################################################################################
    def get(self, key):
        return self._get('get', key)

    def gets(self, key):
        return self._get('gets', key)

    def set(self, key, val, exptime=0):
        return self._set("set", key, val, exptime)

    def cas(self, key, val, cas_id, exptime=0):
        # _cas expects (cmd, key, ...); the arguments used to be swapped,
        # sending the key as the command name and 'cas' as the key
        return self._cas('cas', key, val, cas_id, exptime)

    def incr(self, key, value=1):
        return self._incr_decr("incr", key, value)

    def decr(self, key, value=1):
        return self._incr_decr("decr", key, value)

    def add(self, key, val, exptime=0):
        return self._set("add", key, val, exptime)

    def append(self, key, val, exptime=0):
        return self._set("append", key, val, exptime)

    def prepend(self, key, val, exptime=0):
        return self._set("prepend", key, val, exptime)

    def replace(self, key, val, exptime=0):
        return self._set("replace", key, val, exptime)

    def delete(self, key):
        full_cmd = "delete %s" % key
        return self.add_op('delete', bytes(full_cmd, 'utf-8'), self._recv_delete)

    def flush_all(self):
        full_cmd = b'flush_all'
        return self.add_op('flush_all', full_cmd, self._recv_ok)

    def get_stats(self, stat_args=None):
        if stat_args is None:
            full_cmd = b'stats'
        else:
            full_cmd = bytes('stats ' + stat_args, 'utf-8')

        # used to drop the op on the floor and implicitly return None
        return self.add_op('stats', full_cmd, self._recv_stat)

    def lop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('lop create', key, flags, exptime, noreply, attr)

    def lop_insert(self, key, index, value, noreply=False, pipe=False, attr=None):
        return self._coll_set('lop insert', key, index, value, noreply, pipe, attr)

    def lop_delete(self, key, range, drop=False, noreply=False, pipe=False):
        option = ''
        if drop == True:
            option += 'drop'

        if noreply == True:
            option += ' noreply'

        if pipe == True:
            assert noreply == False
            option += ' pipe'

        # range may be a (from, to) tuple or a single index
        if isinstance(range, tuple):
            full_cmd = bytes('lop delete %s %d..%d %s' % (key, range[0], range[1], option), 'utf-8')
        else:
            full_cmd = bytes('lop delete %s %d %s' % (key, range, option), 'utf-8')

        return self.add_op('lop delete', full_cmd, self._recv_delete, noreply or pipe)

    def lop_get(self, key, range, delete=False, drop=False):
        return self._coll_get('lop get', key, range, self._recv_lop_get, delete, drop)

    def sop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('sop create', key, flags, exptime, noreply, attr)

    def sop_insert(self, key, value, noreply=False, pipe=False, attr=None):
        return self._coll_set('sop insert', key, None, value, noreply, pipe, attr)

    def sop_get(self, key, count=0, delete=False, drop=False):
        return self._coll_get('sop get', key, count, self._recv_sop_get, delete, drop)

    def sop_delete(self, key, val, drop=False, noreply=False, pipe=False):
        flags, length, value = self.transcoder.encode(val)

        option = '%d' % length
        if drop == True:
            option += ' drop'  # was 'drop' glued to the length ('4drop')

        if noreply == True:
            option += ' noreply'

        if pipe == True:
            assert noreply == False
            option += ' pipe'

        option += '\r\n'

        full_cmd = bytes('sop delete %s %s' % (key, option), 'utf-8') + value
        return self.add_op('sop delete', full_cmd, self._recv_delete, noreply or pipe)

    def sop_exist(self, key, val, pipe=False):
        flags, length, value = self.transcoder.encode(val)

        option = '%d' % length
        if pipe == True:
            # removed 'assert noreply == False': sop_exist has no noreply
            # parameter, so the assert raised NameError whenever pipe was used
            option += ' pipe'

        option += '\r\n'

        full_cmd = bytes('sop exist %s %s' % (key, option), 'utf-8') + value
        return self.add_op('sop exist', full_cmd, self._recv_exist, pipe)

    def bop_create(self, key, flags, exptime=0, noreply=False, attr=None):
        return self._coll_create('bop create', key, flags, exptime, noreply, attr)

    def bop_insert(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr=None):
        return self._coll_set('bop insert', key, None, value, noreply, pipe, attr, bkey, eflag)

    def bop_upsert(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr=None):
        return self._coll_set('bop upsert', key, None, value, noreply, pipe, attr, bkey, eflag)

    def bop_update(self, key, bkey, value, eflag=None, noreply=False, pipe=False, attr=None):
        return self._coll_set('bop update', key, None, value, noreply, pipe, attr, bkey, eflag)

    def bop_delete(self, key, range, filter=None, count=None, drop=False, noreply=False, pipe=False):
        option = ''

        if filter is not None:
            option += filter.get_expr() + ' '

        if count is not None:
            option += '%d ' % count

        if drop == True:
            option += 'drop'

        if noreply == True:
            option += ' noreply'

        if pipe == True:
            assert noreply == False
            option += ' pipe'

        # bkeys are either plain integers or '0x...' hex strings
        if isinstance(range, tuple):
            if isinstance(range[0], str):
                if range[0][:2] != '0x' or range[1][:2] != '0x':
                    raise CollectionHexFormat()
                full_cmd = bytes('bop delete %s %s..%s %s' % (key, range[0], range[1], option), 'utf-8')
            else:
                full_cmd = bytes('bop delete %s %d..%d %s' % (key, range[0], range[1], option), 'utf-8')
        else:
            if isinstance(range, str):
                if range[:2] != '0x':
                    raise CollectionHexFormat()
                full_cmd = bytes('bop delete %s %s %s' % (key, range, option), 'utf-8')
            else:
                full_cmd = bytes('bop delete %s %d %s' % (key, range, option), 'utf-8')

        return self.add_op('bop delete', full_cmd, self._recv_delete, noreply or pipe)

    def bop_get(self, key, range, filter=None, delete=False, drop=False):
        return self._coll_get('bop get', key, range, self._recv_bop_get, delete, drop, filter=filter)

    def bop_mget(self, key_list, range, filter=None, offset=None, count=50):
        return self._coll_mget('bop mget', key_list, range, filter, offset, count)

    def bop_smget(self, key_list, range, filter=None, offset=None, count=2000):
        return self._coll_mget('bop smget', key_list, range, filter, offset, count)

    def bop_count(self, key, range, filter):
        return self._coll_get('bop count', key, range, self._recv_bop_get, filter=filter)

    def bop_incr(self, key, bkey, value, noreply=False, pipe=False):
        return self._bop_incrdecr('bop incr', key, bkey, value, noreply, pipe)

    def bop_decr(self, key, bkey, value, noreply=False, pipe=False):
        return self._bop_incrdecr('bop decr', key, bkey, value, noreply, pipe)
355 | ### Queue senders 356 | ########################################################################################## 357 | def add_op(self, cmd, full_cmd, callback, noreply = False): 358 | op = ArcusOperation(self, full_cmd, callback) 359 | arcuslog(self, 'add operation %s(%s:%s) to %s' % (full_cmd, callback, hex(id(op)), self)) 360 | 361 | 362 | if noreply: # or pipe 363 | # don't need to receive response, set_result now 364 | self.node_allocator.worker.q.put(op) 365 | op.set_result(True) 366 | else: 367 | self.lock.acquire() 368 | self.node_allocator.worker.q.put(op) 369 | self.ops.append(op) 370 | self.lock.release() 371 | 372 | return op 373 | 374 | def _get(self, cmd, key): 375 | full_cmd = bytes("%s %s" % (cmd, key), 'utf-8') 376 | if cmd == 'gets': 377 | callback = self._recv_cas_value 378 | else: 379 | callback = self._recv_value 380 | 381 | op = self.add_op(cmd, full_cmd, callback) 382 | return op 383 | 384 | def _set(self, cmd, key, val, exptime=0): 385 | flags, len, value = self.transcoder.encode(val) 386 | if flags == None: 387 | return(0) 388 | 389 | full_cmd = bytes("%s %s %d %d %d\r\n" % (cmd, key, flags, exptime, len), 'utf-8') 390 | full_cmd += value 391 | 392 | op = self.add_op(cmd, full_cmd, self._recv_set) 393 | return op 394 | 395 | def _cas(self, cmd, key, val, cas_id, exptime=0): 396 | flags, len, value = self.transcoder.encode(val) 397 | if flags == None: 398 | return(0) 399 | 400 | full_cmd = bytes("%s %s %d %d %d %d\r\n" % (cmd, key, flags, exptime, len, cas_id), 'utf-8') 401 | full_cmd += value 402 | 403 | op = self.add_op(cmd, full_cmd, self._recv_set) 404 | return op 405 | 406 | def _incr_decr(self, cmd, key, value): 407 | full_cmd = "%s %s %d" % (cmd, key, value) 408 | 409 | op = self.add_op(cmd, bytes(full_cmd, 'utf-8'), self._recv_set) 410 | return op 411 | 412 | def _coll_create(self, cmd, key, flags, exptime=0, noreply=False, attr=None): 413 | if attr == None: 414 | attr = {} 415 | 416 | # default value 417 | if 'maxcount' not 
in attr: 418 | attr['maxcount'] = 4000 419 | if 'ovflaction' not in attr: 420 | attr['ovflaction'] = 'tail_trim' 421 | if 'readable' not in attr: 422 | attr['readable'] = True 423 | 424 | option = '%d %d %d' % (flags, exptime, attr['maxcount']) 425 | if attr['ovflaction'] != 'tail_trim': 426 | option += ' ' + attr['ovflaction'] 427 | if attr['readable'] == False: 428 | option += ' unreadable' 429 | 430 | if noreply == True: 431 | option += ' noreply' 432 | 433 | full_cmd = bytes('%s %s %s' % (cmd, key, option), 'utf-8') 434 | return self.add_op(cmd, full_cmd, self._recv_coll_create, noreply) 435 | 436 | def _bop_incrdecr(self, cmd, key, bkey, val, noreply=False, pipe=False): 437 | if isinstance(val, int): 438 | value = '%d' % val 439 | else: 440 | value = val 441 | 442 | if isinstance(bkey, int): 443 | bkey_str = '%d' % bkey 444 | else: 445 | if bkey[:2] != '0x': 446 | raise CollectionHexFormat() 447 | bkey_str = '%s' % bkey 448 | 449 | option = '%s %s' % (bkey_str, value) 450 | 451 | if noreply == True: 452 | option += ' noreply' 453 | 454 | if pipe == True: 455 | assert noreply == False 456 | option += ' pipe' 457 | 458 | full_cmd = bytes('%s %s %s' % (cmd, key, option), 'utf-8') 459 | return self.add_op(cmd, full_cmd, self._recv_set, noreply or pipe) 460 | 461 | def _coll_set(self, cmd, key, index, val, noreply=False, pipe=False, attr=None, bkey=None, eflag=None): 462 | flags, len, value = self.transcoder.encode(val) 463 | 464 | if bkey != None: # bop 465 | assert index == None 466 | 467 | if isinstance(bkey, int): 468 | bkey_str = '%d' % bkey 469 | else: 470 | if bkey[:2] != '0x': 471 | raise CollectionHexFormat() 472 | bkey_str = '%s' % bkey 473 | 474 | if eflag != None: 475 | if eflag[:2] != '0x': 476 | raise CollectionHexFormat() 477 | option = '%s %s %d' % (bkey_str, eflag, len) 478 | else: 479 | option = '%s %d' % (bkey_str, len) 480 | elif index != None: # lop 481 | option = '%d %d' % (index, len) 482 | else: # sop 483 | option = '%d' % (len) 484 | 485 | 
if attr != None: 486 | # default mandatory value 487 | if 'flags' not in attr: 488 | attr['flags'] = 0 489 | if 'exptime' not in attr: 490 | attr['exptime'] = 0 491 | if 'maxcount' not in attr: 492 | attr['maxcount'] = 4000 493 | 494 | option += ' create %d %d %d' % (attr['flags'], attr['exptime'], attr['maxcount']) 495 | if 'ovflaction' in attr: 496 | option += ' ' + attr['ovflaction'] 497 | if 'readable' in attr and attr['readable'] == False: 498 | option += ' unreadable' 499 | 500 | if noreply == True: 501 | option += ' noreply' 502 | 503 | if pipe == True: 504 | assert noreply == False 505 | option += ' pipe' 506 | 507 | option += '\r\n' 508 | 509 | full_cmd = bytes('%s %s %s' % (cmd, key, option), 'utf-8') + value 510 | return self.add_op(cmd, full_cmd, self._recv_coll_set, noreply or pipe) 511 | 512 | def _coll_get(self, cmd, key, range, callback, delete=None, drop=None, filter=None): 513 | option = '' 514 | type = cmd[:3] 515 | 516 | if filter != None: 517 | option += filter.get_expr() + ' ' 518 | 519 | if delete == True: 520 | option += 'delete' 521 | 522 | if drop == True: 523 | assert delete == False 524 | option += 'drop' 525 | 526 | if isinstance(range, tuple): 527 | if type == 'bop' and isinstance(range[0], str): 528 | if range[0][:2] != '0x' or range[1][:2] != '0x': 529 | raise CollectionHexFormat() 530 | 531 | full_cmd = bytes("%s %s %s..%s %s" % (cmd, key, range[0], range[1], option), 'utf-8') 532 | return self.add_op(cmd, full_cmd, callback); 533 | else: 534 | full_cmd = bytes("%s %s %d..%d %s" % (cmd, key, range[0], range[1], option), 'utf-8') 535 | return self.add_op(cmd, full_cmd, callback); 536 | else: 537 | if type == 'bop' and isinstance(range, str): 538 | if range[:2] != '0x': 539 | raise CollectionHexFormat() 540 | 541 | full_cmd = bytes("%s %s %s %s" % (cmd, key, range, option), 'utf-8') 542 | return self.add_op(cmd, full_cmd, callback); 543 | else: 544 | full_cmd = bytes("%s %s %d %s" % (cmd, key, range, option), 'utf-8') 545 | return 
self.add_op(cmd, full_cmd, callback); 546 | 547 | 548 | def _coll_mget(self, org_cmd, key_list, range, filter, offset, count): 549 | 550 | comma_sep_keys = '' 551 | for key in key_list: 552 | if comma_sep_keys != '': 553 | comma_sep_keys += ',' 554 | comma_sep_keys += key 555 | 556 | cmd = '%s %d %d ' % (org_cmd, len(comma_sep_keys), len(key_list)) 557 | 558 | if isinstance(range, tuple): 559 | if isinstance(range[0], str): 560 | if range[0][:2] != '0x' or range[1][:2] != '0x': 561 | raise CollectionHexFormat() 562 | 563 | cmd += '%s..%s' % range 564 | else: 565 | cmd += '%d..%d' % range 566 | else: 567 | if isinstance(range, str): 568 | if range[:2] != '0x': 569 | raise CollectionHexFormat() 570 | 571 | cmd += '%s' % range 572 | else: 573 | cmd += '%d' % range 574 | 575 | if filter != None: 576 | cmd += ' ' + filter.get_expr() 577 | 578 | if offset != None: 579 | cmd += ' %d' % offset 580 | cmd += ' %d' % count 581 | 582 | cmd += '\r\n%s' % comma_sep_keys 583 | cmd = bytes(cmd, 'utf-8') 584 | 585 | if org_cmd == 'bop mget': 586 | reply = self._recv_mget 587 | else: 588 | reply = self._recv_smget 589 | 590 | op = self.add_op(org_cmd, cmd, reply) 591 | 592 | return op 593 | 594 | 595 | 596 | ########################################################################################## 597 | ### recievers 598 | ########################################################################################## 599 | def do_op(self): 600 | self.lock.acquire() 601 | if len(self.ops) <= 0: 602 | arcuslog('ops empty (%s)' % self.addr) 603 | self.lock.release() 604 | return 605 | 606 | op = self.ops.pop(0) 607 | self.lock.release() 608 | 609 | try: 610 | ret = op.callback() 611 | except Exception as e: 612 | arcuslog('do op failed: %s' % str(e)) 613 | ret = e 614 | 615 | op.set_result(ret) 616 | 617 | while self.handle.hasline(): # remaining jobs 618 | self.lock.acquire() 619 | op = self.ops.pop(0) 620 | self.lock.release() 621 | 622 | try: 623 | ret = op.callback() 624 | except 
Exception as e: 625 | arcuslog('do op failed: %s' % str(e)) 626 | ret = e 627 | 628 | op.set_result(ret) 629 | 630 | def _recv_ok(self): 631 | line = self.handle.readline() 632 | if line == b'OK': 633 | return True 634 | 635 | return False 636 | 637 | def _recv_stat(self): 638 | data = {} 639 | while True: 640 | line = handle.readline() 641 | if line[:3] == b'END' or line is None: 642 | break 643 | 644 | dummy, k, v = line.split(' ', 2) 645 | data[k] = v 646 | 647 | return data 648 | 649 | def _recv_set(self): 650 | line = self.handle.readline() 651 | if line[0:8] == b'RESPONSE': 652 | dummy, count = line.split() 653 | 654 | ret = [] 655 | for i in range(0, int(count)): 656 | line = self.handle.readline() 657 | ret.append(line.decode('utf-8')) 658 | 659 | line = self.handle.readline() # b'END' 660 | 661 | return ret 662 | 663 | 664 | if line == b'STORED': 665 | return True 666 | 667 | if line == b'NOT_FOUND': 668 | return False 669 | 670 | if line == b'TYPE_MISMATCH': 671 | raise CollectionType() 672 | 673 | if line == b'OVERFLOWED': 674 | raise CollectionOverflow() 675 | 676 | if line == b'OUT_OF_RANGE': 677 | raise CollectionIndex() 678 | 679 | if line.isdigit(): # incr, decr, bop incr, bop decr 680 | return int(line) 681 | 682 | return False 683 | 684 | def _recv_delete(self): 685 | line = self.handle.readline() 686 | if line[0:8] == b'RESPONSE': 687 | dummy, count = line.split() 688 | 689 | ret = [] 690 | for i in range(0, int(count)): 691 | line = self.handle.readline() 692 | ret.append(line.decode('utf-8')) 693 | 694 | line = self.handle.readline() # b'END' 695 | 696 | return ret 697 | 698 | if line == b'DELETED': 699 | return True 700 | 701 | if line == b'NOT_FOUND': 702 | return True # True ?? 
(or exception) 703 | 704 | if line == b'TYPE_MISMATCH': 705 | raise CollectionType() 706 | 707 | if line == b'OVERFLOWED': 708 | raise CollectionOverflow() 709 | 710 | if line == b'OUT_OF_RANGE' or line == b'NOT_FOUND_ELEMENT': 711 | raise CollectionIndex() 712 | 713 | return False 714 | 715 | def _recv_cas_value(self): 716 | line = self.handle.readline() 717 | if line[:5] != b'VALUE': 718 | return None 719 | 720 | resp, rkey, flags, len, cas_id = line.split() 721 | flags = int(flags) 722 | rlen = int(len) 723 | val = self._decode_value(flags, rlen) 724 | return (val, cas_id) 725 | 726 | def _recv_value(self): 727 | line = self.handle.readline() 728 | if line[:5] != b'VALUE': 729 | return None 730 | 731 | resp, rkey, flags, len = line.split() 732 | flags = int(flags) 733 | rlen = int(len) 734 | return self._decode_value(flags, rlen) 735 | 736 | def _recv_coll_create(self): 737 | line = self.handle.readline() 738 | if line == b'CREATED': 739 | return True 740 | 741 | if line == b'EXISTS': 742 | raise CollectionExist() 743 | 744 | return False 745 | 746 | def _recv_coll_set(self): 747 | line = self.handle.readline() 748 | if line[0:8] == b'RESPONSE': 749 | dummy, count = line.split() 750 | 751 | ret = [] 752 | for i in range(0, int(count)): 753 | line = self.handle.readline() 754 | ret.append(line.decode('utf-8')) 755 | 756 | line = self.handle.readline() # b'END' 757 | 758 | return ret 759 | 760 | if line == b'STORED': 761 | return True 762 | 763 | if line == b'NOT_FOUND': 764 | return False 765 | 766 | if line == b'TYPE_MISMATCH': 767 | raise CollectionType() 768 | 769 | if line == b'OVERFLOWED': 770 | raise CollectionOverflow() 771 | 772 | if line == b'OUT_OF_RANGE': 773 | raise CollectionIndex() 774 | 775 | return False 776 | 777 | def _recv_lop_get(self): 778 | ret, value = self._decode_collection('lop') 779 | if ret == b'NOT_FOUND': 780 | return None 781 | 782 | if ret == b'TYPE_MISMATCH': 783 | raise CollectionType() 784 | 785 | 786 | if ret == b'UNREADABLE': 
787 | raise CollectionUnreadable() 788 | 789 | if ret == b'OUT_OF_RANGE' or ret == b'NOT_FOUND_ELEMENT': 790 | value = [] 791 | 792 | return value 793 | 794 | def _recv_sop_get(self): 795 | ret, value = self._decode_collection('sop') 796 | if ret == b'NOT_FOUND': 797 | return None 798 | 799 | if ret == b'TYPE_MISMATCH': 800 | raise CollectionType() 801 | 802 | if ret == b'UNREADABLE': 803 | raise CollectionUnreadable() 804 | 805 | if ret == b'OUT_OF_RANGE' or ret == b'NOT_FOUND_ELEMENT': 806 | value = set() 807 | 808 | return value 809 | 810 | def _recv_exist(self): 811 | line = self.handle.readline() 812 | return line == b'EXIST' 813 | 814 | def _recv_bop_get(self): 815 | ret, value = self._decode_collection('bop') 816 | if ret == b'NOT_FOUND': 817 | return None 818 | 819 | if ret == b'TYPE_MISMATCH': 820 | raise CollectionType() 821 | 822 | if ret == b'UNREADABLE': 823 | raise CollectionUnreadable() 824 | 825 | if ret == b'OUT_OF_RANGE' or ret == b'NOT_FOUND_ELEMENT': 826 | value = {} 827 | 828 | return value 829 | 830 | def _recv_mget(self): 831 | ret, value, miss = self._decode_bop_mget() 832 | if ret == b'NOT_FOUND': 833 | return None 834 | 835 | if ret == b'TYPE_MISMATCH': 836 | raise CollectionType() 837 | 838 | if ret == b'UNREADABLE': 839 | raise CollectionUnreadable() 840 | 841 | if ret == b'OUT_OF_RANGE' or ret == b'NOT_FOUND_ELEMENT': 842 | raise CollectionIndex() 843 | 844 | return (value, miss) 845 | 846 | def _recv_smget(self): 847 | ret, value, miss = self._decode_bop_smget() 848 | if ret == b'NOT_FOUND': 849 | return None 850 | 851 | if ret == b'TYPE_MISMATCH': 852 | raise CollectionType() 853 | 854 | if ret == b'UNREADABLE': 855 | raise CollectionUnreadable() 856 | 857 | if ret == b'OUT_OF_RANGE' or ret == b'NOT_FOUND_ELEMENT': 858 | raise CollectionIndex() 859 | 860 | return (value, miss) 861 | 862 | 863 | 864 | 865 | ########################################################################################## 866 | ### decoders 867 | 
########################################################################################## 868 | def _decode_value(self, flags, rlen): 869 | rlen += 2 # include \r\n 870 | buf = self.handle.recv(rlen) 871 | if len(buf) != rlen: 872 | raise ArcusNodeSocketException("received %d bytes when expecting %d" % (len(buf), rlen)) 873 | 874 | if len(buf) == rlen: 875 | buf = buf[:-2] # strip \r\n 876 | 877 | val = self.transcoder.decode(flags, buf) 878 | 879 | line = self.handle.readline() 880 | if line != b'END': 881 | raise ArcusProtocolException('invalid response expect END but recv: %s' % line) 882 | 883 | return val 884 | 885 | def _decode_collection(self, type): 886 | if type == 'bop': 887 | values = {} 888 | elif type == 'sop': 889 | values = set() 890 | else: # lop 891 | values = [] 892 | 893 | while True: 894 | line = self.handle.readline() 895 | if line[:5] != b'VALUE' and line[:5] != b'COUNT': 896 | return (line, values) 897 | 898 | if line[:5] == b'VALUE': 899 | resp, flags, count = line.split() 900 | flags = int(flags) 901 | count = int(count) 902 | elif line[:5] == b'COUNT': 903 | cmd, count = line.split(b'=') 904 | return (cmd, int(count)) 905 | 906 | for i in range(0, count): 907 | line = self.handle.readline() 908 | if type == 'bop': # bop get 909 | bkey, eflag, length_buf = line.split(b' ', 2) 910 | 911 | if eflag.isdigit(): # eflag not exist 912 | length = eflag 913 | eflag = None 914 | buf = length_buf 915 | else: 916 | eflag = eflag.decode('utf-8') 917 | length, buf = length_buf.split(b' ', 1) 918 | 919 | if bkey.isdigit(): 920 | bkey = int(bkey) 921 | else: 922 | bkey = bkey.decode('utf-8') 923 | 924 | val = self.transcoder.decode(flags, buf) 925 | values[bkey] = (eflag, val) 926 | elif type == 'lop': 927 | length, buf = line.split(b' ', 1) 928 | val = self.transcoder.decode(flags, buf) 929 | values.append(val) 930 | else: # sop 931 | length, buf = line.split(b' ', 1) 932 | val = self.transcoder.decode(flags, buf) 933 | values.add(val) 934 | 935 | 
return None 936 | 937 | 938 | def _decode_bop_mget(self): 939 | values = {} 940 | missed_keys = [] 941 | 942 | while True: 943 | line = self.handle.readline() 944 | if line[:11] == b'MISSED_KEYS': 945 | dummy, count = line.split(b' ') 946 | count = int(count) 947 | for i in range(0, count): 948 | line = self.handle.readline() 949 | missed_keys.append(line.decode('utf-8')) 950 | 951 | continue 952 | 953 | if line[:5] != b'VALUE' and line[:5] != b'COUNT': 954 | return (line, values, missed_keys) 955 | 956 | ret = line.split() 957 | key = ret[1].decode('utf-8') 958 | status = ret[2] 959 | 960 | if status == b'NOT_FOUND': 961 | missed_keys.append(key) 962 | continue 963 | 964 | count = 0 965 | if len(ret) == 5: 966 | flags = int(ret[3]) 967 | count = int(ret[4]) 968 | 969 | val = {} 970 | for i in range(0, count): 971 | line = self.handle.readline() 972 | element, bkey, eflag, length_buf = line.split(b' ', 3) 973 | 974 | if eflag.isdigit(): # eflag not exist 975 | length = eflag 976 | eflag = None 977 | buf = length_buf 978 | else: 979 | eflag = eflag.decode('utf-8') 980 | length, buf = length_buf.split(b' ', 1) 981 | 982 | if bkey.isdigit(): 983 | bkey = int(bkey) 984 | else: 985 | bkey = bkey.decode('utf-8') 986 | 987 | ret = self.transcoder.decode(flags, buf) 988 | val[bkey] = (eflag, ret) 989 | 990 | values[key] = val 991 | 992 | return None 993 | 994 | 995 | 996 | def _decode_bop_smget(self): 997 | values = [] 998 | missed_keys = [] 999 | 1000 | while True: 1001 | line = self.handle.readline() 1002 | if line[:11] == b'MISSED_KEYS': 1003 | dummy, count = line.split(b' ') 1004 | count = int(count) 1005 | for i in range(0, count): 1006 | line = self.handle.readline() 1007 | missed_keys.append(line.decode('utf-8')) 1008 | 1009 | continue 1010 | 1011 | if line[:5] != b'VALUE' and line[:5] != b'COUNT': 1012 | return (line, values, missed_keys) 1013 | 1014 | ret = line.split() 1015 | count = int(ret[1]) 1016 | 1017 | for i in range(0, count): 1018 | line = 
self.handle.readline() 1019 | key, flags, bkey, eflag, length_buf = line.split(b' ', 4) 1020 | 1021 | if eflag.isdigit(): # eflag not exist 1022 | length = eflag 1023 | eflag = None 1024 | buf = length_buf 1025 | else: 1026 | eflag = eflag.decode('utf-8') 1027 | length, buf = length_buf.split(b' ', 1) 1028 | 1029 | key = key.decode('utf-8') 1030 | 1031 | if bkey.isdigit(): 1032 | bkey = int(bkey) 1033 | else: 1034 | bkey = bkey.decode('utf-8') 1035 | 1036 | val = self.transcoder.decode(int(flags), buf) 1037 | values.append((bkey, key, eflag, val)) 1038 | 1039 | return None 1040 | 1041 | 1042 | 1043 | 1044 | 1045 | class EflagFilter: 1046 | def __init__(self, expr = None): 1047 | self.lhs_offset = 0 1048 | self.bit_op = None 1049 | self.bit_rhs = None 1050 | self.comp_op = None 1051 | self.comp_rhs = None 1052 | 1053 | if expr != None: 1054 | self._parse(expr) 1055 | 1056 | def get_expr(self): 1057 | expr = '' 1058 | if self.lhs_offset != None: 1059 | expr += '%d' % self.lhs_offset 1060 | 1061 | if self.bit_op and self.bit_rhs: 1062 | expr += ' %s %s' % (self.bit_op, self.bit_rhs) 1063 | 1064 | if self.comp_op and self.comp_rhs: 1065 | expr += ' %s %s' % (self.comp_op, self.comp_rhs) 1066 | 1067 | return expr 1068 | 1069 | def _parse(self, expr): 1070 | re_expr = re.compile("EFLAG[ ]*(\[[ ]*([0-9]*)[ ]*\:[ ]*\])?[ ]*(([\&\|\^])[ ]*(0x[0-9a-fA-F]+))?[ ]*(==|\!=|<|>|<=|>=)[ ]*(0x[0-9a-fA-F]+)") 1071 | 1072 | match = re_expr.match(expr) 1073 | if match == None: 1074 | raise FilterInvalid() 1075 | 1076 | # ( dummy, lhs_offset, dummy, bit_op, bit_rhs, comp_op, comp_rhs ) 1077 | g = match.groups() 1078 | dummy_1, self.lhs_offset, dummy_2, self.bit_op, self.bit_rhs, self.comp_op, self.comp_rhs = g 1079 | 1080 | if self.lhs_offset == None: 1081 | self.lhs_offset = 0 1082 | else: 1083 | self.lhs_offset = int(self.lhs_offset) 1084 | 1085 | if self.comp_op == '==': 1086 | self.comp_op = 'EQ' 1087 | elif self.comp_op == '!=': 1088 | self.comp_op = 'NE' 1089 | elif self.comp_op 
== '<': 1090 | self.comp_op = 'LT' 1091 | elif self.comp_op == '<=': 1092 | self.comp_op = 'LE' 1093 | elif self.comp_op == '>': 1094 | self.comp_op = 'GT' 1095 | elif self.comp_op == '>=': 1096 | self.comp_op = 'GE' 1097 | 1098 | 1099 | 1100 | class ArcusMCPoll(threading.Thread): 1101 | def __init__(self, node_allocator): 1102 | threading.Thread.__init__(self) 1103 | self.epoll = select.epoll() 1104 | self.sock_node_map = {} 1105 | self.node_allocator = node_allocator 1106 | 1107 | def run(self): 1108 | arcuslog(self, 'epoll start') 1109 | 1110 | while True: 1111 | events = self.epoll.poll(2) 1112 | 1113 | if self.node_allocator.shutdown == True: 1114 | arcuslog(self, 'epoll out') 1115 | return 1116 | 1117 | for fileno, event in events: 1118 | if event & select.EPOLLIN: 1119 | node = self.sock_node_map[fileno] 1120 | node.do_op() 1121 | 1122 | if event & select.EPOLLHUP: 1123 | print('EPOLL HUP') 1124 | self.epoll.unregister(fileno) 1125 | node = self.sock_node_map[fileno] 1126 | node.disconnect() 1127 | del self.sock_node_map[fileno] 1128 | 1129 | 1130 | def register_node(self, node): 1131 | self.epoll.register(node.get_fileno(), select.EPOLLIN | select.EPOLLHUP) 1132 | 1133 | arcuslog(self, 'regist node: ', node.get_fileno(), node) 1134 | self.sock_node_map[node.get_fileno()] = node 1135 | 1136 | 1137 | 1138 | class ArcusMCWorker(threading.Thread): 1139 | def __init__(self, node_allocator): 1140 | threading.Thread.__init__(self) 1141 | self.q = queue.Queue() 1142 | self.poll = ArcusMCPoll(node_allocator) 1143 | self.poll.start() 1144 | self.node_allocator = node_allocator 1145 | 1146 | def run(self): 1147 | arcuslog(self, 'worker start') 1148 | 1149 | while True: 1150 | op = self.q.get() 1151 | if self.node_allocator.shutdown == True: 1152 | arcuslog(self, 'worker done') 1153 | self.poll.join() 1154 | return 1155 | 1156 | if op == None: # maybe shutdown 1157 | continue 1158 | 1159 | arcuslog(self, 'get operation %s(%s:%s) from %s' % (op.request, op.callback, 
hex(id(op)), op.node)) 1160 | node = op.node 1161 | 1162 | try: 1163 | node.process_request(op.request) 1164 | except Exception as e: 1165 | arcuslog(self, 'operation failed: %s' % str(e)) 1166 | op.set_result(e) 1167 | 1168 | 1169 | def register_node(self, node): 1170 | self.poll.register_node(node) 1171 | 1172 | 1173 | 1174 | 1175 | class ArcusMCNodeAllocator: 1176 | def __init__(self, transcoder): 1177 | self.transcoder = transcoder 1178 | self.worker = ArcusMCWorker(self) 1179 | self.worker.start() 1180 | self.shutdown = False 1181 | 1182 | 1183 | def alloc(self, addr, name): 1184 | ret = ArcusMCNode(addr, name, self.transcoder, self) 1185 | self.worker.register_node(ret) 1186 | return ret 1187 | 1188 | 1189 | def join(self): 1190 | self.worker.join() 1191 | 1192 | 1193 | 1194 | -------------------------------------------------------------------------------- /arcus_util.py: -------------------------------------------------------------------------------- 1 | # 2 | # arcus-python-client - Arcus python client drvier 3 | # Copyright 2014 NAVER Corp. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License") 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | # 17 | 18 | 19 | import telnetlib, os, sys 20 | import socket 21 | import threading 22 | 23 | 24 | 25 | from kazoo.client import KazooClient 26 | import kazoo 27 | from kazoo.exceptions import * 28 | 29 | 30 | class arcus_cache: 31 | def __init__(self, zk_addr, code): 32 | self.code = code 33 | self.zk_addr = zk_addr 34 | self.node = [] 35 | self.active_node = [] 36 | self.dead_node = [] 37 | self.meta = ['', None] 38 | 39 | def __repr__(self): 40 | repr = '[Service Code: %s] (zk:%s)\n (node) %s\n (active) %s\n (dead) %s' % (self.code, self.zk_addr, self.node, self.active_node, self.dead_node) 41 | return repr 42 | 43 | 44 | class arcus_node: 45 | def __init__(self, ip, port): 46 | self.ip = ip 47 | self.port = port 48 | 49 | self.name = '' 50 | self.code = '' 51 | self.zk_addr = '' 52 | self.active = False 53 | 54 | self.noport = False 55 | 56 | def __repr__(self): 57 | if self.name and self.code: 58 | return '[%s:%s-(%s,%s)]' % (self.ip, self.port, self.name, self.code) 59 | elif self.name: 60 | return '[%s:%s-(%s)]' % (self.ip, self.port, self.name) 61 | elif self.code: 62 | return '[%s:%s-(%s)]' % (self.ip, self.port, self.code) 63 | 64 | return '[%s:%s]' % (self.ip, self.port) 65 | 66 | def do_arcus_command(self, command, timeout=0.2): 67 | tn = telnetlib.Telnet(self.ip, self.port) 68 | tn.write(bytes(command + '\r\n', 'utf-8')) 69 | 70 | if command[0:5] == 'scrub' or command[0:5] == 'flush': 71 | message = 'OK' 72 | else: 73 | message = 'END' 74 | 75 | result = tn.read_until(bytes(message, 'utf-8'), timeout) 76 | 77 | result = result.decode('utf-8') 78 | tn.write(bytes('quit\r\n', 'utf-8')) 79 | tn.close() 80 | return result 81 | 82 | 83 | class zookeeper: 84 | def __init__(self, address): 85 | self.address = address 86 | self.zk = KazooClient(address) 87 | self.zk.start() 88 | 89 | self.arcus_cache_map = {} 90 | self.arcus_node_map = {} 91 | 92 | self.force = False 93 | self.meta = ('', None) 94 | self.meta_mtime = None 95 | 96 | def __repr__(self): 97 
| repr = '[ZooKeeper: %s] %s, %s' % (self.address, self.meta[0], str(self.meta[1])) 98 | 99 | for code, cache in self.arcus_cache_map.items(): 100 | repr = '%s\n\n%s' % (repr, cache) 101 | 102 | return repr 103 | 104 | def set_force(self): 105 | self.force = True 106 | 107 | def zk_read(self, path): 108 | data, stat = self.zk.get(path) 109 | children = self.zk.get_children(path) 110 | return data, stat, children 111 | 112 | def zk_children(self, path, watch=None): 113 | if watch != None: 114 | return self.zk.get_children(path, watch = watch) 115 | else: 116 | return self.zk.get_children(path) 117 | 118 | def zk_children_if_exists(self, path, watch=None): 119 | if self.zk_exists(path) == False: 120 | return [] 121 | 122 | return self.zk_children(path, watch) 123 | 124 | def zk_exists(self, path): 125 | if self.zk.exists(path) == None: 126 | return False 127 | 128 | return True 129 | 130 | def zk_create(self, path, value): 131 | try: 132 | self.zk.create(path, bytes(value, 'utf-8')) 133 | except NodeExistsError: 134 | if self.force == False: 135 | raise NodeExistsError 136 | 137 | def zk_delete(self, path): 138 | try: 139 | self.zk.delete(path) 140 | except NoNodeError: 141 | if self.force == False: 142 | raise NoNodeError 143 | 144 | def zk_update(self, path, value): 145 | try: 146 | self.zk.set(path, bytes(value, 'utf-8')) 147 | except NoNodeError: 148 | if self.force == False: 149 | raise NoNodeError 150 | 151 | def get_arcus_cache_list(self): 152 | children = self.zk_children_if_exists('/arcus/cache_list/') 153 | children += self.zk_children_if_exists('/arcus_repl/cache_list/') 154 | 155 | return children 156 | 157 | def get_arcus_node_of_code(self, code, server): 158 | # repl case 159 | children = self.zk_children_if_exists('/arcus_repl/cache_list/' + code) 160 | children += self.zk_children_if_exists('/arcus/cache_list/' + code) 161 | ret = [] 162 | for child in children: 163 | tmp = child.split('^', 2) # remove repl info 164 | if len(tmp) == 3: 165 | child = 
tmp[2] 166 | 167 | addr, name = child.split('-', 1) 168 | ip, port = addr.split(':', 1) 169 | 170 | if server != '' and (server != ip and server != name): 171 | continue # skip this 172 | 173 | node = arcus_node(ip, port) 174 | node.name = name 175 | ret.append(node) 176 | 177 | 178 | return ret 179 | 180 | def get_arcus_node_of_server(self, addr): 181 | ip = socket.gethostbyname(addr) 182 | 183 | children = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/') 184 | children += self.zk_children_if_exists('/arcus/cache_server_mapping/') 185 | ret = [] 186 | for child in children: 187 | l = len(ip) 188 | if child[:l] == ip: 189 | code = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/' + child) 190 | if len(code) == 0: 191 | code = self.zk_children_if_exists('/arcus/cache_server_mapping/' + child) 192 | 193 | code = code[0] 194 | 195 | tmp = code.split('^') # remove repl info 196 | code = tmp[0] 197 | 198 | try: 199 | ip, port = child.split(':') 200 | except ValueError: 201 | print('No port defined in cache_server_mapping: %s' % child) 202 | continue 203 | 204 | node = arcus_node(ip, port) 205 | node.code = code 206 | ret.append(node) 207 | 208 | return ret 209 | 210 | def _get_arcus_node(self, child, results): 211 | code = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/' + child) 212 | if len(code) == 0: 213 | code = self.zk_children_if_exists('/arcus/cache_server_mapping/' + child) 214 | 215 | if len(code) == 0: 216 | print('no childrens in cache_server_mapping error: %s' % child) 217 | print(code) 218 | return 219 | 220 | code = code[0] 221 | 222 | tmp = code.split('^') # remove repl info 223 | code = tmp[0] 224 | 225 | try: 226 | ip, port = child.split(':') 227 | except ValueError: 228 | print('No port defined in cache_server_mapping: %s' % child) 229 | ip = child 230 | port = '0' 231 | 232 | 233 | node = arcus_node(ip, port) 234 | node.code = code 235 | results.append(node) 236 | 237 | def get_arcus_node_all(self): 238 | 
children = self.zk_children_if_exists('/arcus_repl/cache_server_mapping/') 239 | children += self.zk_children_if_exists('/arcus/cache_server_mapping/') 240 | 241 | ret = [] 242 | threads = [] 243 | 244 | #print(children) 245 | for child in children: 246 | th = threading.Thread(target = self._get_arcus_node, args = (child, ret)) 247 | th.start() 248 | threads.append(th) 249 | 250 | for th in threads: 251 | th.join() 252 | 253 | return ret 254 | 255 | def _get_arcus_meta(self, child, results): 256 | data, stat, children = self.zk_read('/arcus/meta/' + child) 257 | results[child] = [data.decode('utf-8'), stat] 258 | 259 | 260 | def get_arcus_meta_all(self): 261 | if self.zk.exists('/arcus/meta') == None: 262 | self.zk.create('/arcus/meta', b'arcus meta info') 263 | 264 | children = self.zk.get_children('/arcus/meta') 265 | print('# children') 266 | print(children) 267 | 268 | threads = [] 269 | ret = {} 270 | 271 | #print(children) 272 | for child in children: 273 | th = threading.Thread(target = self._get_arcus_meta, args = (child, ret)) 274 | th.start() 275 | threads.append(th) 276 | 277 | for th in threads: 278 | th.join() 279 | 280 | return ret 281 | 282 | 283 | def _match_code_and_nodes(self, code, cache, meta): 284 | #repl case 285 | children = self.zk_children_if_exists('/arcus_repl/cache_list/' + code) 286 | children += self.zk_children_if_exists('/arcus/cache_list/' + code) 287 | for child in children: 288 | tmp = child.split('^', 2) # remove repl info 289 | if len(tmp) == 3: 290 | child = tmp[2] 291 | 292 | addr, name = child.split('-') 293 | try: 294 | node = self.arcus_node_map[addr] 295 | except KeyError: 296 | print('%s of %s is not defined in cache_server_mapping' % (addr, code)) 297 | ip, port = addr.split(':') 298 | node = arcus_node(ip, port) 299 | node.noport = True 300 | 301 | node.active = True 302 | cache.active_node.append(node) 303 | 304 | 305 | for node in cache.node: 306 | if node.active == False: 307 | cache.dead_node.append(node) 308 | 309 
| if code in meta: 310 | cache.meta = meta[code] 311 | 312 | 313 | 314 | def load_all(self): 315 | codes = self.get_arcus_cache_list() 316 | for code in codes: 317 | cache = arcus_cache(self.address, code) 318 | self.arcus_cache_map[code] = cache 319 | 320 | print('# get_arcus_node_all()') 321 | nodes = self.get_arcus_node_all() 322 | print('# done') 323 | 324 | for node in nodes: 325 | self.arcus_node_map[node.ip + ":" + node.port] = node 326 | self.arcus_cache_map[node.code].node.append(node) 327 | 328 | # meta info 329 | print('# get_arcus_meta_all()') 330 | meta = self.get_arcus_meta_all() 331 | print('# done') 332 | 333 | print('# match code & nodes') 334 | threads = [] 335 | 336 | for code, cache in self.arcus_cache_map.items(): 337 | th = threading.Thread(target = self._match_code_and_nodes, args = (code, cache, meta)) 338 | th.start() 339 | threads.append(th) 340 | 341 | for th in threads: 342 | th.join() 343 | 344 | print('#done') 345 | 346 | if 'zookeeper' in meta: 347 | self.meta = meta['zookeeper'] 348 | 349 | 350 | def _callback(self, event): 351 | child_list = self.zk.get_children(event.path) 352 | cloud = os.path.basename(event.path) 353 | cache = self.arcus_cache_map[cloud] 354 | 355 | event_list = { 'created':[], 'deleted':[] } 356 | current = {} 357 | print('##### active node') 358 | print(cache.active_node) 359 | 360 | children = [] 361 | for child in child_list: 362 | addr = child.split('-')[0] 363 | children.append(addr) 364 | 365 | print('#### children') 366 | print(children) 367 | 368 | for node in cache.active_node: 369 | current[node.ip + ':' + node.port] = True 370 | 371 | print('##### current') 372 | print(current) 373 | 374 | for node in cache.active_node: 375 | addr = node.ip + ':' + node.port 376 | if addr not in children: 377 | event_list['deleted'].append(addr) 378 | cache.active_node.remove(node) 379 | 380 | 381 | for child in children: 382 | if child not in current: 383 | event_list['created'].append(child) 384 | ip, port = 
child.split(':') 385 | node = arcus_node(ip, port) 386 | cache.active_node.append(node) 387 | 388 | 389 | print('####### result') 390 | print(cache.active_node) 391 | 392 | self.callback(event, event_list) 393 | children = self.zk.get_children(event.path, watch = self._callback) 394 | 395 | 396 | def watch(self, callback): 397 | self.callback = callback 398 | for code, cache in self.arcus_cache_map.items(): 399 | children = self.zk_children_if_exists('/arcus/cache_list/' + code, watch=self._callback) 400 | children += self.zk_children_if_exists('/arcus_repl/cache_list/' + code, watch=self._callback) 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 410 | 411 | 412 | 413 | 414 | -------------------------------------------------------------------------------- /arcus_zk_cmd.py: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/python3 2 | 3 | # 4 | # arcus-python-client - Arcus python client drvier 5 | # Copyright 2014 NAVER Corp. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License") 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 
18 | # 19 | 20 | 21 | import sys 22 | import re 23 | import socket 24 | 25 | from optparse import OptionParser 26 | 27 | from arcus_util import zookeeper 28 | from arcus_util import arcus_node 29 | 30 | from kazoo.client import KazooClient 31 | import kazoo 32 | 33 | 34 | 35 | if __name__ == '__main__': 36 | usage = "usage: %prog [options]" 37 | parser = OptionParser(usage=usage, version="%prog 1.0") 38 | parser.add_option('-a', '--address', dest='address', default='', help='zookeeper address') 39 | parser.add_option('-s', '--service', dest='service', default='', help='service code') 40 | parser.add_option('-n', '--node', dest='node', default='', help='node address:port') 41 | parser.add_option('', '--name', dest='name', default='', help='node domain name') 42 | parser.add_option('-c', '--command', dest='command', default='', help='add_service | del_service | add_node | del_node') 43 | parser.add_option('-f', '--force', dest='force', default=False, help='ignore exception', action='store_true') 44 | 45 | (options, args) = parser.parse_args() 46 | 47 | address = options.address 48 | service = options.service 49 | node = options.node 50 | command = options.command 51 | force = options.force 52 | 53 | ret = node.split(':') 54 | name = ret[0] 55 | if len(ret) > 1: 56 | port = ret[1] 57 | else: 58 | port = '' 59 | 60 | ip = socket.gethostbyname(name) 61 | 62 | if options.name != '': 63 | name = options.name 64 | 65 | 66 | zoo = zookeeper(address) 67 | print(zoo) 68 | 69 | if force: 70 | zoo.set_force() 71 | 72 | if zoo.zk_exists('/arcus') == False: 73 | print ('create /arcus') 74 | zoo.zk_create('/arcus', 'arcus') 75 | 76 | if zoo.zk_exists('/arcus/cache_list') == False: 77 | print ('create /arcus/cache_list') 78 | zoo.zk_create('/arcus/cache_list', 'cache_list') 79 | 80 | if zoo.zk_exists('/arcus/client_list') == False: 81 | print ('create /arcus/client_list') 82 | zoo.zk_create('/arcus/client_list', 'client_list') 83 | 84 | if 
zoo.zk_exists('/arcus/cache_server_mapping') == False: 85 | print ('create /arcus/cache_server_mapping') 86 | zoo.zk_create('/arcus/cache_server_mapping', 'cache_server_mapping') 87 | 88 | if zoo.zk_exists('/arcus/service_code_mapping') == False: 89 | print ('create /arcus/service_code_mapping') 90 | zoo.zk_create('/arcus/service_code_mapping', 'service_code_mapping') 91 | 92 | 93 | if command: 94 | if command == 'add_service' and service: 95 | print ('add /arcus/cache_list/' + service) 96 | zoo.zk_create('/arcus/cache_list/' + service, 'arcus1.8') 97 | 98 | print ('add /arcus/client_list/' + service) 99 | zoo.zk_create('/arcus/client_list/' + service, 'arcus1.8') 100 | 101 | print ('add /arcus/service_code_mapping/' + service) 102 | zoo.zk_create('/arcus/service_code_mapping/' + service, 'arcus1.8') 103 | 104 | elif command == 'del_service' and service: 105 | data, stat, children = zoo.zk_read('/arcus/service_code_mapping/' + service) 106 | 107 | print ('delete /arcus/service_code_mapping/' + service) 108 | zoo.zk_delete_tree('/arcus/service_code_mapping/' + service) 109 | 110 | for child in children: 111 | ret = '/arcus/cache_server_mapping/' + child 112 | print ('delete node %s' % ret) 113 | zoo.zk_delete_tree(ret) 114 | 115 | print ('delete /arcus/client_list/' + service) 116 | zoo.zk_delete_tree('/arcus/client_list/' + service) 117 | 118 | 119 | elif command == 'add_node' and service and node: 120 | assert port != '' 121 | 122 | print ('create /arcus/service_code_mapping/%s/%s:%s' % (service, ip, port)) 123 | zoo.zk_create('/arcus/service_code_mapping/%s/%s:%s' % (service, ip, port), name) 124 | 125 | print ('create /arcus/cache_server_mapping/%s:%s' % (ip, port)) 126 | zoo.zk_create('/arcus/cache_server_mapping/%s:%s' % (ip, port), name) 127 | 128 | print('create /arcus/cache_server_mapping/%s:%s/%s' % (ip, port, service)) 129 | zoo.zk_create('/arcus/cache_server_mapping/%s:%s/%s' % (ip, port, service), 'arcus1.8') 130 | 131 | elif command == 'del_node' and 
service and node: 132 | if port != '': # delete one 133 | print ('delete /arcus/service_code_mapping/%s/%s:%s' % (service, ip, port)) 134 | zoo.zk_delete_tree('/arcus/service_code_mapping/%s/%s:%s' % (service, ip, port)) 135 | 136 | print ('delete /arcus/cache_server_mapping/%s:%s' % (ip, port)) 137 | zoo.zk_delete_tree('/arcus/cache_server_mapping/%s:%s' % (ip, port)) 138 | 139 | else: # delete all 140 | head = '/arcus/service_code_mapping/' + service 141 | data, stat, children = zoo.zk_read(head) 142 | 143 | for child in children: 144 | idx = len(ip) 145 | 146 | if child[0:idx] == ip: 147 | ret = head + '/' + child 148 | print ('delete %s' % ret) 149 | zoo.zk_delete_tree(ret) 150 | 151 | head = '/arcus/cache_server_mapping' 152 | data, stat, children = zoo.zk_read(head) 153 | 154 | for child in children: 155 | idx = len(ip) 156 | 157 | if child[0:idx] == ip: 158 | ret = head + '/' + child 159 | print ('delete %s' % ret) 160 | zoo.zk_delete_tree(ret) 161 | elif service: 162 | data, stat, children = zoo.zk_read('/arcus/cache_list/' + service) 163 | print ('## cache_list/' + service) 164 | print (children) 165 | 166 | data, stat, children = zoo.zk_read('/arcus/service_code_mapping/' + service) 167 | print ('## cache_server_mapping/' + service) 168 | print (children) 169 | else: 170 | data, stat, children = zoo.zk_read('/arcus/cache_list') 171 | print ('## cache_list') 172 | print (children) 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # 2 | # arcus-python-client - Arcus python client drvier 3 | # Copyright 2014 NAVER Corp. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | 19 | # 20 | # sys.argv[1] : connection url for Arcus cloud (zookeeper address:port) 21 | # sys.argv[2] : Arcus cloud service code 22 | # 23 | # USAGE: python3 test.py your.arcuscloud.com:11223 service_code 24 | # 25 | 26 | from arcus import * 27 | from arcus_mc_node import ArcusMCNodeAllocator 28 | from arcus_mc_node import EflagFilter 29 | import datetime, time, sys 30 | 31 | #enable_log() 32 | 33 | timeout = 20 34 | 35 | 36 | # client which use arcus memcached node & default arcus transcoder 37 | client = Arcus(ArcusLocator(ArcusMCNodeAllocator(ArcusTranscoder()))) 38 | 39 | print('### connect to client') 40 | client.connect(sys.argv[1], sys.argv[2]) 41 | 42 | 43 | ##################################################################################################### 44 | # 45 | # TEST 1: primitive type 46 | # 47 | ##################################################################################################### 48 | ret = client.set('test:string1', 'test...', timeout) 49 | print(ret.get_result()) 50 | assert ret.get_result() == True 51 | 52 | ret = client.get('test:string1') 53 | print(ret.get_result()) 54 | assert ret.get_result() == 'test...' 
55 | 56 | ret = client.set('test:string2', 'test...2', timeout) 57 | print(ret.get_result()) 58 | assert ret.get_result() == True 59 | 60 | ret = client.get('test:string2') 61 | print(ret.get_result()) 62 | assert ret.get_result() == 'test...2' 63 | 64 | ret = client.set('test:int', 1, timeout) 65 | print(ret.get_result()) 66 | assert ret.get_result() == True 67 | 68 | ret = client.get('test:int') 69 | print(ret.get_result()) 70 | assert ret.get_result() == 1 71 | 72 | ret = client.set('test:float', 1.2, timeout) 73 | print(ret.get_result()) 74 | assert ret.get_result() == True 75 | 76 | ret = client.get('test:float') 77 | print(ret.get_result()) 78 | assert ret.get_result() == 1.2 79 | 80 | ret = client.set('test:bool', True, timeout) 81 | print(ret.get_result()) 82 | assert ret.get_result() == True 83 | 84 | ret = client.get('test:bool') 85 | print(ret.get_result()) 86 | assert ret.get_result() == True 87 | 88 | now = datetime.datetime.now() 89 | ret = client.set('test:date', now, timeout) 90 | print(ret.get_result()) 91 | assert ret.get_result() == True 92 | 93 | ret = client.get('test:date') 94 | print(ret.get_result()) 95 | print(now) 96 | assert (abs(ret.get_result() - now)) < datetime.timedelta(1000) 97 | 98 | ret = client.set('test:bytearray', b'bytes array', timeout) 99 | print(ret.get_result()) 100 | assert ret.get_result() == True 101 | 102 | ret = client.get('test:bytearray') 103 | print(ret.get_result()) 104 | assert ret.get_result() == b'bytes array' 105 | 106 | 107 | ret = client.set('test:incr', '1', timeout) 108 | print(ret.get_result()) 109 | assert ret.get_result() == True 110 | 111 | ret = client.incr('test:incr', 10) 112 | print(ret.get_result()) 113 | assert ret.get_result() == 11 114 | 115 | ret = client.decr('test:incr', 3) 116 | print(ret.get_result()) 117 | assert ret.get_result() == 11-3 118 | 119 | ret = client.decr('test:incr', 100) 120 | print(ret.get_result()) 121 | assert ret.get_result() == 0 # minimum value is 0 122 | 123 | 124 | 
##################################################################################################### 125 | # 126 | # TEST 2: list 127 | # 128 | ##################################################################################################### 129 | ret = client.lop_create('test:list_1', ArcusTranscoder.FLAG_STRING, timeout) 130 | print(ret.get_result()) 131 | assert ret.get_result() == True 132 | 133 | items = ['item 1', 'item 2', 'item 3', 'item 4', 'item 5', 'item 6'] 134 | 135 | for item in items: 136 | ret = client.lop_insert('test:list_1', -1, item) 137 | print(ret.get_result()) 138 | assert ret.get_result() == True 139 | 140 | ret = client.lop_get('test:list_1', (0, -1)) 141 | print(ret.get_result()) 142 | assert ret.get_result() == items 143 | 144 | ret = client.lop_get('test:list_1', (2, 4)) 145 | print(ret.get_result()) 146 | assert ret.get_result() == items[2:4+1] 147 | 148 | ret = client.lop_get('test:list_1', (1, -2)) 149 | print(ret.get_result()) 150 | assert ret.get_result() == items[1:-2+1] 151 | 152 | 153 | 154 | ##################################################################################################### 155 | # 156 | # TEST 3: set 157 | # 158 | ##################################################################################################### 159 | ret = client.sop_create('test:set_1', ArcusTranscoder.FLAG_STRING, timeout) 160 | print(ret.get_result()) 161 | assert ret.get_result() == True 162 | 163 | items = ['item 1', 'item 2', 'item 3', 'item 4', 'item 5', 'item 6'] 164 | set_items = set() 165 | for item in items: 166 | set_items.add(item) 167 | 168 | for item in set_items: 169 | ret = client.sop_insert('test:set_1', item) 170 | print(ret.get_result()) 171 | assert ret.get_result() == True 172 | 173 | ret = client.sop_get('test:set_1') 174 | print(ret.get_result()) 175 | assert ret.get_result() == set_items 176 | 177 | for item in set_items: 178 | ret = client.sop_exist('test:set_1', item) 179 | print(ret.get_result()) 180 | 
assert ret.get_result() == True 181 | 182 | ret = client.sop_exist('test:set_1', 'item 100') 183 | print(ret.get_result()) 184 | assert ret.get_result() == False 185 | 186 | 187 | 188 | ##################################################################################################### 189 | # 190 | # TEST 4: btree 191 | # 192 | ##################################################################################################### 193 | def itoh(i): 194 | h = hex(i) 195 | if len(h) % 2 == 1: 196 | h = '0x0%s' % h[2:].upper() 197 | else: 198 | h = '0x%s' % h[2:].upper() 199 | 200 | return h 201 | 202 | 203 | 204 | # int key 205 | ret = client.bop_create('test:btree_int', ArcusTranscoder.FLAG_INTEGER, timeout) 206 | print (ret.get_result()) 207 | assert ret.get_result() == True 208 | 209 | for i in range(0, 1000): 210 | ret = client.bop_insert('test:btree_int', i, i, itoh(i)) 211 | print(ret.get_result()) 212 | assert ret.get_result() == True 213 | 214 | ret = client.bop_get('test:btree_int', (200, 400)) 215 | print(ret.get_result()) 216 | 217 | result = ret.get_result() 218 | for i in range(200, 400): 219 | assert result[i] == (itoh(i), i) 220 | 221 | ret = client.bop_count('test:btree_int', (100, 199)) 222 | print(ret.get_result()) 223 | assert ret.get_result() == 100 224 | 225 | 226 | 227 | 228 | 229 | # hex key 230 | ret = client.bop_create('test:btree_hex', ArcusTranscoder.FLAG_STRING, timeout) 231 | print (ret.get_result()) 232 | assert ret.get_result() == True 233 | 234 | for i in range(0x10000, 0x10200): 235 | ret = client.bop_insert('test:btree_hex', itoh(i), 'bop item %d' % i, itoh(i)) 236 | print(ret.get_result()) 237 | assert ret.get_result() == True 238 | 239 | ret = client.bop_get('test:btree_hex', ('0x010050', '0x010150')) 240 | print(ret.get_result()) 241 | 242 | result = ret.get_result() 243 | for i in range(0x10050, 0x10150): 244 | assert result[itoh(i)] == (itoh(i), 'bop item %d' % i) 245 | 246 | 247 | 248 | 249 | # eflag test 250 | 251 | ret = 
client.bop_create('test:btree_eflag', ArcusTranscoder.FLAG_INTEGER, timeout) 252 | print (ret.get_result()) 253 | assert ret.get_result() == True 254 | 255 | for i in range(0, 1000): 256 | ret = client.bop_insert('test:btree_eflag', i, i, itoh(i)) 257 | print(ret.get_result()) 258 | assert ret.get_result() == True 259 | 260 | ret = client.bop_get('test:btree_eflag', (200, 400), EflagFilter('EFLAG & 0x00ff == 0x0001')) 261 | print(ret.get_result()) 262 | result = ret.get_result() 263 | assert result[257] == ('0x0101', 257) 264 | 265 | ret = client.bop_get('test:btree_eflag', (200, 400), EflagFilter('EFLAG & 0x00ff > 0x0010')) 266 | print(ret.get_result()) 267 | result = ret.get_result() 268 | 269 | for i in range(200, 401): 270 | if (len(itoh(i)) < 6): 271 | if i in result: 272 | assert False 273 | continue 274 | 275 | if (i & 0x00ff) <= 0x0010: 276 | if i in result: 277 | assert False 278 | continue 279 | 280 | assert result[i] == (itoh(i), i) 281 | 282 | 283 | 284 | 285 | ##################################################################################################### 286 | # 287 | # TEST 5: btree mget, smget 288 | # 289 | ##################################################################################################### 290 | # int key 291 | ret = client.bop_create('test:btree_1', ArcusTranscoder.FLAG_INTEGER, timeout) 292 | print (ret.get_result()) 293 | assert ret.get_result() == True 294 | 295 | for i in range(0, 1000): 296 | ret = client.bop_insert('test:btree_1', i, i, itoh(i)) 297 | print(ret.get_result()) 298 | assert ret.get_result() == True 299 | 300 | 301 | ret = client.bop_create('test:btree_2', ArcusTranscoder.FLAG_INTEGER, timeout) 302 | print (ret.get_result()) 303 | assert ret.get_result() == True 304 | 305 | for i in range(1000, 2000): 306 | ret = client.bop_insert('test:btree_2', i, i, itoh(i)) 307 | print(ret.get_result()) 308 | assert ret.get_result() == True 309 | 310 | ret = client.bop_create('test:btree_3', 
ArcusTranscoder.FLAG_INTEGER, timeout) 311 | print (ret.get_result()) 312 | assert ret.get_result() == True 313 | 314 | for i in range(2000, 3000): 315 | ret = client.bop_insert('test:btree_3', i, i, itoh(i)) 316 | print(ret.get_result()) 317 | assert ret.get_result() == True 318 | 319 | 320 | 321 | ret = client.bop_mget(['test:btree_1', 'test:btree_2', 'test:btree_3', 'test:btree_4', 'test:btree_5'], (500, 2500)) 322 | print(ret.get_result()) 323 | 324 | 325 | ret = client.bop_smget(['test:btree_1', 'test:btree_2', 'test:btree_3', 'test:btree_4', 'test:btree_5'], (500, 2500)) 326 | 327 | print(ret.get_result()) 328 | result = ret.get_result() 329 | missed_key = ret.get_missed_key() 330 | 331 | idx = 500 332 | for item in result: 333 | if item[0] != idx: # bkey 334 | print(item[0]) 335 | print(idx) 336 | 337 | assert item[0] == idx # bkey 338 | assert item[1][:11] == 'test:btree_' # key 339 | assert item[2] == itoh(idx) # eflag 340 | assert item[3] == idx # value 341 | idx += 1 342 | 343 | assert missed_key == ['test:btree_4', 'test:btree_5'] 344 | 345 | 346 | 347 | 348 | ##################################################################################################### 349 | # 350 | # TEST 6: dynamic list 351 | # 352 | ##################################################################################################### 353 | 354 | arcus_list = client.list_alloc('test:arcus_list', ArcusTranscoder.FLAG_STRING, 5) 355 | print (arcus_list) 356 | assert arcus_list == [] 357 | 358 | items = ['item 1', 'item 2', 'item 3', 'item 4', 'item 5', 'item 6'] 359 | 360 | for item in items: 361 | arcus_list.append(item) 362 | 363 | print (arcus_list) 364 | assert arcus_list == items 365 | print (arcus_list[2:4]) 366 | assert arcus_list[2:4] == items[2:4] 367 | print (arcus_list[:2]) 368 | assert arcus_list[:2] == items[:2] 369 | print (arcus_list[3:]) 370 | assert arcus_list[3:] == items[3:] 371 | 372 | 373 | print('## for loop test') 374 | idx = 0 375 | for a in arcus_list: 
376 | print(a) 377 | assert a == items[idx] 378 | idx += 1 379 | 380 | 381 | # cached ArcusList Test 382 | arcus_list = client.list_alloc('test:arcus_list_cache', ArcusTranscoder.FLAG_STRING, 5, cache_time=10) 383 | assert arcus_list == [] 384 | 385 | items = ['item 1', 'item 2', 'item 3', 'item 4', 'item 5', 'item 6'] 386 | 387 | for item in items: 388 | arcus_list.append(item) 389 | 390 | print (arcus_list) 391 | assert arcus_list == items 392 | print (arcus_list[2:4]) 393 | assert arcus_list[2:4] == items[2:4] 394 | print (arcus_list[:2]) 395 | assert arcus_list[:2] == items[:2] 396 | print (arcus_list[3:]) 397 | assert arcus_list[3:] == items[3:] 398 | 399 | 400 | print('## for loop test') 401 | idx = 0 402 | for a in arcus_list: 403 | print(a) 404 | assert a == items[idx] 405 | idx += 1 406 | 407 | ##################################################################################################### 408 | # 409 | # TEST 7: dynamic set 410 | # 411 | ##################################################################################################### 412 | 413 | 414 | arcus_set = client.set_alloc('test:arcus_set', ArcusTranscoder.FLAG_STRING, 5) 415 | print (arcus_set) 416 | 417 | items = ['item 1', 'item 2', 'item 3', 'item 4', 'item 5', 'item 6'] 418 | 419 | for item in items: 420 | arcus_set.add(item) 421 | 422 | print (arcus_set) 423 | 424 | print('## for loop test') 425 | for a in arcus_set: 426 | print(a) 427 | 428 | for a in items: 429 | assert a in arcus_set 430 | 431 | print ('### test done ###') 432 | client.disconnect() 433 | 434 | 435 | 436 | 437 | -------------------------------------------------------------------------------- /zk_sync.py: -------------------------------------------------------------------------------- 1 | # 2 | # arcus-python-client - Arcus python client drvier 3 | # Copyright 2014 NAVER Corp. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from kazoo.client import KazooClient
import kazoo
import sys, os, time
import re
from threading import Lock
from datetime import datetime


def log(*args):
    """Print *args* concatenated after a timestamp prefix."""
    # FIX: the original shadowed the builtin `str` and concatenated with +=
    print('[%s] ' % datetime.now() + ''.join(a.__str__() for a in args))


class Manager:
    """Keeps the ephemeral children of several ZooKeeper ensembles in sync.

    Every ephemeral node seen on one ensemble is mirrored as a plain
    (non-ephemeral) node on the others; mirrors whose source disappeared
    are deleted.
    """

    def __init__(self):
        self.zk_list = []
        self.lock = Lock()

    def append(self, zk):
        self.zk_list.append(zk)

    def sync(self):
        # FIX: `with` releases the lock even when an exception escapes
        # (the original acquire/release pair left it held on error)
        with self.lock:
            log('Sync start')

            # read children
            for zk in self.zk_list:
                zk.read()

            # make new ehphemeral node
            for zk1 in self.zk_list:
                for zk2 in self.zk_list:
                    if zk1 is zk2:
                        continue

                    # make node
                    for node in zk1.ephemerals:
                        if node in zk2.ephemerals:
                            log('Error: Duplicated ephemeral %s%s - %s' % (zk1.name, zk1.path, node))
                            continue

                        if node not in zk2.nonephemerals:
                            log('Create: %s%s - %s' % (zk1.name, zk1.path, node))
                            zk2.create(node, False)

            # delete old nonehphemeral node
            for zk1 in self.zk_list:
                for node in zk1.nonephemerals:
                    # keep the mirror only while some other ensemble still
                    # holds the source ephemeral
                    alive = any(node in zk2.ephemerals
                                for zk2 in self.zk_list if zk2 is not zk1)

                    if not alive:
                        # delete abnormal node
                        log('Delete: %s%s - %s is abnormal' % (zk1.name, zk1.path, node))
                        zk1.delete(node)

            # view result & watch children again
            log('Sync result')
            for zk in self.zk_list:
                zk.read(self.watch_children)

            log('Sync done')

    def watch_children(self, event):
        log('watch children called: ', event)
        self.sync()


class Zookeeper:
    """One ensemble endpoint ('host:port/arcus/cache_list/cloud') plus its
    cached children split into ephemerals and non-ephemerals."""

    def __init__(self, zk):
        host, path = zk.split('/', 1)
        self.zk = KazooClient(host)
        self.zk.start()
        self.name = host
        self.path = '/' + path

        self.children = []
        self.ephemerals = []
        self.nonephemerals = []

        # safety check
        if '/arcus/cache_list/' not in self.path:
            log('invalid zk node path (should include /arcus/cache_list)')
            sys.exit(0)

    def is_ephemeral(self, path):
        # kazoo exposes a session id only for ephemeral znodes
        data, stat = self.zk.get(path)
        return stat.owner_session_id is not None

    def read(self, watch=None):
        """(Re)load children, retrying when a child vanishes mid-scan."""
        while True:
            child = None  # FIX: referenced in the except block; could be unbound
            try:
                self.children = []
                self.ephemerals = []
                self.nonephemerals = []

                self.children = self.zk.get_children(self.path, watch)

                for child in self.children:
                    if self.is_ephemeral(self.path + '/' + child):
                        self.ephemerals.append(child)
                    else:
                        self.nonephemerals.append(child)

                log('read zk(%s%s)' % (self.name, self.path))
                log('\tchildren(%d): ' % len(self.children), self.children)
                log('\tephemeral(%d): ' % len(self.ephemerals), self.ephemerals)
                log('\tnonephemeral(%d): ' % len(self.nonephemerals), self.nonephemerals)
            except kazoo.exceptions.NoNodeError as e:
                log('Exception occur(%s):' % self.name)
                log(e)
                log('\tpath:', '%s/%s' % (self.path, child))
                log('\tchildren(%d): ' % len(self.children), self.children)
                log('\tephemeral(%d): ' % len(self.ephemerals), self.ephemerals)
                log('\tnonephemeral(%d): ' % len(self.nonephemerals), self.nonephemerals)

                children = self.zk.get_children(self.path, watch)
                log('\treal children(%d): ' % len(children), children)

                log('######### RETRY ###########')
                continue
            break

    def create(self, path, ephemeral=False):
        return self.zk.create(self.path + '/' + path, ephemeral=ephemeral)

    def delete(self, path):
        return self.zk.delete(self.path + '/' + path)


if __name__ == '__main__':
    # for test
    if len(sys.argv) == 1:
        # add here for test like below
        #sys.argv.append('zk1.addr.com:17288/arcus/cache_list/cloud_1')
        #sys.argv.append('zk2.addr.com:17288/arcus/cache_list/cloud_2')
        pass

    if len(sys.argv) < 3:
        print("usage: python3 zk_sync.py [ZKADDR:PORT/PATH/CLOUD]+")
        sys.exit(0)

    mgr = Manager()
    for arg in sys.argv[1:]:
        zk = Zookeeper(arg)
        mgr.append(zk)

    log("sync manager start")
    mgr.sync()

    '''
    print('############## Create start ###############')
    for i in range(0, 100):
        zk.create('node%d' % i, True)
    print('############## Create done ###############')
    time.sleep(1)

    # for test
    print('############## Test 1 start ###############')
    zk.create('nodeA', True)
    zk.create('nodeB', True)
    zk.create('nodeC', True)
    zk.create('nodeD', True)
    print('############## Test 1 done ###############')
    time.sleep(1)

    print('############## Test 2 start ###############')
    zk.delete('nodeA')
    zk.delete('nodeB')
    zk.delete('nodeC')
    zk.delete('nodeD')
    print('############## Test 2 done ###############')
    time.sleep(1)

    print('############## Delete start ###############')
    for i in range(0, 100):
        zk.delete('node%d' % i)
    print('############## Delete done ###############')
    '''

    while True:
        log('running...')
        time.sleep(10)
219 | log('sync manager done') 220 | 221 | 222 | -------------------------------------------------------------------------------- /zk_util.py: -------------------------------------------------------------------------------- 1 | # 2 | # arcus-python-client - Arcus python client drvier 3 | # Copyright 2014 NAVER Corp. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | from kazoo.client import KazooClient 19 | import kazoo 20 | import sys, os, time 21 | import re 22 | from optparse import OptionParser 23 | 24 | 25 | def do_zookeeper_read(zk, path): 26 | print(path) 27 | data, stat = zk.get(path) 28 | print('node info:', data) 29 | print('node stat:', stat) 30 | 31 | children = zk.get_children(path) 32 | print('node children:', children) 33 | 34 | return (data, stat, children) 35 | 36 | 37 | def do_zookeeper_read_tree(zk, path): 38 | print(path) 39 | data, stat = zk.get(path) 40 | print('node info:', data) 41 | print('node stat:', stat) 42 | 43 | children = zk.get_children(path) 44 | print('node children:', children) 45 | 46 | for child in children: 47 | do_zookeeper_read_tree(zk, path + '/' + child) 48 | 49 | 50 | def do_zookeeper_create(zk, path, value): 51 | print(path) 52 | zk.create(path, bytes(value, 'utf-8')) 53 | 54 | do_zookeeper_read(zk, path) 55 | 56 | def do_zookeeper_delete(zk, path): 57 | print(path) 58 | zk.delete(path) 59 | 60 | try: 61 | do_zookeeper_read(zk, path) 62 | except 
kazoo.exceptions.NoNodeError: 63 | print('deleted') 64 | 65 | def do_zookeeper_update(zk, path, value): 66 | print(path) 67 | zk.set(path, bytes(value, 'utf-8')) 68 | 69 | do_zookeeper_read(zk, path) 70 | 71 | 72 | def do_zookeeper_copy(zk_src, src, zk_dst, dst): 73 | data, stat, children = do_zookeeper_read(zk_src, path) 74 | print ('## copy %s -> %s (%s)' % (src, dst + src, data)) 75 | zk_dst.create(dst + src, data) 76 | 77 | for child in children: 78 | zookeeper_copy(zk_src, src + '/' + child, zk_dst, dst) 79 | 80 | 81 | 82 | 83 | if __name__ == '__main__': 84 | 85 | 86 | usage = "usage: %prog [options]" 87 | parser = OptionParser(usage=usage, version="%prog 1.0") 88 | parser.add_option('-a', '--address', dest='address', default='', help='zookeeper address') 89 | parser.add_option('-n', '--node', dest='node', default='', help='zookeeper node path') 90 | parser.add_option('-r', '--read', dest='read', default=False, help='zookeeper node read', action='store_true') 91 | parser.add_option('-c', '--create', dest='create', default='', help='zookeeper node create') 92 | parser.add_option('-d', '--delete', dest='delete', default=False, help='zookeeper node delete', action='store_true') 93 | parser.add_option('-u', '--update', dest='update', default='', help='zookeeper node update') 94 | parser.add_option('', '--copy', dest='copy', default='', help='zookeeper copy addr:port/new_path') 95 | parser.add_option('', '--read_tree', dest='read_tree', default=False, help='zookeeper node read tree', action='store_true') 96 | 97 | (options, args) = parser.parse_args() 98 | 99 | zk = KazooClient(options.address) 100 | zk.start() 101 | 102 | if options.read: 103 | do_zookeeper_read(zk, options.node) 104 | elif options.create != '': 105 | do_zookeeper_create(zk, options.node, options.create) 106 | elif options.delete: 107 | do_zookeeper_delete(zk, options.node) 108 | elif options.update != '': 109 | do_zookeeper_update(zk, options.node, options.update) 110 | elif options.copy != '': 
111 | dest_addr, dest_path = options.copy.split('/', 1) 112 | zk_dest = KazooClient(dest_addr) 113 | zk_dest.start() 114 | do_zookeeper_copy(zk, options.node, zk_dest, '/' + dest_path) 115 | elif options.read_tree: 116 | do_zookeeper_read_tree(zk, options.node) 117 | else: 118 | parser.print_usage(); 119 | 120 | --------------------------------------------------------------------------------