├── ORZ ├── __init__.py ├── base_mgr.py ├── cache_mgr.py ├── configs.py ├── decorators.py ├── deprecated.py ├── environ.py ├── exports.py ├── klass_init.py ├── mixed_ins.py └── sql_executor.py ├── README.rst ├── code_config.yaml ├── docs ├── API │ └── index.rst ├── Changelog │ └── index.rst ├── Introduction │ └── index.rst ├── Makefile ├── Usage │ └── index.rst ├── conf.py └── index.rst ├── setup.py └── tests ├── __init__.py ├── env_init.py ├── stub ├── __init__.py ├── cmemcached.py └── memcache.py ├── test_class_init.py ├── test_new_orz.py ├── test_new_orz_transcation.py ├── test_orz.py ├── test_orz_configs.py └── test_transcation.py /ORZ/__init__.py: -------------------------------------------------------------------------------- 1 | from ORZ.exports import * 2 | 3 | version_info = (0, 3, 3, 0) 4 | 5 | __version__ = "%s.%s" % (version_info[0], "".join(str(i) for i in version_info[1:] if i > 0)) 6 | -------------------------------------------------------------------------------- /ORZ/base_mgr.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from functools import wraps 3 | from MySQLdb import IntegrityError 4 | from functools import wraps 5 | from contextlib import contextmanager 6 | 7 | 8 | class OrzField(object): 9 | NO_DEFAULT = sys.maxint 10 | name = None 11 | 12 | class KeyType(object): 13 | NOT_INDEX, DESC, ASC, AD, ONLY_INDEX = range(5) 14 | 15 | def __init__(self, as_key=KeyType.NOT_INDEX, default=NO_DEFAULT, output_filter=lambda x:x): 16 | self.as_key = as_key 17 | self.default = default 18 | self.output_filter = output_filter 19 | self.field_name = None 20 | 21 | def __set__(self, obj, value): 22 | value = int(value) if type(value) == bool else value 23 | 24 | if not obj._initted: 25 | setattr(obj, "hidden____org_" + self.field_name, value) 26 | else: 27 | obj.dirty_fields.add(self.field_name) 28 | 29 | setattr(obj, "hidden____" + self.field_name, value) 30 | 31 | def __get__(self, obj, objtype): 32 | 
return self.output_filter(getattr(obj, "hidden____" + self.field_name, None)) 33 | 34 | 35 | OrmItem = OrzField 36 | 37 | class OrzPrimaryField(OrzField): 38 | class OrderType(object): 39 | DESC, ASC, AD = range(3) 40 | 41 | _order_tranformation = { 42 | OrderType.DESC: ('-%s',), 43 | OrderType.ASC: ('%s', ), 44 | OrderType.AD: ('-%s', ), 45 | } 46 | 47 | def __init__(self, order_kind=OrderType.DESC): 48 | keytype_tranform = { 49 | self.OrderType.DESC: OrzField.KeyType.DESC, 50 | self.OrderType.ASC: OrzField.KeyType.ASC, 51 | self.OrderType.AD: OrzField.KeyType.AD, 52 | } 53 | super(OrzPrimaryField, self).__init__(keytype_tranform[order_kind]) 54 | self.order_kind = order_kind 55 | 56 | def as_default_order_key(self): 57 | if self.field_name is None: 58 | raise ValueError('name is needed') 59 | return tuple([i % self.field_name for i in self._order_tranformation[self.order_kind]]) 60 | 61 | def orz_get_multi(func): 62 | @wraps(func) 63 | def __(self_or_cls, *a, **kw): 64 | return self_or_cls.objects.get_multiple_ids(func(self_or_cls, *a, **kw)) 65 | return __ 66 | 67 | @contextmanager 68 | def start_transaction(*cls_or_ins): 69 | assert len(cls_or_ins) > 0 70 | def replace(): 71 | for c in cls_or_ins: 72 | if hasattr(c, '__new_orz__'): 73 | c.__transaction__ = True 74 | else: 75 | for i in ['delete', 'save', 'create']: 76 | setattr(c, 'old_'+i, getattr(c, i)) 77 | setattr(c, i, getattr(c, i+'_transactionally')) 78 | 79 | def recover(): 80 | for c in cls_or_ins: 81 | if hasattr(c, '__new_orz__'): 82 | c.__transaction__ = False 83 | else: 84 | for i in ['delete', 'save', 'create']: 85 | setattr(c, i, getattr(c, 'old_'+i)) 86 | 87 | replace() 88 | try: 89 | yield cls_or_ins 90 | 91 | except (IntegrityError, OrzForceRollBack): 92 | cls_or_ins[0].objects.sql_executor.sqlstore.rollback() 93 | 94 | except: 95 | recover() 96 | raise 97 | 98 | else: 99 | cls_or_ins[0].objects.sql_executor.sqlstore.commit() 100 | finally: 101 | recover() 102 | 103 | 104 | 105 | class 
OrzForceRollBack(Exception): 106 | pass 107 | 108 | 109 | if __name__=='__main__': 110 | pass 111 | -------------------------------------------------------------------------------- /ORZ/cache_mgr.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | import sys 3 | from collections import defaultdict 4 | 5 | from .sql_executor import SqlExecutor 6 | from .base_mgr import OrzField, OrzPrimaryField 7 | from .configs import CacheConfigMgr, Config 8 | 9 | ONE_HOUR=3600 10 | 11 | HEADQUARTER_VERSION = 'orz-hq-ver:1' 12 | 13 | SINGLE_OBJ_CACHE_KEY_PATTERN = "%s|{table_name}|single_obj_ck|ver:{ver}|id:" % HEADQUARTER_VERSION 14 | 15 | KV_TO_IDS_CACHE_KEY_PATTERN = "%s|{table_name}|kv_to_ids|ver:{ver}|" % HEADQUARTER_VERSION 16 | 17 | 18 | def make_orders(fields): 19 | mapper = { 20 | OrzField.KeyType.DESC: lambda x, y: x + [("-%s" % y.field_name,)], 21 | OrzField.KeyType.ASC: lambda x, y: x + [("%s" % y.field_name,)], 22 | OrzField.KeyType.AD: lambda x, y: x + [("%s" % y.field_name, ), ("-%s" % y.field_name)], 23 | OrzField.KeyType.NOT_INDEX: lambda x, y: x, 24 | OrzField.KeyType.ONLY_INDEX: lambda x, y: x, 25 | } 26 | return tuple(reduce(lambda x, y:mapper[y.as_key](x, y), fields, [])) 27 | 28 | 29 | class CachedOrmManager(object): 30 | def __init__(self, table_name, cls, db_fields, sqlstore, mc, 31 | cache_ver='', order_combs=tuple()): 32 | self.single_obj_ck = SINGLE_OBJ_CACHE_KEY_PATTERN.format(table_name=table_name, ver=cache_ver) 33 | self.cls = cls 34 | self.mc = mc 35 | self.db_field_names = [i.field_name for i in db_fields] 36 | self.primary_field = (i for i in db_fields if isinstance(i, OrzPrimaryField)).next() 37 | self.sql_executor = SqlExecutor(table_name, self.primary_field.field_name, [f.field_name for f in db_fields], sqlstore) 38 | kv_to_ids_ck = KV_TO_IDS_CACHE_KEY_PATTERN.format(table_name=table_name, ver=cache_ver) 39 | self.config_mgr = CacheConfigMgr() 40 | 41 | orders = 
make_orders(db_fields) + order_combs 42 | self.config_mgr.generate_basic_configs(kv_to_ids_ck, 43 | [f.field_name for f in db_fields if f.as_key], orders) 44 | 45 | self.default_vals = dict((k.field_name, k.default) for k in db_fields if k.default != OrzField.NO_DEFAULT) 46 | 47 | 48 | def _get_and_refresh(self, sql_executor, primary_field_vals, force_flush=False): 49 | res = [] 50 | if not force_flush: 51 | di = dict(zip(primary_field_vals, self.mc.get_list([self.single_obj_ck + str(i) for i in primary_field_vals]))) 52 | else: 53 | di = {} 54 | 55 | for i in primary_field_vals: 56 | if di.get(i) is not None: 57 | obj = di[i] 58 | else: 59 | obj = self.cls(**sql_executor.get(i)) 60 | self.mc.set(self.single_obj_ck + str(i), obj, ONE_HOUR) 61 | res.append(obj) 62 | return res 63 | 64 | def get(self, id=None, force_flush=False, **kw): 65 | ret = self.gets_by(id=id, force_flush=force_flush) 66 | if len(ret) == 0: 67 | return None 68 | return ret[0] 69 | 70 | def get_multiple_ids(self, primary_field_vals): 71 | return self._get_and_refresh(self.sql_executor, primary_field_vals) 72 | 73 | def _amount_check(self, amount, start_limit): 74 | if not start_limit: 75 | return True 76 | 77 | start, limit = start_limit 78 | if start + limit > amount: 79 | return True 80 | 81 | return False 82 | 83 | 84 | def fetch(self, force_flush, conditions, order_keys = None, start_limit = None): 85 | amount = sys.maxint 86 | sql_executor = self.sql_executor 87 | if conditions: 88 | config = self.config_mgr.lookup_gets_by(conditions.keys(), order_keys) 89 | if config is None or (amount is not None and self._amount_check(amount, start_limit)): 90 | primary_field_vals = self.sql_executor.get_ids(conditions, start_limit, order_keys) 91 | return [self.cls(**self.sql_executor.get(i)) for i in primary_field_vals] 92 | 93 | _start_limit = (0, amount) if amount is not None else tuple() 94 | 95 | ck = config.to_string(conditions) 96 | 97 | if not force_flush: 98 | primary_field_vals = 
self.mc.get(ck) 99 | else: 100 | primary_field_vals = None 101 | 102 | if primary_field_vals is None: 103 | primary_field_vals = sql_executor.get_ids(conditions, _start_limit, order_keys) 104 | self.mc.set(ck, primary_field_vals, ONE_HOUR) 105 | 106 | else: 107 | primary_field_vals = sql_executor.get_ids(conditions, start_limit, order_keys) 108 | 109 | if start_limit: 110 | start, limit = start_limit 111 | primary_field_vals = primary_field_vals[start:start + limit] 112 | 113 | return self._get_and_refresh(sql_executor, primary_field_vals, force_flush) 114 | 115 | def create(self, raw_kwargs, transactional=False): 116 | return self.cls(**self.create_record(raw_kwargs, transactional)) 117 | 118 | def create_record(self, raw_kwargs, transactional=False): 119 | kwargs = [] 120 | kwargs = dict((k, (v() if callable(v) else v)) for k, v in self.default_vals.iteritems()) 121 | kwargs.update(raw_kwargs) 122 | 123 | cks = self._get_cks(kwargs, self.db_field_names) 124 | self.mc.delete_multi(cks) 125 | 126 | sql_data = dict((field, kwargs.pop(field)) for field in self.db_field_names if field in kwargs) 127 | _primary_field_val = self.sql_executor.create(sql_data, transactional) 128 | 129 | return self.sql_executor.get(_primary_field_val) 130 | 131 | def _get_cks(self, data_src, fields): 132 | cks = [] 133 | configs = {} 134 | for field in fields: 135 | for i in self.config_mgr.lookup_related(field): 136 | configs[i.as_key()]=i 137 | 138 | for c in configs.itervalues(): 139 | field_cks = c.to_string(data_src) 140 | cks.append(field_cks) 141 | return cks 142 | 143 | def save(self, ins, transactional=False): 144 | cks = [] 145 | 146 | if not ins.dirty_fields: 147 | return 0 148 | 149 | datum = dict((f, getattr(ins, "hidden____org_" + f)) for f in self.db_field_names) 150 | cks.extend(self._get_cks(datum, ins.dirty_fields)) 151 | cks.extend(self._get_cks(ins, ins.dirty_fields)) 152 | 153 | all_cks = cks + [self.single_obj_ck+str(ins.id)] 154 | self.mc.delete_multi(all_cks) 155 | 
156 | sql_data = dict((field, getattr(ins, field)) for field in ins.dirty_fields) 157 | ret = self.sql_executor.update_row(ins.id, sql_data, transactional) 158 | 159 | data = self.sql_executor.get(ins.id) 160 | for i in self.db_field_names: 161 | setattr(ins, i, data[i]) 162 | 163 | ins.dirty_fields = set() 164 | return ret 165 | 166 | 167 | def delete(self, ins, transactional=False): 168 | cks = self._get_cks(ins, [self.primary_field.name]+self.db_field_names) 169 | 170 | self.mc.delete_multi(cks + [self.single_obj_ck+str(ins.id)]) 171 | 172 | return self.sql_executor.delete(ins.id, transactional) 173 | 174 | def gets_by(self, order_by=None, start=0, limit=sys.maxint, force_flush=False, **kw): 175 | if order_by is None: 176 | real_order_by = self.primary_field.as_default_order_key() 177 | else: 178 | real_order_by = (order_by, ) if type(order_by) is not tuple else order_by 179 | return self.fetch(force_flush, kw, real_order_by, (start, limit)) 180 | 181 | def count_by(self, **conditions): 182 | config = self.config_mgr.lookup_normal(conditions.keys()) 183 | if config is None: 184 | return self.sql_executor.calc_count(conditions) 185 | 186 | ck = config.to_string(conditions) 187 | c = self.mc.get(ck) 188 | if c is None: 189 | ret = self.sql_executor.calc_count(conditions) 190 | self.mc.set(ck, ret, ONE_HOUR) 191 | return ret 192 | else: 193 | return c 194 | -------------------------------------------------------------------------------- /ORZ/configs.py: -------------------------------------------------------------------------------- 1 | from itertools import combinations, chain 2 | from collections import defaultdict 3 | from operator import attrgetter 4 | from functools import partial 5 | import logging 6 | 7 | 8 | class Forward(object): 9 | def __init__(self, dest_obj_member, attr_name): 10 | self.dest = attr_name 11 | self.dest_obj_member = dest_obj_member 12 | 13 | def __get__(self, obj, objtype): 14 | return getattr(getattr(obj, self.dest_obj_member), 
self.dest) 15 | 16 | 17 | def serialize_kv_alphabetically(di): 18 | def change_bool(value): 19 | return int(value) if type(value) == bool else value 20 | return '|'.join("%s=%s" % (k, change_bool(v)) for k, v in di).replace(" ", "") 21 | 22 | 23 | class ConfigColl(object): 24 | def __init__(self): 25 | self._coll = {} 26 | 27 | def __getitem__(self, key): 28 | try: 29 | return self._coll[key] 30 | except KeyError: 31 | logging.warning("your query key [%s] doesn't match your previous definition" % str(key)) 32 | return None 33 | 34 | def __setitem__(self, key, val): 35 | self._coll[key] = val 36 | 37 | def itervalues(self): 38 | return self._coll.itervalues() 39 | 40 | def __len__(self): 41 | return len(self._coll) 42 | 43 | def keys(self): 44 | return self._coll.keys() 45 | 46 | 47 | class CacheConfigMgr(object): 48 | def __init__(self): 49 | self.gets_by_config_coll = ConfigColl() 50 | self.normal_config_coll = ConfigColl() 51 | self.key_related = defaultdict(list) 52 | 53 | def add_to(self, config_coll, config): 54 | for key in config.keys: 55 | self.key_related[key].append(config) 56 | 57 | config_coll[config.as_key()] = config 58 | 59 | def items(self): 60 | for member in [self.gets_by_config_coll, self.normal_config_coll]: 61 | for i in member.itervalues(): 62 | yield i 63 | 64 | def generate_basic_configs(self, prefix, raw_key_field_names, orders=tuple()): 65 | key_field_names = list(raw_key_field_names) 66 | key_field_names.remove('id') 67 | comb = list(chain(*[combinations(key_field_names, i) for i in range(1, len(key_field_names)+1)])) 68 | cfgs = [] 69 | 70 | for i in comb+[('id',),]: 71 | c = Config(prefix, i) 72 | self.add_to(self.normal_config_coll, c) 73 | cfgs.append(c) 74 | 75 | for c in cfgs: 76 | for e in orders: 77 | self.add_to(self.gets_by_config_coll, GetsByConfig(c, e)) 78 | 79 | 80 | def lookup_normal(self, raw_keywords): 81 | keywords = tuple(sorted(raw_keywords)) 82 | return self.normal_config_coll[keywords] 83 | 84 | def 
lookup_related(self, field): 85 | return self.key_related[field] 86 | 87 | def lookup_gets_by(self, fields, order_by_fields=('-id',)): 88 | order_bys = ('order_by:%s' % ('|'.join(sorted(order_by_fields))), ) 89 | keywords = tuple(sorted(tuple(fields) + order_bys)) 90 | return self.gets_by_config_coll[keywords] 91 | 92 | 93 | class Config(object): 94 | def __init__(self, prefix, keys): 95 | self.keys = keys 96 | self.prefix = prefix 97 | 98 | def as_key(self): 99 | return tuple(sorted(self.keys)) 100 | 101 | def to_string(self, data): 102 | if type(data) == dict: 103 | func = data.get 104 | else: 105 | func = lambda x: getattr(data, x) 106 | 107 | _t_str = serialize_kv_alphabetically((f, func(f)) for f in sorted(self.keys)) 108 | return self.prefix + ":" + _t_str 109 | 110 | 111 | class GetsByConfig(object): 112 | def __init__(self, config, order): 113 | self.config = config 114 | # self.keys = tuple(config.keys) + order 115 | self.keys = tuple(config.keys) + tuple(i.strip("-") for i in order) 116 | self.order = 'order_by:' + ('|'.join(sorted(order)).replace(" ", "")) 117 | 118 | def as_key(self): 119 | return tuple(sorted(self.config.as_key() + (self.order, ))) 120 | 121 | def to_string(self, data): 122 | return self.config.to_string(data) + "|" + self.order 123 | -------------------------------------------------------------------------------- /ORZ/decorators.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from os.path import dirname, join 3 | import sys 4 | sys.path.append(join(dirname(__file__), "..")) 5 | 6 | from .deprecated import cached_wrapper 7 | 8 | 9 | def _deco(func): 10 | def __(table_name, *a, **kw): 11 | def _(cls): 12 | return func(cls, table_name, *a, **kw) 13 | return _ 14 | return __ 15 | 16 | orz_decorate = _deco(cached_wrapper) 17 | 18 | if __name__ == '__main__': 19 | pass 20 | # @cached_orm_decorate('complete_video') 21 | # class A(object): 22 | # subject_id = OrzField() 23 | 
# ep_num = OrzField() 24 | # default_src = OrzField() 25 | 26 | # a = A.create(subject_id=10, ep_num=1, default_src=1) 27 | 28 | -------------------------------------------------------------------------------- /ORZ/deprecated.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | from .cache_mgr import CachedOrmManager 3 | from .mixed_ins import * 4 | from .base_mgr import OrzField, OrzPrimaryField 5 | from .klass_init import _split_dictonary, _initialize_primary_field, _collect_fields, _collect_order_combs 6 | 7 | def method_combine(func, reserved_args=tuple(), alias=None): 8 | alias = alias if alias is not None else func.func_name 9 | def _combine(cls_or_self, **kw): 10 | # 参数的问题没有想清楚,所以下面有些BadSmell 11 | reserved_kw, exclude_kw = _split_dictonary(kw, lambda k, _: k in reserved_args) 12 | def call_after(belonged): 13 | if hasattr(belonged, "after_"+alias): 14 | after_func = getattr(belonged, "after_"+alias) 15 | after_func(**exclude_kw) 16 | 17 | if hasattr(cls_or_self, "before_"+alias): 18 | before_func = getattr(cls_or_self, "before_"+alias) 19 | before_func(**kw) 20 | 21 | ret = func(cls_or_self, **reserved_kw) 22 | if alias == 'create': 23 | call_after(ret) 24 | else: 25 | call_after(cls_or_self) 26 | return ret 27 | return _combine 28 | 29 | 30 | def cached_wrapper(cls, table_name, sqlstore=None, mc=None, cache_ver='', id2str=True): 31 | primary_field = _initialize_primary_field(cls) 32 | db_fields, raw_db_fields = zip(*_collect_fields(cls, id2str)) 33 | order_combs = _collect_order_combs(cls) 34 | 35 | 36 | cls.objects = CachedOrmManager(table_name, 37 | cls, 38 | raw_db_fields, 39 | sqlstore=sqlstore, 40 | mc=mc, 41 | cache_ver=cache_ver, 42 | order_combs=order_combs) 43 | 44 | 45 | cls.save = method_combine(save) 46 | cls.create = classmethod(method_combine(create, db_fields)) 47 | cls.delete = method_combine(delete) 48 | cls.delete_transactionally = method_combine(delete_transactionally, 
alias='delete') 49 | cls.create_transactionally = classmethod(method_combine(create_transactionally, db_fields, alias='create')) 50 | cls.save_transactionally = method_combine(save_transactionally, alias='save') 51 | cls.__org_init__ = cls.__init__ 52 | cls.__init__ = init 53 | cls.__setstate__ = setstate 54 | cls.__getstate__ = getstate 55 | cls.db_fields = db_fields 56 | cls.gets_by = cls.objects.gets_by 57 | cls.count_by = cls.objects.count_by 58 | cls.get_by = cls.objects.get 59 | cls.exist = classmethod(exist) 60 | 61 | return cls 62 | 63 | -------------------------------------------------------------------------------- /ORZ/environ.py: -------------------------------------------------------------------------------- 1 | orz_mc = None 2 | orz_sqlstore = None 3 | 4 | def setup(sqlstore, mc): 5 | global orz_mc, orz_sqlstore 6 | 7 | orz_mc = mc 8 | orz_sqlstore = sqlstore 9 | -------------------------------------------------------------------------------- /ORZ/exports.py: -------------------------------------------------------------------------------- 1 | from .decorators import orz_decorate 2 | from .klass_init import OrzBase, OrzData4Mixin 3 | from .environ import setup 4 | from .base_mgr import OrzField, orz_get_multi, OrzForceRollBack, start_transaction, OrzPrimaryField 5 | 6 | -------------------------------------------------------------------------------- /ORZ/klass_init.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | from functools import wraps 3 | 4 | from .cache_mgr import CachedOrmManager 5 | from .base_mgr import OrzField, OrzPrimaryField 6 | import warnings 7 | 8 | def _split_dictonary(di, predicate): 9 | include_kw, exclude_kw = {}, {} 10 | for k, v in di.iteritems(): 11 | if predicate(k, v): 12 | include_kw[k] = v 13 | else: 14 | exclude_kw[k] = v 15 | return include_kw, exclude_kw 16 | 17 | 18 | def _initialize_primary_field(cls): 19 | primary_fields = [(i, v) for i, v in 
cls.__dict__.iteritems() if isinstance(v, OrzPrimaryField)] 20 | if len(primary_fields) > 1: 21 | raise ValueError("one primary_field only") 22 | 23 | if len(primary_fields) == 0: 24 | v = OrzPrimaryField() 25 | v.field_name = "id" 26 | setattr(cls, 'id', v) 27 | return v 28 | else: 29 | field_name, field = primary_fields[0] 30 | field.field_name = field_name 31 | return field 32 | 33 | 34 | def _collect_fields(cls, id2str): 35 | def merge_dict(di_from, di_to): 36 | m = di_from.copy() 37 | m.update(di_to) 38 | return m 39 | 40 | # use dicts of classes by reversed mro order instead of dir(cls) to bypass invoking descriptor 41 | for i, v in reduce(merge_dict, [o.__dict__ for o in reversed(cls.__mro__)]).iteritems(): 42 | if isinstance(v, OrzField): 43 | v.field_name = i 44 | if id2str and (i=='id' or i.endswith("_id")): 45 | v.output_filter = lambda x: None if x is None else str(x) 46 | yield (i, v) 47 | 48 | 49 | def _collect_order_combs(cls): 50 | if hasattr(cls, "OrzMeta"): 51 | declarations = tuple() 52 | 53 | if hasattr(cls.OrzMeta, 'extra_orders'): 54 | declarations = getattr(cls.OrzMeta, "extra_orders") 55 | warnings.warn("extra_orders is deprecated; use order_combs instead.") 56 | 57 | if hasattr(cls.OrzMeta, 'order_combs'): 58 | if declarations: 59 | warnings.warn("order_combs will override extra_orders. 
use order_combs only") 60 | declarations = getattr(cls.OrzMeta, "order_combs") 61 | 62 | order_combs = tuple(((i, ) if type(i) is str else i) for i in declarations) 63 | else: 64 | order_combs = tuple() 65 | return order_combs 66 | 67 | 68 | class OrzMeta(type): 69 | def __init__(cls, cls_name, bases, di): 70 | if cls.__orz_table__ is not None: 71 | from .environ import orz_mc, orz_sqlstore 72 | 73 | table_name = cls.__orz_table__ 74 | cache_ver = getattr(cls.OrzMeta, 'cache_ver', '') 75 | id2str = getattr(cls.OrzMeta, 'id2str', False) 76 | primary_field = _initialize_primary_field(cls) 77 | db_fields, raw_db_fields = zip(*_collect_fields(cls, id2str)) 78 | order_combs = _collect_order_combs(cls) 79 | cls.db_fields = db_fields 80 | cls.objects = CachedOrmManager(table_name, 81 | cls, 82 | raw_db_fields, 83 | sqlstore=orz_sqlstore, 84 | mc=orz_mc, 85 | cache_ver=cache_ver, 86 | order_combs=order_combs) 87 | 88 | class OrzBase(object): 89 | 90 | objects = None 91 | 92 | __metaclass__ = OrzMeta 93 | 94 | __orz_table__ = None 95 | 96 | __orz_cache_ver__ = "" 97 | 98 | __new_orz__ = True 99 | 100 | __transaction__ = False 101 | 102 | class OrzMeta: 103 | cache_ver = "" 104 | 105 | def _refresh_db_fields(self, kw): 106 | self.dirty_fields = set() 107 | for i in self.db_fields: 108 | val = kw.pop(i) 109 | setattr(self, i, val) 110 | self._initted = True 111 | 112 | def __init__(self, to_create=False, *a, **kw): 113 | self.to_create = to_create 114 | if not to_create: 115 | self._initted = False 116 | self._refresh_db_fields(kw) 117 | self._initted = True 118 | self._detached = False 119 | else: 120 | self._initted = False 121 | for k, v in kw.iteritems(): 122 | setattr(self, k, v) 123 | self._initted = False 124 | self._detached = True 125 | 126 | def _do_create(self, **kw): 127 | reserved_kw, exclude_kw = _split_dictonary(kw, lambda k, _: k in self.db_fields) 128 | self._detached = True 129 | self.before_create(**exclude_kw) 130 | 131 | data = 
self.objects.create_record(reserved_kw, self.__transaction__) 132 | self._detached = False 133 | self._refresh_db_fields(data) 134 | 135 | self.after_create(**exclude_kw) 136 | 137 | def __detached_proof(func): 138 | @wraps(func) 139 | def __(self, *a, **kw): 140 | if self._detached: 141 | raise AttributeError("The %s can't be called when the instance is detached, namely not created or just deleted" % func.func_name) 142 | return func(self, *a, **kw) 143 | return __ 144 | 145 | @classmethod 146 | def create(cls, **kw): 147 | ins = cls(to_create=True, detached=False, **kw) 148 | ins._do_create(**kw) 149 | return ins 150 | 151 | @__detached_proof 152 | def save(self): 153 | self.before_save() 154 | ret = self.objects.save(self, self.__transaction__) 155 | self.after_save() 156 | return ret 157 | 158 | @__detached_proof 159 | def delete(self): 160 | self.before_delete() 161 | 162 | ret = self.objects.delete(self, self.__transaction__) 163 | 164 | self._detached = True 165 | self.after_delete() 166 | return ret 167 | 168 | def __getstate__(self): 169 | ret = {'dict': self.__dict__.copy(), 'db_fields': {}} 170 | 171 | for i in self.db_fields: 172 | ret['db_fields'][i] = getattr(self, i) 173 | 174 | return ret 175 | 176 | def __setstate__(self, state): 177 | self.__dict__.update(state['dict']) 178 | self._initted = False 179 | for i in self.db_fields: 180 | setattr(self, i, state['db_fields'][i]) 181 | self._initted = True 182 | 183 | 184 | @classmethod 185 | def gets_by(cls, *a, **kw): 186 | return cls.objects.gets_by(*a, **kw) 187 | 188 | @classmethod 189 | def get_by(cls, *a, **kw): 190 | return cls.objects.get(*a, **kw) 191 | 192 | @classmethod 193 | def count_by(cls, *a, **kw): 194 | return cls.objects.count_by(*a, **kw) 195 | 196 | @classmethod 197 | def exist(cls, *a, **kw): 198 | return cls.objects.count_by(*a, **kw) > 0 199 | 200 | def after_create(self, **kw): 201 | pass 202 | 203 | def before_create(self, **kw): 204 | pass 205 | 206 | def before_save(self): 
207 | pass 208 | 209 | def after_save(self): 210 | pass 211 | 212 | def before_delete(self): 213 | pass 214 | 215 | def after_delete(self): 216 | pass 217 | 218 | 219 | class OrzMixinMeta(OrzMeta): 220 | def __init__(cls, name, bases, di): 221 | super(OrzMixinMeta, cls).__init__(name, bases, di) 222 | if 'OrzData4Mixin' in [i.__name__ for i in bases] and 'OrzBase' not in [i.__name__ for i in bases]: 223 | cls.__orz_mixin_data__ = cls 224 | 225 | 226 | class OrzData4Mixin(object): 227 | 228 | __orz_table__ = None 229 | 230 | __metaclass__ = OrzMixinMeta 231 | 232 | def __reduce__(self): 233 | return (unpickle_for_orz_mixin, (self.__orz_mixin_data__, self.__orz_table__, self.__getstate__())) 234 | 235 | @classmethod 236 | def as_data_cls(cls, table_name): 237 | di = dict(cls.__dict__) 238 | di['__orz_table__'] = table_name 239 | cls = type('mixin_4_data_'+cls.__name__, (cls, OrzBase), di) 240 | return cls 241 | 242 | def unpickle_for_orz_mixin(mixin_cls, table_name, state): 243 | di = dict(mixin_cls.__dict__) 244 | di['__orz_table__'] = table_name 245 | cls = type('mixin_4_data_'+mixin_cls.__name__, (mixin_cls, OrzBase), di) 246 | ins = cls(**state['db_fields']) 247 | ins.__dict__.update(state['dict']) 248 | return ins 249 | 250 | -------------------------------------------------------------------------------- /ORZ/mixed_ins.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf8 -*- 2 | 3 | def create(cls, **kwargs): 4 | ins = cls.objects.create(kwargs) 5 | return ins 6 | 7 | 8 | def delete(self): 9 | return self.objects.delete(self) 10 | 11 | 12 | def save(self): 13 | return self.objects.save(self) 14 | 15 | 16 | def create_transactionally(cls, **kwargs): 17 | ins = cls.objects.create(kwargs, True) 18 | return ins 19 | 20 | 21 | def delete_transactionally(self): 22 | return self.objects.delete(self, True) 23 | 24 | 25 | def save_transactionally(self): 26 | return self.objects.save(self, True) 27 | 28 | 29 | def 
getstate(self): 30 | ret = {'dict': self.__dict__.copy(), 'db_fields': {}} 31 | 32 | for i in self.db_fields: 33 | ret['db_fields'][i] = getattr(self, i) 34 | 35 | return ret 36 | 37 | def exist(cls, **conditions): 38 | ret = cls.count_by(**conditions) 39 | return ret > 0 40 | 41 | 42 | def setstate(self, state): 43 | self.__dict__.update(state['dict']) 44 | self._initted = False 45 | for i in self.db_fields: 46 | setattr(self, i, state['db_fields'][i]) 47 | self._initted = True 48 | 49 | 50 | def init(self, to_create=True, *a, **kw): 51 | self.to_create = to_create 52 | self._initted = False 53 | self.dirty_fields = set() 54 | for i in self.db_fields: 55 | val = kw.pop(i) 56 | setattr(self, i, val) 57 | self._initted = True 58 | -------------------------------------------------------------------------------- /ORZ/sql_executor.py: -------------------------------------------------------------------------------- 1 | from operator import itemgetter 2 | from itertools import chain 3 | from copy import deepcopy 4 | 5 | 6 | class SqlExecutor(object): 7 | def __init__(self, table_name, primary_field_name, db_fields, sqlstore): 8 | self.sqlstore = sqlstore 9 | self.conditions = {} 10 | self.primary_field_name = primary_field_name 11 | self.db_fields = db_fields 12 | self.table_name = table_name 13 | #self.dirty_fields = set() 14 | 15 | def create(self, field_data, transational=False): 16 | set_sql, v = self._sql_statement('SET', 17 | [("%s=%%s" % kv[0], kv[1]) for kv in field_data.items()], 18 | ',') 19 | statement = "insert into %s %s" % (self.table_name, set_sql) 20 | _id = self.sqlstore.execute(statement, v) 21 | if not transational: 22 | self.sqlstore.commit() 23 | return _id 24 | 25 | def update_row(self, primary_field, field_data, transational=False): 26 | set_sql, v = self._sql_statement('SET', 27 | [("%s=%%s" % kv[0], kv[1]) for kv in field_data.items()], 28 | ',') 29 | statement = "update %s %s where %s = %s" % (self.table_name, set_sql, self.primary_field_name, 
primary_field) 30 | ret = self.sqlstore.execute(statement, tuple(v)) 31 | if not transational: 32 | self.sqlstore.commit() 33 | return ret 34 | 35 | def delete(self, primary_field, transational=False): 36 | statement = 'delete from %s where %s = %%s' % (self.table_name, self.primary_field_name) 37 | ret = self.sqlstore.execute(statement, primary_field) 38 | self.sqlstore.commit() 39 | if not transational: 40 | self.sqlstore.commit() 41 | return ret 42 | 43 | def _transform_order_keys(self, keys): 44 | def __(key): 45 | if key.startswith('-'): 46 | key = (key[1:], 'desc') 47 | else: 48 | key = (key, 'asc') 49 | return key 50 | return [__(key) for key in keys] 51 | 52 | def get_ids(self, conditions, start_limit, order_keys=tuple()): 53 | limit_sql, v3 = self._sql_statement('limit', zip(["%s", "%s"], start_limit)) 54 | if not order_keys: 55 | order_sql = '' 56 | else: 57 | order_sql = 'order by %s' % ",".join("%s %s" % (k, v) for k, v in self._transform_order_keys(order_keys)) 58 | where_sql, v1 = self._sql_statement('where', 59 | [("%s=%%s" % k, v) for k, v in conditions.iteritems()], 60 | ' and ') 61 | 62 | statement = "select %s from %s %s %s %s" % \ 63 | (self.primary_field_name, self.table_name, where_sql, order_sql, limit_sql) 64 | ids = map(itemgetter(0), self.sqlstore.execute(statement, tuple(chain(v1, v3)))) 65 | return ids 66 | 67 | def get(self, primary_field): 68 | #BIG TODO for non exist obj 69 | fields = [self.primary_field_name] + list(self.db_fields) 70 | statement = "select %s from %s where %s=%%s" % (",".join(fields), self.table_name, self.primary_field_name, ) 71 | ret = self.sqlstore.execute(statement, primary_field) 72 | if not ret: 73 | return None 74 | return dict(zip(fields+['to_create'], list(ret[0])+[False])) 75 | 76 | def _sql_statement(self, keyword, fields, concat_by=','): 77 | if len(fields) == 0: 78 | return "", () 79 | prefix, values = zip(*fields) 80 | return keyword + " " + (concat_by.join(prefix)), values 81 | 82 | def 
calc_count(self, conditions): 83 | sql, vals = self._sql_statement('where', 84 | [("%s=%%s" % k, v) for k, v in conditions.iteritems()], 85 | ' and ') 86 | statement = "select count(1) from %s %s" % (self.table_name, sql) 87 | ret = self.sqlstore.execute(statement, vals) 88 | return ret[0][0] 89 | 90 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | The Missing Data Manager In Shire 2 | =========================================== 3 | -------------------------------------------------------------------------------- /code_config.yaml: -------------------------------------------------------------------------------- 1 | docs: 2 | doc: 3 | dir: docs 4 | builder: html 5 | name: Docs 6 | -------------------------------------------------------------------------------- /docs/API/index.rst: -------------------------------------------------------------------------------- 1 | Full API 2 | ============ 3 | 4 | Namespace 5 | ^^^^^^^^^^^ 6 | 7 | :: 8 | 9 | from ORZ.exports import x 10 | 11 | Transaction 12 | ^^^^^^^^^^^ 13 | 14 | ORZ封装了一个便捷的事务处理 15 | 16 | :: 17 | 18 | with start_transaction(orz_instance, OrzModelCLS) as transactional_orz_instance, transactional_orz_cls: 19 | transactional_orz_instance.save() 20 | transactional_orz_cls.create(**kwargs) 21 | 22 | - 在这个transaction的context里,抛出``IntegrityError``或者``OrzForceRollBack``都会Rollback 23 | - 另外,由于现在实现上的问题, 其实orz\_instance, 24 | OrzModelCLS也是发生修改了的。。。 25 | 26 | 27 | OrzField 28 | '''''''' 29 | 30 | OrzField(as\_key=OrzField.KeyType.NOT\_INDEX, default=handler, output\_filter=lambda x:x) 31 | 32 | 33 | as\_key 34 | 有4种默认值 35 | 36 | - KeyType.NOT\_INDEX: 默认值。顾名思义 37 | - KeyType.DESC: 该字段可能会作为查询条件, 38 | 同时以*只以subject\_id降序*可能会作为查询集合*单独*排序条件 39 | - KeyType.ASC: 该字段可能会作为查询条件, 40 | 同时以*只以subject\_id升序*可能会作为查询集合*单独*排序条件 41 | - KeyType.AD: 该字段可能会作为查询条件, 42 | 同时以*subject\_id升序或者降序*都可能会作为查询集合*单独*排序条件 43 | - 
KeyType.ONLY\_INDEX: 该字段仅仅可能会作为查询条件 44 | 45 | default 46 | 值或者一个没有参数函数 47 | 48 | output\_filter 49 | 把字段从MC或者DB里取出以后,再转一次。。 50 | 51 | OrzMeta 52 | ''''''' 53 | 54 | :: 55 | 56 | class OrzMeta: 57 | order_combs = (,) 58 | id2str = False 59 | cache_ver = '' 60 | 61 | order\_combs 62 | 排序条件的组合 63 | 64 | id2str 65 | Shire里都有个不成文的Convention,都会把id或者\_id结尾的字段转换为字符串 66 | 67 | cache\_ver 68 | 缓存的版本 69 | 70 | -------------------------------------------------------------------------------- /docs/Changelog/index.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ^^^^^^^^^ 3 | 4 | ORZ 0.3.3@2014-02-07 5 | '''''''''''''''''''' 6 | 7 | [BugFix]: 8 | 9 | 0. 修正文档中例子代码 10 | 11 | [Refactor]: 12 | 13 | 0. 清理v0.1时候的遗留代码,合并OrmItem 到 OrzField 14 | 15 | 16 | ORZ 0.3.0@2014-01-15 17 | '''''''''''''''''''' 18 | 19 | 0. 使用OrzBase 以及 Nested Class OrzMeta 来代替orz\_decorate; 20 | orz\_decorate只是废弃了,但在这个版本中仍然可以照常使用。具体区别可以见PR2637 21 | 1. 增加了事务 22 | 2. 优化了性能 23 | 3. 优化了默认值的处理(具体见文档) 24 | 4. 
整了一个文档的雏形 25 | 26 | 27 | -------------------------------------------------------------------------------- /docs/Introduction/index.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ------------ 3 | 4 | ORZ 不是ORM,只是封装了基础的数据库CRUD及其Cache管理,并尽可能提供基于Pythonic的方式进行扩展的数据层 5 | 6 | In a nut shell 7 | ~~~~~~~~~~~~~~ 8 | 9 | 假如数据库声明是这样的 10 | 11 | :: 12 | 13 | CREATE TABLE `dummy_yummy` ( 14 | `id` int(11) unsigned NOT NULL AUTO_INCREMENT, 15 | `uid` int(11), 16 | `username` varchar(20) NOT NULL, 17 | `subject_id` int(11) NOT NULL, 18 | `user_id` int(11) NOT NULL, 19 | `subtype` varchar(50), 20 | PRIMARY KEY (`id`), 21 | KEY `user_id` (`user_id`), 22 | KEY `subject_id_subtype_idx` (`subject_id`, `subtype`), 23 | KEY `uid` (`uid`) 24 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT=''; 25 | 26 | 之前我们会这样写 27 | 28 | :: 29 | 30 | class DummyYummy(object): 31 | OBJ_CACHE = 'dummyyummy-obj:%s' 32 | USER_INDEX_CACHE = 'dummyyummy-user:%s' 33 | SUBJECT_SUBTYPE_INDEX_CACHE = 'dummyyummy-subject:%s|subtype:%s' 34 | UID_INDEX_CACHE = 'dummyyummy-uid:%s' 35 | 36 | def __init__(self, id, uid, username, subject_id, user_id, subtype): 37 | self.id = id 38 | self.uid = uid 39 | self.username = username 40 | self.subject_id = subject_id 41 | self.user_id = user_id 42 | self.subtype = subtype 43 | 44 | 45 | @classmethod 46 | def create(cls, uid, username, subject_id, user_id, subtype): 47 | id = store.exexute('insert into dummy_yummy (`uid`, `username`, `subject_id`, `user_id`, `subtype`)' 48 | 'values (%s, %s, %s, %s, %s)', (uid, username, subject_id, user_id, subtype)) 49 | store.commit() 50 | 51 | mc.delete(cls.SUBJECT_SUBTYPE_INDEX_CACHE % (subject_id, subtype)) 52 | mc.delete(cls.UID_INDEX_CACHE % uid) 53 | mc.delete(cls.USER_INDEX_CACHE % user_id) 54 | 55 | ins = cls(id, uid, username, subject_id, user_id, subtype) 56 | mc.set(cls.OBJ_CACHE % ins.id, ind) 57 | return ins 58 | 59 | def update_subject_id(self, subject_id): 60 | 
store.execute('update dummy_yummy set subject_id=%s where id=%s', (subject_id, self.id)) 61 | store.commit() 62 | 63 | mc.delete(self.SUBJECT_SUBTYPE_INDEX_CACHE % (self.subject_id, self.subtype)) 64 | 65 | self.subject_id = subject_id 66 | 67 | mc.delete(self.SUBJECT_SUBTYPE_INDEX_CACHE % (subject_id, self.subtype)) 68 | mc.delete(self.OBJ_CACHE % self.id) 69 | 70 | @classmethod 71 | def gets(cls, ids): 72 | mc.get_multi(ids) 73 | return [cls.get(id=id) for id in ids] 74 | 75 | 76 | @cache(USER_INDEX_CACHE % "{user_id}") 77 | def _gets_by_user_id(cls, user_id): 78 | return [id for id, in store.execute("select id from dummy_yummy where user_id = %s", user_id)] 79 | 80 | def gets_by_user_id(cls, user_id): 81 | return cls.gets(cls._gets_by_user_id(user)) 82 | 83 | @cache(SUBJECT_SUBTYPE_INDEX_CACHE % ("{subject_id}", "{subtype}")) 84 | def _gets_by_subject_id_and_subtype(cls, subject_id, subtype): 85 | return [id for id, in store.execute("select id from dummy_yummy where subject_id = %s and subtype = %s", (subject_id, subtype))] 86 | 87 | def gets_by_subject_id_and_subtype(cls, subject_id, subtype): 88 | return cls.gets(cls._gets_by_subject_id_and_subtype(cls, subject_id, subtype)) 89 | 90 | @cache(UID_INDEX_CACHE % "{uid}") 91 | def _gets_by_uid(cls, uid): 92 | return [id for id, in store.execute("select id from dummy_yummy where uid = %s", uid)] 93 | 94 | def gets_by_uid(cls, uid): 95 | return cls.gets(cls._gets_by_uid(cls, uid)) 96 | 97 | 然后我们这样用 98 | 99 | :: 100 | 101 | Dummy.gets_by_subject_id_and_subtype(subject_id, subtype): 102 | Dummy.gets_by_uid(uid) 103 | Dummy.gets_by_user_id(user_id) 104 | 105 | 106 | dummy_obj = Dummy.create(uid=uid, subject_id=subject_id, subtype=subtype, user_id=user_id, username=username) 107 | 108 | dummy_obj.update_subject_id(subject_id=subject_id) 109 | 110 | 使用ORZ以后我们这样写 111 | 112 | :: 113 | 114 | from ORZ.exports import OrzBase 115 | 116 | setup(your_store, your_mc) 117 | 118 | class DummyYummy(OrzBase): 119 | __orz_table__ = 
'dummy_yummy' 120 | uid = OrzField(as_key=OrzField.KeyType.ONLY_INDEX) 121 | username = OrzField() 122 | subject_id = OrzField(as_key=OrzField.KeyType.ONLY_INDEX) 123 | user_id = OrzField(as_key=OrzField.KeyType.ONLY_INDEX) 124 | subtype = OrzField(as_key=OrzField.KeyType.ONLY_INDEX) 125 | 126 | 于是我们这样用 127 | 128 | :: 129 | 130 | Dummy.gets_by(uid=uid) 131 | Dummy.gets_by(subject_id=subject_id, subtype=subtype) 132 | Dummy.gets_by(user_id=user_id) 133 | 134 | dummy_obj = Dummy.create(uid=uid, subject_id=subject_id, subtype=subtype, user_id=user_id, username=username) 135 | 136 | dummy_obj.subject_id = subject_id 137 | dummy_obj.save() 138 | 139 | 140 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/orz.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/orz.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/orz" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/orz" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 
112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 
163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/Usage/index.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ----- 3 | 4 | set up 5 | ~~~~~~ 6 | 7 | ORZ 8 | 在使用Orz关联表之前,需要确保先使用基于douban.corelib.sqlstore和douban.corelib.mc派生的store和mc对ORZ进行配置。 9 | 10 | :: 11 | 12 | from ORZ import setup 13 | setup(your_store, your_mc) 14 | 15 | 与数据表的关联 16 | ~~~~~~~~~~~~~~ 17 | 18 | 假定我们有简单表声明如下: 19 | 20 | :: 21 | 22 | CREATE TABLE `question` ( 23 | `id` int(11) unsigned NOT NULL AUTO_INCREMENT, 24 | `subject_id` int(11) unsigned NOT NULL, 25 | `title` varchar(1), 26 | `author_id` int(11) unsigned NOT NULL 27 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT=''; 28 | 29 | 根据这个表我们这样定义Model: 30 | 31 | :: 32 | 33 | # don't forget to setup 34 | from ORZ import OrzBase, Orz 35 | class Question(OrzBase): 36 | __orz_table__ = "question" 37 | title = OrzField() 38 | subject_id = OrzField() 39 | author_id = OrzField() 40 | 41 | 从上述定义里可知 42 | 43 | - 每个类都是OrzBase的子类 44 | - ``__orz_table__`` 指定对应的table 45 | - 每个 OrzField 46 | 实例化对象对应数据表里字段,并且该类成员名字与数据表里保持一致 47 | 48 | 字段声明,索引,索引查询缓存 49 | ^^^^^^^^^^^^^^^^^^^^^^^^^^ 50 | 51 | 基本用法 52 | '''''''' 53 | 54 | 上面的数据表, 我们增加一个查询的需求--查询用户,在条目下的问题,于是表增加 55 | 56 | :: 57 | 58 | KEY `author_subject_idx` (`author_id`, `subject_id`) 59 | 60 | Model字段定义改为 61 | 62 | ``python subject_id = 
OrzField(as_key=OrzField.KeyType.IndexOnly) author_id = OrzField(as_key=OrzField.KeyType.IndexOnly)`` 63 | 这样不仅按照需求实现的查询的Cache就能被管理了,而且是``subject_id``和``author_id``两者,或者之一为维度查询的Cache都能被管理了。也就说,当需求变更索引改为 64 | 65 | ``sql KEY `author_idx` (`author_id`), KEY `subject_idx` (`subject_id`)`` 66 | 或者 67 | 68 | ``sql KEY `author_idx` (`author_id`)`` 69 | 的时候,Model声明的字段也是满足需求的对应查询的缓存管理 70 | 71 | 接口使用 72 | ^^^^^^^^ 73 | 74 | 按照上述Question Model的定义,接着介绍CRUD 75 | 76 | Create 77 | '''''' 78 | 79 | :: 80 | 81 | ModelClass.create(**field_and_its_val) 82 | 83 | - 根据字段定义,以 keyword argruments 的形式传入。 84 | - 在唯一性约束的表里,参数和表内数据有重复,会直接抛出 85 | ``MySQLdb.IntegrityError`` 86 | 87 | 例子: 88 | 89 | :: 90 | 91 | question = Question.create(subject_id=subject.id, author_id=user.id, title="hdadfasdf") 92 | 93 | Read 94 | '''' 95 | 96 | :: 97 | 98 | ModelCLS.gets_by(order_by=order_keys, start=0, limit=EPISION, **condition) 99 | 100 | - condition 即 SQL where key = val 101 | - order\_by 默认为按 id 降序排列, 详细见[排序][] 102 | - 默认取全部数据,也可以分页。 103 | 104 | 例子: 105 | 106 | :: 107 | 108 | questions = Question.gets_by(subject_id=subject.id) 109 | questions = Question.gets_by(subject_id=subject.id, author_id=user.id) 110 | questions = Question.gets_by(author_id=user.id) 111 | 112 | 亦可根据ID,查询单个对象 113 | 114 | :: 115 | 116 | question = Question.get_by(id=1) 117 | 118 | - 现阶段 ``get_by`` 只接受 ``id`` 的查询 119 | 120 | Update 121 | '''''' 122 | 123 | :: 124 | 125 | model_instance.save() 126 | 127 | 例子: 128 | 129 | :: 130 | 131 | question = Question.gets_by(subject_id=1)[0] 132 | question.title = "hello world" 133 | question.subject_id = 2 134 | ret = quesiton.save() 135 | 136 | - ret 为数据库操作的返回值 137 | 138 | - 上面这个例子中 Question 所有和 subject\_id=1 以及 subject\_id=2 139 | 的关联mc cache都会被清除 140 | 141 | Delete 142 | '''''' 143 | 144 | :: 145 | 146 | ret = model_instance.delete() 147 | 148 | - ret 为数据库操作的返回值 149 | 150 | 例子: 151 | 152 | :: 153 | 154 | question = Question.gets_by(subject_id=1)[0] 155 | question.delete() 156 | 157 | 组合与扩展 
158 | ^^^^^^^^^^ 159 | 160 | 一般情况下你并不需要去 Override Create/Save/Delete 来做扩展, 161 | ORZ提供了一个简单的Aspect-like的方式来更好的拆分逻辑。这些扩展的方法都是Instancemethod。 162 | 163 | Creation Aspect 164 | ''''''''''''''' 165 | 166 | :: 167 | 168 | def before_create(self, **extra_args): 169 | pass 170 | 171 | def after_create(self, **extra_args): 172 | pass 173 | 174 | - ``extra_args`` 是调用 ``create`` 时传入的非 ``OrzField`` 定义的参数。 175 | - ``before_create`` 里,直接通过 ``self.attr`` 即可访问 ``OrzField`` 176 | 定义的参数。 177 | 178 | Save Aspect 179 | ''''''''''' 180 | 181 | :: 182 | 183 | def before_save(self): 184 | pass 185 | 186 | def after_save(self): 187 | pass 188 | 189 | Deletion Aspect 190 | ''''''''''''''' 191 | 192 | :: 193 | 194 | def before_delete(self): 195 | pass 196 | 197 | def after_delete(self): 198 | pass 199 | 200 | Warning 201 | ''''''' 202 | 203 | ``before_create`` 和 ``after_delete`` 的时候, instance 都是处于 204 | ``detached_state``--无法再调用``delete``, ``save`` 。 205 | 206 | 排序 207 | ^^^^ 208 | 209 | 简单的排序 210 | '''''''''' 211 | 212 | 由于在定义数据库的时候,用于排序的Field都会被定义为索引(的一部分), 213 | 同时我发现我们在实际操作中一般,都只有一个方向的排序,所以基于这个语义 214 | ``OrzField`` 提供了一个便捷的方式来处理定义以便管理缓存 215 | 216 | ``python subject_id = OrzField(as_key=OrzField.KeyType.X)`` 217 | - X = DESC subject\_id 可能会作为查询条件, 同时以*只以subject\_id降序*可能会作为查询集合*单独*排序条件 218 | - X = ASC subject\_id 可能会作为查询条件, 同时以*只以subject\_id升序*可能会作为查询集合*单独*排序条件 219 | - X = AD subject\_id 可能会作为查询条件, 同时以*subject\_id升序或者降序*都可能会作为查询集合*单独*排序条件 220 | 221 | 例子: 222 | ``python # 假如上述例子里X为 DESC questions = Question.gets_by(author_id=user.id, order_by='-subject_id')`` 223 | 224 | 复杂的排序 225 | '''''''''' 226 | 227 | 假如需要的查询是类似 ``sql`` 里 ``order by key1 desc, key2``,或者定义的 228 | ``OrzField`` 并作为查询条件,那么可以在OrzModel里定义一个 Nested Class - 229 | ``OrzMeta`` 230 | 231 | :: 232 | 233 | class OrzMeta: 234 | order_combs = (('-key1', 'key2'), ('key3',), ...) 
235 | 236 | - order\_combs 是 237 | ``tuple``,``tuple``里每个元素都是一个排序组合的``tuple``。排序的字段用字符串表示,降序在字段前加"-"作为前缀。 238 | 239 | 缓存管理和SQL的一些细节 240 | ^^^^^^^^^^^^^^^^^^^^^^^ 241 | 242 | OrzField 以及 order\_combs 243 | 看起来像是Database的映射,其实是以一种显式声明的方式,作为缓存管理的一部分。 244 | 245 | - OrzField.KeyType.X 都是为缓存服务的。这个从本质来讲和SQL 246 | Index没有任何关系,但是从代码最佳实践的角度来说,频繁被访问的数据更加值得缓存,频繁被访问的查询都应该有索引支撑。(这也是ORZ安身立命的支点 247 | XD)。 248 | - KeyType.AD\|DESC\|ASC 和 order\_combs 249 | 同样和数据库没有直接关系,只是基于这个Model排序策略的缓存管理声明 250 | - Orz缓存管理由于和SQL没有关系,所以你可以用ORZ缓存一个没有优化的查询。。。与之相对的,如果你使用ORZ的查询没有声明的缓存管理,那么这个查询是不会进入缓存的,但仍能获得查询集以及一个Warning。。 251 | 252 | 默认值处理 253 | ^^^^^^^^^^ 254 | 255 | SQL的默认值 256 | ''''''''''' 257 | 258 | 先看定义和Model定义 259 | 260 | :: 261 | 262 | `title` varchar(10) DEFAULT 'hello' 263 | `created_at` timestamp NOT NULL DEFAULT '2010-10-10 10:10:10', 264 | `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP 265 | 266 | :: 267 | 268 | class Question(OrzModel): 269 | __orz_table__="question" 270 | updated_at = OrzField() 271 | created_at = OrzField() 272 | title = OrzField() 273 | 274 | 那么 ``q = Question.create()`` 里 ``q.updated_at`` 等于当前时间, 275 | ``q.created_at`` 等于 276 | ``datetime(year=2010, month=10, day=10, hour=10, minute=10, sencond=10)`` 277 | , ``q.title`` 等于 ``'hello'`` 278 | 279 | 更进一步 280 | 281 | ``python q = Question.create() q.title = "world" q.save()`` 那么 282 | ``q.updated_at`` 变更为调用save那个时间点 283 | 284 | 换句话说,Orz会把SQL产生的结果更新到instance上。 285 | 286 | Orz's defaults on creation 287 | '''''''''''''''''''''''''' 288 | 289 | 除了SQL自身的默认值, ORZ也提供了创建在创建对象时候的默认值 290 | 291 | :: 292 | 293 | OrzField(default=default_val) 294 | 295 | - default\_val 可以是一个值或者一个不接受任何参数的函数。 296 | 297 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # orz documentation build configuration file, created by 4 | # sphinx-quickstart 
on Fri Jan 17 18:07:14 2014. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | #sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [] 32 | 33 | # Add any paths that contain templates here, relative to this directory. 34 | templates_path = ['_templates'] 35 | 36 | # The suffix of source filenames. 37 | source_suffix = '.rst' 38 | 39 | # The encoding of source files. 40 | #source_encoding = 'utf-8-sig' 41 | 42 | # The master toctree document. 43 | master_doc = 'index' 44 | 45 | # General information about the project. 46 | project = u'orz' 47 | copyright = u'2014, phay' 48 | 49 | # The version info for the project you're documenting, acts as replacement for 50 | # |version| and |release|, also used in various other places throughout the 51 | # built documents. 52 | # 53 | # The short X.Y version. 54 | version = '0.3' 55 | # The full version, including alpha/beta/rc tags. 56 | release = '0.3' 57 | 58 | # The language for content autogenerated by Sphinx. Refer to documentation 59 | # for a list of supported languages. 
60 | #language = None 61 | 62 | # There are two options for replacing |today|: either, you set today to some 63 | # non-false value, then it is used: 64 | #today = '' 65 | # Else, today_fmt is used as the format for a strftime call. 66 | #today_fmt = '%B %d, %Y' 67 | 68 | # List of patterns, relative to source directory, that match files and 69 | # directories to ignore when looking for source files. 70 | exclude_patterns = ['_build'] 71 | 72 | # The reST default role (used for this markup: `text`) to use for all 73 | # documents. 74 | #default_role = None 75 | 76 | # If true, '()' will be appended to :func: etc. cross-reference text. 77 | #add_function_parentheses = True 78 | 79 | # If true, the current module name will be prepended to all description 80 | # unit titles (such as .. function::). 81 | #add_module_names = True 82 | 83 | # If true, sectionauthor and moduleauthor directives will be shown in the 84 | # output. They are ignored by default. 85 | #show_authors = False 86 | 87 | # The name of the Pygments (syntax highlighting) style to use. 88 | pygments_style = 'sphinx' 89 | 90 | # A list of ignored prefixes for module index sorting. 91 | #modindex_common_prefix = [] 92 | 93 | # If true, keep warnings as "system message" paragraphs in the built documents. 94 | #keep_warnings = False 95 | 96 | 97 | # -- Options for HTML output ---------------------------------------------- 98 | 99 | # The theme to use for HTML and HTML Help pages. See the documentation for 100 | # a list of builtin themes. 101 | html_theme = 'default' 102 | 103 | # Theme options are theme-specific and customize the look and feel of a theme 104 | # further. For a list of options available for each theme, see the 105 | # documentation. 106 | #html_theme_options = {} 107 | 108 | # Add any paths that contain custom themes here, relative to this directory. 109 | #html_theme_path = [] 110 | 111 | # The name for this set of Sphinx documents. If None, it defaults to 112 | # " v documentation". 
113 | #html_title = None 114 | 115 | # A shorter title for the navigation bar. Default is the same as html_title. 116 | #html_short_title = None 117 | 118 | # The name of an image file (relative to this directory) to place at the top 119 | # of the sidebar. 120 | #html_logo = None 121 | 122 | # The name of an image file (within the static path) to use as favicon of the 123 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 124 | # pixels large. 125 | #html_favicon = None 126 | 127 | # Add any paths that contain custom static files (such as style sheets) here, 128 | # relative to this directory. They are copied after the builtin static files, 129 | # so a file named "default.css" will overwrite the builtin "default.css". 130 | html_static_path = ['_static'] 131 | 132 | # Add any extra paths that contain custom files (such as robots.txt or 133 | # .htaccess) here, relative to this directory. These files are copied 134 | # directly to the root of the documentation. 135 | #html_extra_path = [] 136 | 137 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 138 | # using the given strftime format. 139 | #html_last_updated_fmt = '%b %d, %Y' 140 | 141 | # If true, SmartyPants will be used to convert quotes and dashes to 142 | # typographically correct entities. 143 | #html_use_smartypants = True 144 | 145 | # Custom sidebar templates, maps document names to template names. 146 | #html_sidebars = {} 147 | 148 | # Additional templates that should be rendered to pages, maps page names to 149 | # template names. 150 | #html_additional_pages = {} 151 | 152 | # If false, no module index is generated. 153 | #html_domain_indices = True 154 | 155 | # If false, no index is generated. 156 | #html_use_index = True 157 | 158 | # If true, the index is split into individual pages for each letter. 159 | #html_split_index = False 160 | 161 | # If true, links to the reST sources are added to the pages. 
162 | #html_show_sourcelink = True 163 | 164 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 165 | #html_show_sphinx = True 166 | 167 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 168 | #html_show_copyright = True 169 | 170 | # If true, an OpenSearch description file will be output, and all pages will 171 | # contain a tag referring to it. The value of this option must be the 172 | # base URL from which the finished HTML is served. 173 | #html_use_opensearch = '' 174 | 175 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 176 | #html_file_suffix = None 177 | 178 | # Output file base name for HTML help builder. 179 | htmlhelp_basename = 'orzdoc' 180 | 181 | 182 | # -- Options for LaTeX output --------------------------------------------- 183 | 184 | latex_elements = { 185 | # The paper size ('letterpaper' or 'a4paper'). 186 | #'papersize': 'letterpaper', 187 | 188 | # The font size ('10pt', '11pt' or '12pt'). 189 | #'pointsize': '10pt', 190 | 191 | # Additional stuff for the LaTeX preamble. 192 | #'preamble': '', 193 | } 194 | 195 | # Grouping the document tree into LaTeX files. List of tuples 196 | # (source start file, target name, title, 197 | # author, documentclass [howto, manual, or own class]). 198 | latex_documents = [ 199 | ('index', 'orz.tex', u'orz Documentation', 200 | u'phay', 'manual'), 201 | ] 202 | 203 | # The name of an image file (relative to this directory) to place at the top of 204 | # the title page. 205 | #latex_logo = None 206 | 207 | # For "manual" documents, if this is true, then toplevel headings are parts, 208 | # not chapters. 209 | #latex_use_parts = False 210 | 211 | # If true, show page references after internal links. 212 | #latex_show_pagerefs = False 213 | 214 | # If true, show URL addresses after external links. 215 | #latex_show_urls = False 216 | 217 | # Documents to append as an appendix to all manuals. 
218 | #latex_appendices = [] 219 | 220 | # If false, no module index is generated. 221 | #latex_domain_indices = True 222 | 223 | 224 | # -- Options for manual page output --------------------------------------- 225 | 226 | # One entry per manual page. List of tuples 227 | # (source start file, name, description, authors, manual section). 228 | man_pages = [ 229 | ('index', 'orz', u'orz Documentation', 230 | [u'phay'], 1) 231 | ] 232 | 233 | # If true, show URL addresses after external links. 234 | #man_show_urls = False 235 | 236 | 237 | # -- Options for Texinfo output ------------------------------------------- 238 | 239 | # Grouping the document tree into Texinfo files. List of tuples 240 | # (source start file, target name, title, author, 241 | # dir menu entry, description, category) 242 | texinfo_documents = [ 243 | ('index', 'orz', u'orz Documentation', 244 | u'phay', 'orz', 'One line description of project.', 245 | 'Miscellaneous'), 246 | ] 247 | 248 | # Documents to append as an appendix to all manuals. 249 | #texinfo_appendices = [] 250 | 251 | # If false, no module index is generated. 252 | #texinfo_domain_indices = True 253 | 254 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 255 | #texinfo_show_urls = 'footnote' 256 | 257 | # If true, do not generate a @detailmenu in the "Top" node's menu. 258 | #texinfo_no_detailmenu = False 259 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to ORZ's Documentation! 2 | ===================================== 3 | 4 | Contents: 5 | 6 | .. 
toctree:: 7 | :maxdepth: 1 8 | 9 | Introduction/index 10 | Usage/index 11 | API/index 12 | Changelog/index 13 | 14 | 15 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | # package meta info 5 | NAME = "ORZ" 6 | VERSION = "0.33" 7 | DESCRIPTION = "missing data manager in shire, even in middle earth" 8 | AUTHOR = "fuyuquan" 9 | AUTHOR_EMAIL = "fuyuquan@douban.com" 10 | URL = "http://code.dapps.douban.com/ORZ" 11 | KEYWORDS = "" 12 | CLASSIFIERS = [] 13 | 14 | # package contents 15 | MODULES = [] 16 | PACKAGES = find_packages(exclude=['tests.*', 'tests', 'examples.*', 'examples']) 17 | 18 | # dependencies 19 | here = os.path.abspath(os.path.dirname(__file__)) 20 | 21 | def read_long_description(filename): 22 | path = os.path.join(here, filename) 23 | if os.path.exists(path): 24 | return open(path).read() 25 | return "" 26 | 27 | setup( 28 | name=NAME, 29 | version=VERSION, 30 | description=DESCRIPTION, 31 | long_description=read_long_description('README.rst'), 32 | author=AUTHOR, 33 | author_email=AUTHOR_EMAIL, 34 | url=URL, 35 | keywords=KEYWORDS, 36 | classifiers=CLASSIFIERS, 37 | py_modules=MODULES, 38 | packages=PACKAGES, 39 | zip_safe=False, 40 | test_suite='tests', 41 | # install_requires=[ 42 | # 'DoubanCoreLib', 43 | # ], 44 | # dependency_links = [ 45 | # 'git+http://code.dapps.douban.com/douban-corelib.git@b266b854eeb2365280bcc6aa6e4eaef6cd935486#egg=DoubanCoreLib-1.0', 46 | # ], 47 | ) 48 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/douban/douban-orz/208b48177dc847ef2afdadfda302868c39797bdf/tests/__init__.py -------------------------------------------------------------------------------- 
/tests/env_init.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "stub")) 4 | 5 | from douban.mc import mc_from_config 6 | from douban.mc.wrapper import LocalCached 7 | from douban.sqlstore import store_from_config 8 | 9 | DATABASE = { 10 | 'farms': { 11 | "luz_farm": { 12 | "master": "localhost:test_vagrant9010:eye:sauron", 13 | "tables": ["*"], 14 | }, 15 | }, 16 | 'options': { 17 | 'show_warnings': True, 18 | } 19 | } 20 | 21 | MEMCACHED = { 22 | 'servers' : [], 23 | 'disabled' : False, 24 | } 25 | 26 | mc = LocalCached(mc_from_config(MEMCACHED)) 27 | store = store_from_config(DATABASE) 28 | mc.clear() 29 | 30 | initted = False 31 | -------------------------------------------------------------------------------- /tests/stub/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/douban/douban-orz/208b48177dc847ef2afdadfda302868c39797bdf/tests/stub/__init__.py -------------------------------------------------------------------------------- /tests/stub/cmemcached.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from cPickle import dumps, loads 5 | 6 | __all__ = ['DIST_MODULA', 'DIST_CONSISTENT', 'DIST_CONSISTENT_KETAMA', 7 | 'Client'] 8 | 9 | DIST_MODULA = 0 10 | DIST_CONSISTENT = 1 11 | DIST_CONSISTENT_KETAMA = 2 12 | 13 | (BEHAVIOR_NO_BLOCK, BEHAVIOR_TCP_NODELAY, BEHAVIOR_HASH, BEHAVIOR_KETAMA, 14 | BEHAVIOR_SOCKET_SEND_SIZE, BEHAVIOR_SOCKET_RECV_SIZE, 15 | BEHAVIOR_CACHE_LOOKUPS, BEHAVIOR_SUPPORT_CAS, BEHAVIOR_POLL_TIMEOUT, 16 | BEHAVIOR_DISTRIBUTION, BEHAVIOR_BUFFER_REQUESTS, BEHAVIOR_USER_DATA, 17 | BEHAVIOR_SORT_HOSTS, BEHAVIOR_VERIFY_KEY, BEHAVIOR_CONNECT_TIMEOUT, 18 | BEHAVIOR_RETRY_TIMEOUT, BEHAVIOR_KETAMA_WEIGHTED, 19 | BEHAVIOR_KETAMA_HASH, 
# server-spec -> backing dict; lets equal configs share one dataset and
# lets the module-level clear() wipe everything between test cases.
pool = {}

def prepare(val, comp_threshold):
    # Serialize the way the real client would; the flag is a dummy 1.
    return dumps(val), 1

def restore(val, flag):
    # Inverse of prepare().
    return loads(val)

def clear():
    # Empty every dataset handed out so far (used between test cases).
    for v in pool.values():
        v.clear()

class Client(object):
    """Dict-backed stand-in for the cmemcached client used by the tests.

    Clients constructed with the same ``servers`` list share one backing
    dict, mimicking real clients that talk to the same cluster.
    """

    def __init__(self, servers=[], dist=DIST_CONSISTENT_KETAMA, debug=0,
                 log=None, log_threshold=100000, *a, **kw):
        # BUG FIX: key the pool by the joined server spec so a later
        # Client built for the same servers reuses the same dataset.
        # The original registered the dataset under id(self.dataset),
        # so the server-spec lookup could never hit and datasets were
        # never actually shared.
        self.dataset = pool.setdefault(';'.join(servers), {})

    def clear(self):
        self.dataset.clear()

    def set(self, key, val, time=0, compress=False):
        self.dataset[key] = prepare(val, 0)
        return 1

    def set_multi(self, values, time=0, compress=True):
        for k, v in values.items():
            self.set(k, v, time, compress)
        return 1

    def add(self, key, val, time=0):
        # Only stores when the key is absent, like memcached's "add".
        if key not in self.dataset:
            return self.set(key, val, time)
        return 0

    def replace(self, key, val, time=0):
        # Only stores when the key already exists.
        if key in self.dataset:
            return self.set(key, val, time)
        return 0

    def cas(self, key, val, time=0, cas=0):
        # The tests are single-threaded, so cas degenerates to set.
        return self.set(key, val, time)

    def delete(self, key, time=0):
        if key in self.dataset:
            del self.dataset[key]
            return 1
        else:
            return 0

    def delete_multi(self, keys):
        for key in keys:
            self.delete(key)
        return 1

    def get(self, key):
        if key == 'is_stub?':
            # Marker so calling code can detect it talks to the stub.
            return 'yes'

        dummy = self.dataset.get(key)
        if dummy is None:
            return None
        value, flag = dummy
        return restore(value, flag)

    def get_multi(self, keys):
        d = {}
        for k in keys:
            v = self.get(k)
            if v is not None:
                d[k] = v
        return d

    def get_list(self, keys):
        return [self.get(key) for key in keys]

    def lrange(self, key, start, end):
        # The test environment uses mc as a stub for redis; this method
        # exists only for redis-style list reads.
        ret = self.get(key) or []
        return ret[start:end]

    def lpush(self, key, item):
        # Redis-style list push; see lrange above.
        ret = self.get(key) or []
        ret.insert(0, item)
        self.set(key, ret)
        return 1

    def append(self, key, val, time=0):
        if key in self.dataset:
            self.set(key, str(self.get(key)) + val)
            return 1
        else:
            return 0

    def append_multi(self, keys, val, time=0):
        for k in keys:
            self.append(k, val, time)
        return 1

    def prepend(self, key, val, time=0):
        if key in self.dataset:
            self.set(key, val + str(self.get(key)))
            return 1
        else:
            return 0

    def prepend_multi(self, keys, val, time=0):
        for k in keys:
            self.prepend(k, val, time)
        return 1

    def incr(self, key, val=1):
        # Returns 0 on success, 16 when the key is missing -- presumably
        # mirroring a libmemcached error code; TODO confirm.
        if key in self.dataset:
            self.set(key, self.get(key) + val)
            return 0
        else:
            return 16

    def decr(self, key, val=1):
        # Clamped at zero, like real memcached decr.
        if key in self.dataset:
            self.set(key, max(self.get(key) - val, 0))
            return 0
        else:
            return 16

    def touch(self, key, exptime):
        # The stub keeps no TTLs: touching/expiring just drops the key.
        return self.delete(key)

    def expire(self, key):
        return self.delete(key)

    def set_behavior(self, flag, behavior):
        pass

    def get_last_error(self):
        return 0
class TestField(TestCase): 7 | def test_as_order_key(self): 8 | NAME = 'hello' 9 | assertions = { 10 | OrzPrimaryField.OrderType.DESC: ("-%s" % NAME, ), 11 | OrzPrimaryField.OrderType.ASC: ("%s" % NAME, ), 12 | OrzPrimaryField.OrderType.AD: ("-%s" % NAME, ), 13 | } 14 | 15 | for order_t, asst in assertions.iteritems(): 16 | foo = OrzPrimaryField(order_t) 17 | foo.field_name = NAME 18 | self.assertEqual(foo.as_default_order_key(), asst) 19 | 20 | def test_basic_primary_field(self): 21 | class ORZFieldTest(object): 22 | foo = OrzField() 23 | bar = OrzField() 24 | self.klass = ORZFieldTest 25 | 26 | field = _initialize_primary_field(self.klass) 27 | self.assertTrue(hasattr(self.klass, 'id')) 28 | self.assertEqual(field.field_name, 'id') 29 | 30 | def test_customized_primary_field(self): 31 | class ORZFieldTest(object): 32 | foo_bar = OrzPrimaryField() 33 | foo = OrzField() 34 | bar = OrzField() 35 | 36 | field = _initialize_primary_field(ORZFieldTest) 37 | self.assertTrue(hasattr(ORZFieldTest, 'foo_bar')) 38 | self.assertEqual(field.field_name, 'foo_bar') 39 | 40 | 41 | @patch("ORZ.klass_init.warnings.warn") 42 | class TestOrderDecl(TestCase): 43 | def test_functionality(self, mock_warn): 44 | class ORZTest(object): 45 | class OrzMeta: 46 | order_combs = (("hello", ), ('mm','-yy'), "zzz") 47 | combs = _collect_order_combs(ORZTest) 48 | self.assertEqual(combs, (('hello',), ('mm', '-yy'), ("zzz", ))) 49 | 50 | def test_deprecated(self, mock_warn): 51 | class ORZTest(object): 52 | class OrzMeta: 53 | extra_orders = (("hello", ),) 54 | 55 | combs = _collect_order_combs(ORZTest) 56 | mock_warn.assert_called_with("extra_orders is deprecated; use order_combs instead.") 57 | 58 | def test_override(self, mock_warn): 59 | class ORZTest(object): 60 | class OrzMeta: 61 | order_combs = (("hello", "yy"),) 62 | extra_orders = (("hello", ),) 63 | 64 | combs = _collect_order_combs(ORZTest) 65 | mock_warn.assert_called_with("order_combs will override extra_orders. 
use order_combs only") 66 | -------------------------------------------------------------------------------- /tests/test_new_orz.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, skip 2 | 3 | from .env_init import store, mc, initted 4 | from ORZ import OrzBase, OrzField, orz_get_multi, OrzPrimaryField, setup as setup_orz 5 | 6 | setup_orz(store, mc) 7 | 8 | class TestNewOrz(TestCase): 9 | def setUp(self): 10 | cursor = store.get_cursor() 11 | cursor.execute('''DROP TABLE IF EXISTS `test_orz`''') 12 | cursor.delete_without_where = True 13 | cursor.execute(''' 14 | CREATE TABLE `test_orz` 15 | ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, 16 | `subject_id` int(10) unsigned NOT NULL, 17 | `ep_num` int(10) unsigned NOT NULL, 18 | `flag` smallint(1) unsigned NOT NULL, 19 | `content` varchar(100) NOT NULL, 20 | `extra` int(10) unsigned NOT NULL, 21 | `null_field` int(10) unsigned, 22 | `output_field` int(10) unsigned, 23 | `callable_field` int(10) unsigned, 24 | PRIMARY KEY (`id`), 25 | KEY `idx_subject` (`subject_id`, `ep_num`, `id`)) ENGINE=MEMORY AUTO_INCREMENT=1''') 26 | 27 | def tearDown(self): 28 | store.get_cursor().execute('truncate table `test_orz`') 29 | mc.clear() 30 | 31 | def test_create(self): 32 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh', extra_args=10) 33 | self.assertTrue(z.after_created) 34 | self.assertEqual(z.callable_field, 10) 35 | self.assertTrue(z.extra_args, 10) 36 | self.assertEqual(z.null_field, None) 37 | (id, subject_id, ep_num, null_field), = store.execute('''select id, subject_id, ep_num, null_field from test_orz where subject_id=10''') 38 | self.assertEqual((z.id, z.subject_id, ep_num, None), (str(id), str(subject_id), ep_num, null_field)) 39 | 40 | z = Dummy.create(id=5, subject_id=10, ep_num=10, content='hheheheh1') 41 | self.assertEqual(z.id, '5') 42 | 43 | self.assertRaises(ValueError, Dummy.create, **dict(id=5, subject_id=-1, ep_num=10, 
content='hheheheh1')) 44 | 45 | def test_exist(self): 46 | self.assertFalse(Dummy.exist(subject_id=10, ep_num=10)) 47 | 48 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh', extra_args=10) 49 | self.assertTrue(Dummy.exist(subject_id=10, ep_num=10)) 50 | self.assertTrue(Dummy.exist(id=z.id)) 51 | 52 | def test_gets_by(self): 53 | li = [Dummy.create(subject_id=10, ep_num=ep_num, content='hheheheh', output_field=10+ep_num) for ep_num in range(10)] 54 | z = li[-1] 55 | m = Dummy.gets_by(subject_id=10) 56 | self.assertEqual((z.id, z.subject_id), (m[0].id, m[0].subject_id)) 57 | self.assertEqual([int(i.id) for i in m], range(10, 0, -1)) 58 | self.assertEqual(li[-1].output_field, str(19)) 59 | 60 | self.assertEqual(Dummy.gets_by(output_field=19)[0].id, li[-1].id) 61 | 62 | def test_save(self): 63 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh') 64 | m = Dummy.gets_by(subject_id=10)[0] 65 | self.assertRaises(AttributeError, lambda :m.after_saved) 66 | m.subject_id = 2 67 | m.save() 68 | self.assertTrue(m.after_saved) 69 | 70 | old_fetched = Dummy.gets_by(subject_id=10) 71 | new_fetched = Dummy.gets_by(subject_id=2) 72 | self.assertEqual(len(old_fetched), 0) 73 | self.assertEqual(new_fetched[0].subject_id, '2') 74 | 75 | def test_count_by(self): 76 | for i in range(10): 77 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 78 | 79 | self.assertEqual(Dummy.count_by(subject_id=10), 10) 80 | m = Dummy.gets_by(subject_id=10)[0] 81 | m.subject_id = 2 82 | m.save() 83 | self.assertEqual(Dummy.count_by(subject_id=10), 9) 84 | 85 | def test_delete(self): 86 | m = Dummy.create(subject_id=10, ep_num=1, content='hheheheh') 87 | self.assertEqual(mc.get('before_delete_test'), None) 88 | m.delete() 89 | self.assertTrue(mc.get('before_delete_test')) 90 | 91 | ret = store.execute('''select id, subject_id, ep_num from test_orz where subject_id=10''') 92 | self.assertEqual(len(ret), 0) 93 | 94 | 95 | def test_default(self): 96 | a = 
Dummy.create(subject_id=10, ep_num=1) 97 | self.assertEqual(a.content, 'hello world') 98 | 99 | self.assertEqual(Dummy.gets_by(subject_id=10)[0].content, 'hello world') 100 | 101 | a = Dummy.create(subject_id=10) 102 | self.assertEqual(a.ep_num, 0) 103 | 104 | self.assertEqual(len(Dummy.gets_by(subject_id=10)), 2) 105 | 106 | def test_pager(self): 107 | for i in range(100): 108 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 109 | 110 | data = Dummy.gets_by(subject_id=10, limit=50) 111 | self.assertEqual(len(data), 50) 112 | for i, o in enumerate(data): 113 | self.assertEqual(int(o.id), 100 - i) 114 | 115 | start = 20 116 | for i, o in enumerate(Dummy.gets_by(subject_id=10, start=start, limit=50), start): 117 | self.assertEqual(int(o.id), 100 - i) 118 | 119 | def test_boolean(self): 120 | for i in range(100): 121 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 122 | 123 | qrset = Dummy.gets_by(flag=False) 124 | self.assertEqual(len(qrset), 100) 125 | 126 | qrset[0].flag = True 127 | qrset[0].save() 128 | self.assertEqual(len(Dummy.gets_by(flag=False)), 99) 129 | 130 | def test_order_by(self): 131 | for i in range(100, 90, -1): 132 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 133 | 134 | self.assertEqual(range(91, 101), [i.ep_num for i in Dummy.gets_by(subject_id=10, order_by='ep_num')]) 135 | 136 | def test_get_multiple_ids(self): 137 | ids = [] 138 | for i in range(100, 90, -1): 139 | ids.append(Dummy.create(subject_id=10, ep_num=i, content='hheheheh').id) 140 | 141 | self.assertEqual([i.id for i in Dummy.objects.get_multiple_ids(ids)], ids) 142 | 143 | def test_orz_get_multi(self): 144 | for i in range(100, 90, -1): 145 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 146 | 147 | self.assertTrue(100 not in [i.ep_num for i in Dummy.get_non_targeted(100)]) 148 | 149 | def test_extra_order_by(self): 150 | # for i in range(100, 90, -1): 151 | # Dummy.create(subject_id=10, ep_num=i, extra=10+i, content='hheheheh') 152 
| 153 | # self.assertEqual(list(reversed(range(101, 111))), [i.extra for i in Dummy.gets_by(subject_id=10, order_by='-extra')]) 154 | 155 | # Dummy.create(subject_id=10, ep_num=130, extra=130, content='hheheheh') 156 | # self.assertEqual([130]+list(reversed(range(101, 111))), [i.extra for i in Dummy.gets_by(subject_id=10, order_by='-extra')]) 157 | 158 | for f in range(2): 159 | for j in range(23, 20, -1): 160 | Dummy.create(subject_id=9, ep_num=j, extra=f, content='hheheheh') 161 | output = [(i.ep_num, i.extra) for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num'))] 162 | self.assertEqual(output, [(i, j) for j in (1, 0) for i in range(21, 24)]) 163 | 164 | for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num')): 165 | if i.extra == 0: 166 | i.extra = 1 167 | i.save() 168 | 169 | output = [(i.ep_num, i.extra) for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num'))] 170 | self.assertEqual(output, [(i, 1) for i in [21, 21, 22, 22, 23, 23]]) 171 | 172 | 173 | def test_creation_should_delete_pk(self): 174 | ID = str(1000) 175 | empty = Dummy.get_by(ID) 176 | self.assertEqual(empty, None) 177 | 178 | Dummy.create(id=ID, subject_id=10, ep_num=1, content='hheheheh') 179 | 180 | new = Dummy.get_by(ID) 181 | self.assertEqual(new.id, ID) 182 | 183 | def test_flush_get(self): 184 | raw_num = 10 185 | i = Dummy.create(subject_id=10, ep_num=1, content='hheheheh') 186 | Dummy.get_by(id=i.id) 187 | 188 | store.execute('update test_orz set ep_num=%s where id=%s', (raw_num, i.id)) 189 | store.commit() 190 | 191 | self.assertEqual(Dummy.get_by(id=i.id).ep_num, i.ep_num) 192 | flushed_obj = Dummy.get_by(id=i.id, force_flush=True) 193 | self.assertNotEqual(flushed_obj.ep_num, i.ep_num) 194 | self.assertEqual(flushed_obj.ep_num, raw_num) 195 | 196 | crset = [] 197 | for i in range(10): 198 | crset.append(Dummy.create(subject_id=11, ep_num=11, content='hheheheh'+str(i))) 199 | 200 | Dummy.gets_by(subject_id=11, ep_num=11) 201 | 202 | 
self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11)), 10) 203 | 204 | store.execute('update test_orz set ep_num=%s where id=%s', (raw_num, crset[0].id)) 205 | store.commit() 206 | 207 | self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11)), 10) 208 | self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11, force_flush=True)), 9) 209 | 210 | def test_detached(self): 211 | a = Dummy.create(subject_id=10001, ep_num=11, content='hheheheh') 212 | self.assertRaisesRegexp(AttributeError, "The save can't be called when the instance is detached",a.delete) 213 | 214 | a = Dummy.create(subject_id=10002, ep_num=11, content='hheheheh') 215 | self.assertRaisesRegexp(AttributeError, "The delete can't be called when the instance is detached",a.delete) 216 | 217 | self.assertRaisesRegexp(AttributeError, 218 | "The save can't be called when the instance is detached", 219 | Dummy.create, subject_id=-2, ep_num=11, content='hheheheh') 220 | self.assertRaisesRegexp(AttributeError, 221 | "The delete can't be called when the instance is detached", 222 | Dummy.create, subject_id=-3, ep_num=11, content='hheheheh') 223 | 224 | 225 | class DummyBase(OrzBase): 226 | subject_id = OrzField() 227 | updated_at = OrzField() 228 | ep_num = OrzField() 229 | 230 | class DummyCS(DummyBase): 231 | __orz_table__ = 'test_orz' 232 | 233 | class TestFlushGetAfterCreationAndSaving(TestCase): 234 | 235 | def setUp(self): 236 | cursor = store.get_cursor() 237 | cursor.execute('''DROP TABLE IF EXISTS `test_orz`''') 238 | cursor.delete_without_where = True 239 | cursor.execute(''' 240 | CREATE TABLE `test_orz` 241 | ( 242 | `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, 243 | `ep_num` int(10) unsigned NOT NULL, 244 | `subject_id` int(10) unsigned DEFAULT 1001 NOT NULL, 245 | `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, 246 | PRIMARY KEY (`id`) 247 | ) ENGINE=MEMORY AUTO_INCREMENT=1''') 248 | 249 | def tearDown(self): 250 | mc.clear() 251 | 252 | def 
test_create(self): 253 | from datetime import datetime, timedelta 254 | a = DummyCS.create(ep_num=1) 255 | now = datetime.now() 256 | self.assertEqual(a.subject_id, 1001) 257 | self.assertTrue(now - a.updated_at < timedelta(seconds=1)) 258 | 259 | def test_save(self): 260 | import time 261 | a = DummyCS.create(ep_num=1) 262 | old_updated_at = a.updated_at 263 | 264 | time.sleep(1) 265 | a.ep_num = 10 266 | a.save() 267 | 268 | self.assertTrue(a.updated_at > old_updated_at) 269 | self.assertEqual(DummyCS.get_by(id=a.id).updated_at, a.updated_at) 270 | 271 | # class TestCustomizedPrimaryKey(TestCase): 272 | # @orz_decorate('test_orz', sqlstore=store, mc=mc) 273 | # class Dummy(object): 274 | # subject_id = OrzField(as_key=OrzField.KeyType.ASC) 275 | # ep_num = OrzPrimaryField() 276 | 277 | # def setUp(self): 278 | 279 | # def tearDown(self): 280 | # mc.clear() 281 | 282 | class Dummy(OrzBase): 283 | __orz_table__ = 'test_orz' 284 | 285 | subject_id = OrzField(as_key=OrzField.KeyType.ASC) 286 | ep_num = OrzField(as_key=OrzField.KeyType.ASC, default=0) 287 | content = OrzField(default='hello world') 288 | flag = OrzField(as_key=OrzField.KeyType.ASC, default=False) 289 | extra = OrzField(default=1) 290 | null_field = OrzField(default=None) 291 | output_field = OrzField(output_filter=str, default=10) 292 | callable_field = OrzField(default=lambda :10) 293 | 294 | class OrzMeta: 295 | id2str = True 296 | order_combs = (('-extra', 'ep_num'), ) 297 | 298 | def before_create(self, extra_args=None): 299 | if self.subject_id == '-1': 300 | raise ValueError 301 | 302 | if self.subject_id == '-2': 303 | self.save() 304 | 305 | if self.subject_id == '-3': 306 | self.delete() 307 | 308 | def after_create(self, extra_args=None): 309 | self.after_created = True 310 | self.extra_args = extra_args 311 | 312 | def after_save(self): 313 | self.after_saved = True 314 | 315 | def before_delete(self): 316 | mc.set('before_delete_test', True) 317 | 318 | def after_delete(self): 319 | if 
self.subject_id=='10001': 320 | self.save() 321 | 322 | if self.subject_id=='10002': 323 | self.delete() 324 | 325 | @classmethod 326 | @orz_get_multi 327 | def get_non_targeted(cls, non_targeted_ep_num): 328 | return [i for i, in store.execute('select id from test_orz where ep_num!=%s', non_targeted_ep_num)] 329 | 330 | 331 | -------------------------------------------------------------------------------- /tests/test_new_orz_transcation.py: -------------------------------------------------------------------------------- 1 | MEMCACHED = { 2 | 'servers' : [], 3 | 'disabled' : False, 4 | } 5 | 6 | # from corelib.config import MEMCACHED 7 | import sys 8 | import os 9 | sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "stub")) 10 | 11 | from douban.mc import mc_from_config 12 | from douban.mc.wrapper import LocalCached 13 | mc = LocalCached(mc_from_config(MEMCACHED)) 14 | 15 | from douban.sqlstore import store_from_config 16 | from ORZ import OrzBase, OrzField, orz_get_multi, start_transaction, OrzForceRollBack, setup as setup_orz 17 | 18 | DATABASE = { 19 | 'farms': { 20 | "luz_farm": { 21 | "master": "localhost:test_vagrant9010:eye:sauron", 22 | "tables": ["*"], 23 | }, 24 | }, 25 | 'options': { 26 | 'show_warnings': True, 27 | } 28 | } 29 | 30 | from unittest import TestCase 31 | 32 | store = store_from_config(DATABASE) 33 | mc.clear() 34 | 35 | setup_orz(store, mc) 36 | 37 | cursor = store.get_cursor() 38 | cursor.delete_without_where = True 39 | cursor.execute('''DROP TABLE IF EXISTS `test_t`''') 40 | cursor.execute(''' 41 | CREATE TABLE `test_t` 42 | ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, 43 | `subject_id` int(10) unsigned NOT NULL, 44 | PRIMARY KEY (`id`), 45 | UNIQUE KEY `uk_subject` (`subject_id`)) ENGINE=MEMORY AUTO_INCREMENT=1''') 46 | cursor.execute('''DROP TABLE IF EXISTS `test_a`''') 47 | cursor.execute(''' 48 | CREATE TABLE `test_a` 49 | ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, 50 | `ep_num` int(10) unsigned 
NOT NULL, 51 | PRIMARY KEY (`id`), 52 | KEY `ep_num_idx` (`ep_num`) 53 | ) ENGINE=MEMORY AUTO_INCREMENT=1''') 54 | 55 | class TestT(OrzBase): 56 | __orz_table__ = 'test_t' 57 | 58 | subject_id = OrzField(OrzField.KeyType.DESC) 59 | 60 | def after_create(self): 61 | self.after_create = True 62 | 63 | def after_save(self): 64 | self.after_save = True 65 | 66 | class OrzMeta: 67 | id2str = True 68 | 69 | class TestA(OrzBase): 70 | __orz_table__ = 'test_a' 71 | 72 | ep_num = OrzField(OrzField.KeyType.DESC) 73 | 74 | class OrzMeta: 75 | id2str = True 76 | 77 | 78 | class TestTransacation(TestCase): 79 | def tearDown(self): 80 | cursor.execute("truncate table `test_a`") 81 | cursor.execute("truncate table `test_t`") 82 | 83 | def test_basic(self): 84 | TestA.create(ep_num=1) 85 | TestA.gets_by(ep_num=1) 86 | TestA.count_by(ep_num=1) 87 | 88 | m = 0 89 | before = TestA.create 90 | 91 | self.assertEqual(TestT.__transaction__, False) 92 | self.assertEqual(TestA.__transaction__, False) 93 | with start_transaction(TestT, TestA) as (test_t, test_a): 94 | zz = test_t.create(subject_id=1) 95 | m = test_a.create(ep_num=1) 96 | self.assertEqual(TestT.__transaction__, True) 97 | self.assertEqual(TestA.__transaction__, True) 98 | self.assertEqual(TestT.__transaction__, False) 99 | self.assertEqual(TestA.__transaction__, False) 100 | 101 | after = TestA.create 102 | 103 | self.assertEqual(before, after) 104 | self.assertTrue(zz.after_create) 105 | qrset = [str(i) for i, in store.execute('select id from test_a order by id')] 106 | self.assertEqual(qrset[-1], m.id) 107 | self.assertEqual(len(TestA.gets_by(ep_num=1)), 2) 108 | self.assertEqual(TestA.gets_by(ep_num=1)[0].id, m.id) 109 | 110 | def test_rollback(self): 111 | TestT.create(subject_id=1) 112 | with start_transaction(TestT, TestA) as (test_t, test_a): 113 | test_t.create(subject_id=1) 114 | m = test_a.create(ep_num=1) 115 | qrset = [str(i) for i, in store.execute('select id from test_a order by id')] 116 | 
self.assertEqual(len(qrset), 0) 117 | 118 | def test_rollback2(self): 119 | m = TestT.create(subject_id=1) 120 | TestA.create(ep_num=10) 121 | a = TestA.gets_by(ep_num=10)[0] 122 | 123 | def run(t_ins, a_ins): 124 | with start_transaction(t_ins, a_ins) as (test_t_ins, test_a_ins): 125 | ret = test_a_ins.delete() 126 | if ret == 0: 127 | raise OrzForceRollBack 128 | test_t_ins.subject_id = 2 129 | test_t_ins.save() 130 | self.assertEqual(t_ins.__transaction__, True) 131 | self.assertEqual(a_ins.__transaction__, True) 132 | self.assertEqual(t_ins.__transaction__, False) 133 | self.assertEqual(a_ins.__transaction__, False) 134 | store.execute('delete from test_a where id=%s', a.id) 135 | run(m, a) 136 | self.assertEqual(TestT.gets_by(subject_id=1)[0].id, m.id) 137 | 138 | new_a = TestA.create(ep_num=10) 139 | run(m, new_a) 140 | self.assertEqual(TestT.gets_by(subject_id=2)[0].id, m.id) 141 | -------------------------------------------------------------------------------- /tests/test_orz.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, skip 2 | 3 | from .env_init import store, mc 4 | from ORZ import orz_decorate, OrzField, orz_get_multi, OrzPrimaryField 5 | 6 | 7 | @orz_decorate('test_orz', sqlstore=store, mc=mc) 8 | class Dummy(object): 9 | subject_id = OrzField(as_key=OrzField.KeyType.ASC) 10 | ep_num = OrzField(as_key=OrzField.KeyType.ASC, default=0) 11 | content = OrzField(default='hello world') 12 | flag = OrzField(as_key=OrzField.KeyType.ASC, default=False) 13 | extra = OrzField(default=1) 14 | null_field = OrzField(default=None) 15 | output_field = OrzField(output_filter=str, default=10) 16 | callable_field = OrzField(default=lambda :10) 17 | 18 | class OrzMeta: 19 | order_combs = (('-extra', 'ep_num'), ) 20 | 21 | @classmethod 22 | def before_create(cls, **kw): 23 | if kw['subject_id'] == -1: 24 | raise ValueError 25 | 26 | def after_create(self, extra_args=None): 27 | self.after_created = 
True 28 | self.extra_args = extra_args 29 | 30 | def after_save(self): 31 | self.after_saved = True 32 | 33 | def before_delete(self): 34 | mc.set('before_delete_test', True) 35 | 36 | @classmethod 37 | @orz_get_multi 38 | def get_non_targeted(cls, non_targeted_ep_num): 39 | return [i for i, in store.execute('select id from test_orz where ep_num!=%s', non_targeted_ep_num)] 40 | 41 | 42 | class TestOldOrz(TestCase): 43 | def setUp(self): 44 | cursor = store.get_cursor() 45 | cursor.execute('''DROP TABLE IF EXISTS `test_orz`''') 46 | cursor.delete_without_where = True 47 | cursor.execute(''' 48 | CREATE TABLE `test_orz` 49 | ( `id` int(10) unsigned NOT NULL AUTO_INCREMENT, 50 | `subject_id` int(10) unsigned NOT NULL, 51 | `ep_num` int(10) unsigned NOT NULL, 52 | `flag` smallint(1) unsigned NOT NULL, 53 | `content` varchar(100) NOT NULL, 54 | `extra` int(10) unsigned NOT NULL, 55 | `null_field` int(10) unsigned, 56 | `output_field` int(10) unsigned, 57 | `callable_field` int(10) unsigned, 58 | PRIMARY KEY (`id`), 59 | KEY `idx_subject` (`subject_id`, `ep_num`, `id`)) ENGINE=MEMORY AUTO_INCREMENT=1''') 60 | 61 | def tearDown(self): 62 | store.get_cursor().execute('truncate table `test_orz`') 63 | mc.clear() 64 | 65 | def test_create(self): 66 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh', extra_args=10) 67 | self.assertTrue(z.after_created) 68 | self.assertEqual(z.callable_field, 10) 69 | self.assertTrue(z.extra_args, 10) 70 | self.assertEqual(z.null_field, None) 71 | (id, subject_id, ep_num, null_field), = store.execute('''select id, subject_id, ep_num, null_field from test_orz where subject_id=10''') 72 | self.assertEqual((z.id, z.subject_id, ep_num, None), (str(id), str(subject_id), ep_num, null_field)) 73 | 74 | z = Dummy.create(id=5, subject_id=10, ep_num=10, content='hheheheh1') 75 | self.assertEqual(z.id, '5') 76 | 77 | self.assertRaises(ValueError, Dummy.create, **dict(id=5, subject_id=-1, ep_num=10, content='hheheheh1')) 78 | 79 | def 
test_exist(self): 80 | self.assertFalse(Dummy.exist(subject_id=10, ep_num=10)) 81 | 82 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh', extra_args=10) 83 | self.assertTrue(Dummy.exist(subject_id=10, ep_num=10)) 84 | self.assertTrue(Dummy.exist(id=z.id)) 85 | 86 | 87 | def test_gets_by(self): 88 | li = [Dummy.create(subject_id=10, ep_num=ep_num, content='hheheheh', output_field=10) for ep_num in range(10)] 89 | z = li[-1] 90 | m = Dummy.gets_by(subject_id=10) 91 | self.assertEqual((z.id, z.subject_id), (m[0].id, m[0].subject_id)) 92 | self.assertEqual([int(i.id) for i in m], range(10, 0, -1)) 93 | self.assertEqual(li[-1].output_field, str(10)) 94 | 95 | def test_save(self): 96 | z = Dummy.create(subject_id=10, ep_num=10, content='hheheheh') 97 | m = Dummy.gets_by(subject_id=10)[0] 98 | self.assertRaises(AttributeError, lambda :m.after_saved) 99 | m.subject_id = 2 100 | m.save() 101 | self.assertTrue(m.after_saved) 102 | 103 | old_fetched = Dummy.gets_by(subject_id=10) 104 | new_fetched = Dummy.gets_by(subject_id=2) 105 | self.assertEqual(len(old_fetched), 0) 106 | self.assertEqual(new_fetched[0].subject_id, '2') 107 | 108 | def test_count_by(self): 109 | for i in range(10): 110 | Dummy.create(subject_id=10, ep_num=i, content='hheheheh') 111 | 112 | self.assertEqual(Dummy.count_by(subject_id=10), 10) 113 | m = Dummy.gets_by(subject_id=10)[0] 114 | m.subject_id = 2 115 | m.save() 116 | self.assertEqual(Dummy.count_by(subject_id=10), 9) 117 | 118 | def test_delete(self): 119 | m = Dummy.create(subject_id=10, ep_num=1, content='hheheheh') 120 | self.assertEqual(mc.get('before_delete_test'), None) 121 | m.delete() 122 | self.assertTrue(mc.get('before_delete_test')) 123 | 124 | ret = store.execute('''select id, subject_id, ep_num from test_orz where subject_id=10''') 125 | self.assertEqual(len(ret), 0) 126 | 127 | 128 | def test_default(self): 129 | a = Dummy.create(subject_id=10, ep_num=1) 130 | self.assertEqual(a.content, 'hello world') 131 | 132 | 
# NOTE(review): this chunk is a flattened repository dump; the code below is the
# tail of tests/test_new_orz.py, reconstructed with conventional indentation
# (the dump collapsed all lines; whitespace inside triple-quoted SQL strings is
# a best-effort reconstruction -- TODO confirm against the real file).
# The statements up to test_pager() belong to a test method whose `def` line and
# enclosing TestCase subclass lie before this chunk.
        self.assertEqual(Dummy.gets_by(subject_id=10)[0].content, 'hello world')

        # Creating without explicit ep_num/content: ep_num comes back as 0
        # (presumably the OrzField default -- verify against the Dummy model).
        a = Dummy.create(subject_id=10)
        self.assertEqual(a.ep_num, 0)

        # Both rows created above are now visible through the cached query path.
        self.assertEqual(len(Dummy.gets_by(subject_id=10)), 2)

    def test_pager(self):
        """gets_by honours start/limit and yields rows in descending-id order."""
        for i in range(100):
            Dummy.create(subject_id=10, ep_num=i, content='hheheheh')

        # First page: 50 newest rows, ids counting down from 100.
        data = Dummy.gets_by(subject_id=10, limit=50)
        self.assertEqual(len(data), 50)
        for i, o in enumerate(data):
            self.assertEqual(int(o.id), 100 - i)

        # Offset page: same descending sequence, shifted by `start`.
        start = 20
        for i, o in enumerate(Dummy.gets_by(subject_id=10, start=start, limit=50), start):
            self.assertEqual(int(o.id), 100 - i)

    def test_boolean(self):
        """Boolean field values round-trip through query, mutate and save."""
        for i in range(100):
            Dummy.create(subject_id=10, ep_num=i, content='hheheheh')

        qrset = Dummy.gets_by(flag=False)
        self.assertEqual(len(qrset), 100)

        # Flipping one row's flag and saving must invalidate the cached result
        # set for flag=False (drops from 100 to 99).
        qrset[0].flag = True
        qrset[0].save()
        self.assertEqual(len(Dummy.gets_by(flag=False)), 99)

    def test_order_by(self):
        """order_by='ep_num' returns rows ascending regardless of insert order."""
        # Insert in descending ep_num order on purpose.
        for i in range(100, 90, -1):
            Dummy.create(subject_id=10, ep_num=i, content='hheheheh')

        self.assertEqual(range(91, 101), [i.ep_num for i in Dummy.gets_by(subject_id=10, order_by='ep_num')])

    def test_get_multiple_ids(self):
        """objects.get_multiple_ids preserves the order of the ids passed in."""
        ids = []
        for i in range(100, 90, -1):
            ids.append(Dummy.create(subject_id=10, ep_num=i, content='hheheheh').id)

        self.assertEqual([i.id for i in Dummy.objects.get_multiple_ids(ids)], ids)

    def test_orz_get_multi(self):
        """get_non_targeted(100) must exclude ep_num == 100 (none was created)."""
        for i in range(100, 90, -1):
            Dummy.create(subject_id=10, ep_num=i, content='hheheheh')

        self.assertTrue(100 not in [i.ep_num for i in Dummy.get_non_targeted(100)])

    def test_extra_order_by(self):
        """Multi-key order_by (('-extra', 'ep_num')) sorts and stays cache-consistent."""
        # for i in range(100, 90, -1):
        #     Dummy.create(subject_id=10, ep_num=i, extra=10+i, content='hheheheh')

        # self.assertEqual(list(reversed(range(101, 111))), [i.extra for i in Dummy.gets_by(subject_id=10, order_by='-extra')])

        # Dummy.create(subject_id=10, ep_num=130, extra=130, content='hheheheh')
        # self.assertEqual([130]+list(reversed(range(101, 111))), [i.extra for i in Dummy.gets_by(subject_id=10, order_by='-extra')])

        # Two groups (extra=0 and extra=1), each with ep_num 23, 22, 21.
        for f in range(2):
            for j in range(23, 20, -1):
                Dummy.create(subject_id=9, ep_num=j, extra=f, content='hheheheh')
        # Expect extra descending first, then ep_num ascending within each group.
        output = [(i.ep_num, i.extra) for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num'))]
        self.assertEqual(output, [(i, j) for j in (1, 0) for i in range(21, 24)])

        # Promote every extra=0 row to extra=1; the cached ordered result must
        # reflect the change on the next query.
        for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num')):
            if i.extra == 0:
                i.extra = 1
                i.save()

        output = [(i.ep_num, i.extra) for i in Dummy.gets_by(subject_id=9, order_by=('-extra', 'ep_num'))]
        self.assertEqual(output, [(i, 1) for i in [21, 21, 22, 22, 23, 23]])


    def test_creation_should_delete_pk(self):
        """Creating with an explicit pk must evict any cached negative lookup."""
        ID = str(1000)
        # Prime the cache with a miss for this id.
        empty = Dummy.get_by(ID)
        self.assertEqual(empty, None)

        Dummy.create(id=ID, subject_id=10, ep_num=1, content='hheheheh')

        # The stale "not found" entry must be gone.
        new = Dummy.get_by(ID)
        self.assertEqual(new.id, ID)

    def test_flush_get(self):
        """force_flush=True bypasses the cache after an out-of-band SQL update."""
        raw_num = 10
        i = Dummy.create(subject_id=10, ep_num=1, content='hheheheh')
        Dummy.get_by(id=i.id)  # warm the single-object cache

        # Mutate the row behind the ORM's back.
        store.execute('update test_orz set ep_num=%s where id=%s', (raw_num, i.id))
        store.commit()

        # Cached read still returns the stale value; force_flush sees the new one.
        self.assertEqual(Dummy.get_by(id=i.id).ep_num, i.ep_num)
        flushed_obj = Dummy.get_by(id=i.id, force_flush=True)
        self.assertNotEqual(flushed_obj.ep_num, i.ep_num)
        self.assertEqual(flushed_obj.ep_num, raw_num)

        # Same exercise for a cached multi-row result set.
        crset = []
        for i in range(10):
            crset.append(Dummy.create(subject_id=11, ep_num=11, content='hheheheh'+str(i)))

        Dummy.gets_by(subject_id=11, ep_num=11)  # warm the result-set cache

        self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11)), 10)

        # Pull one row out of the ep_num=11 bucket via raw SQL.
        store.execute('update test_orz set ep_num=%s where id=%s', (raw_num, crset[0].id))
        store.commit()

        # Cached count is stale (10); force_flush re-queries and sees 9.
        self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11)), 10)
        self.assertEqual(len(Dummy.gets_by(subject_id=11, ep_num=11, force_flush=True)), 9)


# Model with a DB-side default (subject_id) and an auto-updating timestamp,
# used to verify values produced by MySQL are flushed back after create/save.
@orz_decorate('test_orz', sqlstore=store, mc=mc)
class DummyCS(object):
    subject_id = OrzField()
    updated_at = OrzField()
    ep_num = OrzField()


class TestFlushGetAfterCreationAndSaving(TestCase):

    def setUp(self):
        # Rebuild test_orz with a DEFAULT 1001 column and an ON UPDATE
        # CURRENT_TIMESTAMP column so MySQL, not the ORM, supplies the values.
        cursor = store.get_cursor()
        cursor.execute('''DROP TABLE IF EXISTS `test_orz`''')
        cursor.delete_without_where = True
        cursor.execute('''
        CREATE TABLE `test_orz`
        (
        `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
        `ep_num` int(10) unsigned NOT NULL,
        `subject_id` int(10) unsigned DEFAULT 1001 NOT NULL,
        `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
        PRIMARY KEY (`id`)
        ) ENGINE=MEMORY AUTO_INCREMENT=1''')

    def tearDown(self):
        mc.clear()

    def test_create(self):
        """After create, DB-generated defaults are visible on the instance."""
        from datetime import datetime, timedelta
        a = DummyCS.create(ep_num=1)
        now = datetime.now()
        # NOTE(review): the default comes back as the string '1001', i.e. the
        # driver's raw value -- confirm this stringly-typed behaviour is intended.
        self.assertEqual(a.subject_id, '1001')
        self.assertTrue(now - a.updated_at < timedelta(seconds=1))

    def test_save(self):
        """After save, the ON UPDATE timestamp advances and the cache agrees."""
        import time
        a = DummyCS.create(ep_num=1)
        old_updated_at = a.updated_at

        # Sleep past the 1-second timestamp resolution before updating.
        time.sleep(1)
        a.ep_num = 10
        a.save()

        self.assertTrue(a.updated_at > old_updated_at)
        self.assertEqual(DummyCS.get_by(id=a.id).updated_at, a.updated_at)

# class TestCustomizedPrimaryKey(TestCase):
#     @orz_decorate('test_orz', sqlstore=store, mc=mc)
#     class Dummy(object):
#         subject_id = OrzField(as_key=OrzField.KeyType.ASC)
#         ep_num = OrzPrimaryField()

#     def setUp(self):

#     def tearDown(self):
#         mc.clear()
-------------------------------------------------------------------------------- /tests/test_orz_configs.py: --------------------------------------------------------------------------------
# Unit tests for ORZ.configs: the cache-key configuration objects that map
# (field-keys, order) tuples onto memcache key strings.
from unittest import TestCase
from itertools import combinations, chain
from ORZ.configs import GetsByConfig, Config, CacheConfigMgr, ConfigColl

class TestGetsByConfigs(TestCase):
    def test_hash_keys(self):
        """A GetsByConfig's key is the base Config's key plus its order, sorted."""
        keys = ['a', 'b', 'c', 'd']
        config = Config('111', keys)
        cfg = GetsByConfig(config, ('c',))
        self.assertEqual(cfg.as_key(), tuple(sorted(config.as_key()+(cfg.order,))))

    def test_to_strings(self):
        """to_string renders 'prefix:k=v|...|order_by:field' for the given kwargs."""
        kw = {'a':1, 'b':2}
        cfg = GetsByConfig(Config('111', kw.keys()), ('a', ))
        self.assertEqual(cfg.to_string(kw), '111:a=1|b=2|order_by:a')


class TestConfigMgr(TestCase):
    def setUp(self):
        # One plain Config and one GetsByConfig derived from it, shared by tests.
        self.config = Config("11111", ("a", "b"))
        self.gets_by_config = GetsByConfig(self.config, ("a", ))

    def test_add(self):
        """add_to stores a config in the chosen collection, keyed by as_key()."""
        mgr = CacheConfigMgr()
        mgr.add_to(mgr.normal_config_coll, self.config)
        self.assertEqual(len(mgr.normal_config_coll), 1)
        self.assertEqual(mgr.normal_config_coll[self.config.as_key()], self.config)

        # Adding to the gets_by collection must not disturb the normal one.
        mgr.add_to(mgr.gets_by_config_coll, self.gets_by_config)
        self.assertEqual(len(mgr.normal_config_coll), 1)
        self.assertEqual(len(mgr.gets_by_config_coll), 1)
        self.assertEqual(mgr.gets_by_config_coll[self.gets_by_config.as_key()], self.gets_by_config)

    def test_lookup(self):
        """Lookups are order-insensitive in the field tuple (('b','a') hits ('a','b'))."""
        mgr = CacheConfigMgr()
        mgr.add_to(mgr.normal_config_coll, self.config)
        mgr.add_to(mgr.gets_by_config_coll, self.gets_by_config)

        self.assertEqual(mgr.lookup_normal(("b", "a")) , self.config)
        self.assertEqual(mgr.lookup_gets_by(("b", "a"), ("a", )) , self.gets_by_config)

    def test_gen_basic_configs(self):
        """generate_basic_configs enumerates key combinations plus the id key."""
        sort_ = lambda x: sorted(x, key= lambda x:''.join(x))
        keys = [ "a", "b", "c", "id" ]
        mgr = CacheConfigMgr()
        mgr.generate_basic_configs('1111', keys, list((i, ) for i in keys))

        # 7 combinations of {a,b,c} plus ("id",) = 8 normal configs; each
        # combined with 4 possible order fields = 32 gets_by configs.
        self.assertEqual(len(mgr.normal_config_coll), 8)
        self.assertEqual(len(mgr.gets_by_config_coll), 8*4)

        key_combs = list(chain(*[combinations(keys[:3], i) for i in range(1, 4)]))+[("id",)]

        self.assertEqual(sort_(mgr.normal_config_coll.keys()), sort_(key_combs))

        key_combs_with_order = [k+("order_by:"+i, ) for k in key_combs for i in keys]
        self.assertEqual(sort_(mgr.gets_by_config_coll.keys()), sort_(key_combs_with_order))

    def test_lookup_related(self):
        """lookup_related(field) returns every config whose key set contains field."""
        sort_ = lambda x: sorted(x, key= lambda x:''.join(x))
        keys = ["a", "b", "id"]
        mgr = CacheConfigMgr()
        mgr.generate_basic_configs('1111', keys)
        cfgs = mgr.lookup_related("a")
        # Oracle: brute-force scan of everything the manager holds.
        predate_configs = [c for c in mgr.items() if "a" in c.keys]
        self.assertEqual(sort_([i.as_key() for i in cfgs]),
                         sort_([i.as_key() for i in predate_configs]))


        # A second generation pass (extra ('c','b') combination) must also be
        # reachable through lookup_related.
        mgr.generate_basic_configs('1112', keys, (('c', 'b'),))
        cfgs = mgr.lookup_related("c")
        predate_configs = [c for c in mgr.items() if "c" in c.keys]
        self.assertEqual(sort_([i.as_key() for i in cfgs]),
                         sort_([i.as_key() for i in predate_configs]))


class TestConfigColl(TestCase):
    def test_main(self):
        """ConfigColl behaves like a dict that yields None for missing keys."""
        coll = ConfigColl()
        coll['1'] = 1
        self.assertEqual(coll['1'], 1)
        self.assertEqual(coll['2'], None)
-------------------------------------------------------------------------------- /tests/test_transcation.py: --------------------------------------------------------------------------------
# Transaction tests (NB: 'transcation' is the historical filename typo).
# Runs against a stubbed memcached (see tests/stub) and a real MySQL store.
MEMCACHED = {
    'servers' : [],
    'disabled' : False,
}

# from corelib.config import MEMCACHED
import sys
import os
# Put the stub cmemcached/memcache modules ahead of any system-installed ones.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "stub"))

from douban.mc import mc_from_config
from douban.mc.wrapper import LocalCached
mc = LocalCached(mc_from_config(MEMCACHED))
# NOTE(review): reconstructed from a flattened dump; whitespace inside the
# triple-quoted DDL strings is best-effort -- TODO confirm against the real file.

from douban.sqlstore import store_from_config
from ORZ import orz_decorate, OrzField, orz_get_multi, start_transaction, OrzForceRollBack

# Single-farm MySQL config; credentials are the local test vagrant box.
DATABASE = {
    'farms': {
        "luz_farm": {
            "master": "localhost:test_vagrant9010:eye:sauron",
            "tables": ["*"],
        },
    },
    'options': {
        'show_warnings': True,
    }
}

from unittest import TestCase

store = store_from_config(DATABASE)
mc.clear()

# Module-level fixture: recreate both tables once for the whole module.
# test_t carries a UNIQUE key on subject_id so duplicate inserts raise and
# force a rollback (exercised by test_rollback below).
cursor = store.get_cursor()
cursor.delete_without_where = True
cursor.execute('''DROP TABLE IF EXISTS `test_t`''')
cursor.execute('''
CREATE TABLE `test_t`
( `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`subject_id` int(10) unsigned NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `uk_subject` (`subject_id`)) ENGINE=MEMORY AUTO_INCREMENT=1''')
cursor.execute('''DROP TABLE IF EXISTS `test_a`''')
cursor.execute('''
CREATE TABLE `test_a`
( `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`ep_num` int(10) unsigned NOT NULL,
PRIMARY KEY (`id`),
KEY `ep_num_idx` (`ep_num`)
) ENGINE=MEMORY AUTO_INCREMENT=1''')

@orz_decorate('test_t', sqlstore=store, mc=mc)
class TestT(object):
    subject_id = OrzField(OrzField.KeyType.DESC)

    def after_create(self):
        # Lifecycle hook invoked by the ORM after insert. Assigning
        # self.after_create = True shadows this bound method on the instance;
        # test_basic later asserts the attribute is truthy.
        print "333d"
        self.after_create = True

    def after_save(self):
        # Same shadowing trick for the post-save hook.
        print "333d"*3
        self.after_save = True

@orz_decorate('test_a', sqlstore=store, mc=mc)
class TestA(object):
    ep_num = OrzField(OrzField.KeyType.DESC)


class TestTransacation(TestCase):
    def tearDown(self):
        # Truncate both tables so each test starts from empty data.
        cursor.execute("truncate table `test_a`")
        cursor.execute("truncate table `test_t`")

    def test_basic(self):
        """A committed transaction swaps create() in and back out, fires hooks,
        and leaves both raw SQL and cached queries consistent."""
        # Warm the caches outside any transaction.
        TestA.create(ep_num=1)
        TestA.gets_by(ep_num=1)
        TestA.count_by(ep_num=1)

        m = 0  # sentinel; replaced by the instance created in the transaction
        before = TestA.create
        with start_transaction(TestT, TestA) as (test_t, test_a):
            zz = test_t.create(subject_id=1)
            m = test_a.create(ep_num=1)
            # Inside the block, create is rebound to the transactional variant.
            self.assertNotEqual(before, TestA.create)
            self.assertEqual(TestA.create, TestA.create_transactionally)

        after = TestA.create

        # Leaving the block restores the original create.
        self.assertEqual(before, after)
        self.assertTrue(zz.after_create)
        # Raw SQL sees the new row, and it is the newest id.
        qrset = [str(i) for i, in store.execute('select id from test_a order by id')]
        self.assertEqual(qrset[-1], m.id)
        # Cached query path also reflects the committed insert.
        self.assertEqual(len(TestA.gets_by(ep_num=1)), 2)
        self.assertEqual(TestA.gets_by(ep_num=1)[0].id, m.id)

    def test_rollback(self):
        """A duplicate-key failure on test_t rolls back the test_a insert too."""
        TestT.create(subject_id=1)
        with start_transaction(TestT, TestA) as (test_t, test_a):
            # Violates uk_subject -> the whole transaction must roll back.
            test_t.create(subject_id=1)
            m = test_a.create(ep_num=1)
        qrset = [str(i) for i, in store.execute('select id from test_a order by id')]
        self.assertEqual(len(qrset), 0)

    def test_rollback2(self):
        """OrzForceRollBack raised inside the block undoes an instance save."""
        m = TestT.create(subject_id=1)
        TestA.create(ep_num=10)
        a = TestA.gets_by(ep_num=10)[0]

        def run(t_ins, a_ins):
            # delete() returning 0 means the row was already gone; abort then.
            with start_transaction(t_ins, a_ins) as (test_t_ins, test_a_ins):
                ret = test_a_ins.delete()
                if ret == 0:
                    raise OrzForceRollBack
                test_t_ins.subject_id = 2
                test_t_ins.save()
        # Remove the row behind the ORM's back so the first run must roll back.
        store.execute('delete from test_a where id=%s', a.id)
        run(m, a)
        self.assertEqual(TestT.gets_by(subject_id=1)[0].id, m.id)

        # With a live row the same flow commits and the save sticks.
        new_a = TestA.create(ep_num=10)
        run(m, new_a)
        self.assertEqual(TestT.gets_by(subject_id=2)[0].id, m.id)
--------------------------------------------------------------------------------