├── .gitignore
├── README.md
├── async_pool_executor.egg-info
│   ├── PKG-INFO
│   ├── SOURCES.txt
│   ├── dependency_links.txt
│   ├── requires.txt
│   └── top_level.txt
├── async_pool_executor
│   ├── __init__.py
│   ├── async_pool_executor_in_async.py
│   └── async_pool_executor_in_sync.py
├── dist
│   └── async_pool_executor-0.1.tar.gz
├── nb_log_config.py
├── setup.py
├── test.py
└── tests
    └── test1.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

# Created by .ignore support plugin (hsz.mobi)
.idea/
env_hotels/
henv/
venv/
*.pyc
app/apis/logs/
app/logs/
*.log.*
*.log
*.lock
*.pytest_cache*
nohup.out
apidoc/
node_modules/
hotelApi/
my_patch_frame_config0000.py
my_patch_frame_config_beifen.py
test_frame/my_patch_frame_config.py
function_result_web/
test_frame/my/
redis_queue_web/
not_up_git/

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

## pip install async_pool_executor



## Main features

```
The main feature is a submit/shutdown API modeled on the thread pool in the concurrent.futures package.

When writing producer/consumer code you no longer need to learn the tiresome async style of coding; you can call submit directly from a synchronous function.
The producer and the consumer do not need to live in the same loop, so anyone who prefers a synchronous programming mindset can use this.

Defining a coroutine function with async def is not hard in itself; the hard part is running coroutines concurrently, which requires understanding the concepts below.
There are a great many of them and they are very complex; asyncio concurrency differs greatly from the "sync function + thread pool" style and is far harder to write.
To use async fluently, a user has to master the following common methods and objects:

the loop object
the asyncio.get_event_loop method
the asyncio.new_event_loop method
the asyncio.set_event_loop method
the asyncio.ensure_future method
the asyncio.create_task method
the asyncio.wait method
the asyncio.gather method
the asyncio.run_coroutine_threadsafe method
the loop.run_in_executor method
the run_until_complete method
the run_forever method
the future object
the task object
the coroutine object

```

```
Learning the concepts above is far harder than learning how to use a thread pool, and the resulting code is more verbose too. With the AsyncPoolExecutor package,
none of those concepts needs to be learned, and writing async concurrency becomes about 10x simpler.
```

```python
import asyncio

from async_pool_executor import AsyncPoolExecutor

async def async_f(x):
    await asyncio.sleep(2)
    print(x)

pool = AsyncPoolExecutor(3)
for i in range(30):
    pool.submit(async_f, i)

```


## Implementation code

```python

import asyncio
import atexit
import time
import traceback
from threading import Thread


class AsyncPoolExecutor:
    """
    Makes the api the same as a thread pool's. The best-performing approach would be to make submit an async def as well,
    with production and consumption running in the same thread and the same loop, but that would break compatibility along
    the call chain, making the calling style incompatible with a thread pool.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        self._sem = asyncio.Semaphore(self._size, loop=self.loop)  # note: the loop= argument was removed in Python 3.10; this code targets older interpreters
        self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
        t = Thread(target=self._start_loop_in_new_thread)
        t.daemon = True  # a daemon thread gives atexit a chance to fire, so the program ends on its own without a manual shutdown call
        t.start()
        self._can_be_closed_flag = False
        atexit.register(self.shutdown)

    def submit(self, func, *args, **kwargs):
        future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # run_coroutine_threadsafe has a drawback too: it is very expensive.
        future.result()  # prevents putting items too fast; once the queue is full, submit blocks.

    async def _produce(self, func, *args, **kwargs):
        await self._queue.put((func, args, kwargs))

    async def _consume(self):
        while True:
            func, args, kwargs = await self._queue.get()
            if func == 'stop':
                break
            try:
                await func(*args, **kwargs)
            except Exception:
                traceback.print_exc()

    def _start_loop_in_new_thread(self):
        # self._loop.run_until_complete(self.__run())  # this would work too.
        # self._loop.run_forever()

        # asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
        self._can_be_closed_flag = True

    def shutdown(self):
        for _ in range(self._size):
            self.submit('stop')
        while not self._can_be_closed_flag:
            time.sleep(0.1)
        self.loop.close()
        print('loop closed')


if __name__ == '__main__':
    import nb_log
    async def async_f(x):
        await asyncio.sleep(2)
        print(x)

    pool = AsyncPoolExecutor(3)
    for i in range(30):
        pool.submit(async_f, i)
```
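To see how close this API is to concurrent.futures, here is an illustrative side-by-side sketch (not part of the package): the same 30-task job written against `ThreadPoolExecutor`. Apart from the pool class and the blocking sleep, the calling code is identical to the usage example above.

```python
import time
from concurrent.futures import ThreadPoolExecutor


def sync_f(x):
    time.sleep(2)  # blocking sleep in place of await asyncio.sleep(2)
    print(x)


pool = ThreadPoolExecutor(3)
for i in range(30):
    pool.submit(sync_f, i)  # same submit(func, *args) shape as AsyncPoolExecutor
pool.shutdown()  # same shutdown() name; AsyncPoolExecutor additionally registers it via atexit
```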

### Compared with not using async_pool_executor: scheduling coroutines at a concurrency of 10 while dynamically appending asyncio tasks is very hard.



#### The example below compares AsyncPoolExecutor against ad-hoc hand-written loop and task handling

###### This example does not even support dynamically appending coroutine tasks at any time, and the code is already quite verbose.

```
Without async_pool_executor you have to manipulate asyncio.wait/gather, run_until_complete, future, task, coroutine and the rest of these complex concepts by hand.

I am keen on keeping ad-hoc code simple by abstracting the complex parts into one reusable place; building such demanding abstractions takes time and thought, but it is worth it.
```

```python
import nb_log
import asyncio

from async_pool_executor import AsyncPoolExecutor


async def async_f(x):
    await asyncio.sleep(2)
    print(x)


if __name__ == '__main__':
    """
    Use the asyncio pool for a concurrency of 10; tasks can be appended to the running loop dynamically at any time. Extremely simple to write.
    """

    pool = AsyncPoolExecutor(10)
    for i in range(100):
        pool.submit(async_f, i)



    """
    Without the help of the async pool, look how complex the code is just to reach a concurrency of 10.

    The snippet below still does not support dynamically adding asyncio coroutine tasks to the loop; if tasks must be appendable at any time, the run_until_complete below is unsuitable.
    You would have to introduce an asyncio queue to decouple production from consumption,
    or use run_coroutine_threadsafe; see the example at https://blog.csdn.net/whatday/article/details/106886811 , where the code is frighteningly complex.
    """

    sem = asyncio.Semaphore(10)
    async def fun_with_semaphore(x):
        async with sem:
            await async_f(x)
    tasks = []
    for i in range(100):
        tasks.append(asyncio.ensure_future(fun_with_semaphore(i)))
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))


```

### csdn's "python3 asyncio: dynamically adding tasks"

[csdn: python3 asyncio dynamically adding tasks](https://blog.csdn.net/whatday/article/details/106886811)

The code there is frighteningly complex, which is why the AsyncPoolExecutor async pool is needed to lower the programming burden on developers.

```python
import asyncio
import time

from threading import Thread


def start_loop(loop):
    asyncio.set_event_loop(loop)
    print("start loop", time.time())
    loop.run_forever()


async def do_some_work(x):
    print('start {}'.format(x))
    await asyncio.sleep(x)
    print('Done after {}s'.format(x))


new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop,))
t.start()

asyncio.run_coroutine_threadsafe(do_some_work(6), new_loop)
asyncio.run_coroutine_threadsafe(do_some_work(4), new_loop)
```
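One detail worth knowing about `run_coroutine_threadsafe`, and the mechanism `AsyncPoolExecutor.submit` itself relies on for its blocking behaviour: it returns a `concurrent.futures.Future`, so the submitting thread can wait for and read the coroutine's result. An illustrative continuation of the snippet above (not part of the cited article):

```python
# reuses do_some_work and new_loop from the snippet above
future = asyncio.run_coroutine_threadsafe(do_some_work(2), new_loop)
print(future.result())  # blocks this thread until the coroutine finishes; prints None, since do_some_work returns nothing
```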

--------------------------------------------------------------------------------
/async_pool_executor.egg-info/PKG-INFO:
--------------------------------------------------------------------------------

Metadata-Version: 2.1
Name: async-pool-executor
Version: 0.1
Summary: async_pool_executor,its api like the concurrent.futures
Home-page: UNKNOWN
Author: bfzs
Author-email: ydf0509@sohu.com
Maintainer: ydf
Maintainer-email: ydf0509@sohu.com
License: BSD License
Description: ## pip install async_pool_executor



## Main features

```
The main feature is a submit/shutdown API modeled on the thread pool in the concurrent.futures package.

When writing producer/consumer code you no longer need to learn the tiresome async loop and run_until_complete; you can call submit directly from a synchronous function.
The producer and the consumer do not need to live in the same loop, so anyone who prefers a synchronous programming mindset can use this.

```


## Implementation code

```python

import asyncio
import atexit
import time
import traceback
from threading import Thread


class AsyncPoolExecutor:
    """
    Makes the api the same as a thread pool's. The best-performing approach would be to make submit an async def as well,
    with production and consumption running in the same thread and the same loop, but that would break compatibility along
    the call chain, making the calling style incompatible with a thread pool.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        self._sem = asyncio.Semaphore(self._size, loop=self.loop)
        self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
        t = Thread(target=self._start_loop_in_new_thread)
        t.daemon = True  # a daemon thread gives atexit a chance to fire, so the program ends on its own without a manual shutdown call
        t.start()
        self._can_be_closed_flag = False
        atexit.register(self.shutdown)

    def submit(self, func, *args, **kwargs):
        future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # run_coroutine_threadsafe has a drawback too: it is very expensive.
        future.result()  # prevents putting items too fast; once the queue is full, submit blocks.

    async def _produce(self, func, *args, **kwargs):
        await self._queue.put((func, args, kwargs))

    async def _consume(self):
        while True:
            func, args, kwargs = await self._queue.get()
            if func == 'stop':
                break
            try:
                await func(*args, **kwargs)
            except Exception:
                traceback.print_exc()

    def _start_loop_in_new_thread(self):
        # self._loop.run_until_complete(self.__run())  # this would work too.
        # self._loop.run_forever()

        # asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
        self._can_be_closed_flag = True

    def shutdown(self):
        for _ in range(self._size):
            self.submit('stop')
        while not self._can_be_closed_flag:
            time.sleep(0.1)
        self.loop.close()
        print('loop closed')


if __name__ == '__main__':
    import nb_log
    async def async_f(x):
        await asyncio.sleep(2)
        print(x)

    pool = AsyncPoolExecutor(3)
    for i in range(30):
        pool.submit(async_f, i)
```
Keywords: async_pool_executor,threadpoolexecutor,sync,async
Platform: all
Classifier: Development Status :: 4 - Beta
Classifier: Operating System :: OS Independent
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: Implementation
Classifier: Programming Language :: Python :: 3.6
Classifier: Topic :: Software Development :: Libraries
Description-Content-Type: text/markdown

--------------------------------------------------------------------------------
/async_pool_executor.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------

README.md
setup.py
async_pool_executor/__init__.py
async_pool_executor.egg-info/PKG-INFO
async_pool_executor.egg-info/SOURCES.txt
async_pool_executor.egg-info/dependency_links.txt
async_pool_executor.egg-info/requires.txt
async_pool_executor.egg-info/top_level.txt

--------------------------------------------------------------------------------
/async_pool_executor.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------



--------------------------------------------------------------------------------
/async_pool_executor.egg-info/requires.txt:
--------------------------------------------------------------------------------

nb_log

--------------------------------------------------------------------------------
/async_pool_executor.egg-info/top_level.txt:
--------------------------------------------------------------------------------

async_pool_executor

--------------------------------------------------------------------------------
/async_pool_executor/__init__.py:
--------------------------------------------------------------------------------

import asyncio
import atexit
import time
import traceback
from threading import Thread


class AsyncPoolExecutor:
    """
    Makes the api the same as a thread pool's. The best-performing approach would be to make submit an async def as well,
    with production and consumption running in the same thread and the same loop, but that would break compatibility along
    the call chain, making the calling style incompatible with a thread pool.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        # self._sem = asyncio.Semaphore(self._size, loop=self.loop)
        self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
        t = Thread(target=self._start_loop_in_new_thread)
        t.daemon = True  # a daemon thread gives atexit a chance to fire, so the program ends on its own without a manual shutdown call
        t.start()
        self._can_be_closed_flag = False
        atexit.register(self.shutdown)

    def submit(self, func, *args, **kwargs):
        future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # run_coroutine_threadsafe has a drawback too: it is very expensive.
        future.result()  # prevents putting items too fast; once the queue is full, submit blocks.

    async def _produce(self, func, *args, **kwargs):
        await self._queue.put((func, args, kwargs))

    async def _consume(self):
        while True:
            func, args, kwargs = await self._queue.get()
            if func == 'stop':
                break
            try:
                await func(*args, **kwargs)
            except Exception:
                traceback.print_exc()

    def _start_loop_in_new_thread(self):
        # self._loop.run_until_complete(self.__run())  # this would work too.
        # self._loop.run_forever()

        # asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
        self._can_be_closed_flag = True

    def shutdown(self):
        if self.loop.is_running():  # may be triggered by atexit or called manually by the user, so check first; the loop must not be closed twice.
            for _ in range(self._size):
                self.submit('stop')
            while not self._can_be_closed_flag:
                time.sleep(0.1)
            self.loop.stop()
            self.loop.close()
            print('loop closed')


if __name__ == '__main__':
    import nb_log
    async def async_f(x):
        await asyncio.sleep(2)
        print(x)

    pool = AsyncPoolExecutor(3)
    for i in range(30):
        pool.submit(async_f, i)
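A version note on the module above: the `loop=` argument it passes to `asyncio.Queue` (and that the commented-out `Semaphore` line would pass) was deprecated in Python 3.8 and removed in 3.10, so this module only runs on older interpreters. `async_pool_executor_in_async.py` below already guards on `sys.version_info`; a minimal sketch of the same guard, written as a hypothetical free function that is not part of the package:

```python
import asyncio
import sys


def make_pool_primitives(loop, size):
    """Create the pool's queue and semaphore so construction works before and after Python 3.10."""
    if sys.version_info >= (3, 10):
        # loop= was removed in 3.10; since then these primitives bind lazily
        # to whichever running loop first awaits them, so no argument is needed.
        return asyncio.Queue(maxsize=size), asyncio.Semaphore(size)
    return asyncio.Queue(maxsize=size, loop=loop), asyncio.Semaphore(size, loop=loop)
```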

--------------------------------------------------------------------------------
/async_pool_executor/async_pool_executor_in_async.py:
--------------------------------------------------------------------------------

import asyncio
import atexit
import time
import traceback
from threading import Thread
import sys


print(sys.version_info.major)
class AsyncPoolExecutorInAsync:
    """
    Makes the api like a thread pool's and provides control over the concurrency count. Tasks are submitted from inside
    an async function here, not from a sync function: submit is defined with async def. Note the difference.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        if sys.version_info.minor >= 10:  # Python 3.10 and later: the loop= argument has been removed
            self._sem = asyncio.Semaphore(self._size)
            self._queue = asyncio.Queue(maxsize=size)
        else:
            self._sem = asyncio.Semaphore(self._size, loop=self.loop)
            self._queue = asyncio.Queue(maxsize=size, loop=self.loop)

    async def submit(self, func, *args, **kwargs):
        # await self._queue.put((func, args, kwargs))
        await self._sem.acquire()
        await func(*args, **kwargs)
        self._sem.release()




if __name__ == '__main__':
    import nb_log
    async def async_f(x):
        await asyncio.sleep(2)
        print(x)

    async def start():
        pool = AsyncPoolExecutorInAsync(3)
        for i in range(30):
            print('i:', i)
            # awaiting submit directly runs each coroutine to completion before the next one starts,
            # so this demo executes sequentially; see the concurrent usage sketch after this file.
            await pool.submit(async_f, i)

    asyncio.get_event_loop().run_until_complete(start())
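As the comment in the `__main__` demo above notes, awaiting `submit` directly serializes the work. A hedged usage sketch (assuming Python 3.10+, where the class builds its semaphore without `loop=`) that wraps each `submit` in a task, so the semaphore genuinely caps the concurrency at 3:

```python
import asyncio

from async_pool_executor.async_pool_executor_in_async import AsyncPoolExecutorInAsync


async def async_f(x):
    await asyncio.sleep(2)
    print(x)


async def start():
    pool = AsyncPoolExecutorInAsync(3)
    # create_task schedules every submit at once; the pool's semaphore then
    # lets at most 3 coroutine bodies run concurrently.
    tasks = [asyncio.create_task(pool.submit(async_f, i)) for i in range(30)]
    await asyncio.gather(*tasks)  # about 20 seconds in total: 30 tasks, 3 at a time, 2 s each


asyncio.run(start())
```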

--------------------------------------------------------------------------------
/async_pool_executor/async_pool_executor_in_sync.py:
--------------------------------------------------------------------------------

import asyncio
import atexit
import time
import traceback
from threading import Thread


class AsyncPoolExecutorInSync:
    """
    Makes the api the same as a thread pool's. The best-performing approach would be to make submit an async def as well,
    with production and consumption running in the same thread and the same loop, but that would break compatibility along
    the call chain, making the calling style incompatible with a thread pool.
    """

    def __init__(self, size, loop=None):
        """
        :param size: number of coroutine tasks allowed to run concurrently.
        :param loop:
        """
        self._size = size
        self.loop = loop or asyncio.new_event_loop()
        # self._sem = asyncio.Semaphore(self._size, loop=self.loop)
        self._queue = asyncio.Queue(maxsize=size, loop=self.loop)
        t = Thread(target=self._start_loop_in_new_thread)
        t.daemon = True  # a daemon thread gives atexit a chance to fire, so the program ends on its own without a manual shutdown call
        t.start()
        self._can_be_closed_flag = False
        atexit.register(self.shutdown)

    def submit(self, func, *args, **kwargs):
        future = asyncio.run_coroutine_threadsafe(self._produce(func, *args, **kwargs), self.loop)  # run_coroutine_threadsafe has a drawback too: it is very expensive.
        future.result()  # prevents putting items too fast; once the queue is full, submit blocks.

    async def _produce(self, func, *args, **kwargs):
        await self._queue.put((func, args, kwargs))

    async def _consume(self):
        while True:
            func, args, kwargs = await self._queue.get()
            if func == 'stop':
                break
            try:
                await func(*args, **kwargs)
            except Exception:
                traceback.print_exc()

    def _start_loop_in_new_thread(self):
        # self._loop.run_until_complete(self.__run())  # this would work too.
        # self._loop.run_forever()

        # asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(asyncio.wait([self._consume() for _ in range(self._size)], loop=self.loop))
        self._can_be_closed_flag = True

    def shutdown(self):
        if self.loop.is_running():  # may be triggered by atexit or called manually by the user, so check first; the loop must not be closed twice.
            for _ in range(self._size):
                self.submit('stop')
            while not self._can_be_closed_flag:
                time.sleep(0.1)
            self.loop.stop()
            self.loop.close()
            print('loop closed')


if __name__ == '__main__':
    import nb_log
    async def async_f(x):
        await asyncio.sleep(2)
        print(x)

    pool = AsyncPoolExecutorInSync(3)
    for i in range(30):
        pool.submit(async_f, i)

--------------------------------------------------------------------------------
/dist/async_pool_executor-0.1.tar.gz:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/ydf0509/async_pool_executor/19a8fd1f0f4a1d0db6419c1926e6bba92ebe38c6/dist/async_pool_executor-0.1.tar.gz

--------------------------------------------------------------------------------
/nb_log_config.py:
--------------------------------------------------------------------------------

"""
This file, nb_log_config.py, is generated automatically into the root directory of the python project.
Variables written here override the values in nb_log_config_default and provide the default configuration for the nb_log package.
The final configuration is decided by the arguments passed to the get_logger_and_add_handlers method; when an argument is None, the value configured here is used.
"""
import logging
from pathlib import Path
import socket

from pythonjsonlogger.jsonlogger import JsonFormatter


def get_host_ip():
    ip = ''
    host_name = ''
    try:
        sc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sc.connect(('8.8.8.8', 80))
        ip = sc.getsockname()[0]
        host_name = socket.gethostname()
        sc.close()
    except Exception:
        pass
    return ip, host_name


computer_ip, computer_name = get_host_ip()


class JsonFormatterJumpAble(JsonFormatter):
    def add_fields(self, log_record, record, message_dict):
        # log_record['jump_click'] = f"""File '{record.__dict__.get('pathname')}', line {record.__dict__.get('lineno')}"""
        log_record[f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}"] = ''  # add a field that can be clicked to jump to the source.
        log_record['ip'] = computer_ip
        log_record['host_name'] = computer_name
        super().add_fields(log_record, record, message_dict)
        if 'for_segmentation_color' in log_record:
            del log_record['for_segmentation_color']


# DING_TALK_TOKEN = '3dd0eexxxxxadab014bd604XXXXXXXXXXXX'  # DingTalk alert robot
#
# EMAIL_HOST = ('smtp.sohu.com', 465)
# EMAIL_FROMADDR = 'aaa0509@sohu.com'  # 'matafyhotel-techl@matafy.com',
# EMAIL_TOADDRS = ('cccc.cheng@silknets.com', 'yan@dingtalk.com',)
# EMAIL_CREDENTIALS = ('aaa0509@sohu.com', 'abcdefg')
#
# ELASTIC_HOST = '127.0.0.1'
# ELASTIC_PORT = 9200
#
# KAFKA_BOOTSTRAP_SERVERS = ['192.168.199.202:9092']
# ALWAYS_ADD_KAFKA_HANDLER_IN_TEST_ENVIRONENT = False
#
# MONGO_URL = 'mongodb://myUserAdmin:mimamiama@127.0.0.1:27016/admin'
#
# DEFAULUT_USE_COLOR_HANDLER = True  # whether colored logs are used by default.
# DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = True  # whether console logs get colored background blocks; False disables the large background colors.
# AUTO_PATCH_PRINT = True  # whether to monkey-patch print automatically; when patched, print output is colored and click-to-jump capable.
# WARNING_PYCHARM_COLOR_SETINGS = True
#
# DEFAULT_ADD_MULTIPROCESSING_SAFE_ROATING_FILE_HANDLER = False  # whether logs are also written to log files by default.
# LOG_FILE_SIZE = 100  # in MB; slice size of each file, rotated automatically once exceeded
# LOG_FILE_BACKUP_COUNT = 3
# LOG_PATH = '/pythonlogs'  # default log folder
# # LOG_PATH = Path(__file__).absolute().parent / Path("nblogpath")
# IS_USE_WATCHED_FILE_HANDLER_INSTEAD_OF_CUSTOM_CONCURRENT_ROTATING_FILE_HANDLER = False  # relies on external logrotate to rotate logs; WatchedFileHandler writes files more slowly than this package's custom rotating handler.
#
# LOG_LEVEL_FILTER = logging.DEBUG  # default log level; entries below it are not recorded. E.g. with INFO, logger.debug is dropped and only logger.info and above are kept.
# RUN_ENV = 'test'
#
# FORMATTER_DICT = {
#     1: logging.Formatter(
#         'log time【%(asctime)s】 - logger name【%(name)s】 - file【%(filename)s】 - line【%(lineno)d】 - level【%(levelname)s】 - message【%(message)s】',
#         "%Y-%m-%d %H:%M:%S"),
#     2: logging.Formatter(
#         '%(asctime)s - %(name)s - %(filename)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s',
#         "%Y-%m-%d %H:%M:%S"),
#     3: logging.Formatter(
#         '%(asctime)s - %(name)s - 【 File "%(pathname)s", line %(lineno)d, in %(funcName)s 】 - %(levelname)s - %(message)s',
#         "%Y-%m-%d %H:%M:%S"),  # a template imitating a traceback, clickable to jump to where the log was printed
#     4: logging.Formatter(
#         '%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s - File "%(pathname)s", line %(lineno)d ',
#         "%Y-%m-%d %H:%M:%S"),  # this one also supports jump-to-source
#     5: logging.Formatter(
#         '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s',
#         "%Y-%m-%d %H:%M:%S"),  # the best template in my opinion; recommended
#     6: logging.Formatter('%(name)s - %(asctime)-15s - %(filename)s - %(lineno)d - %(levelname)s: %(message)s',
#                          "%Y-%m-%d %H:%M:%S"),
#     7: logging.Formatter('%(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"),  # a template showing only the short file name and the line number
#
#     8: JsonFormatterJumpAble('%(asctime)s - %(name)s - %(levelname)s - %(message)s - "%(filename)s %(lineno)d -" ', "%Y-%m-%d %H:%M:%S", json_ensure_ascii=False)  # JSON logs, easy to analyze, but the background color blocks cannot be shown.
# }
#
# FORMATTER_KIND = 5  # which template is used by default when get_logger_and_add_handlers does not specify one

--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------

# coding=utf-8
from pathlib import Path
from setuptools import setup, find_packages

# with open("README.md", "r", encoding='utf8') as fh:
#     long_description = fh.read()

# filepath = ((Path(__file__).parent / Path('README.md')).absolute()).as_posix()
filepath = 'README.md'
print(filepath)

setup(
    name='async_pool_executor',
    version="0.1",
    description=('async_pool_executor,its api like the concurrent.futures'),
    keywords=("async_pool_executor", "threadpoolexecutor", "sync", "async"),
    # long_description=open('README.md', 'r', encoding='utf8').read(),
    long_description_content_type="text/markdown",
    long_description=open(filepath, 'r', encoding='utf8').read(),
    # data_files=[filepath],
    author='bfzs',
    author_email='ydf0509@sohu.com',
    maintainer='ydf',
    maintainer_email='ydf0509@sohu.com',
    license='BSD License',
    packages=find_packages(),
    include_package_data=True,
    platforms=["all"],
    url='',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries'
    ],
    install_requires=[
        'nb_log'
    ]
)
"""
Build and upload:
python setup.py sdist upload -r pypi


python setup.py sdist & twine upload dist/async_pool_executor-0.1.tar.gz
twine upload dist/*


python -m pip install nb_log --upgrade -i https://pypi.org/simple   # installs immediately, no need to wait for the Aliyun / Douban mirrors to sync
"""
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------

import nb_log
import asyncio

from async_pool_executor import AsyncPoolExecutor


async def async_f(x):
    await asyncio.sleep(2)
    print(x)


if __name__ == '__main__':
    """
    Use the asyncio pool for a concurrency of 10; tasks can be appended to the running loop dynamically at any time.
    """

    pool = AsyncPoolExecutor(10)
    for i in range(100):
        pool.submit(async_f, i)



    """
    Without the help of the async pool, look how complex the code is just to reach a concurrency of 10.

    The snippet below still does not support dynamically adding asyncio coroutine tasks to the loop; if tasks must be appendable at any time, the run_until_complete below is unsuitable.
    You would have to introduce an asyncio queue to decouple production from consumption,
    or use run_coroutine_threadsafe; see the example at https://blog.csdn.net/whatday/article/details/106886811 , where the code is frighteningly complex.
    """

    sem = asyncio.Semaphore(10)
    async def fun_with_semaphore(x):
        async with sem:
            await async_f(x)
    tasks = []
    for i in range(100):
        tasks.append(asyncio.ensure_future(fun_with_semaphore(i)))
    asyncio.get_event_loop().run_until_complete(asyncio.wait(tasks))

--------------------------------------------------------------------------------
/tests/test1.py:
--------------------------------------------------------------------------------

import asyncio

from async_pool_executor import AsyncPoolExecutor


pool = AsyncPoolExecutor(100)


async def af(x, y):
    await asyncio.sleep(4)
    print(x, y)
    return x + y


for i in range(10):
    r = pool.submit(af, i, i * 20)
    print('r:', r)  # submit returns None: it only enqueues the coroutine, so the return value (x + y) is not surfaced to the caller

--------------------------------------------------------------------------------