├── .code-workspace ├── .editorconfig ├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── .vscode ├── extensions.json ├── launch copy.json ├── launch.json ├── settings.json └── tasks.json ├── README.md ├── README_OLD.md ├── funboost_cli_user.py ├── funboost_config.py ├── git_nb_log_github.py ├── img.png ├── img_1.png ├── jietu ├── color1.png ├── color2.png ├── ding1.png ├── es.png ├── file.png ├── img.png ├── img_no_color.png ├── jump2.png ├── loguru颜色.png ├── mainl.png ├── mongo.png ├── 例子演示.png ├── 例子演示2.png ├── 例子演示3.png ├── 普通roatatingfilehandler多进程错误.png └── 演示.png ├── nb_log ├── __init__.py ├── capture_warnings.py ├── direct_logger.py ├── exception_auto_log.py ├── file_write.py ├── formatters.py ├── frequency_control_log.py ├── global_except_hook.py ├── handlers.py ├── handlers0000.py ├── handlers_loguru.py ├── handlers_more.py ├── helpers.py ├── log_manager.py ├── loggers_imp │ ├── __init__.py │ └── compatible_logger.py ├── logging_tree_helper.py ├── monkey_print.py ├── monkey_std_filter_words.py ├── monkey_sys_std.py ├── nb_log_config_default.py ├── root_logger.py ├── rotate_file_writter.py ├── set_nb_log_config.py └── simple_print.py ├── pub_pip_nb_log.py ├── settings.json ├── setup.py ├── tests ├── _trial_temp │ └── _trial_marker ├── ai_filehandler │ ├── custom_file_handler_demo.py │ └── test_multiprocess_logging.py ├── comprae_loguru │ ├── t_loguru.py │ └── t_nb_log.py ├── d1 │ └── d2 │ │ └── d3 │ │ └── t6.py ├── example.py ├── git_nb_log.py ├── loguru不同功能写入不同文件.py ├── nb_log_config.py ├── nb_log_test_multi_process.py ├── recod_flask_log │ ├── nb_log_flask.py │ └── 笨瓜方式.py ├── replace_coloer.py ├── replace_hjandler.py ├── rotate_file_hanlder_0120.py ├── t_basic_config.py ├── t_capture_warnings_with_frequency_control.py ├── t_fastapi.py ├── t_frequecy_log.py ├── t_logger_mixin.py ├── t_reapeat_recrod.py ├── t_warning.py ├── tes_lock_level.py ├── tes_name_root.py ├── tes_prevent_add_handler.py ├── test.db ├── test6.py ├── test9.py ├── test_batch_print.py ├── test_benchmark.py ├── test_catch_log.py ├── test_colorama.py ├── test_direct_log.py ├── test_exception_hook.py ├── test_exception_hook_thread.py ├── test_file_handler.py ├── test_filehandler.py ├── test_filter_print.py ├── test_getip.py ├── test_gunicorn_dir.py ├── test_icecream.py ├── test_level.py ├── test_logged_exc.py ├── test_loguru)Logger.py ├── test_loguru.py ├── test_loguru_dir │ ├── m1.py │ └── m2.py ├── test_loguru_exception.py ├── test_loguru_handler.py ├── test_memory.py ├── test_nb_log_concurrent_file_handler.py ├── test_panda.py ├── test_preset_log_level.py ├── test_raotaing_filehandler.py ├── test_raw_concurrent_log_handler │ ├── _trial_temp │ │ └── _trial_marker │ ├── test_concurent_log_performence.py │ ├── test_namerxx.py │ └── test_tyimne_rotate.py ├── test_reapet.py ├── test_requests.py ├── test_rotate_error.py ├── test_speed.py ├── test_str_in_perf.py ├── test_struct_log.py ├── test_sys_color.py ├── test_timed_raotaing_filehandler.py ├── test_tornado_log.py ├── test_use_curretn_dir_config │ ├── nb_log_config.py │ ├── test_file_name.py │ ├── test_log_by_current_dir_config.py │ ├── tt_print3.py │ ├── tt_print3b.py │ ├── ttest_s_print2.py │ └── ttest_timestrftime.py ├── test_use_differrent_file.py ├── test_warn.py ├── test_write_eror_file.py ├── tests_othres │ ├── nb_log_demo.py │ ├── nb_log_simple_demo.py │ └── test_funboost.py ├── utput.txt ├── 彩色虚线字体生成.py ├── 文件和屏幕写入速度对比.py ├── 禁止这样封装nb_log.py ├── 虚线字体生成.py └── 错误方式loguru不同功能写入不同文件.py └── ume /.code-workspace: 
-------------------------------------------------------------------------------- 1 | { 2 | "terminal.integrated.env.windows": { 3 | "PYTHONPATH": "${workspaceFolder:nb_log}", 4 | "va":"333" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | [*] 2 | charset=utf-8 3 | end_of_line=crlf 4 | insert_final_newline=false 5 | indent_style=space 6 | indent_size=4 7 | max_line_length = 400 8 | 9 | 10 | [{.babelrc,.stylelintrc,.eslintrc,jest.config,*.json,*.jsb3,*.jsb2,*.bowerrc}] 11 | indent_style=space 12 | indent_size=2 13 | 14 | [*.csv] 15 | indent_style=tab 16 | tab_width=1 17 | 18 | [{jshint.json,*.jshintrc}] 19 | indent_style=space 20 | indent_size=2 21 | 22 | [{*.jscs.json,*.jscsrc}] 23 | indent_style=space 24 | indent_size=2 25 | 26 | [*.js.map] 27 | indent_style=space 28 | indent_size=2 29 | 30 | [{*.ddl,*.sql}] 31 | indent_style=space 32 | indent_size=2 33 | 34 | [{*.coffee,*.cjsx}] 35 | indent_style=space 36 | indent_size=2 37 | 38 | [{*.yml,*.yaml}] 39 | indent_style=space 40 | indent_size=2 41 | 42 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | jobs: 16 | deploy: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: '3.x' 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install build 30 | - name: Build package 31 | run: python -m build 32 | - name: Publish package 33 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 34 | with: 35 | user: __token__ 36 | password: ${{ secrets.PYPI_API_TOKEN }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | .idea/ 3 | env_hotels/ 4 | henv/ 5 | venv/ 6 | *.pyc 7 | app/apis/logs/ 8 | app/logs/ 9 | *.log.* 10 | *.log 11 | *.lock 12 | *.pytest_cache* 13 | nohup.out 14 | apidoc/ 15 | node_modules/ 16 | hotelApi/ 17 | my_patch_frame_config0000.py 18 | my_patch_frame_config_beifen.py 19 | test_frame/my_patch_frame_config.py 20 | function_result_web/ 21 | test_frame/my/ 22 | redis_queue_web/ 23 | not_up_git/ 24 | dist/ 25 | *.egg-info/ 26 | distributed_frame_config.py 27 | /auto_run_on_remote_config.py 28 | build/ 29 | .history/ 30 | 31 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "mgesbert.python-path", 4 | "datasentics.pythonpath-setter" 5 | ] 6 | 7 | } 8 | -------------------------------------------------------------------------------- /.vscode/launch copy.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Python: Run in Output", 6 | "type": "debugpy", 7 | "request": "launch", 8 | "program": "${file}", 9 | // "console": "internalConsole", 10 | 11 | "justMyCode": true, 12 | // 添加以下设置点击run按钮在单独的终端运行新python文件。 13 | "internalConsoleOptions": "neverOpen", 14 | "console": "integratedTerminal" 15 | } 16 | ] 17 | } 18 | 19 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 |
8 | {
9 | "name": "Python: Current File",
10 | "type": "debugpy",
11 | "request": "launch",
12 | "program": "${file}",
13 | "console": "integratedTerminal",
14 | "env": {
15 | // "PYTHONPATH": "F:/coding2/nb_log;${env:PYTHONPATH}"
16 | "PYTHONPATH": "${workspaceFolder};${env:PYTHONPATH}",
17 | }
18 |
19 |
20 |
21 | },
22 | {
23 | "name": "Python: FastAPI",
24 | "type": "debugpy",
25 | "request": "launch",
26 | "module": "uvicorn",
27 | "args": [
28 | "main:app",
29 | "--reload",
30 | "--port",
31 | "8000"
32 | ],
33 | "jinja": true,
34 | "justMyCode": false
35 | },
36 | ]
37 | }
38 |
-------------------------------------------------------------------------------- /.vscode/settings.json: --------------------------------------------------------------------------------
1 | {
2 |
3 | }
4 |
-------------------------------------------------------------------------------- /.vscode/tasks.json: --------------------------------------------------------------------------------
1 | {
2 | "version": "2.0.0",
3 | "tasks": [
4 | {
5 | "label": "Run t1.py", // Task label; the name shown in VS Code
6 | "type": "shell", // Task type; a shell command here
7 | "command": "D:\\ProgramData\\Miniconda3\\envs\\py39b\\python.exe", // Path to the Python interpreter
8 | "args": [
9 | "tests/test_othres/test_try.py" // Path of the Python script to run
10 | ],
11 | "group": "build", // Task group; classified as a build task
12 | "problemMatcher": [], // Problem matcher for detecting errors in the output; empty here
13 | "presentation": {
14 | "echo": true, // Whether to echo the executed command in the output panel
15 | "reveal": "always", // Always reveal the output when the task runs
16 | "focus": false, // Do not move focus to the output panel
17 | "panel": "shared", // Show output in the shared panel
18 | "showReuseMessage": true, // Show the panel-reuse message
19 | "clear": false // Do not clear previous output on each run
20 | }
21 | }
22 | ]
23 | }
24 |
25 |
26 |
-------------------------------------------------------------------------------- /funboost_cli_user.py: --------------------------------------------------------------------------------
1 | """
2 | funboost now supports starting consumers, publishing messages and clearing queues from the command line.
3 |
4 |
5 | """
6 | import sys
7 | from pathlib import Path
8 | import fire
9 |
10 | project_root_path = Path(__file__).absolute().parent
11 | print(f'project_root_path is : {project_root_path} , please confirm it is correct')
12 | sys.path.insert(1, str(project_root_path))  # Saves command-line users from having to run export PYTHONPATH=<project root> by hand first.
13 |
14 | # $$$$$$$$$$$$
15 | # The sys.path code above must stay at the very top: set the python path first, then import the funboost modules.
16 | # $$$$$$$$$$$$
17 |
18 |
19 | from funboost.core.cli.funboost_fire import BoosterFire, env_dict
20 | from funboost.core.cli.discovery_boosters import BoosterDiscovery
21 |
22 | # Modules or functions you want to start should preferably be imported here; otherwise specify, via --import_modules_str or booster_dirs, which modules of your project contain boosters.
23 | '''
24 | There are 4 ways to automatically find functions decorated with @boost and register their boosters:
25 |
26 | 1. Import the module or function containing the consumer functions into this module yourself.
27 | 2. Pass --import_modules_str on the command line to name the module paths to import, so those queues can be consumed and published.
28 | 3. Call BoosterDiscovery.auto_discovery_boosters to automatically import the .py files under a given directory.
29 | 4. Pass project_root_path and booster_dirs on the command line; the modules are then scanned and imported automatically.
30 | '''
31 | env_dict['project_root_path'] = project_root_path
32 |
33 | if __name__ == '__main__':
34 | # Add your own directories to booster_dirs so --booster_dirs_str need not be passed on the command line.
35 | # BoosterDiscovery may be called multiple times.
36 | BoosterDiscovery(project_root_path, booster_dirs=[], max_depth=1, py_file_re_str=None).auto_discovery()  # Best kept inside main: scanning the script's own directory without a regex that excludes this file would import it in an endless loop.
37 | fire.Fire(BoosterFire, )
38 |
39 | '''
40 |
41 | python /codes/funboost/funboost_cli_user.py --booster_dirs_str=test_frame/test_funboost_cli/test_find_boosters --max_depth=2 push test_find_queue1 --x=1 --y=2
42 |
43 | python /codes/funboost/funboost_cli_user.py --booster_dirs_str=test_frame/test_funboost_cli/test_find_boosters --max_depth=2 consume test_find_queue1
44 |
45 | '''
46 |
-------------------------------------------------------------------------------- /funboost_config.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import logging
3 | from pathlib import Path
4 | import pytz
5 | from funboost.constant import BrokerEnum, ConcurrentModeEnum
6 | from funboost.core.func_params_model import FunctionResultStatusPersistanceConfig
7 | from funboost.utils.simple_data_class import DataClassBase
8 | from nb_log import nb_log_config_default
9 |
10 | '''
11 | funboost_config.py is generated automatically in your project root the first time the framework runs; users do not need to create it by hand.
12 | Any Python code may live in this file; e.g. middleware accounts and passwords can be fetched from an Apollo configuration center or from environment variables.
13 | '''
14 |
15 | '''
16 | Configuration changes made in the auto-generated funboost_config.py in your project root are picked up automatically.
17 | Do not modify the framework source funboost/funboost_config_deafult.py; its variables are automatically overridden by funboost_config.py.
18 | The override logic for funboost/funboost_config_deafult.py is in funboost/set_frame_config.py.
19 |
20 | The framework documentation is at https://funboost.readthedocs.io/zh_CN/latest/
21 | '''
22 |
23 |
24 | class BrokerConnConfig(DataClassBase):
25 | """
26 | Middleware connection configuration.
27 | Modify only what you need; e.g. with redis as the message queue you can ignore the rabbitmq, mongodb and kafka settings.
28 | Three features are exceptions: RPC mode, distributed frequency control and task filtering all require a working redis connection regardless of which message-queue middleware you choose.
29 | That is, if the @boost decorator sets is_using_rpc_mode=True, is_using_distributed_frequency_control=True or do_task_filtering=True, configure redis; all three default to False so users are not forced to install redis.
30 | """
31 |
32 | MONGO_CONNECT_URL = f'mongodb://127.0.0.1:27017'  # With a password: 'mongodb://myUserAdmin:8mwTdy1klnSYepNo@192.168.199.202:27016/' ; authSource selects the auth db, e.g. MONGO_CONNECT_URL = 'mongodb://root:123456@192.168.64.151:27017?authSource=admin'
33 |
34 | RABBITMQ_USER = 'rabbitmq_user'
35 | RABBITMQ_PASS = 'rabbitmq_pass'
36 | RABBITMQ_HOST = '127.0.0.1'
37 | RABBITMQ_PORT = 5672
38 | RABBITMQ_VIRTUAL_HOST = ''  # e.g. my_host # A user-created rabbitmq virtual host; leave it an empty string to use the root host instead of a virtual host.
39 | RABBITMQ_URL = f'amqp://{RABBITMQ_USER}:{RABBITMQ_PASS}@{RABBITMQ_HOST}:{RABBITMQ_PORT}/{RABBITMQ_VIRTUAL_HOST}'
40 |
41 | REDIS_HOST = '127.0.0.1'
42 | REDIS_USERNAME = ''
43 | REDIS_PASSWORD = ''
44 | REDIS_PORT = 6379
45 | REDIS_DB = 7  # The redis db holding the message queues; avoid storing many unrelated keys here, because some framework features scan for unacked key names, so keep a dedicated db.
46 | REDIS_DB_FILTER_AND_RPC_RESULT = 8  # The db used for task-parameter filtering and RPC results; it holds many key-value pairs, so it is kept separate from the message-queue db.
47 | REDIS_URL = f'redis://{REDIS_USERNAME}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}'
48 |
49 | NSQD_TCP_ADDRESSES = ['127.0.0.1:4150']
50 | NSQD_HTTP_CLIENT_HOST = '127.0.0.1'
51 | NSQD_HTTP_CLIENT_PORT = 4151
52 |
53 | KAFKA_BOOTSTRAP_SERVERS = ['127.0.0.1:9092']
54 | KFFKA_SASL_CONFIG = {
55 | "bootstrap_servers": KAFKA_BOOTSTRAP_SERVERS,
56 | "sasl_plain_username": "",
57 | "sasl_plain_password": "",
58 | "sasl_mechanism": "SCRAM-SHA-256",
"security_protocol": "SASL_PLAINTEXT", 60 | } 61 | 62 | SQLACHEMY_ENGINE_URL = 'sqlite:////sqlachemy_queues/queues.db' 63 | 64 | # 如果broker_kind 使用 peewee 中间件模式会使用mysql配置 65 | MYSQL_HOST = '127.0.0.1' 66 | MYSQL_PORT = 3306 67 | MYSQL_USER = 'root' 68 | MYSQL_PASSWORD = '123456' 69 | MYSQL_DATABASE = 'testdb6' 70 | 71 | # persist_quque中间件时候采用本机sqlite的方式,数据库文件生成的位置,如果linux账号在根目录没权限建文件夹,可以换文件夹。 72 | SQLLITE_QUEUES_PATH = '/sqllite_queues' 73 | 74 | TXT_FILE_PATH = Path(__file__).parent / 'txt_queues' # 不建议使用这个txt模拟消息队列中间件,本地持久化优先选择 PERSIST_QUQUE 中间件。 75 | 76 | ROCKETMQ_NAMESRV_ADDR = '192.168.199.202:9876' 77 | 78 | MQTT_HOST = '127.0.0.1' 79 | MQTT_TCP_PORT = 1883 80 | 81 | HTTPSQS_HOST = '127.0.0.1' 82 | HTTPSQS_PORT = '1218' 83 | HTTPSQS_AUTH = '123456' 84 | 85 | NATS_URL = 'nats://192.168.6.134:4222' 86 | 87 | KOMBU_URL = 'redis://127.0.0.1:6379/9' # 这个就是celery依赖包kombu使用的消息队列格式,所以funboost支持一切celery支持的消息队列种类。 88 | # KOMBU_URL = 'sqla+sqlite:////dssf_kombu_sqlite.sqlite' # 4个//// 代表磁盘根目录下生成一个文件。推荐绝对路径。3个///是相对路径。 89 | 90 | CELERY_BROKER_URL = 'redis://127.0.0.1:6379/12' # 使用celery作为中间件。funboost新增支持celery框架来运行函数,url内容就是celery的broker形式. 91 | CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/13' # celery结果存放,可以为None 92 | 93 | DRAMATIQ_URL = RABBITMQ_URL 94 | 95 | PULSAR_URL = 'pulsar://192.168.70.128:6650' 96 | 97 | 98 | class FunboostCommonConfig(DataClassBase): 99 | # nb_log包的第几个日志模板,内置了7个模板,可以在你当前项目根目录下的nb_log_config.py文件扩展模板。 100 | # NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER = 11 # 7是简短的不可跳转,5是可点击跳转的,11是可显示ip 进程 线程的模板,也可以亲自设置日志模板不传递数字。 101 | NB_LOG_FORMATER_INDEX_FOR_CONSUMER_AND_PUBLISHER = logging.Formatter( 102 | f'%(asctime)s-({nb_log_config_default.computer_ip},{nb_log_config_default.computer_name})-[p%(process)d_t%(thread)d] - %(name)s - "%(filename)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(task_id)s - %(message)s', 103 | "%Y-%m-%d %H:%M:%S",) # 这个是带task_id的日志模板,日志可以显示task_id,方便用户串联起来排查某一个任务消息的所有日志. 104 | 105 | TIMEZONE = 'Asia/Shanghai' # 时区 106 | 107 | # 以下配置是修改funboost的一些命名空间和启动时候的日志级别,新手不熟练就别去屏蔽日志了 108 | SHOW_HOW_FUNBOOST_CONFIG_SETTINGS = True # 如果你单纯想屏蔽 "分布式函数调度框架会自动导入funboost_config模块当第一次运行脚本时候,函数调度框架会在你的python当前项目的根目录下 ...... " 这句话, 109 | FUNBOOST_PROMPT_LOG_LEVEL = logging.DEBUG # funboost启动时候的相关提示语,用户可以设置这个命名空间的日志级别来调整 110 | KEEPALIVETIMETHREAD_LOG_LEVEL = logging.DEBUG # funboost的作者发明的可缩小自适应线程池,用户对可变线程池的线程创建和销毁线程完全无兴趣,可以提高日志级别. 
111 | -------------------------------------------------------------------------------- /git_nb_log_github.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | import time 4 | 5 | def getstatusoutput(cmd): 6 | try: 7 | data = subprocess.check_output(cmd, shell=True, universal_newlines=True, 8 | stderr=subprocess.STDOUT, encoding='utf8') # 必須設置為utf8, 不然报错了。 9 | exitcode = 0 10 | except subprocess.CalledProcessError as ex: 11 | data = ex.output 12 | exitcode = ex.returncode 13 | if data[-1:] == '\n': 14 | data = data[:-1] 15 | return exitcode, data 16 | 17 | def do_cmd(cmd_strx): 18 | print(f'执行 {cmd_strx}') 19 | retx = getstatusoutput(cmd_strx) 20 | print(retx[0]) 21 | # if retx[0] !=0: 22 | # raise ValueError('要检查git提交') 23 | print(retx[1], '\n') 24 | return retx 25 | 26 | t0 = time.time() 27 | 28 | do_cmd('git pull origin') 29 | 30 | do_cmd('git diff') 31 | 32 | do_cmd('git add ./.') 33 | 34 | do_cmd('git commit -m commit') 35 | 36 | 37 | do_cmd('git push origin') 38 | # do_cmd('git push github') 39 | 40 | # print(subprocess.getstatusoutput('git push github')) 41 | print(f'spend_time {time.time() - t0}') 42 | 43 | if __name__ == '__main__': 44 | 45 | time.sleep(1000000) 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/img.png -------------------------------------------------------------------------------- /img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/img_1.png -------------------------------------------------------------------------------- /jietu/color1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/color1.png -------------------------------------------------------------------------------- /jietu/color2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/color2.png -------------------------------------------------------------------------------- /jietu/ding1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/ding1.png -------------------------------------------------------------------------------- /jietu/es.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/es.png -------------------------------------------------------------------------------- /jietu/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/file.png -------------------------------------------------------------------------------- /jietu/img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/img.png 
-------------------------------------------------------------------------------- /jietu/img_no_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/img_no_color.png -------------------------------------------------------------------------------- /jietu/jump2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/jump2.png -------------------------------------------------------------------------------- /jietu/loguru颜色.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/loguru颜色.png -------------------------------------------------------------------------------- /jietu/mainl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/mainl.png -------------------------------------------------------------------------------- /jietu/mongo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/mongo.png -------------------------------------------------------------------------------- /jietu/例子演示.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/例子演示.png -------------------------------------------------------------------------------- /jietu/例子演示2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/例子演示2.png -------------------------------------------------------------------------------- /jietu/例子演示3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/例子演示3.png -------------------------------------------------------------------------------- /jietu/普通roatatingfilehandler多进程错误.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/普通roatatingfilehandler多进程错误.png -------------------------------------------------------------------------------- /jietu/演示.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/jietu/演示.png -------------------------------------------------------------------------------- /nb_log/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import warnings 3 | 4 | from nb_log.set_nb_log_config import use_config_form_nb_log_config_module 5 | from nb_log import nb_log_config_default 6 | 7 | from nb_log.monkey_sys_std import patch_sys_std 8 | if nb_log_config_default.SYS_STD_FILE_NAME: 9 | patch_sys_std() 10 | 11 | from nb_log.monkey_std_filter_words import patch_std_filter_words 12 | if nb_log_config_default.FILTER_WORDS_PRINT: 13 | patch_std_filter_words() 14 | 15 | 
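# A hedged illustration of what the monkey patches in this module buy you; nothing here executes,
# and the rendering details live in nb_log/monkey_print.py further below:
#
#     import nb_log   # importing nb_log is enough when AUTO_PATCH_PRINT is enabled in the config
#     print('hello')  # now printed with a timestamp and a clickable "file.py:lineno" location
#
# patch_sys_std and patch_std_filter_words behave analogously for raw sys.stdout/sys.stderr writes
# and for filtering configured noise words out of print output.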
from nb_log.monkey_print import nb_print, patch_print, reverse_patch_print, stdout_write, stderr_write, print_raw, is_main_process, only_print_on_main_process 16 | if nb_log_config_default.AUTO_PATCH_PRINT: 17 | patch_print() 18 | 19 | 20 | 21 | from nb_log.helpers import generate_error_file_name 22 | from nb_log import handlers 23 | from nb_log.log_manager import (LogManager, LoggerLevelSetterMixin, LoggerMixin, LoggerMixinDefaultWithFileHandler,FileLoggerMixin, 24 | MetaTypeLogger,MetaTypeFileLogger, 25 | get_logger, get_logger_with_filehanlder, 26 | ) 27 | from nb_log.loggers_imp.compatible_logger import CompatibleLogger 28 | 29 | simple_logger = get_logger('simple') 30 | defaul_logger = LogManager('defaul').get_logger_and_add_handlers(do_not_use_color_handler=True, formatter_template=7) 31 | default_file_logger = LogManager('default_file_logger').get_logger_and_add_handlers(log_filename='default_file_logger.log') 32 | 33 | logger_dingtalk_common = LogManager('钉钉通用报警提示').get_logger_and_add_handlers( 34 | ding_talk_token=nb_log_config_default.DING_TALK_TOKEN, 35 | log_filename='dingding_common.log') 36 | 37 | from nb_log import global_except_hook 38 | from nb_log.exception_auto_log import LogException 39 | 40 | # warnings.simplefilter('always') # 避免维护 sys.__dict__['__warningregistry__'] 字典,由 warning.warn 引起的内存泄漏 41 | # logging.captureWarnings(True) # 将warning.warn的sys.stderr 转化成日志. 42 | # get_logger('',log_level_int=logging.WARNING,log_filename='root.log') # py.warnings 43 | 44 | from nb_log.root_logger import root_logger 45 | 46 | 47 | from nb_log.capture_warnings import capture_warnings_with_frequency_control 48 | 49 | from nb_log.direct_logger import debug,info,warning,error,exception,critical 50 | 51 | if nb_log_config_default.SHOW_NB_LOG_LOGO: 52 | only_print_on_main_process('\033[0m' + r""" 53 | 54 | .__ __. .______ __ ______ _______ 55 | | \ | | | _ \ | | / __ \ / _____| 56 | | \| | | |_) | ______| | | | | | | | __ 57 | | . 
` | | _ < |______| | | | | | | | |_ | 58 | | |\ | | |_) | | `----.| `--' | | |__| | 59 | |__| \__| |______/ |_______| \______/ \______| 60 | 61 | """ + '\033[0m') 62 | 63 | if nb_log_config_default.SHOW_PYCHARM_COLOR_SETINGS: 64 | only_print_on_main_process( 65 | """\033[0m 66 | 1)使用pycharm时候,强烈建议按下面的重新自定义设置pycharm的console里面的主题颜色,否则颜色显示瞎眼,代码里面规定的颜色只是大概的红黄蓝绿。在不同的ide软件和主题、字体下是不同的显示效果,需要用户自己设置。 67 | 设置方式为 打开pycharm的 file -> settings -> Editor -> Color Scheme -> Console Colors 选择monokai,点击展开 ANSI colors, 68 | 并重新修改自定义7个颜色,设置Blue为 0454F3 ,Cyan为 06F0F6 ,Green 为 13FC02 ,Magenta为 ff1cd5 ,red为 F80606 ,yellow为 EAFA04 ,gray 为 FFFFFF ,white 为 FFFFFF 。 69 | 不同版本的pycahrm或主题或ide,可以根据控制台根据实际显示设置。 70 | 71 | 2)使用xshell或finashell工具连接linux也可以自定义主题颜色,默认使用shell连接工具的颜色也可以。 72 | 73 | 颜色效果如连接 https://ibb.co/qRssXTr 74 | 75 | 在当前项目根目录的 nb_log_config.py 中可以修改当get_logger方法不传参时后的默认日志行为。 76 | 77 | nb_log文档 https://nb-log-doc.readthedocs.io/zh_CN/latest/ 78 | 79 | 为什么要设置pycharm终端颜色,解释为什么\\033不能决定最终颜色 https://nb-log-doc.readthedocs.io/zh_CN/latest/articles/c1.html#c 80 | \033[0m 81 | 82 | """) 83 | 84 | 85 | 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /nb_log/capture_warnings.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import warnings 4 | from collections import defaultdict 5 | import nb_log 6 | 7 | _warnings_showwarning = None 8 | 9 | 10 | class GlobalVars: 11 | interval = None 12 | logger = None 13 | 14 | 15 | file_line__ts = defaultdict(float) 16 | 17 | FQ_CAPTURE_WARNINGS_LOGGER_NAME = 'fq_capture_warnings' # 控频日志 18 | 19 | 20 | def _frequency_control_showwarning(message, category, filename, lineno, file=None, line=None): 21 | """ 22 | Implementation of showwarnings which redirects to logging, which will first 23 | check to see if the file parameter is None. If a file is specified, it will 24 | delegate to the original warnings implementation of showwarning. Otherwise, 25 | it will call warnings.formatwarning and will log the resulting string to a 26 | warnings logger named "py.warnings" with level logging.WARNING. 27 | """ 28 | if file is not None: 29 | if _warnings_showwarning is not None: 30 | _warnings_showwarning(message, category, filename, lineno, file, line) 31 | else: 32 | key = (filename, lineno) 33 | last_show_log_ts = file_line__ts[key] 34 | if time.time() - last_show_log_ts > GlobalVars.interval: 35 | file_line__ts[key] = time.time() 36 | s = warnings.formatwarning(message, category, filename, lineno, line) 37 | GlobalVars.logger.warning("%s", s) 38 | 39 | 40 | 41 | def capture_warnings_with_frequency_control(capture: bool = True, interval=10): 42 | """ 43 | 对相同文件代码行的警告,使用控频来记录警告 44 | """ 45 | warnings.simplefilter('always', ) # 先设置成始终打印警告,防止python维护 __warningregistry__ 字典造成内存泄漏,然后使用上面的控频日志来记录. 
46 | global _warnings_showwarning 47 | GlobalVars.logger = nb_log.get_logger(FQ_CAPTURE_WARNINGS_LOGGER_NAME, log_filename=f'{FQ_CAPTURE_WARNINGS_LOGGER_NAME}.log') 48 | if capture: 49 | if _warnings_showwarning is None: 50 | _warnings_showwarning = warnings.showwarning 51 | warnings.showwarning = _frequency_control_showwarning 52 | else: 53 | if _warnings_showwarning is not None: 54 | warnings.showwarning = _warnings_showwarning 55 | _warnings_showwarning = None 56 | GlobalVars.interval = interval 57 | -------------------------------------------------------------------------------- /nb_log/direct_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import nb_log 4 | 5 | ''' 6 | 7 | 有的笨瓜总是不能理解 logging.getLogger第一个入参name的作用和巨大好处,老是觉得需要实例化生成 logger 对象觉得麻烦,想开箱即用,那就满足这种人。 8 | 用from loguru import logger 这种日志,先不同模块或功能的日志设置不同级别,不同的模块写入不同的文件,非常麻烦不优雅。 9 | 但有的人完全不理解 日志命名空间的作用,只会抱怨nb_log的例子要他实例化不同name的logger麻烦,那就满足这种人,不用他手动实例化生成不同命名空间的logger。 10 | 11 | import nb_log 12 | 13 | nb_log.debug('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用debug函数,那就满足这种人') 14 | nb_log.info('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用info函数,那就满足这种人') 15 | nb_log.warning('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用warning函数,那就满足这种人') 16 | nb_log.error('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用error函数,那就满足这种人') 17 | nb_log.critical('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用critical函数,那就满足这种人') 18 | 19 | 20 | loguru的用法是: 21 | from loguru import logger 22 | logger.debug(msg) 23 | 24 | nb_log的用法是: 25 | import nb_log 26 | nb_log.debug(msg) 27 | 28 | nb_log比loguru少了 from import那不是更简洁了吗?满足这种只知道追求简单的笨瓜。 29 | ''' 30 | 31 | direct_logger = nb_log.LogManager('nb_log_direct', logger_cls=nb_log.CompatibleLogger).get_logger_and_add_handlers(log_filename='nb_log_direct.log') 32 | 33 | 34 | def _convert_extra(kwargs: dict): 35 | """ 36 | 因为封装了原生logging的 debug info等方法,要显示实际的打印日志的文件和行号,需要把查找调用层级加大一级 37 | :param kwargs: 38 | :return: 39 | """ 40 | extra = kwargs.get('extra', {}) 41 | extra.update({"sys_getframe_n": 3}) 42 | kwargs['extra'] = extra 43 | 44 | 45 | def debug(msg, *args, **kwargs): 46 | _convert_extra(kwargs) 47 | direct_logger.debug(msg, *args, **kwargs) 48 | 49 | 50 | def info(msg, *args, **kwargs): 51 | _convert_extra(kwargs) 52 | direct_logger.info(msg, *args, **kwargs) 53 | 54 | 55 | def warning(msg, *args, **kwargs): 56 | _convert_extra(kwargs) 57 | direct_logger.warning(msg, *args, **kwargs) 58 | 59 | 60 | def error(msg, *args, **kwargs): 61 | _convert_extra(kwargs) 62 | direct_logger.error(msg, *args, **kwargs) 63 | 64 | 65 | def exception(msg, *args, **kwargs): 66 | _convert_extra(kwargs) 67 | direct_logger.exception(msg, *args, **kwargs) 68 | 69 | 70 | def critical(msg, *args, **kwargs): 71 | _convert_extra(kwargs) 72 | direct_logger.critical(msg, *args, **kwargs) 73 | -------------------------------------------------------------------------------- /nb_log/exception_auto_log.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import logging 4 | import nb_log 5 | from nb_log import CompatibleLogger 6 | 7 | 8 | class LogException(Exception): 9 | """ 10 | 自动记录日志的异常,抛出异常不需要单独再写日志 11 | """ 12 | logger: logging.Logger = None 13 | is_record_log: bool = True 14 | 15 | def __init__(self, err_msg, *, logger: logging.Logger = None, is_record_log: bool = True): # real signature unknown 16 | 
logger = logger or self.__class__.logger 17 | self.err_msg = err_msg 18 | if logger and (is_record_log or self.__class__.is_record_log): 19 | logger.error(self.err_msg, extra={'sys_getframe_n': 3}) 20 | 21 | def __str__(self): 22 | return str(self.err_msg) 23 | 24 | 25 | if __name__ == '__main__': 26 | loggerx = nb_log.LogManager('log_exc', logger_cls=CompatibleLogger).get_logger_and_add_handlers(log_filename='log_exc.log') 27 | 28 | # try: 29 | # raise LogException(['cccc', 222], logger=loggerx) 30 | # except Exception as e: 31 | # print(e) 32 | # try: 33 | # raise LogException('cccc', logger=loggerx) 34 | # except Exception as e: 35 | # loggerx.exception(e) 36 | 37 | # print('aaaaaaaaaaaaaaaa') 38 | time.sleep(1) 39 | raise LogException(['cccc', 222], logger=loggerx) # 40 | 41 | print('ok') 42 | -------------------------------------------------------------------------------- /nb_log/file_write.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from functools import wraps 3 | from pathlib import Path 4 | from nb_log import nb_log_config_default 5 | import time 6 | from chained_mode_time_tool import DatetimeConverter 7 | from nb_log.simple_print import sprint 8 | 9 | 10 | def singleton(cls): 11 | """ 12 | 单例模式装饰器,新加入线程锁,更牢固的单例模式,主要解决多线程如100线程同时实例化情况下可能会出现三例四例的情况,实测。 13 | """ 14 | _instance = {} 15 | singleton.__lock = threading.Lock() # 这里直接演示了线程安全版单例模式 16 | 17 | @wraps(cls) 18 | def _singleton(*args, **kwargs): 19 | with singleton.__lock: 20 | if cls not in _instance: 21 | _instance[cls] = cls(*args, **kwargs) 22 | return _instance[cls] 23 | 24 | return _singleton 25 | 26 | 27 | # @singleton 28 | class FileWritter: 29 | _lock = threading.Lock() 30 | need_write_2_file = True 31 | 32 | def __init__(self, file_name: str, log_path=nb_log_config_default.LOG_PATH): 33 | if self.need_write_2_file: 34 | self._file_name = file_name 35 | self.log_path = log_path 36 | if not Path(self.log_path).exists(): 37 | sprint(f'自动创建日志文件夹 {log_path}') 38 | Path(self.log_path).mkdir(exist_ok=True) 39 | self._open_file() 40 | self._last_write_ts = time.time() 41 | self._last_del_old_files_ts = time.time() 42 | 43 | def _open_file(self): 44 | self.file_path = Path(self.log_path) / Path(DatetimeConverter().date_str + '.' + self._file_name) 45 | self._f = open(self.file_path, encoding='utf8', mode='a') 46 | 47 | def _close_file(self): 48 | self._f.close() 49 | 50 | def write_2_file(self, msg): 51 | if self.need_write_2_file: 52 | with self._lock: 53 | now_ts = time.time() 54 | if now_ts - self._last_write_ts > 5: 55 | self._last_write_ts = time.time() 56 | self._close_file() 57 | self._open_file() 58 | self._f.write(msg) 59 | self._f.flush() 60 | if now_ts - self._last_del_old_files_ts > 300: 61 | self._last_del_old_files_ts = time.time() 62 | self._delete_old_files() 63 | 64 | def _delete_old_files(self): 65 | for i in range(10, 100): 66 | file_path = Path(self.log_path) / Path(DatetimeConverter(time.time() - 86400 * i).date_str + '.' 
+ self._file_name) 67 | try: 68 | file_path.unlink() 69 | except FileNotFoundError: 70 | pass 71 | 72 | 73 | class PrintFileWritter(FileWritter): 74 | _lock = threading.Lock() 75 | need_write_2_file = False if nb_log_config_default.PRINT_WRTIE_FILE_NAME in (None, '') else True 76 | 77 | 78 | class StdFileWritter(FileWritter): 79 | _lock = threading.Lock() 80 | need_write_2_file = False if nb_log_config_default.SYS_STD_FILE_NAME in (None, '') else True 81 | 82 | 83 | if __name__ == '__main__': 84 | fw = FileWritter('test_file3', '/test_dir2') 85 | t1 = time.time() 86 | for i in range(10000): 87 | fw.write_2_file(''' 11:18:13 "D:\codes\nb_log\tests\test_use_curretn_dir_config\test_s_print2.py:9" 2023-07-05 10:48:35 - lalala - "D:/codes/funboost/test_frame/test_nb_log/log_example.py:15" - - ERROR - 粉红色说明代码有错误。 粉红色说明代码有错误。 粉红色说明代码有错误。 粉红色说明代码有错误。 88 | ''') 89 | print(time.time()-t1) -------------------------------------------------------------------------------- /nb_log/formatters.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import typing 3 | from logging import Formatter, LogRecord 4 | 5 | 6 | class ContextFormatter(Formatter): 7 | def __init__(self, *args, get_context_field_fun: typing.Callable = None, **kwargs, ): 8 | super().__init__(*args, **kwargs) 9 | self.get_context_field_fun = get_context_field_fun 10 | 11 | def formatMessage(self, record: LogRecord): 12 | context_id = '' 13 | if self.get_context_field_fun: 14 | context_id = self.get_context_field_fun() 15 | setattr(record, 'context_id', context_id) 16 | return self._style.format(record) 17 | 18 | 19 | if __name__ == '__main__': 20 | def f(): 21 | return 'aaaaaa6666' 22 | 23 | 24 | logger = logging.getLogger('abcd') 25 | sh = logging.StreamHandler() 26 | formatter = ContextFormatter( 27 | '%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - %(levelname)s - %(context_id)s - %(message)s - File "%(pathname)s", line %(lineno)d ', 28 | "%Y-%m-%d %H:%M:%S", get_context_field_fun=None) 29 | sh.setFormatter(formatter) 30 | logger.addHandler(sh) 31 | logger.setLevel(logging.DEBUG) 32 | 33 | logger.info('哈哈哈哈') 34 | -------------------------------------------------------------------------------- /nb_log/frequency_control_log.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | import sys 4 | import time 5 | import typing 6 | 7 | from nb_libs.sys_frame_uitils import EasyFrame 8 | 9 | 10 | class FrequencyControlLog: 11 | file_line__ts_map = dict() 12 | 13 | def __init__(self, logger: logging.Logger, interval=10): 14 | self.logger = logger 15 | self.interval = interval 16 | 17 | @staticmethod 18 | def _pass(*args, **kwargs): 19 | pass 20 | 21 | def _fq(self, method): 22 | ef = EasyFrame(2) 23 | file_line = (ef.filename, ef.lineno) 24 | last_ts_log = self.file_line__ts_map.get(file_line, 0) 25 | if not self.interval: 26 | return method 27 | if time.time() - last_ts_log > self.interval: 28 | self.file_line__ts_map[file_line] = time.time() 29 | return method 30 | return self._pass 31 | 32 | @property 33 | def log(self, ) -> logging.Logger.log: 34 | return self._fq(self.logger.log) 35 | 36 | @property 37 | def debug(self, ) -> typing.Callable: 38 | return self._fq(self.logger.debug) 39 | 40 | @property 41 | def info(self, ) -> logging.Logger.info: 42 | return self._fq(self.logger.info) 43 | 44 | @property 45 | def warning(self, ) -> logging.Logger.warning: 46 | return self._fq(self.logger.warning) 47 
| 48 | @property 49 | def error(self, ) -> logging.Logger.error: 50 | return self._fq(self.logger.error) 51 | 52 | @property 53 | def critical(self, ) -> logging.Logger.critical: 54 | return self._fq(self.logger.critical) 55 | 56 | # stacklevel 只能支持python3.9 以上的logging 57 | # def log(self, level, msg, *args, stacklevel=2, interval: int = None, **kwargs): 58 | # ef = EasyFrame(1) 59 | # file_line = (ef.filename, ef.lineno) 60 | # last_ts_log = self.file_line__ts_map.get(file_line, 0) 61 | # if not interval: 62 | # self.logger.log(level, msg, *args, stacklevel=stacklevel, **kwargs) 63 | # else: 64 | # if time.time() - last_ts_log > interval: 65 | # self.logger.log(level, msg, *args, stacklevel=stacklevel, **kwargs) 66 | # self.file_line__ts_map[file_line] = time.time() 67 | -------------------------------------------------------------------------------- /nb_log/global_except_hook.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | import sys 4 | import logging 5 | import nb_log 6 | 7 | 8 | logger = nb_log.get_logger('global_except_hook',log_filename='global_except_hook.log') 9 | def global_except_hook(exctype, value, tracebackx): 10 | # 输出异常信息到日志 11 | # print(exctype) 12 | # print(value) 13 | # print(traceback.format_tb(tracebackx)) 14 | logger.error('Unhandled exception:', exc_info=(exctype, value, tracebackx)) 15 | 16 | # 设置全局异常钩子 17 | sys.excepthook = global_except_hook 18 | 19 | 20 | 21 | if __name__ == '__main__': 22 | # 测试异常 23 | def test_exception(): 24 | try: 25 | raise ValueError('Test exception') 26 | except Exception as e: 27 | pass 28 | raise OSError('aaaa32') from e 29 | 30 | 31 | # 触发异常 32 | print(ValueError.__name__) 33 | test_exception() -------------------------------------------------------------------------------- /nb_log/handlers_loguru.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import typing 5 | import uuid 6 | from nb_log import nb_log_config_default 7 | 8 | 9 | class LoguruStreamHandler(logging.Handler): 10 | """ 11 | loguru 的 控制台效果 12 | """ 13 | 14 | format = ("{time:YYYY-MM-DD HH:mm:ss.SSS} | {extra[namespace]} | " 15 | "{level: <8} | " 16 | "{name}:{function}:{line} - {message}") 17 | 18 | def __init__(self, logger_name, sink: typing.Any = sys.stdout): 19 | logging.Handler.__init__(self) 20 | self._logger_name = logger_name 21 | self._sink = sink 22 | from loguru._logger import Logger, Core 23 | 24 | logger = Logger( 25 | core=Core(), 26 | exception=None, 27 | depth=6, # 写6是为了显示实际的日志发生处,而不是封装loguru的emit方法处。 28 | record=False, 29 | lazy=False, 30 | colors=False, 31 | raw=False, 32 | capture=True, 33 | patchers=[], 34 | extra={}, 35 | ) 36 | 37 | self._bind_for = uuid.uuid4() 38 | self._add_handler(logger, ) 39 | # print(logger._core.handlers) 40 | self.logurux = logger.bind(namespace=logger_name, 41 | # bind_for = self._bind_for 42 | ) 43 | 44 | 45 | def _add_handler(self, logger, ): 46 | logger.add(self._sink, 47 | # filter=lambda record: record["extra"]["bind_for"] == self._bind_for, 48 | format=self.format) 49 | 50 | 51 | def emit(self, record): 52 | self.logurux.opt(depth=6, exception=record.exc_info).log(record.levelname, record.getMessage()) 53 | 54 | 55 | class LoguruFileHandler(LoguruStreamHandler): 56 | """ 57 | loguru 的 文件日志写入 58 | """ 59 | 60 | def _add_handler(self, logger, ): 61 | ''' 62 | 63 | :param logger: 64 | :return: 65 | ''' 66 | log_file_full_path = self._sink 67 | # rotation = "100 MB" 
"00:00" 68 | arr = log_file_full_path.split('.') 69 | part1 = '.'.join(arr[:-1]) 70 | part2 = arr[-1] 71 | loguru_file = f'{part1}.{{time:YYYYMMDD}}.loguru.{part2}' 72 | 73 | # rotation_size = 1024 * 1024 # 1MB 74 | rotation_size = f"{nb_log_config_default.LOG_FILE_SIZE} MB" 75 | rotation_time = "00:00" # 每天的 00:00 76 | 77 | logger.add(loguru_file, 78 | # filter=lambda record: record["extra"]["bind_for"] == self._bind_for, 79 | format=self.format, 80 | enqueue=True, 81 | # rotation=f"{nb_log_config_default.LOG_FILE_SIZE} MB", 82 | rotation=rotation_time, 83 | retention=nb_log_config_default.LOG_FILE_BACKUP_COUNT 84 | ) 85 | -------------------------------------------------------------------------------- /nb_log/handlers_more.py: -------------------------------------------------------------------------------- 1 | # noinspection PyMissingOrEmptyDocstring 2 | 3 | import sys 4 | import os 5 | import threading 6 | import traceback 7 | import socket 8 | import datetime 9 | import json 10 | import time 11 | import typing 12 | 13 | from collections import OrderedDict 14 | from pathlib import Path 15 | from queue import Queue, Empty 16 | # noinspection PyPackageRequirements 17 | from kafka import KafkaProducer 18 | # from elasticsearch import Elasticsearch, helpers # 性能导入时间消耗2秒,实例化时候再导入。 19 | from threading import Lock, Thread 20 | import pymongo 21 | 22 | import logging 23 | 24 | from logging.handlers import WatchedFileHandler 25 | 26 | from nb_log.monkey_print import nb_print 27 | 28 | very_nb_print = nb_print 29 | os_name = os.name 30 | 31 | host_name = socket.gethostname() 32 | 33 | 34 | class MongoHandler(logging.Handler): 35 | """ 36 | 一个mongodb的log handler,支持日志按loggername创建不同的集合写入mongodb中 37 | """ 38 | 39 | # msg_pattern = re.compile('(\d+-\d+-\d+ \d+:\d+:\d+) - (\S*?) - (\S*?) - (\d+) - (\S*?) 
- ([\s\S]*)') 40 | 41 | def __init__(self, mongo_url, mongo_database='logs'): 42 | """ 43 | :param mongo_url: mongo连接 44 | :param mongo_database: 保存日志的数据库,默认使用logs数据库 45 | """ 46 | logging.Handler.__init__(self) 47 | mongo_client = pymongo.MongoClient(mongo_url) 48 | self.mongo_db = mongo_client.get_database(mongo_database) 49 | 50 | def emit(self, record): 51 | # noinspection PyBroadException, PyPep8 52 | try: 53 | """以下使用解析日志模板的方式提取出字段""" 54 | # msg = self.format(record) 55 | # logging.LogRecord 56 | # msg_match = self.msg_pattern.search(msg) 57 | # log_info_dict = {'time': msg_match.group(1), 58 | # 'name': msg_match.group(2), 59 | # 'file_name': msg_match.group(3), 60 | # 'line_no': msg_match.group(4), 61 | # 'log_level': msg_match.group(5), 62 | # 'detail_msg': msg_match.group(6), 63 | # } 64 | level_str = None 65 | if record.levelno == 10: 66 | level_str = 'DEBUG' 67 | elif record.levelno == 20: 68 | level_str = 'INFO' 69 | elif record.levelno == 30: 70 | level_str = 'WARNING' 71 | elif record.levelno == 40: 72 | level_str = 'ERROR' 73 | elif record.levelno == 50: 74 | level_str = 'CRITICAL' 75 | log_info_dict = OrderedDict() 76 | log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S') 77 | log_info_dict['name'] = record.name 78 | log_info_dict['file_path'] = record.pathname 79 | log_info_dict['file_name'] = record.filename 80 | log_info_dict['func_name'] = record.funcName 81 | log_info_dict['line_no'] = record.lineno 82 | log_info_dict['log_level'] = level_str 83 | log_info_dict['detail_msg'] = record.msg 84 | col = self.mongo_db.get_collection(record.name) 85 | col.insert_one(log_info_dict) 86 | except (KeyboardInterrupt, SystemExit): 87 | raise 88 | except Exception: 89 | self.handleError(record) 90 | 91 | 92 | class KafkaHandler(logging.Handler): 93 | """ 94 | 日志批量写入kafka中。 95 | """ 96 | ES_INTERVAL_SECONDS = 0.5 97 | 98 | host_name = host_name 99 | host_process = f'{host_name} -- {os.getpid()}' 100 | 101 | script_name = sys.argv[0].split('/')[-1] 102 | 103 | task_queue = Queue() 104 | last_es_op_time = time.time() 105 | has_start_do_bulk_op = False 106 | has_start_check_size_and_clear = False 107 | 108 | kafka_producer = None 109 | es_index_prefix = 'pylog-' 110 | 111 | def __init__(self, bootstrap_servers, **configs): 112 | """ 113 | :param elastic_hosts: es的ip地址,数组类型 114 | :param elastic_port: es端口 115 | :param index_prefix: index名字前缀。 116 | """ 117 | logging.Handler.__init__(self) 118 | if not self.__class__.kafka_producer: 119 | very_nb_print('实例化kafka producer') 120 | self.__class__.kafka_producer = KafkaProducer(bootstrap_servers=bootstrap_servers, **configs) 121 | 122 | t = Thread(target=self._do_bulk_op) 123 | t.setDaemon(True) 124 | t.start() 125 | 126 | @classmethod 127 | def __add_task_to_bulk(cls, task): 128 | cls.task_queue.put(task) 129 | 130 | # noinspection PyUnresolvedReferences 131 | @classmethod 132 | def __clear_bulk_task(cls): 133 | cls.task_queue.queue.clear() 134 | 135 | @classmethod 136 | def _check_size_and_clear(cls): 137 | """ 138 | 如果是外网传输日志到测试环境风险很大,测试环境网络经常打满,传输不了会造成日志队列堆积,会造成内存泄漏,所以需要清理。 139 | :return: 140 | """ 141 | if cls.has_start_check_size_and_clear: 142 | return 143 | cls.has_start_check_size_and_clear = True 144 | 145 | def __check_size_and_clear(): 146 | while 1: 147 | size = cls.task_queue.qsize() 148 | if size > 1000: 149 | very_nb_print(f'kafka防止意外日志积累太多了,达到 {size} 个,为防止内存泄漏,清除队列') 150 | cls.__clear_bulk_task() 151 | time.sleep(0.1) 152 | 153 | t = Thread(target=__check_size_and_clear) 154 | t.setDaemon(True) 155 | t.start() 156 | 
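    # A minimal usage sketch for this handler (hypothetical logger name; assumes a Kafka broker
    # reachable at the given bootstrap address). emit() only enqueues records; the background
    # thread started in __init__ flushes them to a per-logger topic:
    #
    #     logger = logging.getLogger('biz_log')
    #     logger.setLevel(logging.INFO)
    #     logger.addHandler(KafkaHandler(bootstrap_servers=['127.0.0.1:9092']))
    #     logger.info('this record is queued and flushed to kafka in batches')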
157 | @classmethod 158 | def _do_bulk_op(cls): 159 | if cls.has_start_do_bulk_op: 160 | return 161 | 162 | cls.has_start_do_bulk_op = True 163 | # very_nb_print(cls.kafka_producer) 164 | while 1: 165 | try: 166 | # noinspection PyUnresolvedReferences 167 | tasks = list(cls.task_queue.queue) 168 | cls.__clear_bulk_task() 169 | for task in tasks: 170 | topic = (cls.es_index_prefix + task['name']).replace('.', '').replace('_', '').replace('-', '') 171 | # very_nb_print(topic) 172 | cls.kafka_producer.send(topic, json.dumps(task).encode()) 173 | cls.last_es_op_time = time.time() 174 | except Exception as e: 175 | very_nb_print(e) 176 | finally: 177 | time.sleep(cls.ES_INTERVAL_SECONDS) 178 | 179 | def emit(self, record): 180 | # noinspection PyBroadException, PyPep8 181 | try: 182 | level_str = None 183 | if record.levelno == 10: 184 | level_str = 'DEBUG' 185 | elif record.levelno == 20: 186 | level_str = 'INFO' 187 | elif record.levelno == 30: 188 | level_str = 'WARNING' 189 | elif record.levelno == 40: 190 | level_str = 'ERROR' 191 | elif record.levelno == 50: 192 | level_str = 'CRITICAL' 193 | log_info_dict = OrderedDict() 194 | log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat() 195 | log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S') 196 | log_info_dict['name'] = record.name 197 | log_info_dict['host'] = self.host_name 198 | log_info_dict['host_process'] = self.host_process 199 | # log_info_dict['file_path'] = record.pathname 200 | log_info_dict['file_name'] = record.filename 201 | log_info_dict['func_name'] = record.funcName 202 | # log_info_dict['line_no'] = record.lineno 203 | log_info_dict['log_place'] = f'{record.pathname}:{record.lineno}' 204 | log_info_dict['log_level'] = level_str 205 | log_info_dict['msg'] = str(record.msg) 206 | log_info_dict['script'] = self.script_name 207 | log_info_dict['es_index'] = f'{self.es_index_prefix}{record.name.lower()}' 208 | self.__add_task_to_bulk(log_info_dict) 209 | 210 | except (KeyboardInterrupt, SystemExit): 211 | raise 212 | except Exception: 213 | self.handleError(record) 214 | 215 | 216 | class ElasticHandler000(logging.Handler): 217 | """ 218 | 日志批量写入es中。 219 | """ 220 | ES_INTERVAL_SECONDS = 2 221 | host_name = host_name 222 | 223 | def __init__(self, elastic_hosts: list, elastic_port, index_prefix='pylog-'): 224 | """ 225 | :param elastic_hosts: es的ip地址,数组类型 226 | :param elastic_port: es端口 227 | :param index_prefix: index名字前缀。 228 | """ 229 | from elasticsearch import Elasticsearch, helpers 230 | self._helpers = helpers 231 | logging.Handler.__init__(self) 232 | self._es_client = Elasticsearch(elastic_hosts, port=elastic_port) 233 | self._index_prefix = index_prefix 234 | self._task_list = [] 235 | self._task_queue = Queue() 236 | self._last_es_op_time = time.time() 237 | t = Thread(target=self._do_bulk_op) 238 | t.setDaemon(True) 239 | t.start() 240 | 241 | def __add_task_to_bulk(self, task): 242 | self._task_queue.put(task) 243 | 244 | def __clear_bulk_task(self): 245 | # noinspection PyUnresolvedReferences 246 | self._task_queue.queue.clear() 247 | 248 | def _do_bulk_op(self): 249 | while 1: 250 | try: 251 | if self._task_queue.qsize() > 10000: 252 | very_nb_print('防止意外日志积累太多了,不插入es了。') 253 | self.__clear_bulk_task() 254 | return 255 | # noinspection PyUnresolvedReferences 256 | tasks = list(self._task_queue.queue) 257 | self.__clear_bulk_task() 258 | self._helpers.bulk(self._es_client, tasks) 259 | 260 | self._last_es_op_time = time.time() 261 | except Exception as e: 262 | 
very_nb_print(e) 263 | finally: 264 | time.sleep(1) 265 | 266 | def emit(self, record): 267 | # noinspection PyBroadException, PyPep8 268 | try: 269 | level_str = None 270 | if record.levelno == 10: 271 | level_str = 'DEBUG' 272 | elif record.levelno == 20: 273 | level_str = 'INFO' 274 | elif record.levelno == 30: 275 | level_str = 'WARNING' 276 | elif record.levelno == 40: 277 | level_str = 'ERROR' 278 | elif record.levelno == 50: 279 | level_str = 'CRITICAL' 280 | log_info_dict = OrderedDict() 281 | log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat() 282 | log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S') 283 | log_info_dict['name'] = record.name 284 | log_info_dict['host'] = self.host_name 285 | log_info_dict['file_path'] = record.pathname 286 | log_info_dict['file_name'] = record.filename 287 | log_info_dict['func_name'] = record.funcName 288 | log_info_dict['line_no'] = record.lineno 289 | log_info_dict['log_level'] = level_str 290 | log_info_dict['msg'] = str(record.msg) 291 | self.__add_task_to_bulk({ 292 | "_index": f'{self._index_prefix}{record.name.lower()}', 293 | # "_type": '_doc', # elastic 7 服务端之后不要传递 type了. 294 | "_source": log_info_dict 295 | }) 296 | 297 | except (KeyboardInterrupt, SystemExit): 298 | raise 299 | except Exception: 300 | self.handleError(record) 301 | 302 | 303 | # noinspection PyUnresolvedReferences 304 | class ElasticHandler(logging.Handler): 305 | """ 306 | 日志批量写入es中。 307 | """ 308 | ES_INTERVAL_SECONDS = 0.5 309 | 310 | host_name = host_name 311 | host_process = f'{host_name} -- {os.getpid()}' 312 | 313 | script_name = sys.argv[0] 314 | 315 | task_queue = Queue() 316 | last_es_op_time = time.time() 317 | has_start_do_bulk_op = False 318 | 319 | def __init__(self, elastic_hosts: list, elastic_port, index_prefix='pylog-'): 320 | """ 321 | :param elastic_hosts: es的ip地址,数组类型 322 | :param elastic_port: es端口 323 | :param index_prefix: index名字前缀。 324 | """ 325 | logging.Handler.__init__(self) 326 | from elasticsearch import Elasticsearch, helpers 327 | self._helpers = helpers 328 | self._es_client = Elasticsearch(elastic_hosts, ) 329 | self._index_prefix = index_prefix 330 | t = Thread(target=self._do_bulk_op) 331 | t.setDaemon(True) 332 | t.start() 333 | 334 | @classmethod 335 | def __add_task_to_bulk(cls, task): 336 | cls.task_queue.put(task) 337 | 338 | # noinspection PyUnresolvedReferences 339 | @classmethod 340 | def __clear_bulk_task(cls): 341 | cls.task_queue.queue.clear() 342 | 343 | def _do_bulk_op(self): 344 | if self.__class__.has_start_do_bulk_op: 345 | return 346 | self.__class__.has_start_do_bulk_op = True 347 | while 1: 348 | try: 349 | if self.__class__.task_queue.qsize() > 10000: 350 | very_nb_print('防止意外日志积累太多了,不插入es了。') 351 | self.__clear_bulk_task() 352 | return 353 | tasks = list(self.__class__.task_queue.queue) 354 | self.__clear_bulk_task() 355 | self._helpers.bulk(self._es_client, tasks) 356 | self.__class__.last_es_op_time = time.time() 357 | except Exception as e: 358 | very_nb_print(e) 359 | finally: 360 | time.sleep(self.ES_INTERVAL_SECONDS) 361 | 362 | def emit(self, record): 363 | # noinspection PyBroadException, PyPep8 364 | try: 365 | level_str = None 366 | if record.levelno == 10: 367 | level_str = 'DEBUG' 368 | elif record.levelno == 20: 369 | level_str = 'INFO' 370 | elif record.levelno == 30: 371 | level_str = 'WARNING' 372 | elif record.levelno == 40: 373 | level_str = 'ERROR' 374 | elif record.levelno == 50: 375 | level_str = 'CRITICAL' 376 | log_info_dict = OrderedDict() 
377 | log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat() 378 | log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S') 379 | log_info_dict['name'] = record.name 380 | log_info_dict['host'] = self.host_name 381 | log_info_dict['host_process'] = self.host_process 382 | log_info_dict['file_path'] = record.pathname 383 | log_info_dict['file_name'] = record.filename 384 | log_info_dict['func_name'] = record.funcName 385 | log_info_dict['line_no'] = record.lineno 386 | log_info_dict['log_level'] = level_str 387 | log_info_dict['msg'] = str(record.msg) 388 | log_info_dict['script'] = self.script_name 389 | self.__add_task_to_bulk({ 390 | "_index": f'{self._index_prefix}{record.name.lower()}', 391 | # "_type": f'_doc', # es7 服务端之后不支持_type设置 392 | "_source": log_info_dict 393 | }) 394 | 395 | except (KeyboardInterrupt, SystemExit): 396 | raise 397 | except Exception: 398 | self.handleError(record) 399 | -------------------------------------------------------------------------------- /nb_log/helpers.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | 4 | def generate_error_file_name(log_filename: str): 5 | """ 6 | 根据正常日志文件名,自动生成错误日志文件名. 7 | :param log_filename: 8 | :return: 9 | """ 10 | if log_filename is None: 11 | return None 12 | arr = log_filename.split('.') 13 | part1 = '.'.join(arr[:-1]) 14 | part2 = arr[-1] 15 | return f'{part1}.error.{part2}' 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /nb_log/loggers_imp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/nb_log/loggers_imp/__init__.py -------------------------------------------------------------------------------- /nb_log/loggers_imp/compatible_logger.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import traceback 4 | import io 5 | import logging 6 | from logging import _srcfile 7 | 8 | ''' 9 | 新增的 NbLogger 类,继承自内置 logging.Logger, 10 | 主要作用是可以改变 sys._getframe 深度, 11 | 目的是 如果用户自己使用代码模式封装了日志类,在自己的类中又多此一举实现 debug info warning error critical 打印日志的方法, 12 | 用户在使用 用户自己类.debug() 时候 ,导致记录日志的行号是用户封装这几个方法的地方,而不是实际打印日志的地方,不方便定位日志是从哪里打印的。 13 | ''' 14 | 15 | ''' 16 | from nb_log import get_logger 17 | 18 | 19 | class 废物日志类: 20 | def __init__(self,name): 21 | self.logger = get_logger(name, _log_filename='废物日志.log') 22 | 23 | def debug(self, msg): 24 | self.logger.debug(msg, extra={'sys_getframe_n': 3}) # 第 x1 行 25 | 26 | def info(self, msg): 27 | self.logger.info(msg, extra={'sys_getframe_n': 3}) # 第 x2 行 28 | 29 | 30 | 废物日志类('命名空间1').info('啊啊啊啊') # 第y行 31 | ''' 32 | 33 | ''' 34 | 有的人手痒,非要封装nb_log,那么封装时候调用原生日志的 info() 务必要传入 extra={'sys_getframe_n': 3} 35 | 如果你不传递 extra={'sys_getframe_n': 3} ,那么 废物日志类().info('啊啊啊啊') ,显示是第 x2 行打印的日志,而不是第 y行打印的日志。 36 | ''' 37 | 38 | 39 | class CompatibleLogger(logging.Logger): 40 | 41 | """ 42 | 写 CompatibleLogger 是在python3.7测试的,python3.9以后官方已经加了stacklevel入参。 43 | 20230705 现在经过github cpython的源码核实,在python3.9版本中 44 | def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, 45 | stacklevel=1): 46 | 47 | def findCaller(self, stack_info=False, stacklevel=1): 48 | 49 | python3.9以上用户可以传递 stacklevel 了,本NbLogger是适配python3.6 3.7 3.8版本, Nblogger 的 sys_getframe_n 入参就是 stacklevel 的意义。 50 | 说明我的思维和python官方人员想到一起去了,3.9以后的logging包debug ingo error等 
支持修改查找调用堆栈的深度层级,防止用户封装了debug info warning 等后,日志模板获取的 文件名 行号是错误深度层级的。 51 | 52 | """ 53 | def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False,**kwargs): 54 | """ 55 | Low-level logging routine which creates a LogRecord and then calls 56 | all the handlers of this logger to handle the record. 57 | """ 58 | 59 | sys_getframe_n =2 60 | if extra and 'sys_getframe_n' in extra: 61 | sys_getframe_n = extra['sys_getframe_n'] 62 | extra.pop('sys_getframe_n') 63 | sinfo = None 64 | if _srcfile: 65 | # IronPython doesn't track Python frames, so findCaller raises an 66 | # exception on some versions of IronPython. We trap it here so that 67 | # IronPython can use logging. 68 | try: 69 | fn, lno, func, sinfo = self.findCaller(stack_info,sys_getframe_n) # 这个改了,加了个入参。 70 | except ValueError: # pragma: no cover 71 | fn, lno, func = "(unknown file)", 0, "(unknown function)" 72 | else: # pragma: no cover 73 | fn, lno, func = "(unknown file)", 0, "(unknown function)" 74 | if exc_info: 75 | if isinstance(exc_info, BaseException): 76 | exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 77 | elif not isinstance(exc_info, tuple): 78 | exc_info = sys.exc_info() 79 | record = self.makeRecord(self.name, level, fn, lno, msg, args, 80 | exc_info, func, extra, sinfo) 81 | self.handle(record) 82 | 83 | def findCaller(self, stack_info=False,sys_getframe_n =2): 84 | """ 85 | 主要是改了这个,使得文件和行号变成用户本身的打印日志地方,而不是封装日志的地方。 86 | :param stack_info: 87 | :param sys_getframe_n: 新增的入参。 88 | :return: 89 | """ 90 | """ 91 | Find the stack frame of the caller so that we can note the source 92 | file name, line number and function name. 93 | """ 94 | f = sys._getframe(sys_getframe_n) # 这行改了。 95 | # f = sys._getframe(3) 96 | # On some versions of IronPython, currentframe() returns None if 97 | # IronPython isn't run with -X:Frames.
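On Python versions whose logging already accepts stacklevel (3.9 according to the docstring above), a wrapper class gets the same correct file and line number without subclassing Logger at all. A minimal sketch, where Wrapper is a hypothetical user class mirroring the 废物日志类 example quoted earlier:

import logging

logging.basicConfig(level=logging.INFO, format='"%(pathname)s:%(lineno)d" - %(message)s')


class Wrapper:
    def __init__(self, name):
        self._logger = logging.getLogger(name)

    def info(self, msg):
        # stacklevel=2 tells findCaller to skip this frame, so the record reports
        # Wrapper's caller, the same effect extra={'sys_getframe_n': 3} has here.
        self._logger.info(msg, stacklevel=2)


Wrapper('demo').info('the reported location is this call site, not Wrapper.info')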
98 | if f is not None: 99 | f = f.f_back 100 | rv = "(unknown file)", 0, "(unknown function)", None 101 | while hasattr(f, "f_code"): 102 | co = f.f_code 103 | filename = os.path.normcase(co.co_filename) 104 | if filename == _srcfile: 105 | f = f.f_back 106 | continue 107 | sinfo = None 108 | if stack_info: 109 | sio = io.StringIO() 110 | sio.write('Stack (most recent call last):\n') 111 | traceback.print_stack(f, file=sio) 112 | sinfo = sio.getvalue() 113 | if sinfo[-1] == '\n': 114 | sinfo = sinfo[:-1] 115 | sio.close() 116 | rv = (co.co_filename, f.f_lineno, co.co_name, sinfo) 117 | break 118 | return rv 119 | 120 | -------------------------------------------------------------------------------- /nb_log/logging_tree_helper.py: -------------------------------------------------------------------------------- 1 | 2 | from logging_tree import printout 3 | 4 | if __name__ == '__main__': 5 | import funboost 6 | printout() -------------------------------------------------------------------------------- /nb_log/monkey_print.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Author : ydf 3 | # @Time : 2022/5/9 19:02 4 | """ 5 | 不直接给print打补丁,自己重新赋值。 6 | 7 | """ 8 | import multiprocessing 9 | import os 10 | import sys 11 | import time 12 | import traceback 13 | from nb_log import nb_log_config_default 14 | # from nb_log.file_write import PrintFileWritter 15 | from nb_log.rotate_file_writter import OsFileWritter 16 | 17 | print_raw = print 18 | WORD_COLOR = nb_log_config_default.WHITE_COLOR_CODE 19 | 20 | 21 | def stdout_write(msg: str): 22 | if sys.stdout: 23 | sys.stdout.write(msg) 24 | sys.stdout.flush() 25 | 26 | 27 | def stderr_write(msg: str): 28 | '''打包exe运行或者做成windwos services 这些情况下情况下,sys.stderr是None,None.write会报错''' 29 | if sys.stderr: 30 | sys.stderr.write(msg) 31 | sys.stderr.flush() 32 | else: 33 | stdout_write(msg) 34 | 35 | 36 | print_wrtie_file_name = os.environ.get('PRINT_WRTIE_FILE_NAME', None) or nb_log_config_default.PRINT_WRTIE_FILE_NAME 37 | 38 | print_file_writter = OsFileWritter(print_wrtie_file_name, log_path=nb_log_config_default.LOG_PATH, 39 | back_count=nb_log_config_default.LOG_FILE_BACKUP_COUNT, max_bytes=nb_log_config_default.LOG_FILE_SIZE * 1024 * 1024) 40 | 41 | 42 | def _print_with_file_line(*args, sep=' ', end='\n', file=None, flush=True, sys_getframe_n=2): 43 | args = (str(arg) for arg in args) # REMIND 防止是数字不能被join 44 | args_str = sep.join(args) + end 45 | # stdout_write(f'56:{file}') 46 | if file == sys.stderr: 47 | stderr_write(args_str) # 如 threading 模块第926行,打印线程错误,希望保持原始的红色错误方式,不希望转成蓝色。 48 | print_file_writter.write_2_file(args_str) 49 | elif file in [sys.stdout, None]: 50 | # 获取被调用函数在被调用时所处代码行数 51 | fra = sys._getframe(sys_getframe_n) 52 | line = fra.f_lineno 53 | file_name = fra.f_code.co_filename 54 | fun = fra.f_code.co_name 55 | now_str= time.strftime("%Y-%m-%d %H:%M:%S") 56 | # mtime = time.gmtime() 57 | # now_str = f'{mtime.tm_year}-{mtime.tm_mon}-{mtime.tm_mday} {mtime.tm_hour}:{mtime.tm_min}:{mtime.tm_sec}' 58 | # sys.stdout.write(f'"{__file__}:{sys._getframe().f_lineno}" {x}\n') 59 | if nb_log_config_default.DEFAULUT_USE_COLOR_HANDLER: 60 | if nb_log_config_default.DISPLAY_BACKGROUD_COLOR_IN_CONSOLE: 61 | stdout_write(f'\033[0;34m{now_str} "{file_name}:{line}" -{fun}-[print]- \033[0;{WORD_COLOR};44m{args_str[:-1]}\033[0m \033[0m\n') # 36 93 96 94 62 | else: 63 | stdout_write( 64 | f'\033[0;{WORD_COLOR};34m{now_str} "{file_name}:{line}" -{fun}-[print]- {args_str[:-1]} \033[0m\n') # 
36 93 96 94 65 | # sys.stdout.write(f'\033[0;30;44m"{file_name}:{line}" {time.strftime("%H:%M:%S")} {"".join(args)}\033[0m\n') 66 | else: 67 | stdout_write( 68 | f'{now_str} "{file_name}:{line}" -{fun}-[print]- {args_str} ') 69 | print_file_writter.write_2_file(f'{now_str} "{file_name}:{line}" -[print]-{fun}- {args_str} ') # 36 93 96 94 70 | else: # 例如traceback模块的print_exception函数 file的入参是 <_io.StringIO object at 0x00000264F2F065E8>,必须把内容重定向到这个对象里面,否则exception日志记录不了错误堆栈。 71 | print_raw(args_str, sep=sep, end=end, file=file) 72 | print_file_writter.write_2_file(args_str) 73 | 74 | 75 | # noinspection PyProtectedMember,PyUnusedLocal,PyIncorrectDocstring,DuplicatedCode 76 | def nb_print(*args, sep=' ', end='\n', file=None, flush=True): 77 | """ 78 | 超流弊的print补丁 79 | :param x: 80 | :return: 81 | """ 82 | _print_with_file_line(*args, sep=sep, end=end, file=file, flush=flush, sys_getframe_n=2) 83 | 84 | 85 | # noinspection PyPep8,PyUnusedLocal 86 | def print_exception(etype, value, tb, limit=None, file=None, chain=True): 87 | """ 88 | 避免每行有两个可跳转的,导致第二个可跳转的不被ide识别。 89 | 主要是针对print_exception,logging.exception里面会调用这个函数。 90 | 91 | # traceback.print_exception = print_exception # file类型为 <_io.StringIO object at 0x00000264F2F065E8> 单独判断sys.stderr sys.stdout 以外的情况了,解决了,不需要用到p rint_exception。 92 | 93 | :param etype: 94 | :param value: 95 | :param tb: 96 | :param limit: 97 | :param file: 98 | :param chain: 99 | :return: 100 | """ 101 | if file is None: 102 | file = sys.stderr 103 | for line in traceback.TracebackException( 104 | type(value), value, tb, limit=limit).format(chain=chain): 105 | # print(line, file=file, end="") 106 | if file != sys.stderr: 107 | stderr_write(f'{line} \n') 108 | else: 109 | stdout_write(f'{line} \n') 110 | 111 | 112 | # print = nb_print 113 | 114 | def patch_print(): 115 | """ 116 | Python有几个namespace,分别是 117 | 118 | locals 119 | 120 | globals 121 | 122 | builtin 123 | 124 | 其中定义在函数内声明的变量属于locals,而模块内定义的函数属于globals。 125 | 126 | 127 | https://codeday.me/bug/20180929/266673.html python – 为什么__builtins__既是模块又是dict 128 | 129 | :return: 130 | """ 131 | try: 132 | __builtins__.print = nb_print 133 | except AttributeError: 134 | """ 135 | 136 | 'dict' object has no attribute 'print' 137 | """ 138 | # noinspection PyUnresolvedReferences 139 | __builtins__['print'] = nb_print 140 | # traceback.print_exception = print_exception # file类型为 <_io.StringIO object at 0x00000264F2F065E8> 单独判断,解决了,不要加这个。 141 | 142 | 143 | def common_print(*args, sep=' ', end='\n', file=None): 144 | args = (str(arg) for arg in args) 145 | args = (str(arg) for arg in args) # REMIND 防止是数字不能被join 146 | if file == sys.stderr: 147 | stderr_write(sep.join(args) + end) # 如 threading 模块第926行,打印线程错误,希望保持原始的红色错误方式,不希望转成蓝色。 148 | else: 149 | stdout_write(sep.join(args) + end) 150 | 151 | 152 | def reverse_patch_print(): 153 | """ 154 | 提供一个反猴子补丁,恢复print原状 155 | :return: 156 | """ 157 | # try: 158 | # __builtins__.print = common_print 159 | # except AttributeError: 160 | # __builtins__['print'] = common_print 161 | 162 | try: 163 | __builtins__.print = print_raw 164 | except AttributeError: 165 | __builtins__['print'] = print_raw 166 | 167 | 168 | def is_main_process(): 169 | return multiprocessing.process.current_process().name == 'MainProcess' 170 | 171 | 172 | # noinspection DuplicatedCode 173 | def only_print_on_main_process(*args, sep=' ', end='\n', file=None, flush=True): 174 | # 获取被调用函数在被调用时所处代码行数 175 | if is_main_process(): 176 | _print_with_file_line(*args, sep=sep, end=end, file=file, flush=flush, 
sys_getframe_n=2) 177 | 178 | 179 | if __name__ == '__main__': 180 | print('before patch') 181 | patch_print() 182 | print(0) 183 | nb_print(123, 'abc') 184 | print(456, 'def') 185 | print('http://www.baidu.com') 186 | 187 | reverse_patch_print() 188 | common_print('hi') 189 | 190 | import logging 191 | 192 | try: 193 | 1 / 0 194 | except Exception as e: 195 | logging.exception(e) 196 | -------------------------------------------------------------------------------- /nb_log/monkey_std_filter_words.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from nb_log import nb_log_config_default 3 | 4 | 5 | def _need_filter_print(msg: str): 6 | for strx in nb_log_config_default.FILTER_WORDS_PRINT: 7 | if strx in str(msg): 8 | return True # 过滤掉需要屏蔽的打印 9 | return False 10 | 11 | 12 | sys_stdout_write_raw = sys.stdout.write 13 | sys_stderr_write_raw = sys.stderr.write 14 | 15 | 16 | def _sys_stdout_write_monkey(msg: str): 17 | if _need_filter_print(msg): 18 | return 19 | else: 20 | sys_stdout_write_raw(msg) 21 | 22 | 23 | def _sys_stderr_write_monkey(msg: str): 24 | if _need_filter_print(msg): 25 | return 26 | else: 27 | sys_stderr_write_raw(msg) 28 | 29 | def patch_std_filter_words(): 30 | sys.stdout.write = _sys_stdout_write_monkey # 对 sys.stdout.write 打了猴子补丁。使得可以过滤包含指定字符串的消息。 31 | sys.stderr.write = _sys_stderr_write_monkey 32 | -------------------------------------------------------------------------------- /nb_log/monkey_sys_std.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import os 3 | import sys 4 | import re 5 | import queue 6 | import threading 7 | import time 8 | # from nb_log.file_write import StdFileWritter 9 | from nb_log.rotate_file_writter import OsFileWritter 10 | from nb_log import nb_log_config_default 11 | 12 | stdout_raw = sys.stdout.write 13 | stderr_raw = sys.stderr.write 14 | 15 | dele_color_pattern = re.compile('\\033\[.+?m') 16 | 17 | sys_std_file_name = os.environ.get('SYS_STD_FILE_NAME', None) or nb_log_config_default.SYS_STD_FILE_NAME 18 | std_writter = OsFileWritter(sys_std_file_name,log_path=nb_log_config_default.LOG_PATH, 19 | back_count=nb_log_config_default.LOG_FILE_BACKUP_COUNT,max_bytes=nb_log_config_default.LOG_FILE_SIZE * 1024 * 1024) 20 | 21 | is_win = True if os.name == 'nt' else False 22 | 23 | 24 | 25 | class BulkStdout: 26 | q = queue.SimpleQueue() 27 | _lock = threading.Lock() 28 | _has_start_bulk_stdout = False 29 | 30 | @classmethod 31 | def _bulk_real_stdout(cls): 32 | with cls._lock: 33 | msg_str_all = '' 34 | while not cls.q.empty(): 35 | msg_str_all += str(cls.q.get()) 36 | if msg_str_all: 37 | stdout_raw(msg_str_all) 38 | 39 | @classmethod 40 | def stdout(cls, msg): 41 | with cls._lock: 42 | cls.q.put(msg) 43 | 44 | @classmethod 45 | def _when_exit(cls): 46 | # stdout_raw('结束 stdout_raw') 47 | return cls._bulk_real_stdout() 48 | 49 | @classmethod 50 | def start_bulk_stdout(cls): 51 | def _bulk_stdout(): 52 | while 1: 53 | cls._bulk_real_stdout() 54 | time.sleep(0.05) 55 | 56 | if not cls._has_start_bulk_stdout: 57 | cls._has_start_bulk_stdout = True # mark started so repeated calls don't spawn extra flush threads 58 | threading.Thread(target=_bulk_stdout, daemon=True).start() 59 | 60 | 61 | if is_win and nb_log_config_default.USE_BULK_STDOUT_ON_WINDOWS: 62 | BulkStdout.start_bulk_stdout() 63 | atexit.register(BulkStdout._when_exit) 64 | 65 | 66 | def monkey_sys_stdout(msg): 67 | if is_win and nb_log_config_default.USE_BULK_STDOUT_ON_WINDOWS: 68 | BulkStdout.stdout(msg) 69 | else: 70 | stdout_raw(msg) 71
| msg_delete_color = dele_color_pattern.sub('', msg) 72 | std_writter.write_2_file(msg_delete_color) 73 | # std_writter.write_2_file(msg) 74 | 75 | 76 | def monkey_sys_stderr(msg): 77 | stderr_raw(msg) 78 | msg_delete_color = dele_color_pattern.sub('', msg) 79 | std_writter.write_2_file(msg_delete_color) 80 | 81 | 82 | def patch_sys_std(): 83 | sys.stdout.write = monkey_sys_stdout 84 | sys.stderr.write = monkey_sys_stderr 85 | -------------------------------------------------------------------------------- /nb_log/nb_log_config_default.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | """ 3 | 此文件nb_log_config.py是自动生成到python项目的根目录的,因为是自动生成到 sys.path[1]。 4 | 在这里面写的变量会覆盖此文件nb_log_config_default中的值。对nb_log包进行默认的配置。用户是无需修改nb_log安装包位置里面的配置文件的。 5 | 6 | 但最终配置方式是由get_logger_and_add_handlers方法的各种传参决定,如果方法相应的传参为None则使用这里面的配置。 7 | """ 8 | 9 | """ 10 | 如果反对日志有各种彩色,可以设置 DEFAULUT_USE_COLOR_HANDLER = False 11 | 如果反对日志有块状背景彩色,可以设置 DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = False 12 | 如果想屏蔽nb_log包对怎么设置pycahrm的颜色的提示,可以设置 WARNING_PYCHARM_COLOR_SETINGS = False 13 | 如果想改变日志模板,可以设置 FORMATTER_KIND 参数,只带了7种模板,可以自定义添加喜欢的模板 14 | LOG_PATH 配置文件日志的保存路径的文件夹。 15 | """ 16 | import sys 17 | # noinspection PyUnresolvedReferences 18 | import logging 19 | import os 20 | # noinspection PyUnresolvedReferences 21 | from pathlib import Path # noqa 22 | import socket 23 | from pythonjsonlogger.jsonlogger import JsonFormatter 24 | 25 | # 项目中的print是否自动写入到文件中。值为None则不重定向print到文件中。 自动每天一个文件, 2023-06-30.my_proj.print,生成的文件位置在定义的LOG_PATH 26 | # 如果你设置了环境变量,export PRINT_WRTIE_FILE_NAME="my_proj.print" (linux临时环境变量语法,windows语法自己百度这里不举例),那就优先使用环境变量中设置的文件名字,而不是nb_log_config.py中设置的名字 27 | PRINT_WRTIE_FILE_NAME = os.environ.get("PRINT_WRTIE_FILE_NAME") or Path(sys.path[1]).name + '.print' 28 | 29 | # 项目中的所有标准输出(不仅包括print,还包括了streamHandler日志)都写入到这个文件,为None将不把标准输出重定向到文件。自动每天一个文件, 2023-06-30.my_proj.std,生成的文件位置在定义的LOG_PATH 30 | # 如果你设置了环境变量,export SYS_STD_FILE_NAME="my_proj.std" (linux临时环境变量语法,windows语法自己百度这里不举例),那就优先使用环境变量中设置的文件名字,,而不是nb_log_config.py中设置的名字 31 | # 这个相当于是 nohup 自动重定向所有屏幕输出流到一个nohup.out文件的功能了,这个是nb_log日志包的独有黑科技功能,logging 和loguru没这种功能. 32 | SYS_STD_FILE_NAME = os.environ.get("SYS_STD_FILE_NAME") or Path(sys.path[1]).name + '.std' 33 | 34 | USE_BULK_STDOUT_ON_WINDOWS = False # 在win上是否每隔0.1秒批量stdout,win的io太差了 35 | 36 | DEFAULUT_USE_COLOR_HANDLER = True # 是否默认使用有彩的日志。 37 | DEFAULUT_IS_USE_LOGURU_STREAM_HANDLER = False # 是否默认使用 loguru的控制台日志,而非是nb_log的ColorHandler 38 | DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = True # 在控制台是否显示彩色块状的日志。为False则不使用大块的背景颜色。 39 | AUTO_PATCH_PRINT = True # 是否自动打print的猴子补丁,如果打了猴子补丁,print自动变色和可点击跳转。 40 | 41 | # 以下是屏蔽控制台所谓的烦人提示项,如果要关闭,先了解下这三个提示是什么,有的pythoner又菜又爱屏蔽提示,然后不知道为什么,这样的人太烦人了. 42 | SHOW_PYCHARM_COLOR_SETINGS = True # 有的人很反感启动代码时候提示教你怎么优化pycahrm控制台颜色,可以把这里设置为False (怕提示颜色设置打扰你又不懂pycharm和python的颜色原理,就别抱怨颜色瞎眼) 43 | SHOW_NB_LOG_LOGO = True # 有的人反感启动代码时候打印nb_log 的logo图形,可以设置为False 44 | SHOW_IMPORT_NB_LOG_CONFIG_PATH = True # 是否打印读取的nb_log_config.py的文件位置.不懂pythonpath,不懂python导入模块机制的人,别屏蔽了,学习下 https://github.com/ydf0509/pythonpathdemo 45 | 46 | WHITE_COLOR_CODE = 37 # 不同pycharm版本和主题,有的对白颜色生效的代号是97,有的是37, 这里可以设置 37和97, 如2023 pycahrm的console color,白颜色捕获的是97,如果这里写37,调节pycharm颜色没法调. 
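PRINT_WRTIE_FILE_NAME and SYS_STD_FILE_NAME above follow one precedence rule: a non-empty environment variable beats the value computed in this config file. A tiny demo of that os.environ.get(...) or default idiom (DEMO_PRINT_FILE is a made-up variable name); note the idiom also falls back to the default when the variable is set but empty, since `or` tests truthiness, not existence:

import os

os.environ['DEMO_PRINT_FILE'] = 'my_proj.print'  # e.g. set via `export DEMO_PRINT_FILE=...` before starting
name = os.environ.get('DEMO_PRINT_FILE') or 'fallback.print'
assert name == 'my_proj.print'  # the environment variable wins

del os.environ['DEMO_PRINT_FILE']
name = os.environ.get('DEMO_PRINT_FILE') or 'fallback.print'
assert name == 'fallback.print'  # unset (or empty) falls back to the config default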
47 | 48 | DEFAULT_ADD_MULTIPROCESSING_SAFE_ROATING_FILE_HANDLER = False # 是否默认同时将日志记录到记log文件记事本中,就是用户不指定 log_filename的值,会自动写入日志命名空间.log文件中。 49 | AUTO_WRITE_ERROR_LEVEL_TO_SEPARATE_FILE = False # 自动把错误error级别以上日志写到单独的文件,根据log_filename名字自动生成错误文件日志名字。 50 | LOG_FILE_SIZE = 1000 # 单位是M,每个文件的切片大小,超过多少后就自动切割 51 | LOG_FILE_BACKUP_COUNT = 10 # 对同一个日志文件,默认最多备份几个文件,超过就删除了。 52 | 53 | LOG_PATH = os.getenv("LOG_PATH") # 优先从环境变量获取,启动代码之前可以 export LOG_PATH = '/你的日志目录/' 54 | if not LOG_PATH: 55 | LOG_PATH = '/pythonlogs' # 默认的日志文件夹,如果不写明磁盘名,则是项目代码所在磁盘的根目录下的/pythonlogs 56 | # LOG_PATH = Path(__file__).absolute().parent / Path("pythonlogs") #这么配置就会自动在你项目的根目录下创建pythonlogs文件夹了并写入。 57 | if os.name == 'posix': # linux非root用户和mac用户无法操作 /pythonlogs 文件夹,没有权限,默认修改为 home/[username] 下面了。例如你的linux用户名是 xiaomin,那么默认会创建并在 /home/xiaomin/pythonlogs文件夹下写入日志文件。 58 | home_path = os.environ.get("HOME", '/') # 这个是获取linux系统的当前用户的主目录,不需要亲自设置 59 | LOG_PATH = Path(home_path) / Path('pythonlogs') # linux mac 权限很严格,非root权限不能在/pythonlogs写入,修改一下默认值。 60 | # print('LOG_PATH:',LOG_PATH) 61 | 62 | LOG_FILE_HANDLER_TYPE = 6 # 1 2 3 4 5 6 7 # nb_log 的日志切割,全都追求多进程下切割正常. 63 | """ 64 | LOG_FILE_HANDLER_TYPE 这个值可以设置为 1 2 3 4 5 四种值, 65 | 1为使用多进程安全按日志文件大小切割的文件日志,这是本人实现的批量写入日志,减少操作文件锁次数,测试10进程快速写入文件,win上性能比第5种提高了100倍,linux提升5倍 66 | 2为多进程安全按天自动切割的文件日志,同一个文件,每天生成一个新的日志文件。日志文件名字后缀自动加上日期。 67 | 3为不自动切割的单个文件的日志(不切割文件就不会出现所谓进程安不安全的问题) 68 | 4为 WatchedFileHandler,这个是需要在linux下才能使用,需要借助lograte外力进行日志文件的切割,多进程安全。 69 | 5 为第三方的concurrent_log_handler.ConcurrentRotatingFileHandler按日志文件大小切割的文件日志, 70 | 这个是采用了文件锁,多进程安全切割,文件锁在linux上使用fcntl性能还行,win上使用win32con性能非常惨。按大小切割建议不要选第5个个filehandler而是选择第1个。 71 | 6 BothDayAndSizeRotatingFileHandler 使用本人完全彻底开发的,同时按照时间和大小切割,无论是文件的大小、还是时间达到了需要切割的条件就切割。 72 | 7 LoguruFileHandler ,使用知名的 loguru 包的文件日志记录器来写文件。 73 | """ 74 | 75 | LOG_LEVEL_FILTER = logging.DEBUG # 默认日志级别,低于此级别的日志不记录了。例如设置为INFO,那么logger.debug的不会记录,只会记录logger.info以上级别的。 76 | # 强烈不建议调高这里的级别为INFO,日志是有命名空间的,单独提高打印啰嗦的日志命名空间的日志级别就可以了,不要全局提高日志级别。 77 | # https://nb-log-doc.readthedocs.io/zh_CN/latest/articles/c9.html#id2 文档9.5里面讲了几百次 python logging的命名空间的作用了,有些人到现在还不知道日志的name作用。 78 | 79 | ROOT_LOGGER_LEVEL = logging.INFO 80 | 81 | # 屏蔽的字符串显示,用 if in {打印信息} 来判断实现的,如果打印的消息中包括 FILTER_WORDS_PRINT 数组中的任何一个字符串,那么消息就不执行打印。 82 | # 这个配置对 print 和 logger的控制台输出都生效。这个可以过滤某些啰嗦的print信息,也可以过滤同级别日志中的某些烦人的日志。可以用来过滤三方包中某些控制台打印。数组不要配置过多,否则有一丝丝影响性能会。 83 | FILTER_WORDS_PRINT = [] # 例如, 你希望消息中包括阿弥陀佛 或者 包括善哉善哉 就不打印,那么可以设置 FILTER_WORDS_PRINT = ['阿弥陀佛','善哉善哉'] 84 | 85 | 86 | def get_host_ip(): 87 | ip = '' 88 | host_name = '' 89 | # noinspection PyBroadException 90 | try: 91 | sc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 92 | sc.connect(('8.8.8.8', 80)) 93 | ip = sc.getsockname()[0] 94 | host_name = socket.gethostname() 95 | sc.close() 96 | except Exception: 97 | pass 98 | return ip, host_name 99 | 100 | 101 | computer_ip, computer_name = get_host_ip() 102 | 103 | 104 | class JsonFormatterJumpAble(JsonFormatter): 105 | def add_fields(self, log_record, record, message_dict): 106 | # log_record['jump_click'] = f"""File '{record.__dict__.get('pathname')}', line {record.__dict__.get('lineno')}""" 107 | log_record[f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}"] = '' # 加个能点击跳转的字段。 108 | log_record['ip'] = computer_ip 109 | log_record['host_name'] = computer_name 110 | super().add_fields(log_record, record, message_dict) 111 | if 'for_segmentation_color' in log_record: 112 | del log_record['for_segmentation_color'] 113 | 114 | 115 | DING_TALK_TOKEN = 
'3dd0eexxxxxadab014bd604XXXXXXXXXXXX' # 钉钉报警机器人 116 | 117 | EMAIL_HOST = ('smtp.sohu.com', 465) 118 | EMAIL_FROMADDR = 'aaa0509@sohu.com' # 'matafyhotel-techl@matafy.com', 119 | EMAIL_TOADDRS = ('cccc.cheng@silknets.com', 'yan@dingtalk.com',) 120 | EMAIL_CREDENTIALS = ('aaa0509@sohu.com', 'abcdefg') 121 | 122 | ELASTIC_HOST = '127.0.0.1' 123 | ELASTIC_PORT = 9200 124 | 125 | KAFKA_BOOTSTRAP_SERVERS = ['192.168.199.202:9092'] 126 | ALWAYS_ADD_KAFKA_HANDLER_IN_TEST_ENVIRONENT = False 127 | 128 | MONGO_URL = 'mongodb://myUserAdmin:mimamiama@127.0.0.1:27016/admin' 129 | 130 | RUN_ENV = 'test' 131 | 132 | FORMATTER_DICT = { 133 | 1: logging.Formatter( 134 | '日志时间【%(asctime)s】 - 日志名称【%(name)s】 - 文件【%(filename)s】 - 第【%(lineno)d】行 - 日志等级【%(levelname)s】 - 日志信息【%(message)s】', 135 | "%Y-%m-%d %H:%M:%S"), 136 | 2: logging.Formatter( 137 | '%(asctime)s - %(name)s - %(filename)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s', 138 | "%Y-%m-%d %H:%M:%S"), 139 | 3: logging.Formatter( 140 | '%(asctime)s - %(name)s - 【 File "%(pathname)s", line %(lineno)d, in %(funcName)s 】 - %(levelname)s - %(message)s', 141 | "%Y-%m-%d %H:%M:%S"), # 一个模仿traceback异常的可跳转到打印日志地方的模板 142 | 4: logging.Formatter( 143 | '%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s - File "%(pathname)s", line %(lineno)d ', 144 | "%Y-%m-%d %H:%M:%S"), # 这个也支持日志跳转 145 | 5: logging.Formatter( 146 | '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', 147 | "%Y-%m-%d %H:%M:%S"), # 我认为的最好的模板,推荐 148 | 6: logging.Formatter('%(name)s - %(asctime)-15s - %(filename)s - %(lineno)d - %(levelname)s: %(message)s', 149 | "%Y-%m-%d %H:%M:%S"), 150 | 7: logging.Formatter('%(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 一个只显示简短文件名和所处行数的日志模板 151 | 152 | 8: JsonFormatterJumpAble('%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(filename)s %(lineno)d %(process)d %(thread)d', "%Y-%m-%d %H:%M:%S.%f", 153 | json_ensure_ascii=False), # 这个是json日志,方便elk采集分析. 
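Every entry in this FORMATTER_DICT (which continues below and closes before FORMATTER_KIND) is an ordinary logging.Formatter, or the JSON subclass for entry 8; FORMATTER_KIND only selects the default that get_logger falls back to. The "%(pathname)s:%(lineno)d" piece in template 5 is what PyCharm and VS Code consoles render as a clickable jump link. A stdlib-only check of that template on a plain StreamHandler (the logger name here is arbitrary):

import logging

formatter_5 = logging.Formatter(
    '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s',
    "%Y-%m-%d %H:%M:%S")

handler = logging.StreamHandler()
handler.setFormatter(formatter_5)
template_logger = logging.getLogger('template_demo')
template_logger.addHandler(handler)
template_logger.warning('the "path:line" chunk is what IDE consoles turn into a jump link')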
154 | 155 | 9: logging.Formatter( 156 | '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', 157 | "%Y-%m-%d %H:%M:%S"), # 对5改进,带进程和线程显示的日志模板。 158 | 10: logging.Formatter( 159 | '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 对7改进,带进程和线程显示的日志模板。 160 | 11: logging.Formatter( 161 | f'%(asctime)s-({computer_ip},{computer_name})-[p%(process)d_t%(thread)d] - %(name)s - "%(filename)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 对7改进,带进程和线程显示的日志模板以及ip和主机名。 162 | } 163 | 164 | FORMATTER_KIND = 5 # 如果get_logger不指定日志模板,则默认选择第几个模板 165 | -------------------------------------------------------------------------------- /nb_log/root_logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import nb_log 4 | from nb_log import nb_log_config_default 5 | 6 | _root_logger = logging.getLogger() 7 | _root_handlers = _root_logger.handlers 8 | 9 | new_hanlders = [] 10 | 11 | # if len(_root_logger.handlers): 12 | # hdr0 = _root_logger.handlers[0] 13 | # if type(hdr0) is logging.StreamHandler and not isinstance(hdr0,tuple(logging.StreamHandler.__subclasses__())): 14 | # # if hdr0.level == logging.NOTSET and hdr0.: 15 | # _root_logger.handlers.pop(0) 16 | ''' 17 | 有的人在使用nb_log之前,代码就已经运行了 logging.warning 这样的代码,需要先把之前的stream handler 删除掉,不然重复打印。 18 | ''' 19 | 20 | for hdr in _root_handlers: 21 | if type(hdr) is logging.StreamHandler and not isinstance(hdr, tuple(logging.StreamHandler.__subclasses__())): 22 | print(f'drop root logger handler {hdr}') 23 | continue 24 | new_hanlders.append(hdr) 25 | 26 | _root_logger.handlers = new_hanlders 27 | 28 | root_logger = nb_log.get_logger(None, log_filename='root.log', log_level_int=nb_log_config_default.ROOT_LOGGER_LEVEL) 29 | -------------------------------------------------------------------------------- /nb_log/rotate_file_writter.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import multiprocessing 3 | import queue 4 | import threading 5 | import typing 6 | from pathlib import Path 7 | import time 8 | import os 9 | 10 | 11 | # from nb_log.simple_print import sprint as print # 在此模块中不能print,print会写入文件,文件中print又写入文件,无限懵逼死循环。 12 | 13 | 14 | def build_current_date_str(): 15 | return time.strftime('%Y-%m-%d') 16 | 17 | 18 | class FileWritter: 19 | _lock = threading.RLock() 20 | 21 | def __init__(self, file_name: str, log_path='/pythonlogs', max_bytes=1000 * 1000 * 1000, back_count=10): 22 | self._max_bytes = max_bytes 23 | self._back_count = back_count 24 | self.need_write_2_file = True if file_name else False 25 | if self.need_write_2_file: 26 | self._file_name = file_name 27 | self.log_path = log_path 28 | if not Path(self.log_path).exists(): 29 | print(f'自动创建日志文件夹 {log_path}') 30 | Path(self.log_path).mkdir(exist_ok=True) 31 | # self._open_file() 32 | self._first_has_open_file = False 33 | self._last_write_ts = 0 34 | self._last_del_old_files_ts = 0 35 | 36 | @property 37 | def file_path(self): 38 | f_list = [] 39 | for f in Path(self.log_path).glob(f'????-??-??.????.{self._file_name}'): 40 | f_list.append(f) 41 | sn_list = [] 42 | for f in f_list: 43 | if f'{build_current_date_str()}.' 
in f.name: 44 | sn = f.name.split('.')[1] 45 | sn_list.append(sn) 46 | if not sn_list: 47 | return Path(self.log_path) / Path(f'{build_current_date_str()}.0001.{self._file_name}') 48 | else: 49 | sn_max = max(sn_list) 50 | if (Path(self.log_path) / Path(f'{build_current_date_str()}.{sn_max}.{self._file_name}')).stat().st_size > self._max_bytes: 51 | new_sn_int = int(sn_max) + 1 52 | new_sn_str = str(new_sn_int).zfill(4) 53 | return Path(self.log_path) / Path(f'{build_current_date_str()}.{new_sn_str}.{self._file_name}') 54 | else: 55 | return Path(self.log_path) / Path(f'{build_current_date_str()}.{sn_max}.{self._file_name}') 56 | 57 | def _open_file(self): 58 | self._f = open(self.file_path, encoding='utf8', mode='a') 59 | 60 | def _close_file(self): 61 | self._f.close() 62 | 63 | def write_2_file(self, msg): 64 | if self.need_write_2_file: 65 | if self._first_has_open_file is False: 66 | self._first_has_open_file = True 67 | self._open_file() 68 | 69 | with self._lock: 70 | now_ts = time.time() 71 | if now_ts - self._last_write_ts > 10: 72 | self._last_write_ts = time.time() 73 | self._close_file() 74 | self._open_file() 75 | self._f.write(msg) 76 | self._f.flush() 77 | if now_ts - self._last_del_old_files_ts > 30: 78 | self._last_del_old_files_ts = time.time() 79 | self._delete_old_files() 80 | 81 | def _delete_old_files(self): 82 | f_list = [] 83 | for f in Path(self.log_path).glob(f'????-??-??.????.{self._file_name}'): 84 | f_list.append(f) 85 | # f_list.sort(key=lambda f:f.stat().st_mtime,reverse=True) 86 | f_list.sort(key=lambda f: f.name, reverse=True) 87 | for f in f_list[self._back_count:]: 88 | try: 89 | # print(f'删除 {f} ') # 这里不能print, stdout写入文件,写入文件时候print,死循环 90 | f.unlink() 91 | except (FileNotFoundError, PermissionError): 92 | pass 93 | 94 | 95 | class BulkFileWritter: 96 | _lock = threading.Lock() 97 | 98 | filename__queue_map = {} 99 | filename__options_map = {} 100 | filename__file_writter_map = {} 101 | 102 | _get_queue_lock = threading.Lock() 103 | 104 | _has_start_bulk_write = False 105 | 106 | @classmethod 107 | def _get_queue(cls, file_name): 108 | if file_name not in cls.filename__queue_map: 109 | cls.filename__queue_map[file_name] = queue.SimpleQueue() 110 | return cls.filename__queue_map[file_name] 111 | 112 | @classmethod 113 | def _get_file_writter(cls, file_name): 114 | if file_name not in cls.filename__file_writter_map: 115 | fw = FileWritter(**cls.filename__options_map[file_name]) 116 | cls.filename__file_writter_map[file_name] = fw 117 | return cls.filename__file_writter_map[file_name] 118 | 119 | def __init__(self, file_name: typing.Optional[str], log_path='/pythonlogs', max_bytes=1000 * 1000 * 1000, back_count=10): 120 | self.need_write_2_file = True if file_name else False 121 | self._file_name = file_name 122 | if file_name: 123 | self.__class__.filename__options_map[file_name] = { 124 | 'file_name': file_name, 125 | 'log_path': log_path, 126 | 'max_bytes': max_bytes, 127 | 'back_count': back_count, 128 | } 129 | self.start_bulk_write() 130 | 131 | def write_2_file(self, msg): 132 | if self.need_write_2_file: 133 | with self._lock: 134 | self._get_queue(self._file_name).put(msg) 135 | 136 | @classmethod 137 | def _bulk_real_write(cls): 138 | with cls._lock: 139 | for _file_name, queue in cls.filename__queue_map.items(): 140 | msg_str_all = '' 141 | while not queue.empty(): 142 | msg_str_all += queue.get() 143 | if msg_str_all: 144 | cls._get_file_writter(_file_name).write_2_file(msg_str_all) 145 | 146 | @classmethod 147 | def _when_exit(cls): 148 | # 
print('结束') 149 | return cls._bulk_real_write() 150 | 151 | @classmethod 152 | def start_bulk_write(cls): 153 | def _bulk_write(): 154 | while 1: 155 | cls._bulk_real_write() 156 | time.sleep(0.1) 157 | 158 | if not cls._has_start_bulk_write: 159 | cls._has_start_bulk_write = True 160 | threading.Thread(target=_bulk_write, daemon=True).start() 161 | 162 | 163 | atexit.register(BulkFileWritter._when_exit) 164 | 165 | OsFileWritter = FileWritter if os.name == 'posix' else BulkFileWritter 166 | 167 | 168 | def tt(): 169 | fw = OsFileWritter('test_file6.log', '/test_dir2', max_bytes=1000 * 100) 170 | t1 = time.time() 171 | for i in range(10000): 172 | # time.sleep(0.001) 173 | msg = f'yyy{str(i).zfill(5)}' * 4 174 | print(msg) 175 | fw.write_2_file(msg + '\n') 176 | print(time.time() - t1) 177 | 178 | 179 | if __name__ == '__main__': 180 | multiprocessing.Process(target=tt).start() 181 | multiprocessing.Process(target=tt).start() 182 | # tt() 183 | -------------------------------------------------------------------------------- /nb_log/set_nb_log_config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Author : ydf 3 | # @Time : 2022/4/11 0011 0:56 4 | """ 5 | 6 | 使用覆盖的方式,做配置。 7 | """ 8 | import sys 9 | import importlib 10 | from pathlib import Path 11 | from nb_log import nb_log_config_default 12 | from nb_log.simple_print import sprint 13 | from shutil import copyfile 14 | 15 | def _get_show_import_nb_log_config_path(): 16 | try: 17 | import nb_log_config 18 | return nb_log_config.SHOW_IMPORT_NB_LOG_CONFIG_PATH 19 | except Exception as e: 20 | return True 21 | 22 | if _get_show_import_nb_log_config_path(): 23 | sprint(f'当前项目的根目录是:\n {sys.path[1]}',only_print_on_main_process=True) # 如果获取的项目根目录不正确,请不要在python代码硬编码操作sys.path。pycharm自动给项目根目录加了PYTHONPATH,如果是shell命令行运行python命令前脚本前先在会话中设置临时环境变量 export PYTHONPATH=项目根目录 24 | 25 | 26 | def show_nb_log_config(): 27 | sprint('显示nb_log 包的默认的低优先级的配置参数') 28 | for var_name in dir(nb_log_config_default): 29 | sprint(var_name, ':', getattr(nb_log_config_default, var_name)) 30 | print('\n') 31 | 32 | 33 | # noinspection PyProtectedMember 34 | def use_config_form_nb_log_config_module(): 35 | """ 36 | 自动读取配置。会优先读取启动脚本的目录的nb_log_config.py文件。没有则读取项目根目录下的nb_log_config.py 37 | :return: 38 | """ 39 | try: 40 | m = importlib.import_module('nb_log_config') 41 | importlib.reload(m) # 这行是防止用户在导入框架之前,写了 from nb_log_config import xx 这种,导致 m.__dict__.items() 不包括所有配置变量了。 42 | msg = f'nb_log包 读取到\n "{m.__file__}:1" 文件里面的变量作为优先配置了\n' 43 | # nb_print(msg) 44 | if _get_show_import_nb_log_config_path(): 45 | sprint(msg, only_print_on_main_process=True) 46 | for var_namex, var_valuex in m.__dict__.items(): 47 | if var_namex.isupper(): 48 | setattr(nb_log_config_default, var_namex, var_valuex) 49 | except ModuleNotFoundError: 50 | auto_creat_config_file_to_project_root_path() 51 | msg = f'''在你的项目根目录下生成了 \n "{Path(sys.path[1]) / Path('nb_log_config.py')}:1" 的nb_log包的日志配置文件,快去看看并修改一些自定义配置吧''' 52 | sprint(msg, only_print_on_main_process=True) 53 | 54 | 55 | def auto_creat_config_file_to_project_root_path(): 56 | # print(Path(sys.path[1]).as_posix()) 57 | # print((Path(__file__).parent.parent).absolute().as_posix()) 58 | """ 59 | :return: 60 | """ 61 | if Path(sys.path[1]).as_posix() == Path(__file__).parent.parent.absolute().as_posix(): 62 | pass 63 | sprint(f'不希望在本项目 {sys.path[1]} 里面创建 nb_log_config.py') 64 | return 65 | # noinspection PyPep8 66 | """ 67 | 如果没设置PYTHONPATH,sys.path会这样,取第一个就会报错
68 | ['', '/data/miniconda3dir/inner/envs/mtfy/lib/python36.zip', '/data/miniconda3dir/inner/envs/mtfy/lib/python3.6', '/data/miniconda3dir/inner/envs/mtfy/lib/python3.6/lib-dynload', '/root/.local/lib/python3.6/site-packages', '/data/miniconda3dir/inner/envs/mtfy/lib/python3.6/site-packages'] 69 | 70 | ['', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\python36.zip', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\DLLs', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib', 'F:\\minicondadir\\Miniconda2\\envs\\py36', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\multiprocessing_log_manager-0.2.0-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\pyinstaller-3.4-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\pywin32_ctypes-0.2.0-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\altgraph-0.16.1-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\macholib-1.11-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\pefile-2019.4.18-py3.6.egg', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\win32', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\win32\\lib', 'F:\\minicondadir\\Miniconda2\\envs\\py36\\lib\\site-packages\\Pythonwin'] 71 | """ 72 | if '/lib/python' in sys.path[1] or r'\lib\python' in sys.path[1] or '.zip' in sys.path[1]: 73 | raise EnvironmentError('''如果用pycahrm启动,默认不需要你手动亲自设置PYTHONPATH,如果你是cmd或者shell中直接敲击python xx.py 来运行, 74 | 报现在这个错误,你现在肯定是没有设置PYTHONPATH环境变量,不要设置永久环境变量,设置临时会话环境变量就行, 75 | windows设置 set PYTHONPATH=你当前python项目根目录,然后敲击你的python运行命令 76 | linux设置 export PYTHONPATH=你当前python项目根目录,然后敲击你的python运行命令 77 | 要是连PYTHONPATH这个知识点都不知道,那就要google 百度去学习PYTHONPATH作用了,非常重要非常好用, 78 | 不知道PYTHONPATH作用的人,在深层级文件夹作为运行起点导入外层目录的包的时候,如果把深层级文件作为python的执行文件起点,经常需要到处很low的手写 sys.path.insert硬编码,这种方式写代码太low了。 79 | 知道PYTHONPATH的人无论项目有多少层级的文件夹,无论是多深层级文件夹导入外层文件夹,代码里面永久都不需要出现手动硬编码操纵sys.path.append 80 | 81 | 懂PYTHONPATH 的重要性和妙用见: https://github.com/ydf0509/pythonpathdemo 82 | ''') 83 | # with (Path(sys.path[1]) / Path('nb_log_config.py')).open(mode='w', encoding='utf8') as f: 84 | # f.write(config_file_content) 85 | copyfile(Path(__file__).parent / Path('nb_log_config_default.py'), Path(sys.path[1]) / Path('nb_log_config.py')) 86 | sprint(f'''在 {Path(sys.path[1])} 目录下自动生成了一个文件, 请刷新文件夹查看或修改 \n "{Path(sys.path[1]) / Path('nb_log_config.py')}:1" 文件''') 87 | 88 | 89 | use_config_form_nb_log_config_module() 90 | -------------------------------------------------------------------------------- /nb_log/simple_print.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import os 3 | import queue 4 | import sys 5 | import threading 6 | import time 7 | import multiprocessing 8 | 9 | print_raw = print 10 | WORD_COLOR = 37 11 | 12 | 13 | def stdout_write(msg: str): 14 | if sys.stdout: 15 | sys.stdout.write(msg) 16 | sys.stdout.flush() 17 | 18 | 19 | def stderr_write(msg: str): 20 | if sys.stderr: 21 | sys.stderr.write(msg) 22 | sys.stderr.flush() 23 | else: 24 | stdout_write(msg) 25 | 26 | 27 | 28 | def _sprint(*args, sep=' ', end='\n', file=None, flush=True, sys_getframe_n=2, ): 29 | args = (str(arg) for arg in args) # REMIND 防止是数字不能被join 30 | args_str = sep.join(args) + end 31 | # stdout_write(f'56:{file}') 32 | if file == sys.stderr: 33 | stderr_write(args_str) # 如 threading 模块第926行,打印线程错误,希望保持原始的红色错误方式,不希望转成蓝色。 34 | elif file in [sys.stdout, 
None]: 35 | # 获取被调用函数在被调用时所处代码行数 36 | fra = sys._getframe(sys_getframe_n) 37 | line = fra.f_lineno 38 | file_name = fra.f_code.co_filename 39 | fun = fra.f_code.co_name 40 | # sys.stdout.write(f'"{__file__}:{sys._getframe().f_lineno}" {x}\n') 41 | msg = f'{time.strftime("%H:%M:%S")} "{file_name}:{line}" - {fun} - {args_str}' 42 | stdout_write(msg) 43 | else: # 例如traceback模块的print_exception函数 file的入参是 <_io.StringIO object at 0x00000264F2F065E8>,必须把内容重定向到这个对象里面,否则exception日志记录不了错误堆栈。 44 | print_raw(args_str, sep=sep, end=end, file=file) 45 | 46 | 47 | def sprint(*args, sep=' ', end='\n', file=None, flush=True, sys_getframe_n=2, only_print_on_main_process=False): 48 | if only_print_on_main_process: 49 | if multiprocessing.process.current_process().name == 'MainProcess': 50 | _sprint(*args, sep=sep, end=end, file=file, flush=flush, sys_getframe_n=2) 51 | else: 52 | _sprint(*args, sep=sep, end=end, file=file, flush=flush, sys_getframe_n=sys_getframe_n) 53 | 54 | 55 | if __name__ == '__main__': 56 | str1 = 'O(∩_∩)O哈哈' * 40 57 | t1 = time.time() 58 | for i in range(10000): 59 | sprint(str1) 60 | 61 | print(time.time() - t1) 62 | -------------------------------------------------------------------------------- /pub_pip_nb_log.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import sys 4 | import shutil 5 | 6 | # Ensure dependencies 7 | # os.system(f"{sys.executable} -m pip install --user --upgrade setuptools wheel twine") 8 | 9 | # Delete previous build 10 | import time 11 | import git_nb_log_github 12 | 13 | shutil.rmtree("dist", ignore_errors=True) 14 | 15 | # Build 16 | os.system(f"{sys.executable} setup.py sdist bdist_wheel") 17 | 18 | # Upload 19 | os.system(f"{sys.executable} -m twine upload dist/*") 20 | 21 | shutil.rmtree("build", ignore_errors=True) 22 | 23 | 24 | 25 | time.sleep(100000) 26 | 27 | -------------------------------------------------------------------------------- /settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "terminal.integrated.env.windows": { 3 | "PYTHONPATH": "${workspaceFolder}/nb_log" 4 | }, 5 | "terminal.integrated.env.linux": { 6 | "PYTHONPATH": "./" 7 | }, 8 | "python.pythonPath": "D:\\ProgramData\\Miniconda3\\envs\\py39b\\python.exe", 9 | "windsurf.language": "zh-CN", 10 | "window.openFoldersInNewWindow": true, 11 | // Python 相关设置 12 | "python.analysis.typeCheckingMode": "basic", // 启用基本类型检查 13 | "python.formatting.provider": "black", // 使用 black 作为格式化工具 14 | "python.linting.enabled": true, // 启用代码检查 15 | "python.linting.pylintEnabled": true, // 启用 pylint 16 | "python.linting.flake8Enabled": true, // 启用 flake8 17 | "python.testing.pytestEnabled": true, // 启用 pytest 18 | "python.testing.unittestEnabled": false, // 禁用 unittest 19 | // 编辑器设置 20 | "editor.formatOnSave": true, // 保存时自动格式化 21 | "editor.rulers": [ 22 | 88 23 | ], // 显示 88 字符宽度标尺(black 默认) 24 | "editor.renderWhitespace": "all", // 显示空白字符 25 | "editor.suggestSelection": "first", // 自动补全选择第一项 26 | "editor.wordBasedSuggestions": "matchingDocuments", // 基于单词的建议 27 | // 文件设置 28 | "files.trimTrailingWhitespace": true, // 自动删除行尾空格 29 | "files.insertFinalNewline": true, // 文件末尾插入新行 30 | "files.trimFinalNewlines": true, // 保留文件末尾单个新行 31 | // 自动保存 32 | "files.autoSave": "afterDelay", // 自动保存 33 | "files.autoSaveDelay": 1000, // 自动保存延迟(毫秒) 34 | // 智能感知设置 35 | "python.analysis.completeFunctionParens": true, // 函数补全时添加括号 36 | "python.analysis.extraPaths": [ 37 | "./" 38 | ], // 额外的 Python 路径 39 | // 调试设置 
40 | "python.debugging.justMyCode": false, // 调试时可以进入第三方库代码 41 | // 终端设置 42 | "terminal.integrated.defaultProfile.windows": "Command Prompt", // 默认终端 43 | "terminal.integrated.profiles.windows": { 44 | "Python": { 45 | "path": "${env:PYTHONPATH}", 46 | "args": [ 47 | "-i" 48 | ] 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from pathlib import Path 3 | from setuptools import setup, find_packages 4 | import os 5 | 6 | # with open("README.md", "r",encoding='utf8') as fh: 7 | # long_description = fh.read() 8 | 9 | # filepath = ((Path(__file__).parent / Path('README.md')).absolute()).as_posix() 10 | filepath = 'README.md' 11 | print(filepath) 12 | 13 | extra_requires = {'all': ['pymongo==4.3.3', 'elasticsearch', 'kafka-python==2.0.2','loguru' ], } 14 | 15 | install_requires = [ 16 | 'tomorrow3==1.1.0', 17 | 'concurrent-log-handler==0.9.23', 18 | 'requests', 19 | 'python-json-logger==0.1.10', 20 | 'nb_filelock', 21 | 'service-identity', 22 | 'deprecated==1.2.14', 23 | 'chained_mode_time_tool', 24 | 'nb_libs', 25 | 'logging_tree' 26 | ] 27 | 28 | # if os.name == 'nt': 29 | # install_requires.append('pywin32') 30 | 31 | print(f'nb_log install_requires:{install_requires}') 32 | setup( 33 | name='nb_log', # 34 | version="13.3", 35 | description=( 36 | 'very sharp color display,monkey patch bulitin print and high-performance multiprocess safe roating file handler,other handlers includeing dintalk ,email,kafka,elastic and so on ' 37 | ), 38 | keywords=["logging", "logger", "multiprocess file handler", "color handler","loguru"], 39 | # long_description=open('README.md', 'r',encoding='utf8').read(), 40 | long_description_content_type="text/markdown", 41 | long_description=open(filepath, 'r', encoding='utf8').read(), 42 | url='https://github.com/ydf0509/nb_log', 43 | # data_files=[filepath], 44 | author='bfzs', 45 | author_email='ydf0509@sohu.com', 46 | maintainer='ydf', 47 | maintainer_email='ydf0509@sohu.com', 48 | license='BSD License', 49 | packages=find_packages(), 50 | include_package_data=True, 51 | platforms=["all"], 52 | classifiers=[ 53 | 'Development Status :: 4 - Beta', 54 | 'Operating System :: OS Independent', 55 | 'Intended Audience :: Developers', 56 | 'License :: OSI Approved :: BSD License', 57 | 'Programming Language :: Python', 58 | 'Programming Language :: Python :: Implementation', 59 | 'Programming Language :: Python :: 3', 60 | 'Topic :: Software Development :: Libraries' 61 | ], 62 | install_requires=install_requires, 63 | extras_require = extra_requires 64 | ) 65 | """ 66 | 打包上传 67 | python setup.py sdist upload -r pypi 68 | 69 | 70 | 71 | python setup.py sdist & twine upload dist/nb_log-6.0.tar.gz 72 | python setup.py sdist & python -m twine upload dist/nb_log-10.3.tar.gz 73 | 74 | twine upload dist/* 75 | 76 | 77 | python -m pip install nb_log --upgrade -i https://pypi.org/simple # 及时的方式,不用等待 阿里云 豆瓣 同步 78 | """ 79 | -------------------------------------------------------------------------------- /tests/_trial_temp/_trial_marker: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/tests/_trial_temp/_trial_marker -------------------------------------------------------------------------------- /tests/ai_filehandler/custom_file_handler_demo.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from pathlib import Path 4 | from typing import Optional, IO, Any 5 | import time 6 | import platform 7 | import signal 8 | import atexit 9 | 10 | 11 | class FileLock: 12 | """Cross-platform file locking.""" 13 | 14 | def __init__(self, file_obj): 15 | self.file_obj = file_obj 16 | self.is_windows = platform.system() == 'Windows' 17 | if self.is_windows: 18 | import msvcrt 19 | self.msvcrt = msvcrt 20 | else: 21 | import fcntl 22 | self.fcntl = fcntl 23 | 24 | def acquire(self, blocking=True): 25 | if self.is_windows: 26 | while True: 27 | try: 28 | self.msvcrt.locking(self.file_obj.fileno(), self.msvcrt.LK_NBLCK, 1) 29 | return True 30 | except IOError: 31 | if not blocking: 32 | return False 33 | time.sleep(0.1) 34 | else: 35 | try: 36 | mode = self.fcntl.LOCK_EX if blocking else (self.fcntl.LOCK_EX | self.fcntl.LOCK_NB) 37 | self.fcntl.flock(self.file_obj.fileno(), mode) 38 | return True 39 | except (IOError, OSError): 40 | return False 41 | 42 | def release(self): 43 | if self.is_windows: 44 | try: 45 | self.file_obj.seek(0) 46 | self.msvcrt.locking(self.file_obj.fileno(), self.msvcrt.LK_UNLCK, 1) 47 | except (IOError, OSError): 48 | pass 49 | else: 50 | try: 51 | self.fcntl.flock(self.file_obj.fileno(), self.fcntl.LOCK_UN) 52 | except (IOError, OSError): 53 | pass 54 | 55 | 56 | class CustomRotatingFileHandler(logging.Handler): 57 | """A process-safe rotating file handler that works on both Windows and Unix.""" 58 | 59 | def __init__(self, filename: str, mode='a', encoding='utf-8', max_bytes=1024*1024, 60 | buffer_size=8192, backup_count=1, flush_interval=0.1): 61 | super().__init__() 62 | self.filename = filename 63 | self.mode = mode 64 | self.encoding = encoding 65 | self.max_bytes = max_bytes 66 | self.buffer_size = buffer_size 67 | self.backup_count = backup_count 68 | self.flush_interval = flush_interval 69 | self.buffer = [] 70 | self._file = None 71 | self._current_size = 0 72 | self._last_flush_time = time.time() 73 | 74 | # 初始化文件锁 75 | self._lock_file_path = f"{self.filename}.lock" 76 | self._lock_file = None 77 | 78 | # 确保日志目录存在 79 | os.makedirs(os.path.dirname(os.path.abspath(filename)), exist_ok=True) 80 | 81 | # 注册清理函数 82 | atexit.register(self._cleanup) 83 | 84 | def _acquire_lock(self): 85 | """获取文件锁""" 86 | try: 87 | self._lock_file = open(self._lock_file_path, 'w') 88 | 89 | # 在 Windows 上使用 msvcrt 90 | if os.name == 'nt': 91 | import msvcrt 92 | msvcrt.locking(self._lock_file.fileno(), msvcrt.LK_NBLCK, 1) 93 | # 在 Unix/Linux 上使用 fcntl 94 | else: 95 | import fcntl 96 | fcntl.flock(self._lock_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) 97 | 98 | return True 99 | except (IOError, OSError): 100 | if self._lock_file: 101 | self._lock_file.close() 102 | self._lock_file = None 103 | return False 104 | 105 | def _release_lock(self): 106 | """释放文件锁""" 107 | if self._lock_file: 108 | try: 109 | # 在 Windows 上使用 msvcrt 110 | if os.name == 'nt': 111 | import msvcrt 112 | msvcrt.locking(self._lock_file.fileno(), msvcrt.LK_UNLCK, 1) 113 | # 在 Unix/Linux 上使用 fcntl 114 | else: 115 | import fcntl 116 | fcntl.flock(self._lock_file.fileno(), fcntl.LOCK_UN) 117 | except (IOError, OSError): 118 | pass 119 | finally: 120 | self._lock_file.close() 121 | self._lock_file = None 122 | try: 123 | os.remove(self._lock_file_path) 124 | except OSError: 125 | pass 126 | 127 | def _open_file(self): 128 | """Open the current base file with the (original) mode and encoding.""" 129 | 
try: 130 | self._file = open(self.filename, mode=self.mode, encoding=self.encoding) 131 | self._current_size = os.path.getsize(self.filename) if os.path.exists(self.filename) else 0 132 | except Exception as e: 133 | print(f"Error opening file: {str(e)}") 134 | self._file = None 135 | self._current_size = 0 136 | 137 | def _check_file(self) -> bool: 138 | """ 139 | Check if the current file handle is valid and points to the right file. 140 | Returns True if the file is valid, False if it needs to be reopened. 141 | """ 142 | if self._file is None: 143 | return False 144 | 145 | try: 146 | # Check if file still exists 147 | if not os.path.exists(self.filename): 148 | return False 149 | 150 | # Check if file has been renamed (inode changed) 151 | stat_handle = os.fstat(self._file.fileno()) 152 | stat_name = os.stat(self.filename) 153 | 154 | if stat_handle.st_ino != stat_name.st_ino: 155 | return False 156 | 157 | return True 158 | except Exception: 159 | return False 160 | 161 | def _signal_handler(self, signum, frame): 162 | """Handle termination signals by flushing and closing the handler.""" 163 | self.close() 164 | # Re-raise the signal to allow the program to terminate 165 | signal.signal(signum, signal.SIG_DFL) 166 | os.kill(os.getpid(), signum) 167 | 168 | def _cleanup(self): 169 | """在程序退出时确保所有日志都被写入""" 170 | try: 171 | if hasattr(self, 'buffer') and self.buffer: 172 | self._flush_buffer() 173 | finally: 174 | if self._file: 175 | try: 176 | self._file.close() 177 | except Exception: 178 | pass 179 | self._file = None 180 | 181 | def emit(self, record): 182 | try: 183 | msg = self.format(record) 184 | self.buffer.append(msg + '\n') 185 | 186 | current_time = time.time() 187 | should_flush = ( 188 | len(self.buffer) >= self.buffer_size or 189 | (current_time - self._last_flush_time) >= self.flush_interval 190 | ) 191 | 192 | if should_flush: 193 | self._flush_buffer() 194 | self._last_flush_time = current_time 195 | 196 | except Exception: 197 | self.handleError(record) 198 | 199 | def _flush_buffer(self): 200 | """Flush the buffer to disk.""" 201 | if not self.buffer: 202 | return 203 | 204 | try: 205 | buffer_content = ''.join(self.buffer) 206 | buffer_size = len(buffer_content.encode(self.encoding)) 207 | 208 | # 确保文件已打开 209 | if not self._check_file(): 210 | self._open_file() 211 | 212 | # 获取当前文件大小 213 | self._current_size = os.path.getsize(self.filename) if os.path.exists(self.filename) else 0 214 | 215 | if self._current_size + buffer_size > self.max_bytes: 216 | self.rotate() 217 | self._current_size = 0 # 重置文件大小计数 218 | 219 | # 写入文件 220 | if self._file is not None: 221 | self._file.write(buffer_content) 222 | self._file.flush() 223 | os.fsync(self._file.fileno()) 224 | self._current_size += buffer_size 225 | self.buffer = [] 226 | 227 | except Exception as e: 228 | print(f"Error in _flush_buffer: {e}") 229 | self.handleError(None) 230 | 231 | def rotate(self): 232 | """Rotate the current file in a process-safe way.""" 233 | if not self._acquire_lock(): 234 | return 235 | 236 | try: 237 | # 首先关闭当前文件 238 | if self._file: 239 | self._file.close() 240 | self._file = None 241 | 242 | try: 243 | # 从最大的编号开始删除多余的日志文件 244 | for i in range(self.backup_count + 1, 0, -1): 245 | sfn = f"{self.filename}.{i}" 246 | if os.path.exists(sfn): 247 | try: 248 | os.remove(sfn) 249 | except OSError: 250 | pass 251 | 252 | # 从最后一个备份开始重命名 253 | for i in range(self.backup_count, 0, -1): 254 | sfn = f"{self.filename}.{i-1}" if i > 1 else self.filename 255 | dfn = f"{self.filename}.{i}" 256 | if 
os.path.exists(sfn): 257 | try: 258 | if os.path.exists(dfn): 259 | os.remove(dfn) 260 | os.rename(sfn, dfn) 261 | except OSError as e: 262 | print(f"Error rotating file {sfn} to {dfn}: {str(e)}") 263 | 264 | finally: 265 | # 重新打开文件 266 | try: 267 | self._open_file() 268 | self._current_size = 0 # 重置大小计数 269 | except Exception as e: 270 | print(f"Error reopening file: {str(e)}") 271 | 272 | finally: 273 | # 释放锁 274 | self._release_lock() 275 | 276 | def close(self): 277 | """Close the file handler and release resources.""" 278 | if getattr(self, '_is_closing', False): 279 | return 280 | self._is_closing = True 281 | 282 | # Flush any remaining buffered messages 283 | try: 284 | self._flush_buffer() 285 | except Exception: 286 | pass 287 | 288 | if self._file: 289 | try: 290 | self._file.close() 291 | except Exception: 292 | pass 293 | self._file = None 294 | 295 | if self._lock_file: 296 | self._release_lock() 297 | try: 298 | self._lock_file.close() 299 | except Exception: 300 | pass 301 | try: 302 | os.remove(self._lock_file_path) 303 | except OSError: 304 | pass 305 | 306 | 307 | def main(): 308 | # Create logger 309 | logger = logging.getLogger("custom_logger") 310 | logger.setLevel(logging.DEBUG) 311 | 312 | # Create formatters 313 | file_formatter = logging.Formatter( 314 | '%(asctime)s - %(name)s - %(levelname)s - %(message)s', 315 | "%Y-%m-%d %H:%M:%S" 316 | ) 317 | 318 | # Create and configure file handler 319 | file_handler = CustomRotatingFileHandler( 320 | filename="/pythonlogs/test.log", 321 | max_bytes=1024*10240, # 1MB 322 | backup_count=3, 323 | flush_interval=0.1 324 | ) 325 | 326 | # file_handler = logging.FileHandler(filename="/pythonlogs/test.log", mode='a', encoding='utf-8') # 327 | file_handler.setLevel(logging.DEBUG) 328 | file_handler.setFormatter(file_formatter) 329 | logger.addHandler(file_handler) 330 | 331 | for i in range(100000): 332 | # 模拟正常的应用程序日志记录 333 | logger.info("Application started") 334 | 335 | # 模拟一些业务操作 336 | logger.debug("Processing user data") 337 | try: 338 | # 模拟一个可能出错的操作 339 | result = 10 / 0 340 | except Exception as e: 341 | logger.error(f"Error occurred during calculation: {str(e)}") 342 | 343 | # 模拟一些警告情况 344 | logger.warning("Database connection pool is running low") 345 | 346 | # 模拟一些业务统计信息 347 | logger.info("Daily statistics: 1000 users logged in, 50000 requests processed") 348 | 349 | logger.info("Application shutting down") 350 | 351 | 352 | if __name__ == "__main__": 353 | t1 = time.time() 354 | main() 355 | t2 = time.time() 356 | print(f"Time taken: {t2 - t1} seconds") 357 | -------------------------------------------------------------------------------- /tests/ai_filehandler/test_multiprocess_logging.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import time 3 | import sys 4 | import os 5 | from pathlib import Path 6 | 7 | # Add parent directory to Python path to make imports work 8 | sys.path.append(str(Path(__file__).parent)) 9 | 10 | from custom_file_handler_demo import CustomRotatingFileHandler 11 | import logging 12 | 13 | 14 | def worker_process(process_id): 15 | # Create logger for this process 16 | logger = logging.getLogger(f"process_{process_id}") 17 | logger.setLevel(logging.DEBUG) 18 | 19 | # Create and configure file handler 20 | file_handler = CustomRotatingFileHandler( 21 | filename="/pythonlogs/multiprocess_test2.log", 22 | max_bytes=1024*10240, # Small size to trigger rotation frequently 23 | backup_count=5, 24 | ) 25 | 
file_handler.setFormatter(logging.Formatter( 26 | '%(asctime)s - PID:%(process)d - %(message)s', 27 | "%Y-%m-%d %H:%M:%S" 28 | )) 29 | logger.addHandler(file_handler) 30 | 31 | # Write some logs 32 | for i in range(1000000): 33 | logger.info(f"Process {process_id} - Message {i}") 34 | # time.sleep(0.01) # Small delay to simulate work 35 | 36 | # file_handler.close() 37 | 38 | 39 | def main(): 40 | # Create multiple processes 41 | processes = [] 42 | for i in range(5): # Start 5 processes 43 | p = multiprocessing.Process(target=worker_process, args=(i,)) 44 | processes.append(p) 45 | p.start() 46 | 47 | # Wait for all processes to complete 48 | for p in processes: 49 | p.join() 50 | 51 | 52 | if __name__ == "__main__": 53 | t1 = time.time() 54 | main() 55 | t2 = time.time() 56 | print(f"Time taken: {t2 - t1} seconds") -------------------------------------------------------------------------------- /tests/comprae_loguru/t_loguru.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from loguru import logger 4 | import urllib3 5 | 6 | pool = urllib3.PoolManager() 7 | 8 | logger.bind(name='urllib3',level="DEBUG") 9 | # logger.add("urllib3_loguru.log", filter=lambda record: record["extra"].get("name") == "urllib3") 10 | 11 | resp = pool.request('get','http://www.google.com') 12 | 13 | 14 | 15 | logger.debug("只能打印自己的,无法记录urllib3请求了什么url") 16 | print(resp.data) -------------------------------------------------------------------------------- /tests/comprae_loguru/t_nb_log.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | import nb_log 4 | import urllib3 5 | 6 | logger = nb_log.get_logger('urllib3') 7 | 8 | pool = urllib3.PoolManager() 9 | resp = pool.request('get','http://www.google.com') 10 | 11 | logger.debug("除了能打印这行,还能自动记录urllib3请求了什么url") 12 | 13 | print(resp.data) -------------------------------------------------------------------------------- /tests/d1/d2/d3/t6.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | print(os.getenv('va')) 4 | print('导入nb_log之前的print是普通的') 5 | 6 | import sys 7 | print(sys.path) 8 | 9 | from nb_log import get_logger 10 | 11 | logger = get_logger('lalala',log_filename='jinzhifengzhuang.log',formatter_template=5) 12 | 13 | logger.debug(f'debug是绿色,说明是调试的,代码ok ') 14 | logger.info('info是天蓝色,日志正常 ') 15 | logger.warning('黄色yello,有警告了 ') 16 | logger.error('粉红色说明代码有错误 ') 17 | logger.critical('血红色,说明发生了严重错误 ') 18 | 19 | print('导入nb_log之后的print是强化版的可点击跳转的') 20 | 21 | 22 | 23 | #raise Exception("dsadsd") -------------------------------------------------------------------------------- /tests/example.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | print('导入nb_log之前的print是普通的') 4 | from nb_log import get_logger 5 | get_logger('lalala3',log_filename='lalala3.log',formatter_template=5,log_file_handler_type=6,is_use_loguru_stream_handler=False) 6 | logger = get_logger('lalala',log_filename='lalala.log',formatter_template=5,log_file_handler_type=6,is_use_loguru_stream_handler=False) 7 | # logger = get_logger('hihihi',) 8 | 9 | logger.debug(f'debug是绿色,说明是调试的,代码ok ') 10 | logger.info('info是天蓝色,日志正常 ') 11 | logger.warning('黄色yello,有警告了 ') 12 | logger.error('粉红色说明代码有错误 ') 13 | logger.critical('血红色,说明发生了严重错误 ') 14 | logger.debug({"k":1,'k2':2}) 15 | # logger.debug(msg='aaa',extra={"k":1,'k2':2}) 16 | print('导入nb_log之后的print是强化版的可点击跳转的') 17 | def func_ya(x): 18 | 
print(x) 19 | func_ya('print可以显示是func_ya中的函数打印的') 20 | -------------------------------------------------------------------------------- /tests/git_nb_log.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | import time 4 | 5 | 6 | def getstatusoutput(cmd): 7 | try: 8 | data = subprocess.check_output(cmd, shell=True, universal_newlines=True, 9 | stderr=subprocess.STDOUT, encoding='utf8') # 必須設置為utf8, 不然报错了。 10 | exitcode = 0 11 | except subprocess.CalledProcessError as ex: 12 | data = ex.output 13 | exitcode = ex.returncode 14 | if data[-1:] == '\n': 15 | data = data[:-1] 16 | return exitcode, data 17 | 18 | 19 | def do_cmd(cmd_strx): 20 | print(f'执行 {cmd_strx}') 21 | retx = getstatusoutput(cmd_strx) 22 | print(retx[0]) 23 | # if retx[0] !=0: 24 | # raise ValueError('要检查git提交') 25 | print(retx[1], '\n') 26 | return retx 27 | 28 | 29 | t0 = time.time() 30 | 31 | do_cmd('git pull origin master') 32 | 33 | do_cmd('git diff') 34 | 35 | do_cmd('git add ../.') 36 | 37 | do_cmd('git commit -m commit') 38 | 39 | do_cmd('git push origin') 40 | 41 | # print(subprocess.getstatusoutput('git push github')) 42 | print(f'spend_time {time.time() - t0}') 43 | time.sleep(100000) 44 | -------------------------------------------------------------------------------- /tests/loguru不同功能写入不同文件.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | from loguru import logger 4 | 5 | format = ("{time:YYYY-MM-DD HH:mm:ss.SSS} | {extra[namespace]} | " 6 | "{level: <8} | " 7 | "{name}:{function}:{line} - {message}") 8 | 9 | # logger.remove() 10 | logger.add("file_A.log", filter=lambda record: record["extra"]["namespace"] == "A", format=format) 11 | # logger.add(sys.stdout, filter=lambda record: record["extra"]["namespace"] == "A", format=format) 12 | 13 | logger.add("file_B.log", filter=lambda record: record["extra"]["namespace"] == "B", format=format) 14 | # logger.add(sys.stdout, filter=lambda record: record["extra"]["namespace"] == "B", format=format) 15 | 16 | logger_a = logger.bind(namespace="A") 17 | logger_b = logger.bind(namespace="B") 18 | 19 | 20 | def task_A(): 21 | logger_a.info("Starting task A") 22 | logger_a.success("End of task A") 23 | 24 | 25 | def task_B(): 26 | logger_b.info("Starting task B") 27 | logger_b.success("End of task B") 28 | 29 | 30 | task_A() 31 | task_B() 32 | -------------------------------------------------------------------------------- /tests/nb_log_config.py: -------------------------------------------------------------------------------- 1 | # coding=utf8 2 | """ 3 | 此文件nb_log_config.py是自动生成到python项目的根目录的。 4 | 在这里面写的变量会覆盖此文件nb_log_config_default中的值。对nb_log包进行默认的配置。 5 | 但最终配置方式是由get_logger_and_add_handlers方法的各种传参决定,如果方法相应的传参为None则使用这里面的配置。 6 | """ 7 | import json 8 | 9 | """ 10 | 如果反对日志有各种彩色,可以设置 DEFAULUT_USE_COLOR_HANDLER = False 11 | 如果反对日志有块状背景彩色,可以设置 DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = False 12 | 如果想屏蔽nb_log包对怎么设置pycharm的颜色的提示,可以设置 WARNING_PYCHARM_COLOR_SETINGS = False 13 | 如果想改变日志模板,可以设置 FORMATTER_KIND 参数,只带了7种模板,可以自定义添加喜欢的模板 14 | LOG_PATH 配置文件日志的保存路径的文件夹。 15 | """ 16 | 17 | # noinspection PyUnresolvedReferences 18 | import logging 19 | import os 20 | # noinspection PyUnresolvedReferences 21 | from pathlib import Path # noqa 22 | import socket 23 | from pythonjsonlogger.jsonlogger import JsonFormatter 24 | 25 | 26 | def get_host_ip(): 27 | ip = '' 28 | host_name = '' 29 | # noinspection PyBroadException 30 | try: 31 | sc = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 32 | sc.connect(('8.8.8.8', 80)) 33 | ip = sc.getsockname()[0] 34 | host_name = socket.gethostname() 35 | sc.close() 36 | except Exception: 37 | pass 38 | return ip, host_name 39 | 40 | 41 | computer_ip, computer_name = get_host_ip() 42 | 43 | def _json_translate(obj): 44 | print(obj) 45 | if isinstance(obj, dict): 46 | return obj 47 | 48 | class JsonFormatterJumpAble(JsonFormatter): 49 | def add_fields000(self, log_record, record, message_dict): 50 | print(1,log_record,2,record,3,message_dict) 51 | # log_record['jump_click'] = f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}" 52 | log_record[f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}"] = '' # 加个能点击跳转的字段。 53 | log_record['ip'] = computer_ip 54 | log_record['host_name'] = computer_name 55 | print(type(record.msg),repr(record.msg),type(record.message),record.message) 56 | # msg_dict = None 57 | # if record.msg.startswith('{'): 58 | # try: 59 | # msg_dict = json.loads(record.msg) 60 | # except Exception: 61 | # pass 62 | raw_no_color_msg = getattr(record,'raw_no_color_msg') 63 | print(raw_no_color_msg) 64 | if isinstance(raw_no_color_msg,dict): 65 | log_record['msg_dict'] = record.msg 66 | super().add_fields(log_record, record, message_dict) 67 | if 'for_segmentation_color' in log_record: 68 | del log_record['for_segmentation_color'] 69 | 70 | def add_fields(self, log_record, record, message_dict): 71 | # log_record['jump_click'] = f"""File '{record.__dict__.get('pathname')}', line {record.__dict__.get('lineno')}""" 72 | log_record[f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}"] = '' # 加个能点击跳转的字段。 73 | log_record['ip'] = computer_ip 74 | log_record['host_name'] = computer_name 75 | super().add_fields(log_record, record, message_dict) 76 | if 'for_segmentation_color' in log_record: 77 | del log_record['for_segmentation_color'] 78 | 79 | 80 | DING_TALK_TOKEN = '3dd0eexxxxxadab014bd604XXXXXXXXXXXX' # 钉钉报警机器人 81 | 82 | EMAIL_HOST = ('smtp.sohu.com', 465) 83 | EMAIL_FROMADDR = 'aaa0509@sohu.com' # 'matafyhotel-techl@matafy.com', 84 | EMAIL_TOADDRS = ('cccc.cheng@silknets.com', 'yan@dingtalk.com',) 85 | EMAIL_CREDENTIALS = ('aaa0509@sohu.com', 'abcdefg') 86 | 87 | ELASTIC_HOST = '127.0.0.1' 88 | ELASTIC_PORT = 9200 89 | 90 | KAFKA_BOOTSTRAP_SERVERS = ['192.168.199.202:9092'] 91 | ALWAYS_ADD_KAFKA_HANDLER_IN_TEST_ENVIRONENT = False 92 | 93 | MONGO_URL = 'mongodb://myUserAdmin:mimamiama@127.0.0.1:27016/admin' 94 | 95 | DEFAULUT_USE_COLOR_HANDLER = True # 是否默认使用有彩的日志。 96 | DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = True # 在控制台是否显示彩色块状的日志。为False则不使用大块的背景颜色。 97 | AUTO_PATCH_PRINT = True # 是否自动打print的猴子补丁,如果打了猴子补丁,print自动变色和可点击跳转。 98 | SHOW_PYCHARM_COLOR_SETINGS = False # 有的人很反感启动代码时候提示教你怎么优化pycahrm控制台颜色,可以把这里设置为False 99 | SHOW_NB_LOG_LOGO = False # 有的人方案启动代码时候打印nb_log 的logo图形,可以设置为False 100 | SHOW_IMPORT_NB_LOG_CONFIG_PATH = False 101 | USE_BULK_STDOUT_ON_WINDOWS = False # 在win上是否每隔0.1秒批量stdout,win的io太差了 102 | WHITE_COLOR_CODE = 37 103 | DEFAULUT_IS_USE_LOGURU_STREAM_HANDLER = False # 是否默认使用 loguru的控制台日志,而非是nb_log的ColorHandler 104 | 105 | DEFAULT_ADD_MULTIPROCESSING_SAFE_ROATING_FILE_HANDLER = False # 是否默认同时将日志记录到记log文件记事本中。 106 | AUTO_WRITE_ERROR_LEVEL_TO_SEPARATE_FILE = True # 自动把错误error级别以上日志写到单独的文件,根据log_filename名字自动生成错误文件日志名字。 107 | LOG_FILE_SIZE = 100 # 单位是M,每个文件的切片大小,超过多少后就自动切割 108 | LOG_FILE_BACKUP_COUNT = 3 # 对同一个日志文件,默认最多备份几个文件,超过就删除了。 109 | 110 | LOG_PATH = '/pythonlogs' # 默认的日志文件夹,如果不写明磁盘名,则是项目代码所在磁盘的根目录下的/pythonlogs 111 | # LOG_PATH 
= Path(__file__).absolute().parent / Path("pythonlogs") #这么配置就会自动在你项目的根目录下创建pythonlogs文件夹了并写入。 112 | if os.name == 'posix': # linux非root用户和mac用户无法操作 /pythonlogs 文件夹,没有权限,默认修改为 home/[username] 下面了。例如你的linux用户名是 xiaomin,那么默认会创建并在 /home/xiaomin/pythonlogs文件夹下写入日志文件。 113 | home_path = os.environ.get("HOME", '/') # 这个是获取linux系统的当前用户的主目录,不需要亲自设置 114 | LOG_PATH = Path(home_path) / Path('pythonlogs') # linux mac 权限很严格,非root权限不能在/pythonlogs写入,修改一下默认值。 115 | # print(LOG_PATH) 116 | LOG_FILE_HANDLER_TYPE = 6 # 1 2 3 4 5 6 117 | """ 118 | LOG_FILE_HANDLER_TYPE 这个值可以设置为 1 2 3 4 5 四种值, 119 | 1为使用多进程安全按日志文件大小切割的文件日志,这是本人实现的批量写入日志,减少操作文件锁次数,测试10进程快速写入文件,win上性能比第5种提高了100倍,linux提升5倍 120 | 2为多进程安全按天自动切割的文件日志,同一个文件,每天生成一个新的日志文件。日志文件名字后缀自动加上日期。 121 | 3为不自动切割的单个文件的日志(不切割文件就不会出现所谓进程安不安全的问题) 122 | 4为 WatchedFileHandler,这个是需要在linux下才能使用,需要借助lograte外力进行日志文件的切割,多进程安全。 123 | 5 为第三方的concurrent_log_handler.ConcurrentRotatingFileHandler按日志文件大小切割的文件日志, 124 | 这个是采用了文件锁,多进程安全切割,文件锁在linux上使用fcntl性能还行,win上使用win32con性能非常惨。按大小切割建议不要选第5个个filehandler而是选择第1个。 125 | """ 126 | 127 | LOG_LEVEL_FILTER = logging.DEBUG # 默认日志级别,低于此级别的日志不记录了。例如设置为INFO,那么logger.debug的不会记录,只会记录logger.info以上级别的。 128 | FILTER_WORDS_PRINT = ["测试过滤字符串的呀", "阿弥陀佛", "善哉善哉"] 129 | 130 | 131 | RUN_ENV = 'test' 132 | 133 | FORMATTER_DICT = { 134 | 1: logging.Formatter( 135 | '日志时间【%(asctime)s】 - 日志名称【%(name)s】 - 文件【%(filename)s】 - 第【%(lineno)d】行 - 日志等级【%(levelname)s】 - 日志信息【%(message)s】', 136 | "%Y-%m-%d %H:%M:%S"), 137 | 2: logging.Formatter( 138 | '%(asctime)s - %(name)s - %(filename)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s', 139 | "%Y-%m-%d %H:%M:%S"), 140 | 3: logging.Formatter( 141 | '%(asctime)s - %(name)s - 【 File "%(pathname)s", line %(lineno)d, in %(funcName)s 】 - %(levelname)s - %(message)s', 142 | "%Y-%m-%d %H:%M:%S"), # 一个模仿traceback异常的可跳转到打印日志地方的模板 143 | 4: logging.Formatter( 144 | '%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s - File "%(pathname)s", line %(lineno)d ', 145 | "%Y-%m-%d %H:%M:%S"), # 这个也支持日志跳转 146 | 5: logging.Formatter( 147 | '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', 148 | "%Y-%m-%d %H:%M:%S"), # 我认为的最好的模板,推荐 149 | 6: logging.Formatter('%(name)s - %(asctime)-15s - %(filename)s - %(lineno)d - %(levelname)s: %(message)s', 150 | "%Y-%m-%d %H:%M:%S"), 151 | 7: logging.Formatter('%(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 一个只显示简短文件名和所处行数的日志模板 152 | 153 | 8: JsonFormatterJumpAble('%(asctime)s %(name)s %(levelname)s %(message)s %(pathname)s %(lineno)d %(funcName)s %(process)d %(thread)d', "%Y-%m-%d %H:%M:%S", json_ensure_ascii=False,json_default=_json_translate), # 这个是json日志,方便分析. 
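# Editorial illustration (values made up, not from the source): with template 8 every
# record is emitted as one JSON object per line, and JsonFormatterJumpAble above injects
# a clickable "<pathname>:<lineno>" key plus "ip" and "host_name", so a record looks
# roughly like:
#   {"asctime": "2024-01-01 12:00:00", "name": "demo", "levelname": "INFO", "message": "hi",
#    "pathname": "/app/demo.py", "lineno": 10, "/app/demo.py:10": "", "ip": "10.0.0.5", "host_name": "pc1"}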
154 | 155 | 9: logging.Formatter( 156 | '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', 157 | "%Y-%m-%d %H:%M:%S"), # 对5改进,带进程和线程显示的日志模板。 158 | 10: logging.Formatter( 159 | '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 对7改进,带进程和线程显示的日志模板。 160 | 11: logging.Formatter( 161 | f'({computer_ip},{computer_name})-[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"), # 对7改进,带进程和线程显示的日志模板以及ip和主机名。 162 | } 163 | 164 | FORMATTER_KIND = 5 # 如果get_logger不指定日志模板,则默认选择第几个模板 165 | 166 | 167 | -------------------------------------------------------------------------------- /tests/nb_log_test_multi_process.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from nb_log import get_logger 4 | from multiprocessing import Process 5 | 6 | logger1 = get_logger('f1',log_filename='f1d.log',log_path='/pythonlogs',is_add_stream_handler=False,log_file_handler_type=6) 7 | logger2 = get_logger('f2',log_filename='f2d.log',log_path='/pythonlogs',is_add_stream_handler=False) 8 | 9 | 10 | def fun(): 11 | for i in range(10000): 12 | logger1.warning(f'hi {i}') 13 | logger2.debug(f'hello {i}') 14 | 15 | 16 | if __name__ == '__main__': 17 | Process(target=fun).start() 18 | Process(target=fun).start() 19 | 20 | time.sleep(10000) -------------------------------------------------------------------------------- /tests/recod_flask_log/nb_log_flask.py: -------------------------------------------------------------------------------- 1 | """ 2 | 这个代码里面没有手写任何怎么记录flask的请求和flask异常到日志,但是可以自动记录. 3 | 这就是大神玩日志,懂命名空间. 4 | 5 | 这才是正解, werkzeug 命名空间加上各种handler, 6 | 只要请求接口,就可以记录日志到控制台和文件werkzeug.log了, 7 | 8 | 这里的flask的app的name写的是myapp ,flask框架生成的日志命名空间复用app.name, 9 | 所以给myapp加上handler,那么flask接口函数报错,就可以自动记录堆栈报错到 myapp.log 和控制台了. 10 | 11 | """ 12 | 13 | from flask import Flask, request 14 | import nb_log 15 | 16 | app = Flask('myapp') 17 | 18 | nb_log.get_logger('werkzeug', log_filename='werkzeug.log') 19 | 20 | nb_log.get_logger('myapp', log_filename='myapp.log') 21 | 22 | 23 | @app.route('/') 24 | def hello(): 25 | # 接口中无需写日志记录请求了什么url和入参 26 | return 'Hello World!' 27 | 28 | 29 | @app.route('/api2') 30 | def api2(): 31 | # 接口中无需写日志记录报什么错了 32 | 1 / 0 33 | return '2222' 34 | 35 | 36 | if __name__ == '__main__': 37 | app.run(port=5002) -------------------------------------------------------------------------------- /tests/recod_flask_log/笨瓜方式.py: -------------------------------------------------------------------------------- 1 | """ 2 | 1) 在接口中自己去手写记录请求入参和url,脱裤子放屁 3 | 2) 在接口中手写记录flask接口函数报错信息 4 | 5 | 就算你在框架层面去加日志或者接口加装饰器记录日志,来解决每个接口重复写怎么记录日志,那也是很low, 6 | 7 | 不懂日志命名空间就重复做无用功,这些记录人家框架早就帮你记录了,只是没加日志 handler,等待用户来加而已 8 | """ 9 | import traceback 10 | from flask import Flask, request 11 | from loguru import logger 12 | 13 | app = Flask(__name__) 14 | 15 | logger.add('mylog.log') 16 | 17 | 18 | @app.route('/') 19 | def hello(): 20 | logger.info('Received request: %s %s %s', request.method, request.path, request.remote_addr) # 写这行拖了裤子放屁 21 | return 'Hello World!' 
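# Editorial caveat (comment added by the editor, not in the original file): loguru
# formats messages with str.format-style "{}" placeholders, not printf-style "%s",
# so the logger.info('Received request: %s %s %s', ...) calls in this file log the
# literal text "Received request: %s %s %s" and quietly ignore the arguments.
# The loguru idiom would be:
#   logger.info('Received request: {} {} {}', request.method, request.path, request.remote_addr)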
22 | 23 | 24 | @app.route('/api2') 25 | def api2(): 26 | logger.info('Received request: %s %s %s', request.method, request.path, request.remote_addr) 27 | try: 28 | 1 / 0 # 故意1/0 报错 29 | return '2222' 30 | except Exception as e: 31 | logger.error(f'e {traceback.format_exc()}') # 写这行拖了裤子放屁 32 | 33 | 34 | if __name__ == '__main__': 35 | app.run() -------------------------------------------------------------------------------- /tests/replace_coloer.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | str2 = '''\033[0;35m很豪华\033[0m''' 4 | 5 | print(repr(str2)) 6 | 7 | import re 8 | 9 | pattern = re.compile('\\033\[.+?m') 10 | 11 | print(repr(pattern.sub('',str2))) -------------------------------------------------------------------------------- /tests/replace_hjandler.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import logging 5 | 6 | 7 | class Myhandler(logging.Handler): 8 | def emit(self, record: logging.LogRecord) -> None: 9 | print(f'操作qt5控件写入 {record.msg}') 10 | 11 | logger = logging.getLogger('abcd') 12 | 13 | # logging.StreamHandler = Myhandler 14 | 15 | logger.addHandler(logging.StreamHandler()) 16 | 17 | logger.warning('啊啊啊1') 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /tests/rotate_file_hanlder_0120.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/tests/rotate_file_hanlder_0120.py -------------------------------------------------------------------------------- /tests/t_basic_config.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import logging 5 | import time 6 | 7 | logging.basicConfig() 8 | 9 | import nb_log 10 | from nb_log import logging_tree_helper 11 | 12 | logger = nb_log.get_logger('aaa',log_level_int=10) 13 | 14 | # logging.getLogger(None,).setLevel(30) 15 | # nb_log.get_logger(None,log_level_int=30) 16 | logging.warning('cccc') 17 | 18 | 19 | logger.debug('hiuhihi') 20 | 21 | 22 | print(logging.getLogger().handlers) 23 | nb_log.logging_tree_helper.printout() 24 | 25 | 26 | time.sleep(100000) -------------------------------------------------------------------------------- /tests/t_capture_warnings_with_frequency_control.py: -------------------------------------------------------------------------------- 1 | import time 2 | import warnings 3 | 4 | import nb_log 5 | from nb_log.capture_warnings import capture_warnings_with_frequency_control 6 | import random 7 | 8 | capture_warnings_with_frequency_control(True, 5) 9 | 10 | for i in range(1000): 11 | warnings.warn(f'{random.random()}警告1', DeprecationWarning) 12 | warnings.warn(f'{random.random()}警告2', UserWarning) 13 | time.sleep(1) 14 | -------------------------------------------------------------------------------- /tests/t_fastapi.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import nb_log 5 | 6 | 7 | logger = nb_log.get_logger('testname') 8 | 9 | 10 | logger.debug('debug') 11 | logger.info('info') 12 | logger.warning('warning') 13 | logger.error('error') 14 | logger.critical('critical') -------------------------------------------------------------------------------- /tests/t_frequecy_log.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | import 
nb_log 5 | from nb_log.frequency_control_log import FrequencyControlLog 6 | 7 | logger = nb_log.get_logger('fc_log', ) 8 | 9 | for i in range(100): 10 | time.sleep(1) 11 | FrequencyControlLog(logger, interval=10).log(logging.WARN, 'aaaaa', ) 12 | FrequencyControlLog(logger, interval=5).debug('dasdsad' ) 13 | -------------------------------------------------------------------------------- /tests/t_logger_mixin.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | from nb_log.log_manager import MetaTypeFileLogger,MetaTypeLogger 6 | 7 | 8 | 9 | class Abcd(metaclass=MetaTypeLogger): 10 | @classmethod 11 | def clsf(cls): 12 | cls.logger.debug('aaaa') 13 | 14 | def f(self): 15 | self.logger.debug('bbbb') 16 | 17 | 18 | if __name__ == '__main__': 19 | Abcd.clsf() 20 | Abcd().f() -------------------------------------------------------------------------------- /tests/t_reapeat_recrod.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import logging 4 | 5 | logger= logging.getLogger('abcd') 6 | logger.addHandler(logging.StreamHandler()) 7 | logger.addHandler(logging.StreamHandler()) 8 | 9 | 10 | logger.error('一句话打印2次') -------------------------------------------------------------------------------- /tests/t_warning.py: -------------------------------------------------------------------------------- 1 | 2 | print('dddd') 3 | 4 | import funboost -------------------------------------------------------------------------------- /tests/tes_lock_level.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from nb_log import get_logger,LogManager 4 | 5 | 6 | LogManager('RedisDistributedLockContextManager').preset_log_level(30) 7 | 8 | logger = get_logger('RedisDistributedLockContextManager') 9 | 10 | 11 | logger.debug('这句话打印不出来了.RedisDistributedLockContextManager 命名空间的日志锁定了 warnning级别,不可更改') 12 | 13 | LogManager('').preset_log_level(20) 14 | get_logger('root').info('infoging') 15 | 16 | logging.RootLogger 17 | print(logging.getLogger('')) 18 | print(logging.getLogger('root')) 19 | print(logging.getLogger(None)) 20 | 21 | print(logging.getLogger() == logging.getLogger('root')) 22 | -------------------------------------------------------------------------------- /tests/tes_name_root.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import logging 4 | 5 | logger = logging.getLogger('') 6 | sh = logging.StreamHandler() 7 | sh.setLevel(10) 8 | logger.addHandler(sh) 9 | logger.setLevel(10) 10 | 11 | 12 | logger2 = logging.getLogger('p.name2') 13 | # sh = logging.StreamHandler() 14 | # sh.setLevel(10) 15 | # logger2.addHandler(sh) 16 | # logger2.setLevel(40) 17 | 18 | logger2.info('222') 19 | logger.debug('1111') 20 | -------------------------------------------------------------------------------- /tests/tes_prevent_add_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import nb_log 4 | 5 | nb_log.LogManager('name1').prevent_add_handlers() 6 | logging.getLogger() 7 | 8 | logging.RootLogger 9 | print(nb_log.log_manager.get_all_logging_name()) 10 | nb_log.log_manager.get_all_handlers() 11 | log2 = nb_log.LogManager('name1').get_logger_and_add_handlers() 12 | log2.debug('hi') -------------------------------------------------------------------------------- /tests/test.db: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/tests/test.db -------------------------------------------------------------------------------- /tests/test6.py: -------------------------------------------------------------------------------- 1 | # 2 | # # 3 | # # 4 | # # import flask 5 | # # from nb_log import get_logger 6 | # # 7 | # # 8 | # # # get_logger('flask',) 9 | # # # get_logger('werkzeug') 10 | # # 11 | # # app = flask.Flask(__name__) 12 | # # 13 | # # @app.get('/') 14 | # # def index(): 15 | # # print('hi') 16 | # # return 'hi' 17 | # # 18 | # # app.run() 19 | # 20 | # 21 | # 22 | # # import logging 23 | # # import nb_log 24 | # 25 | # # logger = logging.getLogger('a.b.c') 26 | # # 27 | # # logger.info('不会自动打印的') 28 | # # 29 | # # nb_log.get_logger(None) 30 | # # 31 | # # logger.info('这行会自动打印了,因为根命名空间加了handler') 32 | # # 33 | # # 34 | # # logger1 = logging.getLogger('aaa') 35 | # # 36 | # # logger2 = logging.getLogger('aaa') 37 | # # 38 | # # logger3 = logging.getLogger('bbb') 39 | # # 40 | # # print('logger1 id: ',id(logger1),'logger2 id: ',id(logger2),'logger3 id: ',id(logger3)) 41 | # 42 | # 43 | # # 44 | # # import logging 45 | # # from nb_log import get_logger 46 | # # 47 | # # 48 | # # logger_foo = get_logger('foo',_log_filename='foo.log') 49 | # # 50 | # # logger_bar = get_logger('bar',_log_filename='bar.log') 51 | # # 52 | # # 53 | # # logger_foo.debug('这句话将会写入foo.log文件') 54 | # # 55 | # # logger_bar.debug('这句话将会写入bar.log文件') 56 | # 57 | # import logging 58 | # from nb_log.handlers import ConcurrentRotatingFileHandler,ColorHandler 59 | # from nb_log.nb_log_config_default import FORMATTER_DICT 60 | # 61 | # logger = logging.getLogger('foo') 62 | # 63 | # logger.setLevel(logging.DEBUG) 64 | # 65 | # ch = ColorHandler() 66 | # ch.setLevel(logging.INFO) 67 | # ch.setFormatter(FORMATTER_DICT[7]) 68 | # logger.addHandler(ch) 69 | # 70 | # fh = ConcurrentRotatingFileHandler('foo.log') 71 | # fh.setLevel(logging.ERROR) 72 | # fh.setFormatter(FORMATTER_DICT[11]) 73 | # logger.addHandler(fh) 74 | # 75 | # 76 | # logger.debug('debug debug') 77 | # logger.info('info info') 78 | # logger.warning('warning warning') 79 | # logger.error('error error ') 80 | # logger.critical('critical critical') 81 | # 82 | 83 | 84 | # import logging 85 | # 86 | # logger = logging.getLogger('abc') 87 | # 88 | # print(__name__) 89 | # logger.addHandler(logging.StreamHandler()) 90 | # logger.setLevel(10) 91 | # logger.debug('hah') 92 | 93 | import requests 94 | import nb_log 95 | 96 | # nb_log.get_logger('urllib3',log_level_int=10) # log_level_int=10 或者 logging.DEBUG 97 | # 98 | # requests.get('https://ww.baidu.com') 99 | 100 | 101 | import logging 102 | import nb_log 103 | 104 | # 105 | # nb_log.LogManager('name1').preset_log_level(20) 106 | # 107 | # logger = nb_log.get_logger('name1',log_level_int=10) 108 | # # logger.setLevel(10) 109 | # logger.debug('啊啊啊') 110 | # 111 | # 112 | # import celery 113 | # 114 | # import flask 115 | # 116 | # import fastapi 117 | # nb_log.get_logger('') 118 | # logging.getLogger('b6').debug(666) 119 | # 120 | # print(logging.Manager(logging.root).loggerDict) 121 | 122 | from nb_log import get_logger 123 | 124 | from nb_log import get_logger 125 | 126 | 127 | class 废物日志类: 128 | def __init__(self,name): 129 | self.logger = get_logger(name, log_filename='废物日志.log') 130 | 131 | def debug(self, msg): 132 | self.logger.debug(msg, extra={'sys_getframe_n': 3}) # 第 x1 行 133 | 134 | def info(self, msg): 135 | self.logger.info(msg, 
extra={'sys_getframe_n': 3}) # 第 x2 行 136 | 137 | def critical(self, msg): 138 | self.logger.critical(msg, extra={'sys_getframe_n': 3},exc_info=True) # 第 x2 行 139 | 140 | try: 141 | 1/0 142 | except Exception as e: 143 | 废物日志类('命名空间1').critical('啊啊啊啊') # 第y行 -------------------------------------------------------------------------------- /tests/test9.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import logging 4 | from nb_log import get_logger 5 | 6 | import os 7 | 8 | print(os.name) 9 | """ 10 | logging.DEBUG 是个常量枚举,值是10 11 | logging.INFO 是个常量枚举,值是20 12 | logging.WARNING 是个常量枚举,值是30 13 | logging.ERROR 是个常量枚举,值是40 14 | logging.CRITICAL 是个常量枚举,值是50 15 | 16 | 用数字和常量枚举都可以。 17 | """ 18 | 19 | 20 | # 三方包里面的代码,packege1.py 21 | 22 | """ 23 | 假设下面这段代码是三方包里面的 24 | logger_a 和logger_b 是三方包里面的日志,你要调整日志级别,不可能去骚操作去三方包里面的源码修改日志级别吧? 25 | """ 26 | logger_a = get_logger('aaaa',log_level_int=logging.INFO) 27 | 28 | logger_b = get_logger('bbbb',log_level_int=20) 29 | 30 | def funa(): 31 | logger_a.info('模拟a函数里面啰嗦打印你不关心的日志aaaaa') 32 | 33 | def funb(): 34 | logger_b.info('模拟b函数里面,对你来说很重要的提醒日志打印日志bbbbb') 35 | 36 | 37 | 38 | # # 你的原来代码,调用函数funa。啰嗦输出 模拟a函数里面啰嗦打印你不关心的日志aaaaa 这句话到控制台 x1.py 39 | # funa() 40 | # funb() 41 | 42 | 43 | ## 优化日志级别后的代码,这个代码的调用funa函数将不再啰嗦的输出INFO级别日志打扰你了,funb函数仍然正常的输出INFO日志。 x2.py 44 | logging.getLogger('aaaa').setLevel(logging.ERROR) # 这里为什么入参是 aaaa 特别特别重要,如果不懂这个入参,你压根就不会调日志级别。 45 | funa() 46 | funb() 47 | 48 | -------------------------------------------------------------------------------- /tests/test_batch_print.py: -------------------------------------------------------------------------------- 1 | import time 2 | import nb_log 3 | from concurrent.futures import ThreadPoolExecutor 4 | 5 | 6 | 7 | def f(): 8 | for i in range(1000000000): 9 | time.sleep(0.002) 10 | print('1'*1000) 11 | # print(f' {"哈"*30}') 12 | 13 | 14 | pool = ThreadPoolExecutor(100) 15 | 16 | for j in range(100): 17 | pool.submit(f) -------------------------------------------------------------------------------- /tests/test_benchmark.py: -------------------------------------------------------------------------------- 1 | # import nb_log 2 | # import time 3 | # 4 | # logger = nb_log.get_logger('dsdsd',_log_filename='dsdsd.log',is_add_stream_handler=False) 5 | # 6 | # 7 | # t1 = time.perf_counter() 8 | # for i in range(100 * 10000): 9 | # logger.debug('heloo'*10) 10 | # print(time.perf_counter()-t1) 11 | # 12 | # 13 | # # nb_log的 ConcurrentRotatingFileHandlerWithBufferInitiativeWindow windwos 单进程写入100万条 115秒 14 | # # linux 58秒。 15 | import time 16 | import nb_log 17 | t1 = time.time() 18 | for i in range(10000): 19 | print(i) 20 | print(time.time()-t1) -------------------------------------------------------------------------------- /tests/test_catch_log.py: -------------------------------------------------------------------------------- 1 | from nb_log import get_logger 2 | from nb_log.log_manager import logger_catch 3 | 4 | logger = get_logger('tets_catch_log', is_use_loguru_stream_handler=False) 5 | 6 | 7 | @logger_catch(logger, reraise=False) 8 | def f(x, y): 9 | x / y 10 | 11 | 12 | f(1, 0) 13 | -------------------------------------------------------------------------------- /tests/test_colorama.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from colorama import just_fix_windows_console, Fore 4 | # from termcolor import colored 5 | # 6 | # # use Colorama to make Termcolor work on Windows too 7 | # # 
just_fix_windows_console() 8 | # 9 | # # then use Termcolor for all colored text output 10 | # print(colored('Hello, World!', 'green', 'on_red')) 11 | 12 | 13 | 14 | 15 | 16 | from colorama import init 17 | init(autoreset=True) 18 | print(Fore.RED + 'some red text') 19 | print('automatically back to default color again') 20 | 21 | 22 | 23 | 24 | 25 | time.sleep(110000) -------------------------------------------------------------------------------- /tests/test_direct_log.py: -------------------------------------------------------------------------------- 1 | import nb_log 2 | 3 | nb_log.debug('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用debug函数,那就满足这种人') 4 | nb_log.info('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用info函数,那就满足这种人') 5 | nb_log.warning('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用warning函数,那就满足这种人') 6 | nb_log.error('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用error函数,那就满足这种人') 7 | nb_log.critical('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用critical函数,那就满足这种人') 8 | 9 | from loguru import logger 10 | 11 | logger.debug('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用debug函数,那就满足这种人') 12 | logger.info('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用info函数,那就满足这种人') 13 | logger.warning('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用warning函数,那就满足这种人') 14 | logger.error('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用error函数,那就满足这种人') 15 | logger.critical('笨瓜不想实例化多个不同name的logger,不理解logging.getLogger第一个入参name的作用和好处,想直接粗暴的调用critical函数,那就满足这种人') 16 | 17 | -------------------------------------------------------------------------------- /tests/test_exception_hook.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import flask 4 | import nb_log.global_except_hook 5 | 6 | nb_log.get_logger(None) 7 | app = flask.Flask(__name__) 8 | 9 | @app.route('/') 10 | def index(): 11 | 1/0 12 | return 'hello world' 13 | 14 | if __name__ == '__main__': 15 | app.run(host='0.0.0.0', port=5000) -------------------------------------------------------------------------------- /tests/test_exception_hook_thread.py: -------------------------------------------------------------------------------- 1 | import time 2 | import asyncio 3 | import threading 4 | 5 | import nb_log.global_except_hook 6 | 7 | nb_log.get_logger(None) 8 | 9 | 10 | def f(): 11 | time.sleep(5) 12 | raise Exception('errorxixixi') 13 | 14 | async def af(): 15 | await asyncio.sleep(5) 16 | raise Exception('errorxixixi22') 17 | 18 | # threading.Thread(target=f).start() 19 | # loop = asyncio.new_event_loop() 20 | # loop.create_task(af()) 21 | 22 | 23 | time.sleep(7) 24 | 25 | raise Exception('bbb') 26 | -------------------------------------------------------------------------------- /tests/test_file_handler.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | # from auto_run_on_remote import run_current_script_on_remote 4 | # run_current_script_on_remote() 5 | from nb_log import get_logger 6 | 7 | logger = get_logger('mylog',log_filename='mylog4g.log',log_file_handler_type=6,is_add_stream_handler=False) 8 | get_logger('mylog2',log_filename='mylog4g3.log',log_file_handler_type=6,is_add_stream_handler=False) 9 | print('start') 10 | t1 = time.time() 11 | for i in range(20000): 12 | logger.error(f'testss {i}') 13 | print(time.time() -t1) 14 | print('over') 
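# Hedged addition by the editor: turn the elapsed time above into a throughput figure,
# which makes runs with different log_file_handler_type values easier to compare.
# Re-reading time.time() here also counts the print() calls, so treat it as approximate.
rate = 20000 / (time.time() - t1)
print(f'approx {rate:.0f} records/second with log_file_handler_type=6')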
15 | -------------------------------------------------------------------------------- /tests/test_filehandler.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import nb_log 4 | 5 | nb_log.get_logger('name1',log_filename='file_my.log') 6 | nb_log.get_logger('name2',log_filename='file_my.log') 7 | 8 | 9 | 10 | 11 | logger1 = nb_log.get_logger('name1',log_filename='file1.log') 12 | logger2 = nb_log.get_logger('name2',log_filename='file2.log') 13 | logger1.warning('写入file1') 14 | logger2.warning('写入file2') -------------------------------------------------------------------------------- /tests/test_filter_print.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import nb_log 4 | 5 | logger = nb_log.get_logger('test_name') 6 | 7 | str1 = '我们一起练习 阿弥陀佛' 8 | str2 = '啦啦啦德玛西亚' 9 | str3 = '嘻嘻 善哉善哉的' 10 | 11 | print(str1) 12 | print(str2) 13 | print(str3) 14 | 15 | logger.info(str1) 16 | logger.info(str2) 17 | logger.info(str3) 18 | 19 | sys.stdout.write(str1 + '\n') 20 | sys.stdout.write(str2 + '\n') 21 | sys.stdout.write(str3 + '\n') 22 | 23 | sys.stderr.write(str1 + '\n') 24 | sys.stderr.write(str2 + '\n') 25 | sys.stderr.write(str3 + '\n') 26 | -------------------------------------------------------------------------------- /tests/test_getip.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | 4 | import socket 5 | 6 | print(time.time()) 7 | 8 | def get_host_ip(): 9 | ip = '' 10 | host_name = '' 11 | # noinspection PyBroadException 12 | try: 13 | sc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 14 | sc.connect(('8.8.8.8', 80)) 15 | ip = sc.getsockname()[0] 16 | host_name = socket.gethostname() 17 | sc.close() 18 | except Exception: 19 | pass 20 | return ip, host_name 21 | 22 | 23 | computer_ip, computer_name = get_host_ip() 24 | 25 | print(time.time()) 26 | 27 | print(sys.version_info.minor) -------------------------------------------------------------------------------- /tests/test_gunicorn_dir.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from flask import Flask 4 | 5 | app = Flask(__name__) 6 | 7 | 8 | @app.route('/') 9 | def hello_world(): 10 | return 'Hello, World!' 
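# Hedged usage note (the command below is assumed, not part of this repo): to exercise
# the gunicorn working-directory case that this file's name suggests, run the module
# without a __main__ entry point, e.g.
#   gunicorn -w 2 -b 127.0.0.1:8000 test_gunicorn_dir:app
# and then check which folder nb_log resolves as its default log directory.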
11 | 12 | 13 | if __name__ == '__main__': 14 | app.run() -------------------------------------------------------------------------------- /tests/test_icecream.py: -------------------------------------------------------------------------------- 1 | 2 | from icecream import ic 3 | ic.configureOutput(includeContext=False) 4 | 5 | name = "Alice" 6 | age = 30 7 | ic(name, age) 8 | 9 | def foo(i): 10 | return i + 333 11 | 12 | ic(foo(123)) -------------------------------------------------------------------------------- /tests/test_level.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from nb_log import get_logger 4 | get_logger(None,log_level_int=logging.WARNING) 5 | logger = get_logger('name2',log_level_int=logging.INFO) 6 | 7 | logging.disable(logging.ERROR) 8 | 9 | logger.debug('debug的消息') 10 | logger.info('info的消息') 11 | logger.error('error的消息') 12 | 13 | -------------------------------------------------------------------------------- /tests/test_logged_exc.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | from nb_log.log_manager import LoggedException,logged_raise 5 | 6 | try : 7 | 1/0 8 | except Exception as e: 9 | logged_raise(ZeroDivisionError('0不能是被除数')) 10 | 11 | 12 | # def f(): 13 | # try : 14 | # 1/0 15 | # except Exception as e: 16 | # raise LoggedException(message='有问题',) 17 | # 18 | # for i in range(10): 19 | # try: 20 | # f() 21 | # except Exception as e: 22 | # pass 23 | # print(e) 24 | 25 | # class MyEXc(Exception): 26 | # def __init__(self,a,b): 27 | # self.a = a 28 | # self.b =b 29 | # 30 | # 31 | # raise MyEXc(1,2) 32 | -------------------------------------------------------------------------------- /tests/test_loguru)Logger.py: -------------------------------------------------------------------------------- 1 | 2 | import pymysql 3 | 4 | from loguru import logger 5 | 6 | # 创建Logger实例 7 | log = logger.Logger() 8 | 9 | # 添加日志处理器 10 | log.add("logs/mylog_{time}.log", rotation="500 MB") 11 | 12 | # 记录日志消息 13 | log.info("This is a log message") 14 | 15 | # 记录带有上下文信息的日志消息 16 | log.bind(user="John").info("User logged in") 17 | 18 | # 记录异常信息 19 | try: 20 | # 一些可能引发异常的代码 21 | raise ValueError("Something went wrong") 22 | except Exception as e: 23 | log.exception(e) -------------------------------------------------------------------------------- /tests/test_loguru.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from loguru import logger 4 | from concurrent.futures import ProcessPoolExecutor 5 | 6 | 7 | # logger.remove(handler_id=None) 8 | 9 | logger.add("./log_files/loguru-test1.log",enqueue=True,rotation="10000 KB") 10 | 11 | def f(): 12 | for i in range(2): 13 | logger.debug("测试多进程日志切割") 14 | logger.info("测试多进程日志切割") 15 | logger.warning("测试多进程日志切割") 16 | logger.error("测试多进程日志切割") 17 | logger.critical("测试多进程日志切割") 18 | 19 | 20 | pool = ProcessPoolExecutor(10) 21 | if __name__ == '__main__': 22 | """ 23 | 100万条需要115秒 24 | 15:12:23 25 | 15:14:18 26 | 27 | 200万条需要186秒 28 | """ 29 | print(time.strftime("%H:%M:%S")) 30 | for _ in range(10): 31 | pool.submit(f) 32 | pool.shutdown() 33 | print(time.strftime("%H:%M:%S")) -------------------------------------------------------------------------------- /tests/test_loguru_dir/m1.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | 3 | 4 | 5 | 6 | 7 | logger.add('file.log') 
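# Editorial note: loguru exposes a single process-wide logger object, so this sink is
# global. When m2.py imports this module and adds its own sink, each logger.info() call
# is written to BOTH file.log and file2.log (plus loguru's default stderr sink) unless
# logger.remove() is called first.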
-------------------------------------------------------------------------------- /tests/test_loguru_dir/m2.py: -------------------------------------------------------------------------------- 1 | 2 | import m1 3 | 4 | 5 | from loguru import logger 6 | logger.add('file2.log') 7 | 8 | logger.info('dsadddddddddddd') -------------------------------------------------------------------------------- /tests/test_loguru_exception.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from loguru import logger 4 | import requests 5 | import logging 6 | 7 | def errorf(x,y): 8 | try: 9 | x/y 10 | except Exception as e: 11 | logger.exception(e) 12 | 13 | 14 | def error_req(url): 15 | try: 16 | requests.get(url) 17 | except Exception as e: 18 | # logger.exception('请求出错') 19 | print(logger.catch()) 20 | 21 | #errorf(2,0) 22 | error_req('http://www.baidubgg222.com') -------------------------------------------------------------------------------- /tests/test_loguru_handler.py: -------------------------------------------------------------------------------- 1 | import time 2 | from loguru import logger as lg 3 | import nb_log 4 | 5 | logger = nb_log.get_logger('name1', is_use_loguru_stream_handler=True, log_filename='testloguru_file1.log', log_file_handler_type=7) 6 | logger2 = nb_log.get_logger('name2', is_use_loguru_stream_handler=True, log_filename='testloguru_file2.log', log_file_handler_type=7) 7 | logger_nb = nb_log.get_logger('name3', is_use_loguru_stream_handler=False, log_filename='testnblog_file3.log', log_file_handler_type=6) 8 | 9 | for i in range(1): 10 | logger.debug(f'loguru debug 111111,写入文件 testloguru_file1.log') 11 | logger2.debug(f'loguru debug 222222,写入文件 testloguru_file2.log') 12 | logger_nb.debug(f'nb_log颜色模式,使用nb_log的文件写入方式') 13 | 14 | logger.info('loguru info 111111,写入文件 testloguru_file1.log') 15 | logger2.info('loguru info 222222,写入文件 testloguru_file2.log') 16 | logger_nb.info(f'nb_log颜色模式,使用nb_log的文件写入方式') 17 | 18 | logger.warning('loguru warn 111111,写入文件 testloguru_file1.log') 19 | logger2.warning('loguru warn 22222 ,写入文件 testloguru_file2.log') 20 | logger_nb.warning(f'nb_log颜色模式,使用nb_log的文件写入方式') 21 | 22 | logger.error('loguru err 1111111,写入文件 testloguru_file1.log') 23 | logger2.error('loguru err 2222222,写入文件 testloguru_file2.log') 24 | logger_nb.error(f'nb_log颜色模式,使用nb_log的文件写入方式') 25 | 26 | logger.critical('loguru critical 111111,写入文件 testloguru_file1.log') 27 | logger2.critical('loguru caritical 222222,写入文件 testloguru_file2.log') 28 | logger_nb.critical(f'nb_log颜色模式,使用nb_log的文件写入方式') 29 | 30 | time.sleep(1) 31 | 32 | import requests 33 | 34 | nb_log.get_logger('urllib3', is_use_loguru_stream_handler=True) 35 | 36 | requests.get('http://www.baidu.com') 37 | 38 | 39 | def errorf(x,y): 40 | try: 41 | x/y 42 | except Exception as e: 43 | logger.exception(e) 44 | 45 | 46 | def error_req(url): 47 | try: 48 | requests.get(url) 49 | except Exception as e: 50 | lg.exception(e) 51 | 52 | errorf(2,0) 53 | error_req('http://www.baidu.com2') 54 | time.sleep(100000) 55 | -------------------------------------------------------------------------------- /tests/test_memory.py: -------------------------------------------------------------------------------- 1 | import time 2 | from multiprocessing import Process 3 | from nb_log import get_logger 4 | 5 | logger = get_logger('abcd',log_filename='abcd.log',is_add_stream_handler=False) 6 | 7 | def test(): 8 | while True: 9 | time.sleep(0.0001) # 就算要模拟多进程,最起码要sleep0.1毫秒,真实情况不可能无间隔一直超高速写日志。 10 | 
logger.info("test") 11 | 12 | 13 | if __name__ == '__main__': 14 | p = [Process(target=test) for _ in range(5)] 15 | 16 | for i in p: 17 | i.start() 18 | for i in p: 19 | i.join() -------------------------------------------------------------------------------- /tests/test_nb_log_concurrent_file_handler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import sys 4 | # sys.path.insert(0,'/home/ydf/pycodes/nb_log/') 5 | print(sys.path) 6 | from nb_log import get_logger,LogManager 7 | from concurrent.futures import ProcessPoolExecutor 8 | # logger = get_logger('test_nb_log_conccreent',is_add_stream_handler=False,_log_filename='test_nb_log_conccreent45.log',log_file_handler_type=1) 9 | 10 | 11 | 12 | from auto_run_on_remote import run_current_script_on_remote 13 | 14 | logger = LogManager('test_nb_log_conccreent').get_logger_and_add_handlers(is_add_stream_handler=False, 15 | log_filename='test_nb_log_conccreent556k.log', log_file_handler_type=6, 16 | # log_path='/root/pythonlogs' 17 | ) 18 | 19 | # logger.warning('xxxx') 20 | 21 | def f(x): 22 | for i in range(10000): 23 | time.sleep(0.0001) 24 | logger.warning(f'{os.getpid()} {x} {i} 哈哈哈') 25 | 26 | # logger.warning('aaaaa') 27 | if __name__ == '__main__': 28 | # run_current_script_on_remote() 29 | # 200万条 45秒 30 | pass 31 | 32 | pool = ProcessPoolExecutor(5) 33 | print('start') 34 | for i in range(5): 35 | pool.submit(f,i) 36 | # pool.shutdown() 37 | # print('end') 38 | 39 | # time.sleep(2) -------------------------------------------------------------------------------- /tests/test_panda.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import pandas as pd 4 | 5 | # from nb_log import get_logger 6 | import nb_log 7 | 8 | data = [ 9 | {"Name": "Alice", "Age": 25, "City": "New York"}, 10 | {"Name": "Bob", "Age": 30, "City": "Los Angeles"} 11 | ] 12 | 13 | df = pd.DataFrame(data) -------------------------------------------------------------------------------- /tests/test_preset_log_level.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Process 2 | 3 | from nb_log import get_logger,LogManager 4 | 5 | 6 | LogManager('abcd').preset_log_level(10) 7 | 8 | 9 | def f(): 10 | logger = get_logger('abcd',log_level_int=10) 11 | logger.debug('哈') 12 | 13 | 14 | if __name__ == '__main__': 15 | Process(target=f).start() -------------------------------------------------------------------------------- /tests/test_raotaing_filehandler.py: -------------------------------------------------------------------------------- 1 | """ 2 | 这个文件是个错误例子,多进程还想按大小切割文件,由于在达到指定大小瞬间,a进程切割了文件,b进程不到文件句柄了。 3 | 只有nb_log才能解决多进程文件切割,实现难度很高。 4 | 5 | 说的是多进程下不行,多线程任何handler都是安全的。说的是多进程不行不是多线程不行!!!! 
6 | 此demo会疯狂报错。 7 | """ 8 | from logging.handlers import RotatingFileHandler 9 | import logging 10 | from multiprocessing import Process 11 | 12 | logger = logging.getLogger('test_raotating_filehandler') 13 | 14 | logger.addHandler(RotatingFileHandler(filename='testratationg.log',maxBytes=1000 *100,backupCount=10)) 15 | 16 | def f(): 17 | while 1: 18 | logger.warning('测试多进程切片出错不') 19 | 20 | if __name__ == '__main__': 21 | for _ in range(10): 22 | Process(target=f).start() -------------------------------------------------------------------------------- /tests/test_raw_concurrent_log_handler/_trial_temp/_trial_marker: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ydf0509/nb_log/39577079da0cbb9baa33c2bb6c468571e7a2e811/tests/test_raw_concurrent_log_handler/_trial_temp/_trial_marker -------------------------------------------------------------------------------- /tests/test_raw_concurrent_log_handler/test_concurent_log_performence.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | import concurrent_log_handler 5 | 6 | logger = logging.getLogger("MyExample") 7 | logger.setLevel(logging.DEBUG) # optional to set this level here 8 | 9 | handler = concurrent_log_handler.ConcurrentTimedRotatingFileHandler( 10 | filename="/pythonlogs/logger_fj.log", mode="a", maxBytes=1000 * 1000 * 100, backupCount=6,when='d', 11 | ) 12 | 13 | handler = logging.FileHandler( 14 | filename="/pythonlogs/logger_fj.log", mode="a", 15 | ) 16 | 17 | logger.addHandler(handler) 18 | logger.setLevel(logging.DEBUG) 19 | 20 | print(time.strftime('%Y_%m_%d %H:%M:%S')) 21 | for i in range(100000): 22 | logger.info(f'abcdefgfgbfgddhfgdhjfgjfghkjhkggj {i}') 23 | print(time.strftime('%Y_%m_%d %H:%M:%S')) -------------------------------------------------------------------------------- /tests/test_raw_concurrent_log_handler/test_namerxx.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | import logging # noqa: INP001 5 | import logging.config 6 | import time 7 | from datetime import date 8 | 9 | """ 10 | This is an example which shows how you can use 11 | custom namer function with ConcurrentRotatingFileHandler 12 | """ 13 | 14 | 15 | def log_file_namer(logger_name: str) -> str: 16 | # path/name.log.N 17 | print('logger_name:',logger_name) 18 | logger_name, backup_number = logger_name.rsplit(".", maxsplit=1) 19 | # path/name.log 20 | logger_name = logger_name.replace(".log", "") 21 | # curr_date = date.today().strftime("%Y_%m_%d") # noqa: DTZ011 22 | curr_date = time.strftime("%Y_%m_%d_%H_%M_%S") # noqa: DTZ011 23 | 24 | return f"{logger_name}_{curr_date}_({backup_number}).log" 25 | 26 | 27 | def my_program(): 28 | import concurrent_log_handler 29 | 30 | # Now for the meat of your program... 31 | logger = logging.getLogger("MyExample") 32 | logger.setLevel(logging.DEBUG) # optional to set this level here 33 | 34 | handler = concurrent_log_handler.ConcurrentRotatingFileHandler( 35 | "/pythonlogs/logger_name_testc.log", "a", maxBytes=512000, backupCount=6 36 | ) 37 | handler.namer = log_file_namer 38 | logger.addHandler(handler) 39 | 40 | for idx in range(0, 500): 41 | time.sleep(0.1) 42 | print("Loop %d; logging a message." 
% idx) 43 | logger.debug("%d > A debug message.", idx) 44 | if idx % 2 == 0: 45 | logger.info("%d > An info message.", idx) 46 | print("Done with example; exiting.") 47 | 48 | 49 | if __name__ == "__main__": 50 | my_program() -------------------------------------------------------------------------------- /tests/test_raw_concurrent_log_handler/test_tyimne_rotate.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | 4 | import concurrent_log_handler 5 | 6 | def log_file_namer(logger_name: str) -> str: 7 | # path/name.log.N 8 | print('logger_name:',logger_name) 9 | logger_name, backup_number = logger_name.rsplit(".", maxsplit=1) 10 | # path/name.log 11 | logger_name = logger_name.replace(".log", "") 12 | # curr_date = date.today().strftime("%Y_%m_%d") # noqa: DTZ011 13 | curr_date = time.strftime("%Y_%m_%d_%H_%M_%S") # noqa: DTZ011 14 | 15 | return f"{logger_name}_{curr_date}_({backup_number}).log" 16 | 17 | 18 | # Now for the meat of your program... 19 | logger = logging.getLogger("MyExample") 20 | logger.setLevel(logging.DEBUG) # optional to set this level here 21 | 22 | handler = concurrent_log_handler.ConcurrentTimedRotatingFileHandler( 23 | filename="/pythonlogs/logger_name_testc.log", mode="a", maxBytes=5, backupCount=6,when='s', 24 | ) 25 | # handler.namer = log_file_namer 26 | logger.addHandler(handler) 27 | 28 | for idx in range(0, 500): 29 | time.sleep(0.1) 30 | print("Loop %d; logging a message." % idx) 31 | logger.debug("%d > A debug message.", idx) 32 | if idx % 2 == 0: 33 | logger.info("%d > An info message.", idx) 34 | print("Done with example; exiting.") -------------------------------------------------------------------------------- /tests/test_reapet.py: -------------------------------------------------------------------------------- 1 | """ 2 | 演示重复,由于封装错误的类造成的。模拟一个封装严重失误错误的封装例子。 3 | 4 | 这个代码惨烈程度达到10级。明明是想记录10000次日志,结果却记录了 10000 * 10001 /2 次。 5 | 如果把f函数调用100万次,那么控制台和文件将会各记录5000亿次,日志会把代码拖累死。 6 | 不好好理解观察者模式有多惨烈。因为反复添加观察者(handler), 7 | 导致第1次调用记录1次,第二次调用时候记录2次,第10次调用时候记录10次,这成了高斯求和算法了。 8 | 9 | 造成的后果可想而知,长期部署运行后,不仅项目代码性能几乎被日志占了99%,还造成磁盘被弄爆炸。 10 | 11 | """ 12 | import logging 13 | import time 14 | 15 | 16 | class LogUtil: 17 | def __init__(self): 18 | self.logger = logging.getLogger('a') 19 | self.logger.setLevel(logging.DEBUG) 20 | self._add_stream_handler() 21 | self._add_file_handler() 22 | 23 | def _add_stream_handler(self): 24 | sh = logging.StreamHandler() 25 | sh.setFormatter(logging.Formatter(fmt="%(asctime)s-%(name)s-%(levelname)s-%(message)s")) 26 | self.logger.addHandler(sh) 27 | 28 | def _add_file_handler(self): 29 | fh = logging.FileHandler('a.log') 30 | fh.setFormatter(logging.Formatter(fmt="%(asctime)s-%(name)s-%(levelname)s-%(message)s")) 31 | self.logger.addHandler(fh) 32 | 33 | def debug(self, msg): 34 | self.logger.debug(msg) 35 | 36 | def info(self, msg): 37 | self.logger.info(msg) 38 | 39 | 40 | def f(x): 41 | log = LogUtil() # 重点是这行,写在了函数内部。没有做命名空间的handlers判断控制,也没写单利或者享元模式。 42 | log.debug(x) 43 | 44 | 45 | t1 = time.time() 46 | for i in range(10000): 47 | f(i) 48 | 49 | print(time.time() - t1) 50 | -------------------------------------------------------------------------------- /tests/test_requests.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from nb_log import get_logger 3 | import pymysql 4 | 5 | logger = get_logger('urllib3', log_filename='urllib3.log', ) 6 | 7 | logger2 = get_logger('pymysql', log_filename='pymysql.log', 
) 8 | requests.get('http://www.baidu.com') 9 | 10 | 11 | conn = pymysql.Connection(user='root',password='123456') 12 | cur = conn.cursor() 13 | 14 | cur.execute('select "abc" as x') 15 | print(cur.fetchall()) 16 | # logger.debug(11, extra={'c': 5, 'd': 6}) 17 | # logger.info(22) 18 | # logger.warning(33) 19 | # logger.error(44) 20 | # logger.critical(55,extra=dict(f=7,g=8,h=9)) 21 | # 22 | # logger.debug('哈哈哈哈', extra=dict(a=1, b=2)) 23 | # try: 24 | # 1 / 0 25 | # except Exception as e: 26 | # logger.exception('错误了') 27 | -------------------------------------------------------------------------------- /tests/test_rotate_error.py: -------------------------------------------------------------------------------- 1 | """ 2 | 只要满足3个条件 3 | 1.文件日志 4 | 2.文件日志按大小或者时间切割 5 | 3.多进程写入同一个log文件,可以是代码内部multiprocess.Process启动测试, 6 | 也可以代码内容本身不用多进程但把脚本反复启动运行多个来测试。 7 | 8 | 把切割大小或者切割时间设置的足够小就很容易频繁必现,平时有的人没发现是由于把日子设置成了1000M切割或者1天切割, 9 | 自测时候只随便运行一两下就停了,日志没达到需要切割的临界值,所以不方便观察到切割日志文件的报错。 10 | 11 | 这里说的是多进程文件日志切割报错,有的人老说多线程,简直是服了。 12 | 面试时候把多进程和多线程区别死记硬背 背的一套一套很溜的,结果实际运用连进程和线程都不分。 13 | """ 14 | from logging.handlers import RotatingFileHandler 15 | import logging 16 | from multiprocessing import Process 17 | from threading import Thread 18 | 19 | logger = logging.getLogger('test_raotating_filehandler') 20 | 21 | logger.addHandler(RotatingFileHandler(filename='testratationg.log',maxBytes=1000 *100,backupCount=10)) 22 | 23 | def f(): 24 | while 1: 25 | logger.warning('这个代码会疯狂报错,因为设置了100Kb就切割并且在多进程下写入同一个日志文件'*20) 26 | 27 | if __name__ == '__main__': 28 | for _ in range(10): 29 | Process(target=f).start() # 反复强调的是 文件日志切割并且多进程写入同一个文件,会疯狂报错 30 | # Thread(target=f).start() # 多线程没事,所有日志handler无需考虑多线程是否安全,说的是多进程文件日志切割不安全,你老说多线程干嘛? -------------------------------------------------------------------------------- /tests/test_speed.py: -------------------------------------------------------------------------------- 1 | 2 | import time 3 | import nb_log 4 | 5 | logger=nb_log.get_logger('cc') 6 | 7 | t_start = time.time() 8 | for i in range(100000): 9 | logger.debug(111) 10 | print(time.time() - t_start) -------------------------------------------------------------------------------- /tests/test_str_in_perf.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | str1 = ''' 4 | (192.168.43.162,SC-202202121439)-[p20300_t2898176036000] 2023-05-13 19:59:00 - CeleryConsumer--celery_q3 - "celery_consumer.py:172" - DEBUG - 这条消息是 celery 从 celery_q3 队列中取出 ,是由 celery 框架调度 f1 函数处理: args: () , kwargs: {'x': 105, 'y': 210} 5 | 19:59:03 "D:\codes\funboost\test_frame\test_broker_celery\test_celery_consume2.py:16" 哈哈哈 105 210 6 | [2023-05-13 19:59:03,248: INFO/MainProcess] Task celery_q3[celery_q3_result:8e5f8526-0174-456c-a95f-ae6b82c662a9] succeeded in 3.0319999999919673s: 315 7 | (192.168.43.162,SC-202202121439)-[p20300_t2898176994112] 2023-05-13 19:59:05 - CeleryConsumer--celery_q3 - "celery_consumer.py:172" - DEBUG - 这条消息是 celery 从 celery_q3 队列中取出 ,是由 celery 框架调度 f1 函数处理: args: () , kwargs: {'x': 106, 'y': 212} 8 | 19:59:08 "D:\codes\funboost\test_frame\test_broker_celery\test_celery_consume2.py:16" 哈哈哈 106 212 9 | ''' 10 | 11 | t1 = time.time() 12 | for i in range(1000000): 13 | strx = f'{i}dsfdsf二位热污染看过的广泛地dgdgtrrt烦得很规范化股份将基于人体我看我oiytrbhhttr台湾人提问题{i}' 14 | boolx = strx in str1 15 | 16 | 17 | print(time.time() - t1) -------------------------------------------------------------------------------- /tests/test_struct_log.py: 
-------------------------------------------------------------------------------- 1 | 2 | import logging 3 | import structlog 4 | 5 | # Configure the standard logging backend 6 | logging.basicConfig(format="%(message)s", level=logging.INFO) 7 | logger = structlog.get_logger() 8 | 9 | # Emit structured log events 10 | logger.info("user_logged_in", user_id=123, user_name="Alice") 11 | logger.error("user_login_failed", user_id=123, reason="Invalid password") 12 | 13 | 14 | print(7/4) -------------------------------------------------------------------------------- /tests/test_sys_color.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | 4 | from nb_log import get_logger 5 | 6 | logger = get_logger('lalala',log_filename='lalala.log',formatter_template=7,log_file_handler_type=2) 7 | # logger = get_logger('hihihi',) 8 | 9 | logger.debug(f'debug是绿色,说明是调试的,代码ok ') 10 | logger.info('info是天蓝色,日志正常 ') 11 | logger.warning('黄色yellow,有警告了 ') 12 | logger.error('粉红色说明代码有错误 ') 13 | logger.critical('血红色,说明发生了严重错误 ') 14 | print('print被自动转化成蓝色') 15 | 16 | sys.stdout.write( 17 | f'\033[0;37;44m 我我我 \033[0m \n') # 36 93 96 94 18 | 19 | 20 | 21 | # sys.stdout.write(') 22 | 23 | sys.stdout.write("\033[32m这是绿色前景的文本\033[0m") 24 | 25 | sys.stdout.write("\033[42m这是绿色背景的文本\033[0m") 26 | 27 | sys.stdout.write("\033[0;44m这是蓝色背景的文本\033[0m") 28 | 29 | 30 | sys.stdout.write('\033[0;31m assist_msg\033[0m \033[0;37;41m effective_information_msg\033[0m') 31 | 32 | 33 | time.sleep(10000) -------------------------------------------------------------------------------- /tests/test_timed_raotaing_filehandler.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | """ 4 | This file is a counter-example: multiple processes rotating the same log file by time. At the moment the rotation point is reached, process a rotates the file and process b can no longer get the file handle. 5 | Only nb_log solves multiprocess-safe file rotation; it is very hard to implement. 6 | """ 7 | from logging.handlers import TimedRotatingFileHandler 8 | import logging 9 | from multiprocessing import Process 10 | 11 | logger = logging.getLogger('test_raotating_filehandler') 12 | 13 | logger.addHandler(TimedRotatingFileHandler(filename='test_timed_ratationg.log',when='S',interval=5)) 14 | 15 | def f(): 16 | while 1: 17 | logger.warning('测试多进程按时间切片出错不') 18 | 19 | if __name__ == '__main__': 20 | for _ in range(10): 21 | Process(target=f).start() -------------------------------------------------------------------------------- /tests/test_tornado_log.py: -------------------------------------------------------------------------------- 1 | 2 | import nb_log 3 | import tornado.ioloop 4 | import tornado.web 5 | 6 | nb_log.get_logger('tornado',log_filename='tornado.log') 7 | 8 | # nb_log.get_logger('tornado') 9 | # 10 | # access_log = nb_log.get_logger("tornado.access") 11 | # print(access_log) 12 | # app_log = nb_log.get_logger("tornado.application") 13 | # gen_log = nb_log.get_logger("tornado.general") 14 | 15 | 16 | class MainHandler(tornado.web.RequestHandler): 17 | def get(self): 18 | print('hw') 19 | # self.set_status(434) 20 | self.write("Hello world") 21 | 22 | 23 | class Application(tornado.web.Application): 24 | def __init__(self): 25 | handlers = [ 26 | (r'/index', MainHandler), 27 | ] 28 | tornado.web.Application.__init__(self, handlers) 29 | 30 | if __name__=="__main__": 31 | app = Application() 32 | app.listen(8001) 33 | print("Tornado Started in port 8001, http://127.0.0.1:8001") 34 | tornado.ioloop.IOLoop.current().start() -------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/nb_log_config.py:
-------------------------------------------------------------------------------- 1 | # coding=utf8 2 | """ 3 | 此文件nb_log_config.py是自动生成到python项目的根目录的。 4 | 在这里面写的变量会覆盖此文件nb_log_config_default中的值。对nb_log包进行默认的配置。用户是无需修改nb_log安装包位置里面的配置文件的。 5 | 6 | 但最终配置方式是由get_logger_and_add_handlers方法的各种传参决定,如果方法相应的传参为None则使用这里面的配置。 7 | """ 8 | 9 | """ 10 | 如果反对日志有各种彩色,可以设置 DEFAULUT_USE_COLOR_HANDLER = False 11 | 如果反对日志有块状背景彩色,可以设置 DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = False 12 | 如果想屏蔽nb_log包对怎么设置pycahrm的颜色的提示,可以设置 WARNING_PYCHARM_COLOR_SETINGS = False 13 | 如果想改变日志模板,可以设置 FORMATTER_KIND 参数,只带了7种模板,可以自定义添加喜欢的模板 14 | LOG_PATH 配置文件日志的保存路径的文件夹。 15 | """ 16 | import sys 17 | # noinspection PyUnresolvedReferences 18 | import logging 19 | import os 20 | # noinspection PyUnresolvedReferences 21 | from pathlib import Path # noqa 22 | import socket 23 | from pythonjsonlogger.jsonlogger import JsonFormatter 24 | 25 | 26 | def get_host_ip(): 27 | ip = '' 28 | host_name = '' 29 | # noinspection PyBroadException 30 | try: 31 | sc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 32 | sc.connect(('8.8.8.8', 80)) 33 | ip = sc.getsockname()[0] 34 | host_name = socket.gethostname() 35 | sc.close() 36 | except Exception: 37 | pass 38 | return ip, host_name 39 | 40 | 41 | computer_ip, computer_name = get_host_ip() 42 | 43 | 44 | class JsonFormatterJumpAble(JsonFormatter): 45 | def add_fields(self, log_record, record, message_dict): 46 | # log_record['jump_click'] = f"""File '{record.__dict__.get('pathname')}', line {record.__dict__.get('lineno')}""" 47 | log_record[f"{record.__dict__.get('pathname')}:{record.__dict__.get('lineno')}"] = '' # 加个能点击跳转的字段。 48 | log_record['ip'] = computer_ip 49 | log_record['host_name'] = computer_name 50 | super().add_fields(log_record, record, message_dict) 51 | if 'for_segmentation_color' in log_record: 52 | del log_record['for_segmentation_color'] 53 | 54 | 55 | DING_TALK_TOKEN = '3dd0eexxxxxadab014bd604XXXXXXXXXXXX' # 钉钉报警机器人 56 | 57 | EMAIL_HOST = ('smtp.sohu.com', 465) 58 | EMAIL_FROMADDR = 'aaa0509@sohu.com' # 'matafyhotel-techl@matafy.com', 59 | EMAIL_TOADDRS = ('cccc.cheng@silknets.com', 'yan@dingtalk.com',) 60 | EMAIL_CREDENTIALS = ('aaa0509@sohu.com', 'abcdefg') 61 | 62 | ELASTIC_HOST = '127.0.0.1' 63 | ELASTIC_PORT = 9200 64 | 65 | KAFKA_BOOTSTRAP_SERVERS = ['192.168.199.202:9092'] 66 | ALWAYS_ADD_KAFKA_HANDLER_IN_TEST_ENVIRONENT = False 67 | 68 | MONGO_URL = 'mongodb://myUserAdmin:mimamiama@127.0.0.1:27016/admin' 69 | 70 | # 项目中的print是否自动写入到文件中。值为None则不重定向print到文件中。 自动每天一个文件, 2023-06-30.my_proj.print,生成的文件位置在定义的LOG_PATH 71 | # 如果你设置了环境变量,export PRINT_WRTIE_FILE_NAME="my_proj.print" (linux临时环境变量语法,windows语法自己百度这里不举例),那就优先使用环境变量中设置的文件名字,而不是nb_log_config.py中设置的名字 72 | PRINT_WRTIE_FILE_NAME = Path(sys.path[1]).name + '.print' 73 | 74 | # 项目中的所有标准输出(不仅包括print,还包括了streamHandler日志)都写入到这个文件,为None将不把标准输出重定向到文件。自动每天一个文件, 2023-06-30.my_proj.std,生成的文件位置在定义的LOG_PATH 75 | # 如果你设置了环境变量,export SYS_STD_FILE_NAME="my_proj.std" (linux临时环境变量语法,windows语法自己百度这里不举例),那就优先使用环境变量中设置的文件名字,,而不是nb_log_config.py中设置的名字 76 | SYS_STD_FILE_NAME = Path(sys.path[1]).name + '.std' 77 | 78 | USE_BULK_STDOUT_ON_WINDOWS = True # 在windows上的stdout.write是否批量stdout,win的io性能没有linux好 79 | 80 | DEFAULUT_USE_COLOR_HANDLER = True # 是否默认使用有彩的日志。 81 | DISPLAY_BACKGROUD_COLOR_IN_CONSOLE = True # 在控制台是否显示彩色块状的日志。为False则不使用大块的背景颜色。 82 | AUTO_PATCH_PRINT = True # 是否自动打print的猴子补丁,如果打了猴子补丁,print自动变色和可点击跳转。 83 | SHOW_PYCHARM_COLOR_SETINGS = True # 有的人很反感启动代码时候提示教你怎么优化pycahrm控制台颜色,可以把这里设置为False 84 | 85 | 
85 | DEFAULT_ADD_MULTIPROCESSING_SAFE_ROATING_FILE_HANDLER = False  # whether logs are also written to a log file by default when the user does not pass log_filename; if so, they go automatically to a file named <logger namespace>.log.
86 | LOG_FILE_SIZE = 100  # in MB; the rotation size of each file: once a file exceeds this, it is rotated automatically.
87 | LOG_FILE_BACKUP_COUNT = 2  # for one log file, at most this many backups are kept by default; older ones are deleted.
88 | 
89 | LOG_PATH = '/pythonlogs'  # default log folder; without an explicit drive letter this is /pythonlogs at the root of the disk the project code is on.
90 | # LOG_PATH = Path(__file__).absolute().parent / Path("pythonlogs")  # configured this way, a pythonlogs folder is created automatically under your project root and written to.
91 | if os.name == 'posix':  # non-root linux users and mac users have no permission to write to /pythonlogs, so the default is moved under home/[username]; e.g. for linux user xiaomin, logs are written under /home/xiaomin/pythonlogs.
92 |     home_path = os.environ.get("HOME", '/')  # the current user's home directory on linux; no need to set this yourself.
93 |     LOG_PATH = Path(home_path) / Path('pythonlogs')  # linux/mac permissions are strict; non-root users cannot write to /pythonlogs, so the default is adjusted.
94 | # print('LOG_PATH:',LOG_PATH)
95 | 
96 | LOG_FILE_HANDLER_TYPE = 6
97 | """
98 | LOG_FILE_HANDLER_TYPE can be set to one of the five documented values, 1 to 5 (this sample config uses 6, a newer handler type not described in this docstring):
99 | 1: a multi-process-safe file log rotated by file size, my own implementation with batched writes that reduces the number of file-lock operations; tested with 10 processes writing flat out, it is about 100x faster than type 5 on windows and 5x faster on linux.
100 | 2: a multi-process-safe file log rotated daily; for the same logger, a new log file is generated each day, with the date appended to the file name.
101 | 3: a single file with no rotation at all (without rotation, the so-called process-safety problem never arises).
102 | 4: WatchedFileHandler, usable on linux only; rotation must be done by an external tool such as logrotate, and it is multi-process-safe.
103 | 5: the third-party concurrent_log_handler.ConcurrentRotatingFileHandler, rotated by file size,
104 | which uses a file lock for multi-process-safe rotation; the fcntl lock on linux performs acceptably, but the win32con lock on windows performs terribly. For size-based rotation, prefer type 1 over type 5.
105 | """
106 | 
107 | LOG_LEVEL_FILTER = logging.DEBUG  # default log level; logs below this level are not recorded. For example, at INFO, logger.debug output is dropped and only logger.info and above is recorded.
108 | # Strongly discouraged: do not raise this global level to INFO. Logs have namespaces; raise the level only on the specific chatty namespaces rather than globally.
109 | # https://nb-log-doc.readthedocs.io/zh_CN/latest/articles/c9.html#id2  section 9.5 of the docs has explained the role of the python logging namespace many times, yet some people still do not know what the logger name is for.
110 | 
111 | # Suppressed strings, implemented with an `if in {printed message}` check: if a printed message contains any string in the FILTER_WORDS_PRINT list, the message is not printed at all.
112 | # This applies both to print and to the logger's console output. It can filter noisy prints, annoying same-level log lines, or console output from third-party packages. Do not configure too many entries, or performance suffers slightly.
113 | FILTER_WORDS_PRINT = []  # e.g. if messages containing 'Amitabha' or 'sadhu sadhu' should never be printed, set FILTER_WORDS_PRINT = ['Amitabha', 'sadhu sadhu']
114 | 
115 | RUN_ENV = 'test'
116 | 
117 | FORMATTER_DICT = {
118 |     1: logging.Formatter(
119 |         'log time【%(asctime)s】 - logger name【%(name)s】 - file【%(filename)s】 - line【%(lineno)d】 - level【%(levelname)s】 - message【%(message)s】',
120 |         "%Y-%m-%d %H:%M:%S"),
121 |     2: logging.Formatter(
122 |         '%(asctime)s - %(name)s - %(filename)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s',
123 |         "%Y-%m-%d %H:%M:%S"),
124 |     3: logging.Formatter(
125 |         '%(asctime)s - %(name)s - 【 File "%(pathname)s", line %(lineno)d, in %(funcName)s 】 - %(levelname)s - %(message)s',
126 |         "%Y-%m-%d %H:%M:%S"),  # a template imitating a traceback line, clickable to jump to where the log was emitted
127 |     4: logging.Formatter(
128 |         '%(asctime)s - %(name)s - "%(filename)s" - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s - File "%(pathname)s", line %(lineno)d ',
129 |         "%Y-%m-%d %H:%M:%S"),  # this one also supports click-to-jump
130 |     5: logging.Formatter(
131 |         '%(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s',
132 |         "%Y-%m-%d %H:%M:%S"),  # in my opinion the best template, recommended
133 |     6: logging.Formatter('%(name)s - %(asctime)-15s - %(filename)s - %(lineno)d - %(levelname)s: %(message)s',
134 |                          "%Y-%m-%d %H:%M:%S"),
135 |     7: logging.Formatter('%(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"),  # a template showing only the short file name and line number
136 | 
137 |     8: JsonFormatterJumpAble('%(asctime)s - %(name)s - %(levelname)s - %(message)s - "%(filename)s %(lineno)d -" ', "%Y-%m-%d %H:%M:%S", json_ensure_ascii=False),  # json logs, convenient for elk collection and analysis.
138 | 
139 |     9: logging.Formatter(
140 |         '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(pathname)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s',
141 |         "%Y-%m-%d %H:%M:%S"),  # improvement on 5: a template that also shows the process and thread.
142 |     10: logging.Formatter(
143 |         '[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"),  # improvement on 7: a template that also shows the process and thread.
144 |     11: logging.Formatter(
145 |         f'({computer_ip},{computer_name})-[p%(process)d_t%(thread)d] %(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(funcName)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S"),  # improvement on 7: also shows the process, thread, ip and host name.
146 | }
147 | 
148 | FORMATTER_KIND = 11  # which template is chosen by default when get_logger does not specify one
149 | 
150 | 
151 | 
-------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/test_file_name.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | 
 3 | 
 4 | from pathlib import Path
 5 | 
 6 | from chained_mode_time_tool import DatetimeConverter
 7 | 
 8 | 
 9 | 
10 | # (Path('/pythonlogs') / Path(f'{DatetimeConverter().date_str}.0002.tn.log')).touch()
11 | 
12 | 
13 | for f in Path('/pythonlogs').glob('????-??-??.????.tn.log'):
14 |     print(f.name)
15 |     print(f.stat().st_mtime)
16 | 
17 | print(Path('/pythonlogs/2023-07-09.0002.tn.log').stat())
18 | 
19 | 
20 | print(int('002'))
-------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/test_log_by_current_dir_config.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | 
 3 | """
 4 | If the current folder contains an nb_log_config.py, that file in the current folder is automatically used as the configuration, taking priority.
 5 | """
 6 | 
 7 | print('a print before importing nb_log is an ordinary print')
 8 | 
 9 | from nb_log import get_logger
10 | 
11 | logger = get_logger('lalala',log_filename='jinzhifengzhuang.log',is_add_stream_handler=False,)
12 | 
13 | logger.debug('debug is green, meaning debug-level output, the code is OK')
14 | logger.info('info is sky blue, the log is normal')
15 | logger.warning('yellow means there is a warning')
16 | logger.error('pink means the code has an error')
17 | logger.critical('blood red means a serious error occurred')
18 | 
19 | 
20 | print('a print after importing nb_log is the enhanced, click-to-jump version')
21 | 
22 | 
23 | 
24 | import time
25 | t1 = time.time()
26 | for i in range(100000):
27 |     # print(i)
28 |     logger.debug('debug is green, meaning debug-level output, the code is OK')
29 |     # logger.info('info is sky blue, the log is normal')
30 |     # logger.warning('yellow means there is a warning')
31 |     # logger.error('pink means the code has an error')
32 |     # logger.critical('blood red means a serious error occurred')
33 | print(time.time()-t1)
-------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/tt_print3.py: -------------------------------------------------------------------------------- 
 1 | import atexit
 2 | import os as osx
 3 | 
 4 | import queue
 5 | import sys
 6 | import threading
 7 | import time
 8 | import weakref
 9 | from pathlib import Path
10 | import multiprocessing
11 | from chained_mode_time_tool import DatetimeConverter
12 | 
13 | 
14 | class FileWritter:
15 |     _lock = threading.Lock()
16 |     need_write_2_file = True
17 | 
18 |     def __init__(self, file_name: str, log_path='/pythonlogs'):
19 |         if self.need_write_2_file:
20 |             print(f'instantiating in pid {multiprocessing.current_process().pid}')
21 |             self._file_name = file_name
22 |             self.log_path = log_path
23 |             if not Path(self.log_path).exists():
24 |                 # sprint(f'auto-creating log folder {log_path}')
25 |                 Path(self.log_path).mkdir(exist_ok=True)
26 |             self._open_file()
27 |             self._last_write_ts = time.time()
28 |             self._last_del_old_files_ts = time.time()
29 | 
30 |     @property
31 |     def file_path(self):
32 |         return Path(self.log_path) / 
Path(DatetimeConverter().date_str + '.' + self._file_name) 33 | 34 | def _open_file(self): 35 | self._f = open(self.file_path, encoding='utf8', mode='a') 36 | 37 | def _close_file(self): 38 | self._f.close() 39 | 40 | def write_2_file(self, msg): 41 | if self.need_write_2_file: 42 | with self._lock: 43 | now_ts = time.time() 44 | if now_ts - self._last_write_ts > 5: 45 | self._last_write_ts = time.time() 46 | self._close_file() 47 | self._open_file() 48 | self._f.write(msg) 49 | self._f.flush() 50 | if now_ts - self._last_del_old_files_ts > 300: 51 | self._last_del_old_files_ts = time.time() 52 | self._delete_old_files() 53 | 54 | def _delete_old_files(self): 55 | for i in range(10, 100): 56 | file_path = Path(self.log_path) / Path(DatetimeConverter(time.time() - 86400 * i).date_str + '.' + self._file_name) 57 | try: 58 | file_path.unlink() 59 | except FileNotFoundError: 60 | pass 61 | 62 | def start_bulk_write(self): 63 | pass 64 | 65 | 66 | class BulkFileWrite(FileWritter): 67 | # q = queue.SimpleQueue() 68 | # q = queue.Queue() 69 | # q = multiprocessing.Queue() 70 | 71 | pid_filename__queue_map = {} 72 | _get_queue_lock = threading.Lock() 73 | 74 | @property 75 | def queue(self): 76 | with self._get_queue_lock: 77 | pid = multiprocessing.current_process().pid 78 | if (pid,self._file_name) not in self.__class__.pid_filename__queue_map: 79 | self.__class__.pid_filename__queue_map[(pid,self._file_name)] = queue.SimpleQueue() 80 | print(self.__class__.pid_filename__queue_map) 81 | return self.__class__.pid_filename__queue_map[(pid,self._file_name)] 82 | 83 | 84 | 85 | def __init__(self, file_name: str, log_path='/pythonlogs'): 86 | # self.q = queue.SimpleQueue() 87 | super().__init__(file_name,log_path) 88 | # atexit.register(self._at_exit) 89 | 90 | self._cleanup = self._cleanup_factory() 91 | atexit.register(self._cleanup) 92 | 93 | 94 | def write_2_file(self, msg): 95 | if self.need_write_2_file: 96 | with self._lock: 97 | self.queue.put(msg) 98 | 99 | 100 | def _bulk_write(self): 101 | while 1: 102 | self._bulk_write0() 103 | time.sleep(0.1) 104 | 105 | 106 | def _bulk_write0(self): 107 | with self._lock: 108 | msg_list = [] 109 | while 1: 110 | if not self.queue.empty(): 111 | msg_list.append(self.queue.get()) 112 | else: 113 | break 114 | if msg_list: 115 | self._close_file() 116 | self._open_file() 117 | self._f.write('\n'.join(msg_list)) 118 | self._f.flush() 119 | 120 | def _at_exit(self): 121 | print(multiprocessing.current_process().name) 122 | print('要退出了') 123 | self._bulk_write0() 124 | 125 | def close(self): 126 | self._unregister_cleanup() 127 | self._bulk_write0() 128 | 129 | 130 | def _cleanup_factory(self): 131 | """Build a cleanup clojure that doesn't increase our ref count""" 132 | _self = weakref.proxy(self) 133 | def wrapper(): 134 | try: 135 | # _self.close(timeout=0) 136 | print(f'要退出。。 {multiprocessing.current_process().name}') 137 | _self.close() 138 | except (ReferenceError, AttributeError): 139 | pass 140 | return wrapper 141 | 142 | def _unregister_cleanup(self): 143 | print(444) 144 | if getattr(self, '_cleanup', None): 145 | print(555) 146 | if hasattr(atexit, 'unregister'): 147 | print(777) 148 | atexit.unregister(self._cleanup) # pylint: disable=no-member 149 | 150 | # py2 requires removing from private attribute... 
151 | else: 152 | 153 | # ValueError on list.remove() if the exithandler no longer exists 154 | # but that is fine here 155 | try: 156 | print(666,atexit._exithandlers) 157 | atexit._exithandlers.remove( # pylint: disable=no-member 158 | (self._cleanup, (), {})) 159 | except ValueError: 160 | pass 161 | self._cleanup = None 162 | 163 | 164 | 165 | def start_bulk_write(self): 166 | threading.Thread(target=self._bulk_write,daemon=True).start() 167 | 168 | 169 | 170 | 171 | print_raw = print 172 | WORD_COLOR = 37 173 | 174 | 175 | def stdout_write(msg: str): 176 | sys.stdout.write(msg) 177 | sys.stdout.flush() 178 | 179 | 180 | def stderr_write(msg: str): 181 | sys.stderr.write(msg) 182 | sys.stderr.flush() 183 | 184 | # print_file_writter = FileWritter('xx3.test') 185 | # print_file_writter = BulkFileWrite('xx7.test') 186 | # print_file_writter.start_bulk_write() 187 | # print_file_writter.need_write_2_file=True 188 | # print(print_file_writter.need_write_2_file) 189 | def _print_with_file_line(*args, sep=' ', end='\n', file=None, flush=True, sys_getframe_n=2): 190 | args = (str(arg) for arg in args) # REMIND 防止是数字不能被join 191 | args_str = sep.join(args) + end 192 | # stdout_write(f'56:{file}') 193 | if file == sys.stderr: 194 | stderr_write(args_str) # 如 threading 模块第926行,打印线程错误,希望保持原始的红色错误方式,不希望转成蓝色。 195 | 196 | elif file in [sys.stdout, None]: 197 | # 获取被调用函数在被调用时所处代码行数 198 | fra = sys._getframe(sys_getframe_n) 199 | line = fra.f_lineno 200 | file_name = fra.f_code.co_filename 201 | fun = fra.f_code.co_name 202 | now = None 203 | for i in range(1): 204 | now = time.strftime("%H:%M:%S") 205 | 206 | # line = None 207 | # file_name = None 208 | # fun =None 209 | # now = None 210 | 211 | # sys.stdout.write(f'"{__file__}:{sys._getframe().f_lineno}" {x}\n') 212 | 1 + 2 213 | stdout_write( 214 | f'{now} "{file_name}:{line}" {fun} {args_str} ') 215 | # print_file_writter.write_2_file(f'{now} "{file_name}:{line}" {fun} {args_str} ') 216 | 217 | else: # 例如traceback模块的print_exception函数 file的入参是 <_io.StringIO object at 0x00000264F2F065E8>,必须把内容重定向到这个对象里面,否则exception日志记录不了错误堆栈。 218 | pass 219 | 220 | 221 | # noinspection PyProtectedMember,PyUnusedLocal,PyIncorrectDocstring,DuplicatedCode 222 | def nb_print(*args, sep=' ', end='\n', file=None, flush=True): 223 | """ 224 | 超流弊的print补丁 225 | :param x: 226 | :return: 227 | """ 228 | _print_with_file_line(*args, sep=sep, end=end, file=file, flush=flush, sys_getframe_n=2) 229 | 230 | def tt1(): 231 | print_file_writter = BulkFileWrite('xx8.test') 232 | print_file_writter.start_bulk_write() 233 | print_file_writter.need_write_2_file = True 234 | print(print_file_writter.need_write_2_file) 235 | 236 | pid = multiprocessing.current_process().pid 237 | t1 = time.time() 238 | def f(): 239 | # import nb_log 240 | 241 | for i in range(500): 242 | msg = f'{pid} {i} hh \n' 243 | # nb_print(msg*1) 244 | print_file_writter.write_2_file(msg*1) 245 | 246 | f() 247 | 248 | 249 | print(time.time() -t1) 250 | 251 | def tt2(): 252 | import logging 253 | logger = logging.getLogger('abcd') 254 | 255 | fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s - "%(filename)s %(lineno)d -" ', "%Y-%m-%d %H:%M:%S") 256 | 257 | sh = logging.StreamHandler() 258 | sh.setFormatter(fmt) 259 | logger.addHandler(sh) 260 | 261 | fh = logging.FileHandler('testlog3.log') 262 | fh.setFormatter(fmt) 263 | logger.addHandler(fh) 264 | 265 | t1 = time.time() 266 | for i in range(50000): 267 | logger.error(f'xxxxxx{i}'*20) 268 | print(time.time() - t1) 269 | 270 | 271 | 
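# Aside: the _cleanup_factory/_unregister_cleanup pattern above, reduced to a standalone
# sketch. Registering a bound method with atexit would keep the instance alive until
# interpreter shutdown; registering a closure over a weakref.proxy avoids that. This is
# an illustration only, not nb_log's actual code, and the Flusher name is hypothetical:

import atexit
import weakref


class Flusher:
    def __init__(self):
        self._cleanup = self._make_cleanup()
        atexit.register(self._cleanup)

    def _make_cleanup(self):
        proxy = weakref.proxy(self)  # atexit keeps no strong reference to self

        def cleanup():
            try:
                proxy.flush()
            except ReferenceError:  # the instance was garbage-collected already
                pass

        return cleanup

    def flush(self):
        print('flushed at exit')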
@atexit.register 272 | def ex1(): 273 | print('over 1') 274 | 275 | @atexit.register 276 | def ex2(): 277 | time.sleep(0.2) 278 | print('over 2') 279 | 280 | if __name__ == '__main__': 281 | # from auto_run_on_remote import run_current_script_on_remote 282 | # run_current_script_on_remote() 283 | # print(osx.getgid()) 284 | # tt1() 285 | 286 | multiprocessing.Process(target=tt1).start() 287 | multiprocessing.Process(target=tt1).start() 288 | time.sleep(0.3) 289 | print(BulkFileWrite.pid_filename__queue_map) 290 | -------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/tt_print3b.py: -------------------------------------------------------------------------------- 1 | import atexit 2 | import os 3 | 4 | import queue 5 | import sys 6 | import threading 7 | import time 8 | from pathlib import Path 9 | import multiprocessing 10 | from chained_mode_time_tool import DatetimeConverter 11 | 12 | 13 | class FileWritter: 14 | _lock = threading.Lock() 15 | need_write_2_file = True 16 | 17 | def __init__(self, file_name: str, log_path='/pythonlogs'): 18 | if self.need_write_2_file: 19 | print(f'shilihua {multiprocessing.current_process().pid}') 20 | self._file_name = file_name 21 | self.log_path = log_path 22 | if not Path(self.log_path).exists(): 23 | # sprint(f'自动创建日志文件夹 {log_path}') 24 | Path(self.log_path).mkdir(exist_ok=True) 25 | self._open_file() 26 | self._last_write_ts = time.time() 27 | self._last_del_old_files_ts = time.time() 28 | 29 | def _open_file(self): 30 | self.file_path = Path(self.log_path) / Path(DatetimeConverter().date_str + '.' + self._file_name) 31 | self._f = open(self.file_path, encoding='utf8', mode='a') 32 | 33 | def _close_file(self): 34 | self._f.close() 35 | 36 | def write_2_file(self, msg): 37 | if self.need_write_2_file: 38 | with self._lock: 39 | now_ts = time.time() 40 | if now_ts - self._last_write_ts > 5: 41 | self._last_write_ts = time.time() 42 | self._close_file() 43 | self._open_file() 44 | self._f.write(msg) 45 | self._f.flush() 46 | if now_ts - self._last_del_old_files_ts > 300: 47 | self._last_del_old_files_ts = time.time() 48 | self._delete_old_files() 49 | 50 | def _delete_old_files(self): 51 | for i in range(10, 100): 52 | file_path = Path(self.log_path) / Path(DatetimeConverter(time.time() - 86400 * i).date_str + '.' 
+ self._file_name) 53 | try: 54 | file_path.unlink() 55 | except FileNotFoundError: 56 | pass 57 | 58 | def start_bulk_write(self): 59 | pass 60 | 61 | 62 | 63 | class NyDaemonThread(threading.Thread): 64 | def _stop(self): 65 | lock = self._tstate_lock 66 | if lock is not None: 67 | assert not lock.locked() 68 | self._is_stopped = True 69 | self._tstate_lock = None 70 | if not self.daemon: 71 | print(1067, 'yyyy') 72 | with threading._shutdown_locks_lock: 73 | # Remove our lock and other released locks from _shutdown_locks 74 | threading._maintain_shutdown_locks() 75 | 76 | 77 | class BulkFileWrite(FileWritter): 78 | 79 | q =queue.SimpleQueue() 80 | 81 | def __init__(self, file_name: str, log_path='/pythonlogs'): 82 | super().__init__(file_name,log_path) 83 | atexit.register(self._at_exit) 84 | 85 | 86 | def write_2_file(self, msg): 87 | if self.need_write_2_file: 88 | with self._lock: 89 | self.q.put(msg) 90 | 91 | 92 | def _bulk_write(self): 93 | while 1: 94 | self._bulk_write0() 95 | time.sleep(0.1) 96 | 97 | def _at_exit(self): 98 | pid = multiprocessing.current_process().name 99 | print(f'要退出 {pid}') 100 | self._bulk_write0() 101 | 102 | def _bulk_write0(self): 103 | with self._lock: 104 | msg_list = [] 105 | while 1: 106 | if not self.q.empty(): 107 | msg_list.append(self.q.get()) 108 | else: 109 | break 110 | if msg_list: 111 | self._close_file() 112 | self._open_file() 113 | self._f.write('\n'.join(msg_list)) 114 | self._f.flush() 115 | 116 | 117 | 118 | def start_bulk_write(self): 119 | NyDaemonThread(target=self._bulk_write,daemon=True).start() 120 | 121 | 122 | 123 | 124 | print_raw = print 125 | WORD_COLOR = 37 126 | 127 | 128 | def stdout_write(msg: str): 129 | sys.stdout.write(msg) 130 | sys.stdout.flush() 131 | 132 | 133 | def stderr_write(msg: str): 134 | sys.stderr.write(msg) 135 | sys.stderr.flush() 136 | 137 | # print_file_writter = FileWritter('xx3.test') 138 | print_file_writter = BulkFileWrite('xx3.test') 139 | print_file_writter.start_bulk_write() 140 | print_file_writter.need_write_2_file=True 141 | print(print_file_writter.need_write_2_file) 142 | def _print_with_file_line(*args, sep=' ', end='\n', file=None, flush=True, sys_getframe_n=2): 143 | args = (str(arg) for arg in args) # REMIND 防止是数字不能被join 144 | args_str = sep.join(args) + end 145 | # stdout_write(f'56:{file}') 146 | if file == sys.stderr: 147 | stderr_write(args_str) # 如 threading 模块第926行,打印线程错误,希望保持原始的红色错误方式,不希望转成蓝色。 148 | 149 | elif file in [sys.stdout, None]: 150 | # 获取被调用函数在被调用时所处代码行数 151 | fra = sys._getframe(sys_getframe_n) 152 | line = fra.f_lineno 153 | file_name = fra.f_code.co_filename 154 | fun = fra.f_code.co_name 155 | now = None 156 | for i in range(1): 157 | now = time.strftime("%H:%M:%S") 158 | 159 | # line = None 160 | # file_name = None 161 | # fun =None 162 | # now = None 163 | 164 | # sys.stdout.write(f'"{__file__}:{sys._getframe().f_lineno}" {x}\n') 165 | 1 + 2 166 | stdout_write( 167 | f'{now} "{file_name}:{line}" {fun} {args_str} ') 168 | # print_file_writter.write_2_file(f'{now} "{file_name}:{line}" {fun} {args_str} ') 169 | 170 | else: # 例如traceback模块的print_exception函数 file的入参是 <_io.StringIO object at 0x00000264F2F065E8>,必须把内容重定向到这个对象里面,否则exception日志记录不了错误堆栈。 171 | pass 172 | 173 | 174 | # noinspection PyProtectedMember,PyUnusedLocal,PyIncorrectDocstring,DuplicatedCode 175 | def nb_print(*args, sep=' ', end='\n', file=None, flush=True): 176 | """ 177 | 超流弊的print补丁 178 | :param x: 179 | :return: 180 | """ 181 | _print_with_file_line(*args, sep=sep, end=end, file=file, 
flush=flush, sys_getframe_n=2)
182 | 
183 | def tt1():
184 |     pid = multiprocessing.current_process().name
185 |     t1 = time.time()
186 |     def f():
187 |         # import nb_log
188 | 
189 |         for i in range(10000):
190 |             msg = f'{pid} {i} hh'
191 |             # nb_print(msg*1)
192 |             print_file_writter.write_2_file(msg*1)
193 | 
194 |     f()
195 | 
196 | 
197 |     print(time.time() -t1)
198 | 
199 | def tt2():
200 |     import logging
201 |     logger = logging.getLogger('abcd')
202 | 
203 |     fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s - "%(filename)s %(lineno)d -" ', "%Y-%m-%d %H:%M:%S")
204 | 
205 |     sh = logging.StreamHandler()
206 |     sh.setFormatter(fmt)
207 |     logger.addHandler(sh)
208 | 
209 |     fh = logging.FileHandler('testlog3.log')
210 |     fh.setFormatter(fmt)
211 |     logger.addHandler(fh)
212 | 
213 |     t1 = time.time()
214 |     for i in range(10000):
215 |         logger.error(f'xxxxxx{i}'*20)
216 |     print(time.time() - t1)
217 | 
218 | if __name__ == '__main__':
219 |     # from auto_run_on_remote import run_current_script_on_remote
220 |     # run_current_script_on_remote()
221 |     # print(osx.getgid())
222 |     # tt1()
223 |     multiprocessing.Process(target=tt1).start()
224 |     multiprocessing.Process(target=tt1).start()
225 | 
-------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/ttest_s_print2.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | # from nb_log.simple_print import sprint
 3 | import time
 4 | import datetime
 5 | import sys
 6 | str1 = ''' 2023-07-05 10:48:35 - lalala - "D:/codes/funboost/test_frame/test_nb_log/log_example2.py:15" - - ERROR - pink means the code has an error.    pink means the code has an error.    pink means the code has an error.    pink means the code has an error.'''
 7 | import logging
 8 | 
 9 | 
10 | print(time.gmtime())
11 | t1 = time.time()
12 | def f():
13 |     for i in range(10000):
14 |         fra = sys._getframe(1)
15 |         line = fra.f_lineno
16 |         file_name = fra.f_code.co_filename
17 |         fun = fra.f_code.co_name
18 | 
19 | 
20 | 
21 |         # print(file_name,line,fun,str1*3,flush=False)
22 |         sys.stdout.write(str([time.strftime('%H:%M:%S'),file_name,line,fun,str1*0]) + '\n')
23 |         # sys.stdout.write(str([time.time(), file_name, line, fun, str1 * 0]) + '\n')
24 |         sys.stdout.flush()
25 | f()
26 | 
27 | print(time.time() -t1)
-------------------------------------------------------------------------------- /tests/test_use_curretn_dir_config/ttest_timestrftime.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | import time
 3 | 
 4 | t1 = time.time()
 5 | for i in range(100000):
 6 |     now = time.strftime('%H:%M:%S')
 7 |     print(now)
 8 | 
 9 | print(time.time() -t1)
-------------------------------------------------------------------------------- /tests/test_use_differrent_file.py: -------------------------------------------------------------------------------- 
 1 | # import logging
 2 | # from nb_log import get_logger
 3 | # from loguru import logger
 4 | #
 5 | # # nb_log routes output to different files by the logger namespace `name`. Convenient.
 6 | # logger_a = get_logger('a',log_level_int=logging.DEBUG)
 7 | # logger_b = get_logger('b',log_level_int=logging.INFO)
 8 | # logger_a.debug("hee-hee a: debug will show")
 9 | # logger_a.info("hee-hee a: info will show")
10 | # logger_b.debug("hee-hee b: debug will not show")
11 | # logger_b.info("hee-hee b: info will show")
12 | #
13 | # logger_a.setLevel()
14 | # logger_a.addHandler()
15 | #
16 | # # loguru: for different features to write to different files, a message-prefix marker must be set. Inconvenient.
17 | # logger.add('./log_files/c.log',filter=lambda x: '[special-marker-c!]' in x['message'],level='DEBUG')
18 | # logger.add('./log_files/d.log',filter=lambda x: '[special-marker-d!]' in x['message'],level='INFO')
19 | # logger.debug('[special-marker-c!] hee-hee c will show')  # the message must carry the marker for console level control
20 | # logger.info('[special-marker-c!] hee-hee c will show')  # the message must carry the marker for console level control
21 | # logger.debug('[special-marker-d!] hee-hee d will not show')  # the message must carry the marker for console level control
22 | # logger.info('[special-marker-d!] hee-hee d')  # the message must carry the marker for console level control
23 | #
24 | # logger.se
25 | #
26 | #
27 | #
28 | # # # nb_log routes output to different files by the logger namespace `name`. Convenient.
29 | # # logger_a = get_logger('a',_log_filename='a.log',log_path='./log_files')
30 | # # logger_b = get_logger('b',_log_filename='b.log',log_path='./log_files')
31 | # # logger_a.info("hee-hee a")
32 | # # logger_b.info("hee-hee b")
33 | # #
34 | # # # loguru: for different features to write to different files, a message-prefix marker must be set. Inconvenient.
35 | # # logger.add('./log_files/c.log', filter=lambda x: '[special-marker-c!]' in x['message'])
36 | # # logger.add('./log_files/d.log', filter=lambda x: '[special-marker-d!]' in x['message'])
37 | # # logger.add('./log_files/e.log', )
38 | # # logger.info('[special-marker-c!] hee-hee c')  # appears in c.log and e.log; the message must carry the marker to be routed to different files
39 | # # logger.info('[special-marker-d!] hee-hee d')  # appears in d.log and e.log; the message must carry the marker to be routed to different files
40 | #
41 | 
42 | 
43 | 
44 | 
45 | 
46 | 
47 | # from nb_log import get_logger,LogManager
48 | #
49 | # # logger_a = get_logger('a',_log_filename='ax.log',log_path='./log_files')
50 | # logger_a = LogManager('a').get_logger_and_add_handlers(_log_filename='ax2.log',log_path='./log_files')
51 | # logger_a.info("hee-hee a")
52 | 
53 | 
54 | # from pathlib import Path
55 | # print((Path(__file__).parent.parent.parent.absolute()) / Path('pylogs'))
56 | 
57 | 
58 | import logging
59 | from logging import handlers
60 | 
61 | logger_raw = logging.getLogger("test_logging")
62 | 
63 | logger_raw.warning('the log is too plain and ugly, and it was not recorded to a file')
64 | 
65 | # add a console handler
66 | handler_console = logging.StreamHandler()
67 | formatter1 = logging.Formatter('%(asctime)s - %(name)s - "%(filename)s:%(lineno)d" - %(levelname)s - %(message)s',"%Y-%m-%d %H:%M:%S")
68 | handler_console.setFormatter(formatter1)
69 | logger_raw.addHandler(handler_console)
70 | 
71 | # add a file handler
72 | handler_file = logging.handlers.RotatingFileHandler('test_logging.log',mode='a')
73 | formatter2 = logging.Formatter('%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s',
74 |                                "%Y-%m-%d %H:%M:%S")
75 | handler_file.setFormatter(formatter2)
76 | logger_raw.addHandler(handler_file)
77 | 
78 | 
79 | logger_raw.error("the log format is better now, and it is recorded to a file")
80 | 
81 | 
82 | 
-------------------------------------------------------------------------------- /tests/test_warn.py: -------------------------------------------------------------------------------- 
 1 | import sys
 2 | import warnings
 3 | import logging
 4 | # logging.captureWarnings(True)
 5 | # # logging.debug('aaa')
 6 | # warnings.simplefilter('always')
 7 | # import nb_log
 8 | # nb_log.get_logger('',log_level_int=logging.WARNING)
 9 | 
10 | logging.getLogger('asd').info(66666666)
11 | 
12 | warnings.warn('sss',DeprecationWarning,1)
13 | warnings.warn('sss',DeprecationWarning,1)
14 | 
15 | print(sys.__dict__)
-------------------------------------------------------------------------------- /tests/test_write_eror_file.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | from nb_log import get_logger
 3 | 
 4 | 
 5 | logger = get_logger('abcdcd',
 6 |                     log_filename='f5b.log',
 7 |                     # error_log_filename='f4b_error.log'
 8 |                     )
 9 | 
10 | # logger = get_logger('abcdcd',
11 | #                     _log_filename='f4b.log',
12 | #                     error_log_filename='f4b_error.log')
13 | 
14 | logger.info('normal logs go into the normal file and the error file')
15 | 
16 | logger.error('error logs are additionally written to the separate error file')
17 | 
18 | print(logger.handlers)
-------------------------------------------------------------------------------- 
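test_write_eror_file.py above routes ERROR-and-above records into a second file via nb_log's error_log_filename argument. The same split can be had from the standard library alone by giving the error file handler a higher level; a minimal sketch (file names here are placeholders):

import logging

logger = logging.getLogger('split_demo')
logger.setLevel(logging.DEBUG)

all_handler = logging.FileHandler('all.log')  # receives every record
error_handler = logging.FileHandler('error.log')
error_handler.setLevel(logging.ERROR)  # receives ERROR and above only

fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for h in (all_handler, error_handler):
    h.setFormatter(fmt)
    logger.addHandler(h)

logger.info('written to all.log only')
logger.error('written to all.log and error.log')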
/tests/tests_othres/nb_log_demo.py: -------------------------------------------------------------------------------- 
 1 | from nb_log import get_logger
 2 | import time
 3 | import random
 4 | 
 5 | # create a logger instance
 6 | logger = get_logger('nb_log_demo', log_path='logs')
 7 | 
 8 | def simulate_task():
 9 |     """Simulate a task that produces logs at different levels"""
10 |     # record some info logs
11 |     logger.info('task started')
12 | 
13 |     # simulate some processing
14 |     for i in range(3):
15 |         # record debug information
16 |         logger.debug(f'processing step {i+1}')
17 | 
18 |         # randomly simulate warning conditions
19 |         if random.random() < 0.3:
20 |             logger.warning(f'step {i+1} produced a warning condition')
21 | 
22 |         # randomly simulate error conditions
23 |         if random.random() < 0.1:
24 |             try:
25 |                 raise ValueError(f'an error occurred in step {i+1}')
26 |             except Exception as e:
27 |                 logger.error(f'processing failed: {str(e)}', exc_info=True)
28 | 
29 |         time.sleep(1)  # simulate processing time
30 | 
31 |     # record completion
32 |     logger.info('task finished')
33 | 
34 | if __name__ == '__main__':
35 |     # run the task a few times to generate varied logs
36 |     for _ in range(3):
37 |         simulate_task()
38 |         time.sleep(2)  # interval between tasks
39 | 
-------------------------------------------------------------------------------- /tests/tests_othres/nb_log_simple_demo.py: -------------------------------------------------------------------------------- 
 1 | from nb_log import LoggerMixin
 2 | 
 3 | class MyClass(LoggerMixin):
 4 |     def do_something(self):
 5 |         self.logger.info('this is an info log')
 6 |         self.logger.warning('this is a warning log')
 7 |         self.logger.error('this is an error log')
 8 | 
 9 | if __name__ == '__main__':
10 |     obj = MyClass()
11 |     obj.do_something()
12 | 
-------------------------------------------------------------------------------- /tests/tests_othres/test_funboost.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | 
 3 | # from funboost import boost,BoosterParams
 4 | 
 5 | # @boost('test_queue')
 6 | # def add_numbers(x: int, y: int) -> int:
 7 | #     return x + y
 8 | 
 9 | # if __name__ == '__main__':
10 | #     # Start the consumer
11 | #     add_numbers.consume()
12 | 
13 | #     result = add_numbers.push(3, 4)
14 | #     print(f"The sum is: {result.result()}")
15 | from funboost import boost, BoosterParams
16 | from funboost.timing_job.apscheduler_use_redis_store import funboost_background_scheduler_redis_store
17 | 
18 | class MyBoosterParams(BoosterParams):
19 |     max_retry_times: int = 3  # set the maximum number of retries to 3
20 |     function_timeout: int = 10  # set the timeout to 10 seconds
21 | 
22 | @boost(MyBoosterParams(queue_name='add_numbers_queue'))
23 | def add_numbers(x: int, y: int) -> int:
24 |     """Add two numbers."""
25 |     return x + y
26 | 
27 | if __name__ == '__main__':
28 |     # define the scheduled job
29 |     # Start the scheduler
30 |     funboost_background_scheduler_redis_store.start()
31 | 
32 |     funboost_background_scheduler_redis_store.add_push_job(
33 |         func=add_numbers,
34 |         args=(1, 2),
35 |         trigger='date',  # use a date trigger
36 |         run_date='2025-01-16 16:03:40',  # set the run time
37 |         id='add_numbers_job'  # job id
38 |     )
39 | 
40 |     # start the consumer
41 |     add_numbers.consume()
42 | 
-------------------------------------------------------------------------------- /tests/utput.txt: -------------------------------------------------------------------------------- 
 1 | 11122
-------------------------------------------------------------------------------- /tests/彩色虚线字体生成.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | 
 3 | from gradient_figlet import *
 4 | 
 5 | 
 6 | 
--------------------------------------------------------------------------------
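The next file times print against file writes. Most of print's cost is the per-line flush to the console; batching many lines into a single write narrows the gap considerably (this is also the idea behind USE_BULK_STDOUT_ON_WINDOWS in nb_log_config.py above). A small illustrative sketch of that effect in plain Python, not nb_log's implementation; timings vary by terminal and OS:

import sys
import time

N = 100_000
line = 'x' * 100 + '\n'

t = time.time()
for _ in range(N):
    sys.stdout.write(line)  # one write call per line
print('unbatched:', time.time() - t, file=sys.stderr)

t = time.time()
sys.stdout.write(line * N)  # a single large write for all N lines
print('batched:', time.time() - t, file=sys.stderr)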
 /tests/文件和屏幕写入速度对比.py: -------------------------------------------------------------------------------- 
 1 | 
 2 | 
 3 | import time
 4 | 
 5 | # print to the screen
 6 | start_time = time.time()
 7 | for i in range(1000000):
 8 |     print(str(i) * 100, end='\n')  # end='' would avoid the newline
 9 | print("Time for print:", time.time() - start_time)
10 | 
11 | # write to a file
12 | start_time = time.time()
13 | with open('output.txt', 'w') as f:
14 |     for i in range(1000000):
15 |         f.write(str(i) * 100 + '\n')
16 | print("Time for file write:", time.time() - start_time)
-------------------------------------------------------------------------------- /tests/禁止这样封装nb_log.py: -------------------------------------------------------------------------------- 
 1 | import nb_log
 2 | 
 3 | """
 4 | Never wrap nb_log in a utility class like this: the click-to-jump location then points into this class instead of the exact line where logger.debug/info() was called.
 5 | And never hard-code the logger name; multiple namespaces are the essence of logging.
 6 | With this wrapper, all logs can only be written to mylog.log and can never go to different files.
 7 | """
 8 | class LogUtil:
 9 |     def __init__(self):
10 |         self.logger = nb_log.get_logger('xx',log_filename='mylog.log')
11 | 
12 |     def debug(self,msg):
13 |         self.logger.debug(msg)
14 | 
15 |     def info(self, msg):
16 |         self.logger.info(msg)
17 | 
18 |     def warning(self, msg):
19 |         self.logger.warning(msg)
20 | 
21 |     def error(self, msg):
22 |         self.logger.error(msg)
23 | 
24 |     def critical(self, msg):
25 |         self.logger.critical(msg)
26 | 
27 | if __name__ == '__main__':
28 |     print('The logger name is hard-coded, so there is no per-instance control — very poor. All logs can only be written to mylog.log, never to different files.')
29 |     LogUtil().debug('cannot jump to this line; the jump lands in the utility class instead')
30 |     LogUtil().info('cannot jump to this line; the jump lands in the utility class instead')
31 |     LogUtil().warning('cannot jump to this line; the jump lands in the utility class instead')
32 |     LogUtil().error('cannot jump to this line; the jump lands in the utility class instead')
33 |     LogUtil().critical('cannot jump to this line; the jump lands in the utility class instead')
-------------------------------------------------------------------------------- /tests/虚线字体生成.py: -------------------------------------------------------------------------------- 
 1 | import nb_log
 2 | from pyfiglet import Figlet,FigletFont
 3 | 
 4 | fonts_str = '''univers
 5 | thin
 6 | stop
 7 | stellar
 8 | starwars
 9 | speed
10 | slant
11 | roman
12 | doh
13 | larry3d
14 | ogre
15 | smisome1
16 | isometric1
17 | isometric2
18 | isometric3
19 | isometric4
20 | '''
21 | 
22 | # for font in FigletFont().getFonts():
23 | for font in fonts_str.split('\n'):
24 |     # f = Figlet(font="slant", width=300)  # font and width; both may be omitted for the defaults
25 |     font = font.replace(' ', '')
26 |     if font == '':
27 |         continue
28 |     # print('font:',font)
29 |     f = Figlet(font=font, width=1080,)  # font and width; both may be omitted for the defaults
30 | 
31 |     # print(f.renderText("FUNBOOST"))
32 |     nb_log.defaul_logger.warning(f'{font} \n\n' + f.renderText("funboost") + '\n\n')
33 |     """
34 |     _______________________
35 |     /_  __/ ____/ ___/_  __/
36 |      / / / __/  \__ \ / /
37 |     / / / /___ ___/ // /
38 |     /_/ /_____//____//_/
39 | 
40 |     """
41 | 
42 | 
43 | '''
44 | univers
45 | thin
46 | stop
47 | stellar
48 | starwars
49 | speed
50 | slant
51 | roman
52 | doh
53 | '''
-------------------------------------------------------------------------------- /tests/错误方式loguru不同功能写入不同文件.py: -------------------------------------------------------------------------------- 
 1 | import sys
 2 | import logging
 3 | from loguru import logger
 4 | 
 5 | format = ("{time:YYYY-MM-DD HH:mm:ss.SSS} | {extra[namespace]} | "
 6 |           "{level: <8} | "
 7 |           "{name}:{function}:{line} - {message}")
 8 | 
 9 | 
10 | logger.add("file_A1.log", )  # the same sink is added six times: every record is written to file_A1.log six times over — this is the mistake the file demonstrates
11 | logger.add("file_A1.log", )
12 | logger.add("file_A1.log", )
13 | logger.add("file_A1.log", )
14 | logger.add("file_A1.log", )
15 | logger.add("file_A1.log", )
16 | # logger.add(('file_B1.log'))
17 | 
18 | 
19 | logger.info('hahha2')
20 | 
-------------------------------------------------------------------------------- 
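The file above shows the wrong way: adding the same sink six times writes every record to file_A1.log six times. The idiomatic loguru routing uses bind() plus a filter on record['extra'] instead of message-prefix markers; a minimal sketch:

from loguru import logger

logger.add('file_a.log', filter=lambda record: record['extra'].get('task') == 'a')
logger.add('file_b.log', filter=lambda record: record['extra'].get('task') == 'b')

logger_a = logger.bind(task='a')  # every record from logger_a carries extra={'task': 'a'}
logger_b = logger.bind(task='b')

logger_a.info('goes to file_a.log only')
logger_b.info('goes to file_b.log only')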
/ume: -------------------------------------------------------------------------------- 
 1 | NAME
 2 |     __main__.py --project_root_path=D:/codes/funboost
 3 | 
 4 | SYNOPSIS
 5 |     __main__.py --project_root_path=D:/codes/funboost - COMMAND | VALUE
 6 | 
 7 | COMMANDS
 8 |     COMMAND is one of the following:
 9 | 
10 |      clear
11 |        Empty several queues; example: clear test_cli1_queue1 test_cli1_queue2  # empties the two message queues
12 | 
13 |      consume
14 |        Start consuming from several named queues; example: consume queue1 queue2
15 | 
16 |      m_consume
17 |        Start consuming with multiprocessing, several dedicated processes per queue; example: m_consume --queue1=2 --queue2=3  # queue1 is consumed by 2 dedicated processes, queue2 by 3
18 | 
19 |      multi_process_consume
20 |        Start consuming with multiprocessing, several dedicated processes per queue; example: m_consume --queue1=2 --queue2=3  # queue1 is consumed by 2 dedicated processes, queue2 by 3
21 | 
22 |      publish
23 |        publish a message to a queue; example: given the function def add(x,y) on queue add_queue, publish the sum 1 + 2 with: publish add_queue "{'x':1,'y':2}"
24 | 
25 |      push
26 |        push a message to a queue; example: given the function def add(x,y) on queue add_queue, publish the sum 1 + 2 with: push add_queue 1 2; or push add_queue --x=1 --y=2; or push add_queue -x 1 -y 2;
27 | 
28 | VALUES
29 |     VALUE is one of the following:
30 | 
31 |      import_modules_str
--------------------------------------------------------------------------------
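The push command above is the CLI counterpart of calling .push() on a boosted function. Assuming the add_numbers function and queue from tests/tests_othres/test_funboost.py, the two are equivalent; a sketch:

# CLI (per the help text above):  push add_numbers_queue --x=1 --y=2
# Python equivalent:
from funboost import boost, BoosterParams

@boost(BoosterParams(queue_name='add_numbers_queue'))
def add_numbers(x: int, y: int) -> int:
    return x + y

if __name__ == '__main__':
    add_numbers.push(1, 2)  # enqueue one call; a separate consume() process executes it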