├── .gitignore ├── MANIFEST.in ├── Pipfile ├── Pipfile.lock ├── README.md ├── doc ├── B树.md ├── DNS域名解析过程.md ├── Hbase原理、基本概念、基本架构.md ├── PostgreSQL with Your Ruby on Rails Application on Ubuntu 14.04.md ├── Python中内置的NotImplemented类型.md ├── SSH无密码登录.md ├── Ubuntu下面包依赖损坏的解决unmet dependencies.md ├── WeasyPrint导出中文文档乱码及使用报错.md ├── Xargs用法详解.md ├── abc模块的使用方法.md ├── docker 命令.md ├── docker 安装.md ├── docker-compose安装及相关问题.md ├── elasticsearch cluster配置.md ├── elasticsearch学习之----(1)基础概念.md ├── elasticsearch学习之----(2)安装.md ├── elasticsearch学习之----(3)探索你的集群.md ├── epoll使用案例.md ├── es ruby api.md ├── git windows github两个账号同时使用配置.md ├── git 命令.md ├── git 新建仓库后上传代码.md ├── git 配置 ssh 端口.md ├── google验证码识别ocr.md ├── hadoop搭建.md ├── hbase集群搭建.md ├── https加密过程.md ├── http缓存控制.md ├── javascript的fetchapi使用方法.md ├── kafka.md ├── kong相关命令.md ├── mongodb ruby api.md ├── nginx相关.md ├── numpy中轴的含义.md ├── pHash安装.md ├── pip 相关指令.md ├── pip 相关错误.md ├── postgresql network设置.md ├── postgresql pg_ctl command not found..md ├── postgresql 启动失败.md ├── pypi使用方法.md ├── python MRO 顺序.md ├── python pdb使用方法.md ├── python 模块安装.md ├── python元类,类,实例.md ├── python描述符.md ├── rails时区设置.md ├── scrapy content-length 错误.md ├── scrapy windows安装.md ├── seaweedFS.md ├── select 文件描述符大小超限.md ├── sql.md ├── tornado-协程实现过程.md ├── tyk安装.md ├── ubuntu systemd配置.md ├── ubuntu下docker push 私有仓库 timeout 解决方法.md ├── ubuntu下编码问题.md ├── ubuntu安装ShadowSocks.md ├── ubuntu安装postgresql.md ├── ubuntu安装vpn.md ├── ubuntu配置网络.md ├── uwsgi相关问题.md ├── werkzeug.local.LocalPorxy源码引出的关于python私有属性的知识点.md ├── windows 安装scrapy.md ├── xpath中text()和string()的本质区别.md ├── xpath相关.md ├── xss攻击.md ├── 一个文件被两个进程同时写入的情况.md ├── 云计算是什么.md ├── 关于__getattribute__.md ├── 分布式系统选举算法bully.md ├── 初探Node.js的异步实现.md ├── 前中后缀表达式.md ├── 在mac上解压rar压缩包.md ├── 多版本python pyenv.md ├── 如何在Docker容器内外互相拷贝数据.md ├── 子进程与信号.md ├── 安装配置 Docker Registry.md ├── 常用ubuntu指令.md ├── 我开发的比较前价值的项目和模块方法.md ├── 数据库索引.md ├── 树的旋转.md ├── 树莓派手动指定静态IP和DNS.md ├── 正向断言备忘.md ├── 获取https免费证书及配置.md └── 解决ERR_CONTENT_LENGTH_MISMATCH.md ├── docs ├── Makefile ├── _build │ ├── doctrees │ │ ├── environment.pickle │ │ ├── index.doctree │ │ ├── toolkit.doctree │ │ └── toolkit │ │ │ └── processor.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _modules │ │ ├── index.html │ │ ├── toolkit.html │ │ └── toolkit │ │ │ ├── __init__.html │ │ │ └── processor.html │ │ ├── _sources │ │ ├── index.rst.txt │ │ ├── toolkit.rst.txt │ │ └── toolkit │ │ │ └── processor.rst.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── alabaster.css │ │ ├── basic.css │ │ ├── comment-bright.png │ │ ├── comment-close.png │ │ ├── comment.png │ │ ├── custom.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── down-pressed.png │ │ ├── down.png │ │ ├── file.png │ │ ├── jquery-3.2.1.js │ │ ├── jquery.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.3.1.js │ │ ├── underscore.js │ │ ├── up-pressed.png │ │ ├── up.png │ │ └── websupport.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── search.html │ │ ├── searchindex.js │ │ ├── toolkit.html │ │ └── toolkit │ │ └── processor.html ├── conf.py ├── index.rst ├── make.bat ├── toolkit.rst └── toolkit │ └── processor.rst ├── gulpfile.js ├── package.json ├── requirements.txt ├── resources ├── monitors.jpg ├── toolkit.graffle └── translator.jpg ├── setup.cfg.tpl ├── setup.py ├── tests ├── conftest.py ├── test_async_context.py ├── test_cache.py ├── test_components │ └── test_processor.py ├── test_service │ ├── 
test_combine.py │ └── test_monitors.py ├── test_settings │ ├── test_frozen.py │ └── test_settings.py ├── test_singleton.py ├── test_structures │ ├── test_linked_list.py │ └── test_queue.py ├── test_toolkit.py ├── test_tools │ ├── test_except_context.py │ ├── test_manager.py │ ├── test_package_control.py │ ├── test_redis_tools.py │ └── test_timer.py ├── test_translate.py └── testdata │ ├── __init__.py │ └── settings.py ├── toolkit ├── __init__.py ├── async_context.py ├── components │ ├── __init__.py │ ├── multi_worker.py │ └── processor.py ├── service │ ├── __init__.py │ ├── combine.py │ ├── console.py │ ├── monitors.py │ └── plugins.py ├── settings │ ├── __init__.py │ ├── frozen.py │ └── settings.py ├── singleton.py ├── structures │ ├── __init__.py │ ├── linked_list.py │ ├── queues.py │ └── thread_safe_collections.py ├── tools │ ├── __init__.py │ ├── file_buffalo.py │ ├── github-markdown.css │ ├── managers.py │ ├── markdown_helper.py │ ├── package_control.py │ └── redis_tools.py └── translator │ ├── __init__.py │ ├── sites.py │ ├── token_acquirer.py │ └── translate_adapter.py └── tools ├── bubble_sort.py ├── coroutine.py ├── curl_to_requests.py ├── get_charged.py ├── hash_set.py ├── heap_sort.py ├── insert_sort.py ├── k_word_sub_string.py ├── lru.py ├── merge_link.py ├── merge_sort.py ├── merge_sort2.py ├── odd_even.py ├── operator_tree.py ├── package_problen.py ├── quick_sort.py ├── quick_sort2.py ├── review.py ├── rm_images_and_container.py ├── scrapy_header_parser.py ├── secret_word.py ├── send_request.py ├── tree.py └── wine.py /.gitignore: -------------------------------------------------------------------------------- 1 | .coverage 2 | *.pyc 3 | *egg-info 4 | dist/ 5 | .pytest_cache 6 | .eggs/ 7 | .idea/ 8 | htmlcov/ 9 | *log 10 | *logs 11 | *lock.json 12 | node_modules/ 13 | setup.cfg -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include requirements.txt 3 | include toolkit/tools/github-markdown.css -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | pytest-cov = "*" 8 | pytest-apistellar = "*" 9 | pytest-asyncio = "*" 10 | pytest = "*" 11 | 12 | [packages] 13 | python-json-logger = "*" 14 | redis = "*" 15 | kafka-python = "*" 16 | requests = "*" 17 | future = "*" 18 | markdown = "*" 19 | toolkity = {editable = true,path = "."} 20 | urllib3 = ">=1.24.2" 21 | contextvars = "*" 22 | 23 | [requires] 24 | python_version = "3.6" 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 参考资料 4 | 5 | [拒绝重复造轮子!python实用工具类及函数大推荐!](https://zhuanlan.zhihu.com/p/31644562) 6 | 7 | [厉害了word哥,交互式实时监控调整python程序执行!](https://zhuanlan.zhihu.com/p/32386023) 8 | 9 | [深入python协程的实现,带你一层一层揭开协程的神秘面纱!](https://zhuanlan.zhihu.com/p/33739573) 10 | -------------------------------------------------------------------------------- /doc/B树.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://p.blog.csdn.net/images/p_blog_csdn_net/manesking/1.JPG)) 2 | 参考资料[B树、B-树、B+树、B*树](http://www.cnblogs.com/oldhorse/archive/2009/11/16/1604009.html) 
3 | 4 | 参考视频[B-树](http://v.youku.com/v_show/id_XMjI3NjQyNTQ4.html) 5 | 6 | [comment]: (b-tree) 7 | [comment]: (b树介绍) 8 | [comment]: (B树) 9 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/DNS域名解析过程.md: -------------------------------------------------------------------------------- 1 | 参考资料 2 | 3 | [DNS域名解析过程](http://www.360doc.com/content/13/0527/17/11253639_288596772.shtml) 4 | [comment]: <tags> (dns) 5 | [comment]: <description> (dns解析过程) 6 | [comment]: <title> (DNS域名解析过程) 7 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/Hbase原理、基本概念、基本架构.md: -------------------------------------------------------------------------------- 1 | 参考资料 2 | 3 | [Hbase原理、基本概念、基本架构](http://blog.csdn.net/woshiwanxin102213/article/details/17584043) 4 | [comment]: <tags> (hbase) 5 | [comment]: <description> (Hbase基本原理,架构等等) 6 | [comment]: <title> (Hbase原理、基本概念、基本架构) 7 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/PostgreSQL with Your Ruby on Rails Application on Ubuntu 14.04.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://s7.51cto.com/wyfs02/M01/74/4B/wKiom1YYf-qD2aXaAAQQhNGfnwc576.jpg-wh_651x-s_1733757554.jpg)) 2 | #### 创建rails应用 3 | - 在根目录创建项目,使用-d postgresql 来声明使用postgresql作为数据库 4 | ```bash 5 | cd ~ 6 | rails new star -d postgresql 7 | ``` 8 | - 然后进入应用目录 9 | ```bash 10 | cd star 11 | ``` 12 | - 接下来配置应用数据库连接 13 | #### 配置数据库连接 14 | - 打开数据库配置文件 15 | ```bash 16 | vi config/database.yml 17 | ``` 18 | - 在default片断下,找到pool: 5那一行,在其下方添加如下信息: 19 | ```bash 20 | host: localhost 21 | username: star 22 | password:star 23 | ``` 24 | #### 创建应用数据库 25 | - 使用rake命令创建development和test数据库 26 | ```bash 27 | rake db:create 28 | ``` 29 | - 之后会生成两个数据库star_test和star_development 30 | #### 测试配置 31 | - 用来测试你的程序是正确使用了postgresql最简单的方式是尝试运行你的程序,例如运行默认开发环境,使用如下命令: 32 | ```bash 33 | rails server 34 | ``` 35 | - 这会在http://localhost:3000启动的rails程序 36 | - 如果你的程序在一个远程主机上,而且打算过一个web浏览器去访问它,那么最简单的方式就是绑定服务器的公共ip 37 | ```bash 38 | rails server --binding=192.168.200.58 39 | ``` 40 | 41 | 参考文献[How To Use PostgreSQL with Your Ruby on Rails Application on Ubuntu 14.04](https://www.digitalocean.com/community/tutorials/how-to-use-postgresql-with-your-ruby-on-rails-application-on-ubuntu-14-04) 42 | 43 | [comment]: <tags> (postgresql,ruby,rails,ubuntu) 44 | [comment]: <description> (postgresql在ubuntu上配置rails的方法) 45 | [comment]: <title> (PostgreSQL with Your Ruby on Rails Application on Ubuntu 14.04) 46 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/Python中内置的NotImplemented类型.md: -------------------------------------------------------------------------------- 1 | 参考资料[Python中内置的NotImplemented类型 2 | ](http://python.jobbole.com/80913/) -------------------------------------------------------------------------------- /doc/SSH无密码登录.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://unitedwebsoft.in/blog/wp-content/uploads/2017/06/ssh.jpg)) 2 | * 生成公钥 3 | 4 | 5 | 6 | ssh-keygen 7 | 8 | 9 | * 复制到远程主机 10 | 11 | 12 | 13 | ssh-copy-id -i ~/.ssh/id_rsa.pub longen@192.168.200.80 -p 12016 14 | 15 | 16 | 参考[ 使用ssh-keygen和ssh-copy-id三步实现SSH无密码登录 17 | ](http://blog.chinaunix.net/uid-26284395-id-2949145.html) 18 | 19 | 20 | [comment]: <tags> (ssh) 21 | [comment]: <description> 
(ssh免密登录) 22 | [comment]: <title> (SSH无密码登录) 23 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/Ubuntu下面包依赖损坏的解决unmet dependencies.md: -------------------------------------------------------------------------------- 1 | [comment]: <picture> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1517125379&di=9a91536e804ca5a63efa8c661784bc1f&imgtype=jpg&er=1&src=http%3A%2F%2Fpic1.win4000.com%2Fwallpaper%2F4%2F53a795c66d5ae.jpg)) 2 | 如下错误 3 | 4 | 5 | 6 | $ sudo apt-get install libjack0 7 | Reading package lists... Done 8 | Building dependency tree 9 | Reading state information... Done 10 | You might want to run 'apt-get -f install' to correct these: 11 | The following packages have unmet dependencies: 12 | libbluetooth-dev : Depends: libbluetooth3 (= 4.101-0ubuntu13.1) but 4.101.1-0indt2 is to be installed 13 | libjack-jackd2-0 : Conflicts: libjack-0.116 14 | Conflicts: libjack0 but 1:0.121.3+20120418git75e3e20b-2.1ubuntu1 is to be installed 15 | libjack-jackd2-0:i386 : Conflicts: libjack-0.116 16 | Conflicts: libjack0 but 1:0.121.3+20120418git75e3e20b-2.1ubuntu1 is to be installed 17 | libjack0 : Conflicts: libjack-0.116 18 | Conflicts: libjack-0.116:i386 19 | libpulse-dev : Depends: libpulse0 (= 1:4.0-0ubuntu11.1) but 1:4.0-0ubuntu11indt2 is to be installed 20 | libpulse0 : Breaks: libpulse0:i386 (!= 1:4.0-0ubuntu11indt2) but 1:4.0-0ubuntu11.1 is to be installed 21 | libpulse0:i386 : Breaks: libpulse0 (!= 1:4.0-0ubuntu11.1) but 1:4.0-0ubuntu11indt2 is to be installed 22 | pulseaudio : Depends: libpulse0 (= 1:4.0-0ubuntu11.1) but 1:4.0-0ubuntu11indt2 is to be installed 23 | E: Unmet dependencies. Try 'apt-get -f install' with no packages (or specify a solution). 24 | 25 | 26 | 解决方法: `apt-get -f install` 27 | 28 | 如遇到空间不足,证明/boot满了 这时使用`dpkg --get-selections|grep linux`可以查看内核 想删除过期内核是不可能的, 29 | 因为没有依赖问题还没有解决。而解决依赖又需要空间,所以貌似死循环了 不过,可以通过将/boot目录下initrd和vmlinuz打头的文件暂时转移来腾空间。 30 | 然后运行`apt-get -f install`和`apt-get remove -y 内核名` 31 | 32 | 参考资料 33 | 34 | [Ubuntu下面包依赖损坏的解决unmet 35 | dependencies](http://blog.csdn.net/sy373466062/article/details/53991413) 36 | 37 | [ubuntu boot空间不足的解决方法](http://blog.csdn.net/yypony/article/details/17260153) 38 | 39 | [Linux学习笔记:解决因 /boot 40 | 分区空间不足导致的卸载旧内核失败](http://blog.csdn.net/cloud_xy/article/details/10278769) 41 | 42 | 43 | [comment]: <tags> (ubuntu) 44 | [comment]: <description> (Ubuntu下面包依赖损坏的解决unmet dependencies) 45 | [comment]: <title> (Ubuntu下面包依赖损坏的解决unmet dependencies) 46 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/WeasyPrint导出中文文档乱码及使用报错.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://weasyprint.org/css/img/logo.png)) 2 | #### OSError: dlopen() failed to load a library: cairo / cairo-2 3 | 4 | 是因为没有安装cairo,运行`apt-get install libcairo2-dev`解决 5 | 6 | #### OSError: cannot load library pango-1.0: pango-1.0: cannot open shared 7 | object file: No such file or directory. 
Additionally, 8 | ctypes.util.find_library() did not manage to locate a library called 9 | 'pango-1.0 10 | 11 | 是因为没有安装pango,运行`apt-get install pango1.0-tests`解决 12 | 13 | #### 乱码 14 | 15 | 是因为没有安装中文字体 16 | 17 | 18 | 19 | apt-get install xfonts-intl-chinese xfonts-wqy ttf-wqy-zenhei 20 | apt-get install ttf-wqy-microhei xfonts-intl-chinese-big 21 | 22 | #### 如何渲染pdf时连同图片一并渲染 23 | 网站中的图片链接必须是绝对url,不能是相对url。 24 | 25 | 参考资料 26 | 27 | [DEBIAN 7,PYTHON3.3.2,安装WEASYPRINT](https://8loo.cn/2013/09/12/debian- 28 | 7python3-3-2%E5%AE%89%E8%A3%85weasyprint/) 29 | 30 | 31 | [comment]: <tags> (weasyprint) 32 | [comment]: <description> (WeasyPrint导出中文文档乱码及使用报错) 33 | [comment]: <title> (WeasyPrint导出中文文档乱码及使用报错) 34 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/Xargs用法详解.md: -------------------------------------------------------------------------------- 1 | [Xargs用法详解](http://blog.csdn.net/zhangfn2011/article/details/6776925/) 2 | [comment]: <tags> (xargs) 3 | [comment]: <description> (xargs用法详解) 4 | [comment]: <title> (Xargs用法详解) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/abc模块的使用方法.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://dbader-static-defugurjmqrkjo.netdna-ssl.com/figures/python-abcs-header.png)) 2 | python中存在abc模块用来获取抽象类,通常的使用方法是先使用ABCMeta创建一个抽象类,使用abstractmethod定义需要子类实现的接口,再通过多继承(maxin)的方式使用它,如下 3 | 4 | 5 | 6 | from abc import ABC, ABCMeta, abstractmethod 7 | # 创建一个抽象基类 8 | class A(metaclass=ABCMeta): 9 | @abstractmethod 10 | def foo(self): 11 | print("这是一个抽象方法,不能直接调用,但可以在子类实例中调用") 12 | # 或者 13 | class A(ABC): 14 | @abstractmethod 15 | def foo(self): 16 | print("这是一个抽象方法,不能直接调用,但可以在子类实例中调用") 17 | 18 | # 直接继承抽象类 19 | class B(A): 20 | pass 21 | 22 | B() # 创建失败 23 | 24 | # 直接继承抽象类并实现抽象方法 25 | class C(A): 26 | def foo(self): 27 | super(C, self).foo() 28 | print("C中的实现") 29 | 30 | C() 31 | 32 | # 创建一个不相关的基类 33 | class D(object): 34 | def bar(self): 35 | print("这是一个不相关的方法") 36 | 37 | # 通过maxin来创建一个混合类 38 | class E(D, A): 39 | pass 40 | 41 | E() #创建失败,未实现A中的抽象方法 42 | 43 | # 通过maxin来创建一个混合类,并实现抽象方法 44 | class F(D, A): 45 | def foo(self): 46 | print("这是F中的实现") 47 | 48 | F() 49 | 50 | # 创建一个不相关的类,但实现了抽象方法 51 | class G(object): 52 | def foo(self): 53 | print("这是G中的实现") 54 | 55 | # 创建混合类 56 | class H(G, A): 57 | pass 58 | 59 | H() 60 | 61 | 62 | 63 | [comment]: <tags> (python,abc) 64 | [comment]: <description> (关于python中实现抽象类的手段) 65 | [comment]: <title> (abc模块的使用方法) 66 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/docker 命令.md: -------------------------------------------------------------------------------- 1 | [Docker容器内不能联网的6种解决方案](http://blog.csdn.net/yangzhenping/article/details/43567155) 2 | ``` 3 | sudo docker network create simple-network 4 | ``` 5 | - 根据dockerfile生成镜像 6 | ``` 7 | sudo docker build -f scrapydockerfile_python3 -t jinanlongen/jay_cluster:py3 . 
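# 说明:-f 指定使用的 Dockerfile 文件,-t 指定生成镜像的 名称:标签,结尾的 . 表示以当前目录作为构建上下文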
8 | ``` 9 | - 上传镜像 10 | ``` 11 | docker push jinanlongen/jay_cluster:py3 12 | ``` 13 | - 重新标记镜像 14 | ``` 15 | docker tag 192.168.200.150/longen/jay:latest cnaafhvk/jay 16 | ``` 17 | - 显示退出的镜像id 18 | ``` 19 | docker ps -q -f status=exited 20 | ``` 21 | [comment]: <tags> (docker) 22 | [comment]: <description> (docker常用命令汇总) 23 | [comment]: <title> (docker 命令) 24 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/docker 安装.md: -------------------------------------------------------------------------------- 1 | # 1. Set up the repository 2 | ``` 3 | sudo apt-get -y install \ 4 | apt-transport-https \ 5 | ca-certificates \ 6 | curl 7 | 8 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 9 | 10 | sudo add-apt-repository \ 11 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 12 | $(lsb_release -cs) \ 13 | stable" 14 | 15 | sudo apt-get update 16 | ``` 17 | # 2. Get Docker CE 18 | ``` 19 | sudo apt-get -y install docker-ce 20 | ``` 21 | # 22 | [comment]: <tags> (docker) 23 | [comment]: <description> (docker 安装方法) 24 | [comment]: <title> (docker 安装) 25 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/docker-compose安装及相关问题.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://res.cloudinary.com/blog-mornati-net/image/upload/v1472668207/sz9sfwiji9foh0cv1v5p.png)) 2 | ## 问题描述 3 | 4 | 5 | 6 | Unsupported config option for services service: 'tyk_mongo' 7 | 8 | 9 | ## 配置信息如下: 10 | 11 | 12 | 13 | version: '2' 14 | 15 | services: 16 | tyk_redis: 17 | image: redis:latest 18 | hostname: redis 19 | ports: 20 | - "6379:6379" 21 | networks: 22 | gateway: 23 | aliases: 24 | - redis 25 | tyk_mongo: 26 | image: mongo:latest 27 | command: ["mongod", "--smallfiles"] 28 | hostname: mongo 29 | ports: 30 | - "27017:27017" 31 | networks: 32 | gateway: 33 | aliases: 34 | - mongo 35 | 36 | 37 | 原因是因为docker-compose不支持version '2', 重新安装新版本 38 | 39 | # 安装 40 | 41 | 42 | 43 | sudo -i curl -L "https://github.com/docker/compose/releases/download/1.11.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 44 | sudo chmod +x /usr/local/bin/docker-compose 45 | docker-compose --version 46 | 47 | 48 | 49 | [comment]: <tags> (docker compose) 50 | [comment]: <description> (docker-compose使用过程中可能遇到的问题汇总) 51 | [comment]: <title> (docker-compose安装及相关问题) 52 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/elasticsearch cluster配置.md: -------------------------------------------------------------------------------- 1 | ## 修改配置文件 2 | ``` 3 | vi config/elasticsearch.yml 4 | 5 | node.name: nameNode 6 | 7 | network.host: 192.168.200.107 8 | 9 | discovery.zen.ping.unicast.hosts: ["nameNode"] 10 | 11 | ``` 12 | 13 | ## 启动各个节点 14 | ``` 15 | bin/elasticsearch 16 | ``` 17 | 18 | 参考资料 19 | 20 | [Elasticsearch 集群搭建实战笔记](http://www.bkjia.com/Linux/1125376.html) 21 | [comment]: <tags> (elasticsearch) 22 | [comment]: <description> (es配置方法) 23 | [comment]: <title> (elasticsearch cluster配置) 24 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/elasticsearch学习之----(1)基础概念.md: -------------------------------------------------------------------------------- 1 | # elasticsearch学习之----(1)基础概念 2 | 3 | elasticsearch有几个核心概念。从一开始理解这些概念会对整个学习过程有莫大的帮助。 4 | 5 | ## 接近实时(NRT) 6 | 7 | 
Elasticsearch是一个接近实时的搜索平台。这意味着,从索引一个文档直到这个文档能够被搜索到有一个轻微的延迟(通常是1秒)。 8 | 9 | ## 集群(cluster) 10 | 一个集群就是由一个或多个节点组织在一起,它们共同持有你整个的数据,并一起提供索引和搜索功能。一个集群由一个唯一的名字标识,这个名字默认就是“elasticsearch”。这个名字是重要的,因为一个节点只能通过指定某个集群的名字,来加入这个集群。在产品环境中显式地设定这个名字是一个好习惯,但是使用默认值来进行测试/开发也是不错的。 11 | 12 | ## 节点(node) 13 | 14 | 一个节点是你集群中的一个服务器,作为集群的一部分,它存储你的数据,参与集群的索引和搜索功能。和集群类似,一个节点也是由一个名字来标识的,默认情况下,这个名字是一个随机的漫威漫画角色的名字,这个名字会在启动的时候赋予节点。这个名字对于管理工作来说挺重要的,因为在这个管理过程中,你会去确定网络中的哪些服务器对应于Elasticsearch集群中的哪些节点。 15 | 16 | 一个节点可以通过配置集群名称的方式来加入一个指定的集群。默认情况下,每个节点都会被安排加入到一个叫做“elasticsearch”的集群中,这意味着,如果你在你的网络中启动了若干个节点,并假定它们能够相互发现彼此,它们将会自动地形成并加入到一个叫做“elasticsearch”的集群中。 17 | 18 | 在一个集群里,只要你想,可以拥有任意多个节点。而且,如果当前你的网络中没有运行任何Elasticsearch节点,这时启动一个节点,会默认创建并加入一个叫做“elasticsearch”的集群。 19 | 20 | ## 索引(index) 21 | 22 | 一个索引就是一个拥有几分相似特征的文档的集合。比如说,你可以有一个客户数据的索引,另一个产品目录的索引,还有一个订单数据的索引。一个索引由一个名字来标识(必须全部是小写字母的),并且当我们要对对应于这个索引中的文档进行索引、搜索、更新和删除的时候,都要使用到这个名字。 23 | 24 | 在一个集群中,如果你想,可以定义任意多的索引。 25 | 26 | ## 类型(type) 27 | 28 | 在一个索引中,你可以定义一种或多种类型。一个类型是你的索引的一个逻辑上的分类/分区,其语义完全由你来定。通常,会为具有一组共同字段的文档定义一个类型。比如说,我们假设你运营一个博客平台并且将你所有的数据存储到一个索引中。在这个索引中,你可以为用户数据定义一个类型,为博客数据定义另一个类型,当然,也可以为评论数据定义另一个类型。 29 | 30 | ## 文档(document) 31 | 32 | 一个文档是一个可被索引的基础信息单元。比如,你可以拥有某一个客户的文档,某一个产品的一个文档,当然,也可以拥有某个订单的一个文档。文档以JSON(Javascript Object Notation)格式来表示,而JSON是一个到处存在的互联网数据交互格式。 33 | 34 | 在一个index/type里面,只要你想,你可以存储任意多的文档。注意,尽管一个文档,物理上存在于一个索引之中,文档必须被索引/赋予一个索引的type。 35 | 36 | ## 分片和复制(shards & replicas) 37 | 38 | 一个索引可以存储超出单个结点硬件限制的大量数据。比如,一个具有10亿文档的索引占据1TB的磁盘空间,而任一节点都没有这样大的磁盘空间;或者单个节点处理搜索请求,响应太慢。 39 | 40 | 为了解决这个问题,Elasticsearch提供了将索引划分成多份的能力,这些份就叫做分片。当你创建一个索引的时候,你可以指定你想要的分片的数量。每个分片本身也是一个功能完善并且独立的“索引”,这个“索引”可以被放置到集群中的任何节点上。 41 | 42 | 分片之所以重要,主要有两方面的原因: 43 | 44 | - 允许你水平分割/扩展你的内容容量 45 | - 允许你在分片(潜在地,位于多个节点上)之上进行分布式的、并行的操作,进而提高性能/吞吐量 46 | 47 | 至于一个分片怎样分布,它的文档怎样聚合回搜索请求,是完全由Elasticsearch管理的,对于作为用户的你来说,这些都是透明的。 48 | 49 | 在一个网络/云的环境里,失败随时都可能发生,在某个分片/节点不知怎么的就处于离线状态,或者由于任何原因消失了,这种情况下,有一个故障转移机制是非常有用并且是强烈推荐的。为此目的,Elasticsearch允许你创建分片的一份或多份拷贝,这些拷贝叫做复制分片,或者直接叫复制。 50 | 51 | 复制之所以重要,有两个主要原因: 52 | - 在分片/节点失败的情况下,提供了高可用性。因为这个原因,注意到复制分片从不与原/主要(original/primary)分片置于同一节点上是非常重要的。 53 | - 扩展你的搜索量/吞吐量,因为搜索可以在所有的复制上并行运行 54 | 55 | 总之,每个索引可以被分成多个分片。一个索引也可以被复制0次(意思是没有复制)或多次。一旦复制了,每个索引就有了主分片(作为复制源的原来的分片)和复制分片(主分片的拷贝)之别。分片和复制的数量可以在索引创建的时候指定。在索引创建之后,你可以在任何时候动态地改变复制的数量,但是你事后不能改变分片的数量。 56 | 57 | 默认情况下,Elasticsearch中的每个索引被分片5个主分片和1个复制,这意味着,如果你的集群中至少有两个节点,你的索引将会有5个主分片和另外5个复制分片(1个完全拷贝),这样的话每个索引总共就有10个分片。 58 | 59 | 这些问题搞清楚之后,我们就要进入好玩的部分了... 
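这里顺带补充一个极简的示例(假设本机 9200 端口已运行着一个 Elasticsearch 节点,索引名 customer 与分片数仅作演示),印证上文的说法:分片数量只能在创建索引时指定,而复制数量之后还可以动态调整。

```python
import requests

# 创建索引时指定分片数和复制数(分片数在索引创建之后不能再修改)
requests.put("http://localhost:9200/customer", json={
    "settings": {"number_of_shards": 5, "number_of_replicas": 1}
})

# 之后可以在任何时候动态地调整复制数量
requests.put("http://localhost:9200/customer/_settings", json={
    "index": {"number_of_replicas": 2}
})
```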
60 | [comment]: <tags> (elasticsearch) 61 | [comment]: <description> (es学习1) 62 | [comment]: <title> (elasticsearch学习之----(1)基础概念) 63 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/elasticsearch学习之----(2)安装.md: -------------------------------------------------------------------------------- 1 | # 安装和启动 2 | 3 | Elasticsearch 需要至少Java 8以上的版本支持,本教程推荐使用Oracle JDK version 1.8.0_73.Java的安装细节每个平台都不同,在这里我们不打算详细讨论,在Oracle官网上可以找到推荐的安装手册,我只想说的是,在开始安装elasticsearch之前,运行以下命令来检查Java的版本(然后升级或才安装) 4 | ` 5 | ``` 6 | java -version 7 | echo $JAVA_HOME 8 | 9 | ``` 10 | 一但我们安装了Java,我们可以就可以下载和安装elasticsearch了,在www.elastic.co/downloads可以得到过往所有版本的二进制文件,每个版本你都可以选择zip,tar或者deb,rpm格式的安装文件.简单点,我们接下来选择tar文件。 11 | 通过如下指令我们下载 Elasticsearch 5.0.1 tar 12 | 13 | ``` 14 | curl -L -O https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.0.1.tar.gz 15 | ``` 16 | 然后解压 17 | ``` 18 | tar -xvf elasticsearch-5.0.1.tar.gz 19 | ``` 20 | 解压完毕后,进入bin目录 21 | ``` 22 | cd elasticsearch-5.0.1/bin 23 | ``` 24 | 现在我们准备启动我们的单节点集群 25 | ``` 26 | ./elasticsearch 27 | ``` 28 | 如果一切运行正常,你会见到如下信息 29 | ``` 30 | [2016-09-16T14:17:51,251][INFO ][o.e.n.Node ] [] initializing ... 31 | [2016-09-16T14:17:51,329][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [317.7gb], net total_space [453.6gb], spins? [no], types [ext4] 32 | [2016-09-16T14:17:51,330][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] heap size [1.9gb], compressed ordinary object pointers [true] 33 | [2016-09-16T14:17:51,333][INFO ][o.e.n.Node ] [6-bjhwl] node name [6-bjhwl] derived from node ID; set [node.name] to override 34 | [2016-09-16T14:17:51,334][INFO ][o.e.n.Node ] [6-bjhwl] version[5.0.1], pid[21261], build[f5daa16/2016-09-16T09:12:24.346Z], OS[Linux/4.4.0-36-generic/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23] 35 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [aggs-matrix-stats] 36 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [ingest-common] 37 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-expression] 38 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-groovy] 39 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-mustache] 40 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-painless] 41 | [2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [percolator] 42 | [2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [reindex] 43 | [2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty3] 44 | [2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty4] 45 | [2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded plugin [mapper-murmur3] 46 | [2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] initialized 47 | [2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] starting ... 
48 | [2016-09-16T14:17:53,671][INFO ][o.e.t.TransportService ] [6-bjhwl] publish_address {192.168.8.112:9300}, bound_addresses {{192.168.8.112:9300} 49 | [2016-09-16T14:17:53,676][WARN ][o.e.b.BootstrapCheck ] [6-bjhwl] max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144] 50 | [2016-09-16T14:17:56,731][INFO ][o.e.h.HttpServer ] [6-bjhwl] publish_address {192.168.8.112:9200}, bound_addresses {[::1]:9200}, {192.168.8.112:9200} 51 | [2016-09-16T14:17:56,732][INFO ][o.e.g.GatewayService ] [6-bjhwl] recovered [0] indices into cluster_state 52 | ``` 53 | 抛开多余的细节不看,我们可以发现我们名为"I8hydUG"(可能与你的不同)的节点作为一个单节点集群已经启动。你可以指定结点名称和集群名称 54 | ``` 55 | ./elasticsearch -Ecluster.name=my_cluster_name 56 | ``` 57 | 58 | [comment]: <tags> (elasticsearch) 59 | [comment]: <description> (es学习2) 60 | [comment]: <title> (elasticsearch学习之----(2)安装) 61 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/es ruby api.md: -------------------------------------------------------------------------------- 1 | ## 安装es ruby api 2 | ``` 3 | gem install elasticsearch 4 | ``` 5 | 6 | ## 创建链接 7 | ``` 8 | client = Elasticsearch::Client.new host: '192.168.200.107' 9 | ``` 10 | 11 | ## 查看集群的健康情况 12 | ``` 13 | >> client.cluster.health 14 | => {"cluster_name"=>"elasticsearch", "status"=>"yellow", "timed_out"=>false, "number_of_nodes"=>3, "number_of_data_nodes"=>3, "active_primary_shards"=>31, "active_shards"=>31, "relocating_shards"=>0, "initializing_shards"=>0, "unassigned_shards"=>31, "delayed_unassigned_shards"=>0, "number_of_pending_tasks"=>0, "number_of_in_flight_fetch"=>0, "task_max_waiting_in_queue_millis"=>0, "active_shards_percent_as_number"=>50.0} 15 | ``` 16 | ## 查看指定索引,指定类型,指定id的值 17 | ``` 18 | >> client.search index: "customer", type: "external", id: 1 19 | => {"took"=>1, "timed_out"=>false, "_shards"=>{"total"=>5, "successful"=>5, "failed"=>0}, "hits"=>{"total"=>1, "max_score"=>1.0, "hits"=>[{"_index"=>"customer", "_type"=>"external", "_id"=>"1", "_score"=>1.0, "_source"=>{"name"=>"John Doe"}}]}} 20 | ``` 21 | ## 插入一个元素 22 | ``` 23 | >> client.index index: "customer", type: "external", id: 2, body: {name: "wangchuan"} 24 | => {"_index"=>"customer", "_type"=>"external", "_id"=>"2", "_version"=>1, "result"=>"created", "_shards"=>{"total"=>2, "successful"=>1, "failed"=>0}, "created"=>true} 25 | ``` 26 | ## 查询一个数据 27 | ``` 28 | >> client.search index: "customer", body: {"query":{"match":{"name":{"query":"wangchuan"}}}} 29 | => {"took"=>2, "timed_out"=>false, "_shards"=>{"total"=>5, "successful"=>5, "failed"=>0}, "hits"=>{"total"=>1, "max_score"=>0.2876821, "hits"=>[{"_index"=>"customer", "_type"=>"external", "_id"=>"2", "_score"=>0.2876821, "_source"=>{"name"=>"wangchuan"}}]}} 30 | ``` 31 | 32 | [comment]: <tags> (elasticsearch,ruby) 33 | [comment]: <description> (es的ruby api) 34 | [comment]: <title> (es ruby api) 35 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/git windows github两个账号同时使用配置.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://upload-images.jianshu.io/upload_images/3079674-a724f50b31ce43ab.jpg?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240)) 2 | 首先在window上安装git时,取消勾选gui及enable git credential manager,随后安装完毕 3 | 4 | ### 现在有2账号A和B 5 | 6 | #### 先使用A账号克隆代码 7 | 8 | 9 | 10 | $ git clone https://github.com/cnaafhvk/deer.git 11 | # 出现异常信息,不用管,输入帐号 12 | ֵ▒▒▒▒Ϊ null▒▒ 13 | ▒▒▒▒▒▒: 
username 14 | error: unable to read askpass response from 'D:\Softwares\Git\mingw64\libexec\git-core\git-askpass.exe' 15 | Username for 'https://github.com': 16 | 17 | 18 | 19 | #### 这时会弹出登录窗,输入账号密码登录,代码克隆成功,进入项目 20 | 21 | 22 | 23 | cd deer 24 | vi .git/config 25 | ### 把下面的代码粘到最后 26 | [credential] 27 | helper = store --file=.git/cred.txt 28 | 29 | 30 | #### 拉代码 31 | 32 | 33 | 34 | git pull 35 | # 出现异常信息,不用管,输入帐号 36 | ֵ▒▒▒▒Ϊ null▒▒ 37 | ▒▒▒▒▒▒: username 38 | error: unable to read askpass response from 'D:\Softwares\Git\mingw64\libexec\git-core\git-askpass.exe' 39 | Username for 'https://github.com': 40 | 41 | 42 | #### 这时不会再弹出登录窗了,因为windows用户凭据中已经保存了密码,同时,在本地保存了密码 43 | 44 | 45 | 46 | $ cat .git/cred.txt 47 | https://cnaafhvk:04a7a2e73aec8c4051a16656991b752c336cd349@github.com 48 | 49 | 50 | ### 然后打开windows用户凭据,删除刚才保存的凭据 51 | 52 | ### 之后使用B账号重复上面的操作 53 | 54 | ### 最后再把凭证删除 55 | 56 | ### 57 | 如果还有其它账号,重复上面的步骤,如果其中一个账号还有其它项目,只需要配置credential后把同一帐号下面的.git/cred.txt进行复制即可。 58 | 59 | 参考资料 60 | 61 | [如何切换多个GitHub账号](http://www.jianshu.com/p/0ad3d88c51f4) 62 | 63 | 64 | [comment]: <tags> (git,windows) 65 | [comment]: <description> (windows上在多个账号之间切换git) 66 | [comment]: <title> (git windows github两个账号同时使用配置) 67 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/git 命令.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://udemy-images.udemy.com/course/750x422/752950_b773.jpg)) 2 | * git查看当前修改内容 3 | 4 | 5 | 6 | git diff FILENAME 7 | 8 | 9 | * git 回退版本 10 | 11 | 12 | 13 | # 回退上一个版本 14 | git reset --hard HEAD^ 15 | # 跳到某个版本(可以现在和未来的) 16 | git reset --hard 3628164 17 | 18 | 19 | * 查看历史命令 20 | 21 | 22 | 23 | git reflog 24 | 25 | 26 | * 删除分支 27 | 28 | 29 | 30 | git branch -d new 31 | 32 | - 与远程分支进行比较 33 | 34 | git fetch origin 35 | git diff master origin/master 36 | 37 | - git diff命令加减号说明 38 | diff 第一个参数是src版本,第二个参数dest版本,src中拥有使用-号标注,dest使用+号标注,src没有时,默认src为当前前版本,dest为当前工作空间版本 39 | 40 | 41 | 42 | [comment]: <tags> (git) 43 | [comment]: <description> (git 命令大全) 44 | [comment]: <title> (git 命令) 45 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/git 新建仓库后上传代码.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://julienrenaux.fr/wp-content/uploads/2013/10/git-770x605.png)) 2 | ### 初始化仓库 3 | 4 | 5 | 6 | git init 7 | 8 | 9 | ### 添加文件 10 | 11 | 12 | 13 | git add -A # 添加所有文件,可以使用.gitignore排除不必添加的文件 14 | 15 | 16 | ### 从远程仓库拉取数据 17 | 18 | 19 | 20 | git pull https://github.com/derekluo/jay-cluster-captcha-jomashop.git master 21 | 22 | 23 | ### 手动或自动合并冲突,并添加提交 24 | 25 | 26 | 27 | git add 28 | git commit 29 | 30 | 31 | ### 添加远程仓库 32 | 33 | 34 | 35 | git remote add origin https://github.com/derekluo/jay-cluster-captcha-jomashop.git 36 | 37 | 38 | ### 推送远程仓库, 为推送当前分支并建立与远程上游的跟踪 39 | 40 | 41 | 42 | git push --set-upstream origin master 43 | 44 | 45 | 注:origin指的是远程仓库的一个别名, master是一个分支 46 | 47 | 48 | 49 | git branch -v # 查看本地分支 50 | * master b03b633 commit 51 | 52 | git remote -v # 查看远程仓库 53 | origin https://github.com/derekluo/jay-cluster-captcha-jomashop.git (fetch) 54 | origin https://github.com/derekluo/jay-cluster-captcha-jomashop.git (push) 55 | 56 | 57 | 58 | 若之前操作有误,出现如下错误: 59 | 60 | 61 | 62 | fatal: refusing to merge unrelated histories 63 | 64 | 65 | 则在运行 66 | 67 | 68 | 69 | git pull https://github.com/derekluo/flume-cosmo-intercepror.git master 
--allow-unrelated-histories 70 | 71 | 72 | 参考资料[Git 的origin和master分析 73 | ](http://blog.csdn.net/abo8888882006/article/details/12375091) 74 | 75 | 76 | [comment]: <tags> (git) 77 | [comment]: <description> (git新建仓库后上传代码的方法) 78 | [comment]: <title> (git 新建仓库后上传代码) 79 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/git 配置 ssh 端口.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://sdtimes.com/wp-content/uploads/2014/08/0826.sdt-git-21.jpg)) 2 | * 打开 vi ~/.ssh/config 写入以下内容: 3 | 4 | host github.com hostname github.com port 22 5 | 6 | 7 | [comment]: <tags> (git,ssh) 8 | [comment]: <description> (git配置ssh端口) 9 | [comment]: <title> (git 配置 ssh 端口) 10 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/google验证码识别ocr.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://images.genius.com/6ffb06e2be89ff78f72ff5854c02e7a3.640x640x1.jpg)) 2 | [机制学习参考](http://khalsa.guru/posts/16) 3 | 4 | 5 | [comment]: <tags> (ocr,captcha) 6 | [comment]: <description> (google的开源验证码识别ocr相关) 7 | [comment]: <title> (google验证码识别ocr) 8 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/hbase集群搭建.md: -------------------------------------------------------------------------------- 1 | ## vi conf/hbase-site.xml 2 | <configuration> 3 | <property> 4 | <name>hbase.cluster.distributed</name> 5 | <value>true</value> 6 | </property> 7 | <property> 8 | <name>hbase.rootdir</name> 9 | <value>hdfs://nameNode:9000/hbase</value> 10 | </property> 11 | <property> 12 | <name>hbase.zookeeper.quorum</name> 13 | <value>nameNode,dataNode1, dataNode2</value> 14 | </property> 15 | <property> 16 | <name>hbase.zookeeper.property.dataDir</name> 17 | <value>/home/longen/zookeeper</value> 18 | </property> 19 | </configuration> 20 | ## vi conf/hbase-env.sh 21 | export JAVA_HOME=/home/longen/jdk1.8.0_111/ 22 | export HBASE_PID_DIR=/home/longen/pids # pid默认在/tmp下面 23 | ## vi conf/regionservers # region 24 | dataNode1 25 | dataNode2 26 | ## vi conf/backup-masters # 备用master 27 | dataNode1 28 | 29 | [comment]: <tags> (hbase) 30 | [comment]: <description> (hbase集群搭建) 31 | [comment]: <title> (hbase集群搭建) 32 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/https加密过程.md: -------------------------------------------------------------------------------- 1 | [http工作原理](http://blog.csdn.net/sean_cd/article/details/6966130) 2 | [comment]: <tags> (https) 3 | [comment]: <description> (https的加密过程) 4 | [comment]: <title> (https加密过程) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/http缓存控制.md: -------------------------------------------------------------------------------- 1 | ## Last-Modified和If-Modified-Since 2 | - 第一次:客户端请求服务端,服务端返回LastModified: `该资源上次修改的时间`,该时间和资源同时被浏览器缓存下来 3 | - 第二次:客户端请求服务端,带着If-Modified-Since: `该资源上次修改的时间`, 4 | - 如果该资源修改时间未变,则返回304,浏览器使用本地缓存的资源; 5 | - 否则返回200,重新获取资源。 6 | ## Cache-Control和Etag 7 | - 第一次:客户端请求服务器,服务器返回Etag: `该资源的指纹`和Cache-Control: max-age=60,缓存的最长时间为60秒 8 | - 第二次: 9 | - 未超过60秒,客户发现本地有,则直接200 from_memory_cache/from_disk_cache 10 | - 超过了60秒,客户端请求服务端带着 If-None-Match:`该资源的指纹`, 11 | - 如果资源发生了变化,则200返回资源和新的Etag; 12 | - 则返回304,浏览器使用本地缓存的资源。 13 | 14 | 注: 15 | - 如果服务器返回Cache-Control:no-cache or 
max-age=0,会当作cache超时并重新确认cache是否可用。 16 | - 如果服务器返回Cache-Control:no-store,则浏览器永远不会缓存cache,每次都是重新获取。 17 | 18 | 19 | -------------------------------------------------------------------------------- /doc/javascript的fetchapi使用方法.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://gss3.bdstatic.com/-Po3dSag_xI4khGkpoWK1HF6hhy/baike/c0%3Dbaike80%2C5%2C5%2C80%2C26/sign=ed20149d0b7b020818c437b303b099b6/91ef76c6a7efce1b1e30f4f2ae51f3deb48f65ea.jpg)) 2 | ### 发送fetch请求 3 | 4 | 一个基本的fetch请求发送起来非常简单,看下面的一段代码 5 | 6 | 7 | 8 | var myImage = document.querySelector('img'); 9 | 10 | fetch('flowers.jpg').then(function(response) { 11 | return response.blob(); 12 | }).then(function(myBlob) { 13 | var objectURL = URL.createObjectURL(myBlob); 14 | myImage.src = objectURL; 15 | }); 16 | 17 | 18 | 在这里,我们通过网络获取图像并将其插入到元素中。 19 | fetch()最简单的用法是使用一个参数:要获取的资源的路径,并返回包含响应(Response对象)的promise。 20 | 21 | 这当然只是一个HTTP响应,而不是实际的图像。 为了从响应中提取图像正文内容,我们使用blob()方法(在Body 22 | mixin中定义,由Request和Response对象实现)。 23 | 24 | ### 提交请求选项 25 | 26 | fetch()方法可以选择性地接受第二个参数,一个init对象,它允许你控制许多不同的设置: 27 | 28 | * method: 请求方法,例如GET,POST。 29 | * headers:请求头,包含在一个Headers对象或一个带有ByteString值的对象文本中。 30 | * body: 任何你想添加到你的请求的主体:这可以是Blob,BufferSource,FormData,URLSearchParams或者USVString对象。请注意,使用GET或HEAD方法的请求不能有一个主体 31 | * mode: 您希望用于请求的模式,例如cors,no-cors或same-origin。 32 | * credentials:要用于请求的请求凭证:忽略 same-origin或include。要为当前域自动发送Cookie,必须提供此选项。从Chrome50开始,此属性还需要一个FederatedCredential实例或一个PasswordCredential实例. 33 | * cache: 要用于请求的缓存模式:default,no-store,reload,no-cache,force-cache或者only-if-cached。 34 | * redirect: 要使用的重定向模式: follow (自动跟随重定向), error (a如果发生重定向时发生错误中止), or manual (手动重定向). 在Chrome中,Chrome47之前默认情况下是follow,之后的版本默认情况下是manal。 35 | * referrer: 一个USVString 指定 no-referrer, client, or a URL. 默认是client. 36 | * referrerPolicy: 指定 referer HTTP header. 可能是 one of no-referrer, no-- referrer-when-downgrade, origin, origin-when-cross-origin, unsafe-url. 
37 | * integrity:包含请求的子资源完整性值(例如,sha256-BpfBw7ivV8q2jLiT13fxDYAe2tJllusRSZ273h2nFSE =) 38 | * keepalive: keepalive选项可用于允许请求超出页面。使用Keepalive标志提取是Navigator.sendBeacon()API的替代品。 39 | * signal: 一个AbortSignal对象实例;允许您与获取请求进行通信,并在需要时通过AbortController中止。 40 | 41 | 参考资料 [Using Fetch](https://developer.mozilla.org/en- 42 | US/docs/Web/API/Fetch_API/Using_Fetch) 43 | 44 | [CORS protocol](https://fetch.spec.whatwg.org/#http-cors-protocol) 45 | 46 | 47 | [comment]: <tags> (javascript,fetch) 48 | [comment]: <description> (javascript的fetchapi使用方法) 49 | [comment]: <title> (javascript的fetchapi使用方法) 50 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/kafka.md: -------------------------------------------------------------------------------- 1 | - 增加partition 2 | ``` 3 | bin/kafka-topics.sh --zookeeper localhost --alter --partitions 3 --topic jay_firehose_egress 4 | ``` 5 | [comment]: <tags> (kakfa) 6 | [comment]: <description> (kafka相关知识) 7 | [comment]: <title> (kafka) 8 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/kong相关命令.md: -------------------------------------------------------------------------------- 1 | # 定义api 2 | curl -i -X POST \ 3 | --url http://192.168.200.37:8001/apis/ \ 4 | --data 'name=es-indices-api' \ 5 | --data 'hosts=es.com' \ 6 | --data 'upstream_url=http://192.168.200.90:9200/_cat/indices' 7 | # 访问api 8 | curl -i -X GET \ 9 | --url http://192.168.200.37:8000/ \ 10 | --header 'Host: es.com' 11 | 12 | # 启用plugins 13 | curl -i -X POST \ 14 | --url http://localhost:8001/apis/es-indices-api/plugins/ \ 15 | --data 'name=key-auth' 16 | #创建consumer 17 | curl -i -X POST \ 18 | --url http://localhost:8001/consumers/ \ 19 | --data "username=Jason" 20 | #指定consumer key 21 | curl -i -X POST \ 22 | --url http://localhost:8001/consumers/Jason/key-auth/ \ 23 | --data 'key=ENTER_KEY_HERE' 24 | # 通过key 访问api 25 | curl -i -X GET \ 26 | --url http://localhost:8000/ \ 27 | --header 'Host: es.com' \ 28 | --header "apikey: ENTER_KEY_HERE" 29 | # 增加速率限制plugins 30 | curl -X POST http://localhost:8001/apis/es-indices-api/plugins \ 31 | --data "name=rate-limiting" \ 32 | --data "config.second=5" \ 33 | --data "config.hour=10000" 34 | # 增加datalog plugins 35 | curl -X POST http://localhost:8001/apis/plugins \ 36 | --data "name=datadog" \ 37 | --data "config.host=127.0.0.1" \ 38 | --data "config.port=8125" \ 39 | --data "config.timeout=1000" 40 | # 增加galileo plugins 41 | curl -X POST http://localhost:8001/plugins/ \ 42 | --data "name=galileo" \ 43 | --data "config.service_token=966bbac007bf11e7a47fb3f5df052576" \ 44 | --data "config.environment=default-environment" 45 | # 增加file-log plugins 46 | curl -X POST http://localhost:8001/plugins \ 47 | --data "name=file-log" \ 48 | --data "config.path=/home/ubuntu/logs/file.log" 49 | # 增加http-log plugins 50 | curl -X POST http://localhost:8001/plugins \ 51 | --data "name=http-log" \ 52 | --data "config.http_endpoint=http://192.168.200.58:5000/" \ 53 | --data "config.method=POST" \ 54 | --data "config.timeout=1000" \ 55 | --data "config.keepalive=1000" 56 | # 显示所有plugins 57 | curl -X GET http://localhost:8001/plugins/ 58 | # 删除plugins 最后是plugin的id 59 | curl -X DELETE http://localhost:8001/plugins/bba2a3b5-6a24-4f74-b8cd-f154d9aac88d 60 | [comment]: <tags> (kong,apigateway) 61 | [comment]: <description> (kong api网关的的相关命令) 62 | [comment]: <title> (kong相关命令) 63 | [comment]: <author> (夏洛之枫) 
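下面是一段简单的 Python 草稿(复用上面 curl 命令中的 Admin API 地址与参数,仅作示意,实际地址、名称与 key 需按环境调整),演示如何用 requests 完成同样的定义 api、启用 key-auth 以及携带 key 访问:

```python
import requests

ADMIN = "http://localhost:8001"   # Kong Admin API 地址(示例)
PROXY = "http://localhost:8000"   # Kong 代理地址(示例)

# 定义 api,等价于上面的 curl --data 形式(表单提交)
requests.post(ADMIN + "/apis/", data={
    "name": "es-indices-api",
    "hosts": "es.com",
    "upstream_url": "http://192.168.200.90:9200/_cat/indices",
})

# 为该 api 启用 key-auth 插件
requests.post(ADMIN + "/apis/es-indices-api/plugins/", data={"name": "key-auth"})

# 携带 apikey 通过代理访问
resp = requests.get(PROXY + "/", headers={"Host": "es.com", "apikey": "ENTER_KEY_HERE"})
print(resp.status_code, resp.text)
```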
-------------------------------------------------------------------------------- /doc/mongodb ruby api.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://serverdensity-wpengine.netdna-ssl.com/wp-content/themes/blog.new/images/random/mongodb.png)) 2 | ## 查找返回指定字段 3 | 4 | 5 | 6 | >> list = col.find({"parent_asin": "B00UB2D5GS"}, projection: {"price": 1, _id: 0}) 7 | 8 | 9 | 10 | [comment]: <tags> (mongodb,ruby) 11 | [comment]: <description> (monogdb ruby api) 12 | [comment]: <title> (mongodb ruby api) 13 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/nginx相关.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1517415066652&di=05f3dca3a9ddeffce449815573d6f0fa&imgtype=0&src=http%3A%2F%2Fwww.a166.com%2Fupload%2F2017-04%2F10%2Fnginxfanxiangdaili-83bce.png)) 2 | ## nginx不管怎么修改配置,页面都是默认页面welcome to nginx的解决办法 3 | 4 | ### 现象描述: 5 | 6 | 在`/etc/nginx/nginx.conf`中无论怎么配置http,server项,页面都是默认的,而且启动nginx不报错,随意输入网址也不报错(都是welcome 7 | to nginx) 8 | 9 | ### 分析: 10 | 11 | 注意`/etc/nginx/nginx.conf的http`中有没有`include /etc/nginx/sites-enabled/*;`这句话 12 | 13 | 如果有,那么再检查一下语句中的位置,是否有default这个文件,打开看看,原来这里已经有server,location这些定义,而在`/etc/nginx/nginx.conf`中定义的这些信息却不能覆盖先前的。 14 | 15 | ### 解决方案: 16 | 17 | * 第一种:注释掉这行,使其采用nginx.conf中的配置。 18 | * 第二种:直接修改default中的内容 19 | 20 | 参考资料[Welcome to nginx!怎么解决?](https://zhidao.baidu.com/question/509695082.html) 21 | 22 | ## nginx静态资源文件无法访问,403 forbidden错误 23 | 24 | 主要是nginx用户没有权限问题,将nginx.conf中的user改为root即可 25 | 26 | 参考资料[nginx静态资源文件无法访问,403 forbidden错误](http://ngcsnow.iteye.com/blog/2117975) 27 | 28 | 29 | [comment]: <tags> (nginx) 30 | [comment]: <description> (nginx配置相关问题) 31 | [comment]: <title> (nginx相关) 32 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/numpy中轴的含义.md: -------------------------------------------------------------------------------- 1 | By definition, the axis number of the dimension is the index of that dimension within the array's shape. It is also the position used to access that dimension during indexing. 2 | 3 | For example, if a 2D array a has shape (5,6), then you can access a[0,0] up to a[4,5]. Axis 0 is thus the first dimension (the "rows"), and axis 1 is the second dimension (the "columns"). In higher dimensions, where "row" and "column" stop really making sense, try to think of the axes in terms of the shapes and indices involved. 4 | 5 | If you do .sum(axis=n), for example, then dimension n is collapsed and deleted, with all values in the new matrix equal to the sum of the corresponding collapsed values. For example, if b has shape (5,6,7,8), and you do c = b.sum(axis=2), then axis 2 (dimension with size 7) is collapsed, and the result has shape (5,6,8). Furthermore, c[x,y,z] is equal to the sum of all elements c[x,y,:,z]. 6 | 7 | 参考资料 8 | 9 | [how is axis indexed in numpy's array? 10 | ](https://stackoverflow.com/questions/17079279/how-is-axis-indexed-in-numpys-array/17079437#17079437) 11 | [comment]: <tags> (numpy, axis) 12 | [comment]: <description> (From Numpy's tutorial, axis can be indexed with integers, like 0 is for column, 1 is for row, but I don't grasp why they are indexed this way? And How do I figure out each axis' index when coping with multidimensional array?) 
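A minimal sketch to illustrate the point above (the array shape is chosen arbitrarily for demonstration):

```python
import numpy as np

b = np.arange(5 * 6 * 7 * 8).reshape(5, 6, 7, 8)
c = b.sum(axis=2)                           # axis 2 (size 7) is collapsed
print(c.shape)                              # (5, 6, 8)
print(c[1, 2, 3] == b[1, 2, :, 3].sum())    # True: each element is the sum over the collapsed axis
```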
13 | [comment]: <title> (numpy中轴的含义) 14 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/pHash安装.md: -------------------------------------------------------------------------------- 1 | [pHash-0.9.6 patched version](https://github.com/ShichaoMa/pHash-0.9.6) 2 | [comment]: <tags> (ubuntu,phash) 3 | [comment]: <description> (phash安装) 4 | [comment]: <title> (pHash安装) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/pip 相关指令.md: -------------------------------------------------------------------------------- 1 | ## PIP安装 2 | 3 | - 获取 pip 安装程序 4 | 5 | ``` 6 | wget https://bootstrap.pypa.io/get-pip.py 7 | 8 | ``` 9 | - 下载安装pip 10 | ``` 11 | sudo python get-pip.py 12 | ``` 13 | ``` 14 | --no-cache-dir # 忽略之前的cache 15 | -i # 指定源 16 | -r # 指定requirements文件进行安装 17 | ``` 18 | [comment]: <tags> (pip) 19 | [comment]: <description> (pip相关命令) 20 | [comment]: <title> (pip 相关指令) 21 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/pip 相关错误.md: -------------------------------------------------------------------------------- 1 | ## 操作系统语言引起的错误 2 | ``` 3 | export LC_ALL=C 4 | ``` 5 | ## How do I fix 'ImportError: cannot import name IncompleteRead'? 6 | ``` 7 | # 重新安装pip 8 | sudo apt-get remove python-pip 9 | sudo easy_install pip 10 | ``` 11 | 12 | ## UnicodeDecodeError: 'ascii' codec can't decode byte 0xe5 in position 52: ordinal not in range(128) 13 | ``` 14 | 修改操作系统的字符集 15 | export LC_ALL=en_US.UTF-8 16 | 若出错bash: warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8) 17 | 则 18 | locale-gen en_US.UTF-8 19 | ``` 20 | [comment]: <tags> (pip) 21 | [comment]: <description> (pip相关错误) 22 | [comment]: <title> (pip 相关错误) 23 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/postgresql network设置.md: -------------------------------------------------------------------------------- 1 | # 修改配置文件 2 | ``` 3 | vi /var/lib/pgsql/data/postgresql.conf 4 | 5 | listen_addresses = '*' 6 | 7 | vi /var/lib/pgsql/data/pg_hba.conf 8 | 9 | Include the following line (at the end of the file): 10 | 11 | host username all 192.168.0.10/32 md5 12 | ``` 13 | # 重启 14 | ``` 15 | sudo /etc/init.d/postgresql restart 16 | /usr/lib/postgresql/9.5/bin/pg_ctl reload 17 | ``` 18 | 参考资料 19 | 20 | [Configure PostgreSQL to accept connections from network](https://www.faqforge.com/linux/server/postgresql/configure-postgresql-accept-connections/) 21 | 22 | [PostgreSQL pg_hba.conf 文件简析](http://www.cnblogs.com/hiloves/archive/2011/08/20/2147043.html) 23 | 24 | ## 当使用pg_ctl reload的时候报错: 25 | ``` 26 | - pg_ctl: could not open PID file "/var/lib/postgresql/9.4/main/postmaster.pid": Permission denied 27 | ``` 28 | ## 解决方法 29 | ``` 30 | sudo -u postgres /usr/lib/postgresql/9.4/bin/pg_ctl -D /var/lib/postgresql/9.4/main/ reload 31 | ``` 32 | 错误原因可能是因为pg_ctl必须使用postgres用户来运行 33 | 34 | 参考资料 35 | 36 | [What should the permissions be on for PostgreSQL /data files?](http://serverfault.com/questions/605493/what-should-the-permissions-be-on-for-postgresql-data-files) 37 | 38 | ## 重启时出现错误 39 | ``` 40 | longen@ubuntu-94:~$ sudo -u postgres /usr/lib/postgresql/9.5/bin/pg_ctl -D /var/lib/postgresql/9.5/main/ reload 41 | sudo: unable to resolve host ubuntu-94 42 | ``` 43 | ## 解决方法 44 | 修改hosts文件,增加对应关系 45 | ``` 46 | sudo vi /etc/hosts 47 | 192.168.200.94 ubuntu-94 48 | ``` 49 | [comment]: <tags> 
(postgresql) 50 | [comment]: <description> (postgresql网络配置方法) 51 | [comment]: <title> (postgresql network设置) 52 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/postgresql pg_ctl command not found..md: -------------------------------------------------------------------------------- 1 | ``` 2 | $ cd ~ 3 | $ vim .profile 4 | PATH=$PATH:/usr/lib/postgresql/{version}/bin 5 | export PATH 6 | $ . ~/.profile 7 | ``` 8 | [comment]: <tags> (postgresql) 9 | [comment]: <description> (pg_ctl 提供command not found的解决方法) 10 | [comment]: <title> (postgresql pg_ctl command not found.) 11 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/postgresql 启动失败.md: -------------------------------------------------------------------------------- 1 | 在ubuntu操作系统下通过 /etc/init.d/postgresql start 启动postgresql服务时,启动,关闭,状态均显示正常,但是ps -ef|grep postgresql 或进程号,却找不到进程,很怪异的错误 2 | 通过查log 3 | ``` 4 | tail -f /var/log/postgresql/postgresql-9.4-main.log 5 | ``` 6 | 发现以下错误: 7 | ``` 8 | 2016-11-09 14:32:52 CST [2669-1] FATAL: data directory "/var/lib/postgresql/9.4/main" has group or world access 9 | 2016-11-09 14:32:52 CST [2669-2] DETAIL: Permissions should be u=rwx (0700). 10 | 11 | ``` 12 | 原来postgresql 不允许将数据存放目录开放最大权限, 改回0700即正常 13 | ``` 14 | sudo chmod -R 0700 /var/lib/postgresql/9.4/main 15 | ``` 16 | [comment]: <tags> (postgresql) 17 | [comment]: <description> (postgresql启动失败的解决方法) 18 | [comment]: <title> (postgresql 启动失败) 19 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/pypi使用方法.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1507352875&di=6c7b70744b8441002f20401bf3c653ed&imgtype=jpg&er=1&src=http%3A%2F%2Fs6.51cto.com%2Fwyfs02%2FM02%2F23%2FC4%2FwKioL1NDWp2wpN3_AAA1lJYopPE427.gif)) 2 | - 打包 3 | ``` 4 | python setup.py sdist 5 | ``` 6 | - 新建文件`vi ~/.pypirc` 7 | ``` 8 | [distutils] 9 | index-servers = 10 | pypi 11 | 12 | [pypi] 13 | repository: https://upload.pypi.org/legacy/ 14 | username: xxxx 15 | password: xxxx 16 | ~ 17 | ``` 18 | - 上传 19 | ``` 20 | python setup.py sdist upload 21 | ``` 22 | [comment]: <tags> (pip) 23 | [comment]: <description> (pip的pypi上传包的方法) 24 | [comment]: <title> (pypi使用方法) 25 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/python 模块安装.md: -------------------------------------------------------------------------------- 1 | ## ubuntu python 安装pil 2 | - 安装pil 3 | ``` 4 | sudo pip install pillow 5 | ``` 6 | - 如果报错了,则首先执行 7 | ``` 8 | sudo apt-get install libjpeg8-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.5-dev tk8.5-dev python-tk 9 | ``` 10 | [comment]: <tags> (python) 11 | [comment]: <description> (python模块安装) 12 | [comment]: <title> (python 模块安装) 13 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/python元类,类,实例.md: -------------------------------------------------------------------------------- 1 | ### 这是在custom-redis项目中用到例子,介绍了元类new方法的实现,以及元类继承 2 | 3 | ```python 4 | class Meta(type): 5 | """元类基类,给方法增加装饰器""" 6 | wrapper = None 7 | 8 | def __new__(typ, name, bases, properties): 9 | # 一些公共继承方法继承在DataStore里面,不需要被装饰,通过其基类来判断要构造的是否为DataStore类 10 | if bases[0] != object: 11 | for k, v in properties.items(): 12 | if isinstance(v, 
types.FunctionType): 13 | # 由于这个方法会被继承,通过提供不同的wrapper函数来做不同的包装, RedisMeta没有提供,所以不包装 14 | properties[k] = typ.wrapper(v) if typ.wrapper else v 15 | return super(Meta, typ).__new__(typ, name, bases, properties) 16 | 17 | 18 | class StoreMeta(Meta): 19 | """数据类专用元类""" 20 | wrapper = staticmethod(data_cmd_wrapper) 21 | 22 | 23 | class CommonCmdMeta(StoreMeta): 24 | """通用函数类专用元类""" 25 | wrapper = staticmethod(common_cmd_wrapper) 26 | 27 | 28 | class RedisMeta(CommonCmdMeta): 29 | """Redis类专用元类""" 30 | wrapper = None 31 | 32 | def __new__(typ, *args, **kwargs): 33 | # 这个地方非常诡异, 如果通过type(*args)来组建类,类会使用CommonCmdMeta来创建,可能原因是通过type返回的类是type创建的 34 | # 当使用默认type创建时,python认为该类没有指定元类,所以继续调用了父类的元类进行创建。 35 | # 通过type.__new__则会使用RedisMeta创建 36 | # 不通过super(RedisMeta, typ).__new__(typ, *args)创建,是因为这样会调用CommonCmdMeta.__new__,这不是我们想要的。 37 | # 这里我们不需要对CustomRedis类的函数进行包装操作,所以选择使用type.__new__创建 38 | return super(RedisMeta, typ).__new__(typ, *args) 39 | 40 | def __init__(cls, *args, **kwargs): 41 | # 当创建的实例(在这里是类)不是RedisMeta类型时,比如通过type()直接返回,__init__不会被调用。 42 | pass 43 | ``` 44 | 45 | 详细代码参见[bases.py](https://github.com/ShichaoMa/custom_redis/blob/update3/custom_redis/server/bases.py) 46 | 47 | 实例化时的调用顺序 48 | 49 | - 元类的__call__ 50 | - 类的__new__ 51 | - 类的__init__ 52 | 53 | 子类的元类(除type以外)必须是父类元类的子类,使用了元类的类的子类也会使用该元类创建 54 | 55 | 参见[singleton.py](https://github.com/ShichaoMa/toolkit/blob/master/toolkit/singleton.py) 56 | 57 | [comment]: <tags> (python,meta,class) 58 | [comment]: <description> (python 元类,类,实例创建时的一些理解) 59 | [comment]: <title> (python元类,类,实例) 60 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/python描述符.md: -------------------------------------------------------------------------------- 1 | ## python描述符备忘 2 | ### 对于python2,描述符类不能是经典类 3 | __get__ 4 | ``` 5 | In [23]: class D(object): 6 | ...: def __get__(self, instance, cls): 7 | ...: print(111111111) 8 | ...: return 11111111 9 | ...: def __set__(self, value, instance): 10 | ...: print(22222222222) 11 | ...: 12 | 13 | In [24]: class A(object): 14 | ...: d = D() 15 | ...: @classmethod 16 | ...: def fun(cls): 17 | ...: print(cls.d) 18 | ...: 19 | 20 | In [25]: a = A() 21 | # 通过实例访问,是需要通过描述符的 22 | In [26]: a.d 23 | 111111111 24 | Out[26]: 11111111 25 | # 通过类访问,也需要通过描述符 26 | In [27]: A.d 27 | 111111111 28 | Out[27]: 11111111 29 | # 通过类赋值,会直接把描述符覆盖掉 30 | In [28]: A.d = 3 31 | # 描述符没有了 32 | In [29]: A.d 33 | Out[29]: 3 34 | 35 | In [30]: a.d 36 | Out[30]: 3 37 | 38 | In [31]: a = A() 39 | # 重新创建一个实例也不行 40 | In [32]: a.d 41 | Out[32]: 3 42 | ``` -------------------------------------------------------------------------------- /doc/rails时区设置.md: -------------------------------------------------------------------------------- 1 | - 在application.rb中添加以下两条配置 2 | ``` 3 | config.time_zone = 'Beijing' # 只设置这一个就可以 4 | config.active_record.default_timezone = :local # 设置了之后数据库保存的也会本地记录 5 | ``` 6 | 参考:[Rails 中的时区及时间问题](https://ruby-china.org/topics/16187) 7 | [comment]: <tags> (rails) 8 | [comment]: <description> (rails时区配置) 9 | [comment]: <title> (rails时区设置) 10 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/scrapy content-length 错误.md: -------------------------------------------------------------------------------- 1 | 之前写的一个spider,在发送post请求时使用了content-length来注明request payload的长度。后来这个spider就一直报错 2 | 看了scrapy的源码发现twisted在发请求时发通过body.len计算content-length并发送,所以如果在header中也指定了content-length 3 | 
会发送2次。可能正是因为这个原因,才导致了400错误。但之前没有出现,推测是由于服务器端没有对请求头去重导致的。 -------------------------------------------------------------------------------- /doc/scrapy windows安装.md: -------------------------------------------------------------------------------- 1 | 参考资料[Scrapy安转遇到问题](https://pypi.python.org/packages/15/fe/e43871be6559fa1b11bcbd73ea9ac826795bfd77afd6e6029c84bf93145b/lxml-3.4.3.win-amd64-py2.7.exe#md5=ae0d977f59b25f7d042217020202d7b3) 2 | [comment]: <tags> (scrapy) 3 | [comment]: <description> (scrapy windows安装方法) 4 | [comment]: <title> (scrapy windows安装) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/seaweedFS.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)) 2 | 3 | [seaweedFS的设计原理是基于 Facebook 的一篇图片存储系统的论文](https://www.usenix.org/legacy/event/osdi10/tech/full_papers/Beaver.pdf) 4 | 5 | [comment]: <tags> (seaweedfs) 6 | [comment]: <description> (seaweedfs) 7 | [comment]: <title> (seaweedFS) 8 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/select 文件描述符大小超限.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcR20CBKvtHnghLhQvx55PB5w8NOBnB7hLiHWsVGruQcjBcMD03e)) 2 | python调用select时出现如下错误 3 | 4 | 5 | 6 | Traceback (most recent call last): 7 | File "./offer_listing_monitor.py", line 161, in start 8 | readable, writable, _ = select.select(readable, self.clients.keys(), self.clients.keys(), 0.1) 9 | ValueError: filedescriptor out of range in select() 10 | 11 | 12 | 因为大量使用短链接,所以导致单进程的 fd 个数升高,超出了 1024 限制,出现了最开始的异常 13 | 14 | ### 解决方法 15 | 16 | * 因为这个值是定义在内核里面,所以如果在维持目前方案不变的前提下,解决这个问题就需要重新编译 Linux-kernel,将这个值提高 17 | * 修改 stpclient 的客户端,使用epoll,代替比较老旧的 select,当时使用 select 的原因是,fd 个数很少,性能上没有问题,同时 select 在其他平台上也可以得到支持 18 | 19 | 参考资料[Filedescriptor out of range in 20 | select](http://www.jianshu.com/p/a74a48a54fce) 21 | 22 | 23 | [comment]: <tags> (select) 24 | [comment]: <description> (因为大量使用短链接,所以导致单进程的 fd 个数升高,超出了 1024 限制,出现了最开始的异常) 25 | [comment]: <title> (select 文件描述符大小超限) 26 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/sql.md: -------------------------------------------------------------------------------- 1 | - 修改表结构 2 | ``` 3 | # 修改列属性 4 | alter table site_roc alter column url type varchar(500); 5 | # 增加列 6 | alter table site_roc add site varchar(30); 7 | ``` 8 | [comment]: <tags> (sql) 9 | [comment]: <description> (一些sql语句) 10 | [comment]: <title> (sql) 11 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/tornado-协程实现过程.md: -------------------------------------------------------------------------------- 1 | 以一个简单的调用为例来说明 2 | ``` 3 | import tornado.ioloop 4 | from tornado.gen import coroutine 5 | from tornado.concurrent import Future 6 | 7 | @coroutine 8 | def asyn_sum(a, b): 9 | print("begin calculate:sum %d+%d"%(a,b)) 10 | future = Future() 11 | 12 | def callback(a, b): 13 | print("calculating the sum of %d+%d:"%(a,b)) 14 | future.set_result(a+b) 15 | tornado.ioloop.IOLoop.instance().add_callback(callback, a, b) 16 | 17 | result = yield future 18 | 19 | print("after yielded") 20 | print("the %d+%d=%d"%(a, b, result)) 21 | 22 | def main(): 23 | future = asyn_sum(2,3) 24 | 
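    # 注:这里 asyn_sum 立即返回一个尚未完成的 Future,真正的求和要等 IOLoop 启动后由回调完成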
tornado.ioloop.IOLoop.instance().start() 25 | 26 | 27 | main() 28 | 29 | ``` 30 | - 程序开始,定义了一个asyn_sum协程,在main函数中第一行调用了该协程 31 | - 同时tornado.gen.coroutine中调用asyn_sum生成器,获取第一个yield值也就是future,并创建了一个Runner对象 32 | ``` 33 | try: 34 | orig_stack_contexts = stack_context._state.contexts 35 | yielded = next(result) # 这一行所示 36 | if stack_context._state.contexts is not orig_stack_contexts: 37 | yielded = TracebackFuture() 38 | yielded.set_exception( 39 | stack_context.StackContextInconsistentError( 40 | 'stack_context inconsistency (probably caused ' 41 | 'by yield within a "with StackContext" block)')) 42 | except (StopIteration, Return) as e: 43 | future.set_result(_value_from_stopiteration(e)) 44 | except Exception: 45 | future.set_exc_info(sys.exc_info()) 46 | else: 47 | _futures_to_runners[future] = Runner(result, future, yielded) 48 | ``` 49 | - Runner通过__init__调用了其方法handle_yield,并在检查future的完成情况,很明显在第一个代码块中`tornado.ioloop.IOLoop.instance().add_callback(callback, a, b)`刚被加入事件循环,此时事件循环还未启动,所以callback中的`future.set_result(a+b)`并未被调用,因此future并未done。 50 | ``` 51 | if not self.future.done() or self.future is moment: 52 | def inner(f): 53 | # Break a reference cycle to speed GC. 54 | f = None # noqa 55 | self.run() 56 | self.io_loop.add_future( 57 | self.future, inner) 58 | return False 59 | return True 60 | ``` 61 | - 让我们把注意力移到`self.io_loop.add_future(self.future, inner)`这么代码中,这么代码将inner在future完成之后加入到了事件循环的callbacks中,由下面的代码可以的看出 62 | ``` 63 | def add_future(self, future, callback): 64 | """Schedules a callback on the ``IOLoop`` when the given 65 | `.Future` is finished. 66 | The callback is invoked with one argument, the 67 | `.Future`. 68 | """ 69 | assert is_future(future) 70 | callback = stack_context.wrap(callback) 71 | future.add_done_callback( 72 | lambda future: self.add_callback(callback, future)) 73 | ``` 74 | - future在开启事件循环后马上就会完成,因此,随后就会调用`self.add_callback(callback, future)`这段代码将inner加入事件循环,继而调用了` self.run()` 75 | - run会获取future的结果,同时发送结果给协程 76 | ``` 77 | orig_stack_contexts = stack_context._state.contexts 78 | exc_info = None 79 | try: 80 | value = future.result() 81 | except Exception: 82 | self.had_exception = True 83 | exc_info = sys.exc_info() 84 | future = None 85 | if exc_info is not None: 86 | try: 87 | yielded = self.gen.throw(*exc_info) 88 | finally: 89 | # Break up a reference to itself 90 | # for faster GC on CPython. 
91 | exc_info = None 92 | else: 93 | yielded = self.gen.send(value) 94 | ``` 95 | - 相应的,结果就被赋值给了asyn_sum中的`result = yield future`,继而结束了整个协程的调用。 96 | 97 | #### 对于事件循环的理解 98 | tornado中的事件循环,本质上使用epoll实现。epoll的最主要作用可能就是为了唤醒事件循环。实现方式就是创建一个管道,通过epoll监听管道输出(READ),并设置了超时时间,此时epoll会在超时时间内阻塞;如果有callback加入,则通过管道写入任意字节唤醒epoll,这样就相当于实现了一个可唤醒的阻塞。唤醒之后,会依次执行callback,同时执行timeouts中的callback,timeouts使用堆来保证时间最近的在最上面。 99 | 100 | #### 对于future对象的理解 101 | future对象可以当成一次异步调用的代理人,异步调用在创建后,加入事件循环中。随着事件循环进行,异步调用被执行了,执行一结束,代理人马上获取结果,并置done=True,同时代理人会将异步调用的全部回调函数执行,其中的一个回调函数就包括Runner.run,其作用是将异步调用结果赋值给yield左边的变量,同时继续执行接下来的代码,直到下一个yield出现。 102 | 103 | #### 对嵌套协程的调用 104 | 首先要清楚子协程返回的也是一个future对象,因为这个future还没有完成,所以对yield左边变量的赋值会阻塞住。当子协程完成时,通过raise Return或者StopIteration的方式,以一个异常将结果值传递出来,并为result_future也就是子协程的future进行set_result,子协程future完成后,父协程继续。 105 | python3.4以后,可以通过yield from也就是新版的await来更容易地获取子协程的返回值。 106 | 107 | ### 总结 108 | 对于`a = yield b`这种结构,若b是一个Future,那么a的最终结果值为Future.result();若b是一个生成器,将其变成协程的做法只是实现了操控生成器:在返回一个Future对象的同时,将生成器中每次yield都变成异步执行。 109 | 110 | [comment]: <tags> (tornado) 111 | [comment]: <description> (tornado协程实现) 112 | [comment]: <title> (tornado-协程实现过程) 113 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/tyk安装.md: -------------------------------------------------------------------------------- 1 | # 使用docker部署 2 | ## Set up hosts entries 3 | sudo vi /etc/hosts 4 | # 添加如下内容 5 | 127.0.0.1 www.tyk-portal-test.com 6 | ## Get the quick start compose files 7 | git clone https://github.com/lonelycode/tyk_quickstart.git 8 | cd tyk_quickstart 9 | ## Add dashboard license 10 | vi tyk_analytics.conf 11 | # 修改如下内容 12 | { 13 | ... 14 | "mongo_url": "mongodb://mongo:27017/tyk_analytics", 15 | "license_key": "LICENSEKEY", 16 | "page_size": 10, 17 | ... 18 | } 19 | licensekey可以从这个页面得到[Tyk On-Premises – FREE Community Edition License](https://tyk.io/product/tyk-professional-edition-free-trial/) 20 | ## Bootstrap dashboard and portal 21 | # docker-compose如果运行没成功考虑是版本问题,需要重装 22 | docker-compose up -d --force-recreate 23 | # 如果上一条命令被墙了,可以给其加个代理 24 | sudo vi /lib/systemd/system/docker.service 25 | # 在[service]加上 26 | Environment="HTTP_PROXY=http://192.168.200.90:8123" 27 | sudo systemctl daemon-reload 28 | sudo systemctl restart docker 29 | 30 | ./setup.sh 31 | ## Log in 32 | The setup script will provide login details for your Dashboard – go ahead and log in. 33 | [comment]: <tags> (tyk,apigateway) 34 | [comment]: <description> (Tyk is an open source API Gateway that is fast, scalable and modern. Out of the box, Tyk offers an API Management Platform with an API Gateway, API Analytics, Developer Portal and API Management Dashboard.)
35 | [comment]: <title> (tyk安装) 36 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu systemd配置.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://scottlinux.com/wp-content/uploads/2014/10/systemd_logo_small.jpeg)) 2 | ### 一个简单的开机启动配置文件如下: 3 | 4 | 打开配置文件`vi /lib/systemd/system/deer.service` 5 | 6 | 7 | 8 | [Unit] 9 | # 描述 10 | Description=deplicate images 11 | # 在哪个程序之后启动 12 | After=network.target 13 | # 依赖哪个程序 14 | Wants=network.target 15 | [Service] 16 | Type=simple 17 | # 执行命令 18 | ExecStart=/home/longen/deer/image_process.py --host 192.168.200.94 --port 4567 19 | [Install] 20 | WantedBy=multi-user.target 21 | 22 | 23 | 参考资料: [systemd - Ubuntu Wiki](https://wiki.ubuntu.com/systemd) 24 | [systemd.exec](https://www.freedesktop.org/software/systemd/man/systemd.exec.html) 25 | 26 | 27 | [comment]: <tags> (ubuntu,systemd) 28 | [comment]: <description> (ubuntu systemd配置) 29 | [comment]: <title> (ubuntu systemd配置) 30 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu下docker push 私有仓库 timeout 解决方法.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://cdn-images-1.medium.com/max/1600/1*sGHbxxLdm87_n7tKQS3EUg.png)) 2 | ### push 的时候出现如下异常: 3 | 4 | 5 | 6 | ubuntu@dev:~$ docker push 192.168.200.90/kong 7 | The push refers to a repository [192.168.200.90/kong] 8 | Get https://192.168.200.90/v1/_ping: net/http: TLS handshake timeout 9 | 10 | 11 | ### 可能是因为docker使用了外网代理 12 | 13 | 14 | 15 | ubuntu@dev:~$ cat /lib/systemd/system/docker.service 16 | [Unit] 17 | Description=Docker Application Container Engine 18 | Documentation=https://docs.docker.com 19 | After=network.target docker.socket 20 | Requires=docker.socket 21 | 22 | [Service] 23 | Environment="HTTP_PROXY=http://192.168.200.90:8123" 24 | Type=notify 25 | # the default is not to use systemd for cgroups because the delegate issues still 26 | # exists and systemd currently does not support the cgroup feature set required 27 | # for containers run by docker 28 | ExecStart=/usr/bin/dockerd -H fd:// 29 | ExecReload=/bin/kill -s HUP $MAINPID 30 | # Having non-zero Limit*s causes performance problems due to accounting overhead 31 | # in the kernel. We recommend using cgroups to do container-local accounting. 32 | LimitNOFILE=infinity 33 | LimitNPROC=infinity 34 | LimitCORE=infinity 35 | # Uncomment TasksMax if your systemd version supports it. 36 | # Only systemd 226 and above support this version. 
37 | #TasksMax=infinity 38 | TimeoutStartSec=0 39 | # set delegate yes so that systemd does not reset the cgroups of docker containers 40 | Delegate=yes 41 | # kill only the docker process, not all processes in the cgroup 42 | KillMode=process 43 | 44 | # 注释掉Environment="HTTP_PROXY=http://192.168.200.90:8123" 45 | # Environment="HTTP_PROXY=http://192.168.200.90:8123" 46 | 47 | 48 | 49 | [comment]: <tags> (docker,repository) 50 | [comment]: <description> (dcoker私有仓库) 51 | [comment]: <title> (ubuntu下docker push 私有仓库 timeout 解决方法) 52 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu下编码问题.md: -------------------------------------------------------------------------------- 1 | ### ubuntu下python安装包setup.py中由于读取了带有中文件的文件导致的编码题 2 | ```python 3 | Collecting pyaop (from apistellar>=1.0.30) 4 | Using cached https://files.pythonhosted.org/packages/b5/6d/4a39bf5f225c9925b26122c663860118999b262c4db293c74f435bfb0da0/pyaop-0.0.6.tar.gz 5 | Complete output from command python setup.py egg_info: 6 | Traceback (most recent call last): 7 | File "<string>", line 1, in <module> 8 | File "/tmp/pip-install-33yvlkcn/pyaop/setup.py", line 34, in <module> 9 | VERSION = get_version("pyaop") 10 | File "/tmp/pip-install-33yvlkcn/pyaop/setup.py", line 14, in get_version 11 | init_py = open(os.path.join(package, '__init__.py')).read() 12 | File "/root/.pyenv/versions/3.6.5/lib/python3.6/encodings/ascii.py", line 26, in decode 13 | return codecs.ascii_decode(input, self.errors)[0] 14 | UnicodeDecodeError: 'ascii' codec can't decode byte 0xe4 in position 297: ordinal not in range(128) 15 | ``` 16 | #### 问题原因 17 | 这个问题很明显是由于`init_py = open(os.path.join(package, '__init__.py')).read()` 18 | 这一行读了一个有中文注释的__init__.py文件,而系统本身默认是ascii的,所以导致报错。 19 | #### 解决方案 20 | 21 | ``` 22 | export LC_ALL=zh_CN.UTF-8 23 | ``` 24 | 有些系统没有安中文。可能会出现以下报错: 25 | ``` 26 | bash: warning: setlocale: LC_ALL: cannot change locale (zh_CN.UTF-8) 27 | ``` 28 | 解决方案是安装新的语言包或者 29 | ``` 30 | export LC_ALL=en_US.UTF-8 31 | ``` 32 | -------------------------------------------------------------------------------- /doc/ubuntu安装ShadowSocks.md: -------------------------------------------------------------------------------- 1 | ## 安装 2 | 3 | ``` 4 | sudo pip install shadowsocks 5 | ``` 6 | 7 | ## 配置 8 | 9 | ### server 10 | 11 | 打开`/etc/shadowsocks.json` 加入以下代码: 12 | 13 | ``` 14 | { 15 | "server":"0.0.0.0", 16 | "server_port":8388, 17 | "local_address": "127.0.0.1", 18 | "local_port":1080, 19 | "password":"xxxxx", 20 | "timeout":300, 21 | "method":"aes-256-cfb", 22 | "fast_open": false, 23 | "pid-file": "/root/shadowsocks/shadowsocks.pid", 24 | "log-file": "/root/shadowsocks/shadowsocks.log" 25 | 26 | } 27 | ``` 28 | 29 | 启动`ssserver -c /etc/shadowsocks.json -d start` 30 | 31 | ### client 32 | 33 | - 创建配置文件 34 | 35 | ``` 36 | mkdir shadowsocks 37 | cd shadowsocks/ 38 | vi shadowsocks.json 39 | ``` 40 | 41 | - 添加以下内容 42 | 43 | ``` 44 | { 45 | "server": "xxx.xxx.xxx.xxx", 46 | "server_port": 8388, 47 | "local_address": "127.0.0.1", 48 | "local_port": 1080, 49 | "password": "xxxxx", 50 | "timeout": 600, 51 | "method": "aes-256-cfb", 52 | "fast_open": false, 53 | "workers": 1, 54 | "pid-file": "/home/ubuntu/shadowsocks/shadowsocks.pid", 55 | "log-file": "/home/ubuntu/shadowsocks/shadowsocks.log" 56 | } 57 | ``` 58 | 59 | 启动`sslocal -c ~/shadowsocks/shadowsocks.json -d start` 60 | 61 | ## 配置http代理 62 | 63 | - 安装 polipo 64 | 65 | ``` 66 | sudo apt-get install polipo 67 | ``` 68 | 69 | - 配置polipo 70 | 
71 | ``` 72 | vi /etc/polipo/config 73 | ``` 74 | 75 | - 添加以下内容 76 | 77 | ``` 78 | proxyAddress = "127.0.0.1" 79 | proxyPort = "8123" 80 | socksParentProxy = "127.0.0.1:1080" 81 | socksProxyType = socks5 82 | chunkHighMark = 50331648 83 | objectHighMark = 16384 84 | serverMaxSlots = 64 85 | serverSlots = 16 86 | serverSlots1 = 32 87 | ``` 88 | 89 | - 启动 90 | 91 | ``` 92 | sudo /etc/init.d/polipo restart 93 | ``` 94 | [comment]: <tags> (ubuntu,shadowsocks) 95 | [comment]: <description> (shadowsocks在ubuntu上的安装方法) 96 | [comment]: <title> (ubuntu安装ShadowSocks) 97 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu安装postgresql.md: -------------------------------------------------------------------------------- 1 | #### 安装 2 | - 安装postgresql server 和 client: 3 | ```bash 4 | sudo apt-get install postgresql postgresql-contrib postgresql-client 5 | ``` 6 | #### 服务 7 | - 启动数据库 8 | ```bash 9 | sudo /etc/init.d/postgresql start 10 | ``` 11 | - 查看数据库状态 12 | ```bash 13 | sudo /etc/init.d/postgresql status 14 | ``` 15 | - 关闭数据库 16 | ```bash 17 | sudo /etc/init.d/postgresql stop 18 | ``` 19 | - 重启数据库 20 | ```bash 21 | sudo /etc/init.d/postgresql restart 22 | ``` 23 | #### 创建用户 24 | - 创建数据库用户 root,并指定其为超级用户: 25 | ```bash 26 | sudo -u postgres createuser --superuser root 27 | ``` 28 | - 登录数据库控制台,设置 root 用户的密码,退出控制台 29 | ```bash 30 | sudo -u postgres psql 31 | \password root # 设置密码 32 | \q 33 | ``` 34 | #### 创建数据库 35 | - 创建 test 数据库,指定用户为 root: 36 | ```bash 37 | sudo -u postgres createdb -O root test 38 | ``` 39 | - 登录数据库控制台, 修改数据库 test 为 star: 40 | ```bash 41 | alter database test rename to star 42 | ``` 43 | - 也可以删除不需要的数据库,如: 44 | ```bash 45 | sudo -u postgres dropdb test 46 | ``` 47 | #### 登录数据库 48 | - 使用 psql 命令: 49 | - -U 指定用户 50 | - -d 指定数据库 51 | - -h 指定服务器 52 | - -p 指定端口。 53 | ```bash 54 | psql -U root -d test -h 127.0.0.1 -p 5432 55 | ``` 56 | - 实际的使用中,我们创建用户名和数据库跟系统名称一样(系统认证),然后通过执行下面命令即可登录我们指定的数据库。 57 | 58 | ```bash 59 | psql 60 | ``` 61 | 62 | 63 | - 也可以通过环境变量指定默认的数据库(test): 64 | ```bash 65 | export PGDATABASE=test 66 | ``` 67 | #### 常用控制台命令 68 | ```bash 69 | \h:查看SQL命令的解释,比如\h select。 70 | \?:查看psql命令列表。 71 | \l:列出所有数据库。 72 | \c [database_name]:连接其他数据库。 73 | \d:列出当前数据库的所有表格。 74 | \d [table_name]:列出某一张表格的结构。 75 | \du:列出所有用户。 76 | \e:打开文本编辑器。 77 | \conninfo:列出当前数据库和连接的信息。 78 | ``` 79 | 参考文献 [ubuntu 下 PostgreSQL 使用小记](http://wenzhixin.net.cn/2014/01/12/hello_postgresql) 80 | 81 | 参考文献 [How To Use PostgreSQL with Your Ruby on Rails Application on Ubuntu 14.04](https://www.digitalocean.com/community/tutorials/how-to-use-postgresql-with-your-ruby-on-rails-application-on-ubuntu-14-04) 82 | ![](http://ericsaupe.com/wp-content/uploads/2014/07/install-postgresql-934-on-mac.png) 83 | [comment]: <tags> (ubuntu,postgresql) 84 | [comment]: <description> (在ubuntu上安装postgresql) 85 | [comment]: <title> (ubuntu安装postgresql) 86 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu安装vpn.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1517415003170&di=5df719d3755d8b03220866409599a5e6&imgtype=0&src=http%3A%2F%2Fimg001.21cnimg.com%2Fphotos%2Falbum%2F20170123%2Fm600%2FBD39878FA67A1C90400FE84257E09EA6.png)) 2 | ubuntu安装vpn 3 | 4 | # 安装说明 5 | 6 | 用法很简单: 总结成一句话就是:除了类型要选对以外,其他的一路回车就好了23333 7 | 8 | # 特性 9 | 10 | * 服务端要求:Ubuntu或者CentOS-6/7或者Debian 11 | * 客户端: 12 | 
* iOS/OSX=>ikev1,ikev2 13 | * Andriod=>ikev1 14 | * WindowsPhone=>ikev2 15 | * 其他Windows平台=>ikev2 16 | * 可使用自己的私钥和根证书,也可自动生成 17 | * 证书可绑定域名或ip 18 | * 要是图方便可一路回车 19 | 20 | ### 下载脚本 21 | 22 | 23 | 24 | wget --no-check-certificate https://raw.githubusercontent.com/quericy/one-key-ikev2-vpn/master/one-key-ikev2.sh 25 | 26 | 27 | ### 运行脚本 28 | 29 | 30 | 31 | chmod +x one-key-ikev2.sh 32 | bash one-key-ikev2.sh 33 | 34 | 35 | ### 自动配置 36 | 37 | 等待自动配置部分内容后,选择vps类型(OpenVZ还是Xen、KVM),选错将无法成功连接,请务必核实服务器的类型。输入服务器ip或者绑定的域名(连接vpn时服务器地址将需要与此保持一致,如果是导入泛域名证书这里需要写*.域名的形式); 38 | 39 | ### 证书 40 | 41 | 选择使用使用证书颁发机构签发的SSL证书还是生成自签名证书 42 | 43 | * 如果选择no,使用自签名证书(客户端如果使用IkeV2方式连接,将需要导入生成的证书并信任)则需要填写证书的相关信息(C,O,CN),为空将使用默认值(default value),确认无误后按任意键继续,后续安装过程中会出现输入两次pkcs12证书的密码的提示(可以设置为空) 44 | * 如果选择yes,使用SSL证书(如果证书是被信任的,后续步骤客户端将无需导入证书)请在继续下一步之前,将以下文件按提示命名并放在脚本相同的目录下(SSL证书详细配置和自动续期方案可见https://quericy.me/blog/860/ ): 45 | 46 | * ca.cert.pem 证书颁发机构的CA,比如Let‘s Encrypt的证书,或者其他链证书; 47 | 48 | * server.cert.pem 签发的域名证书; 49 | * server.pem 签发域名证书时用的私钥; 50 | 51 | ### 是否使用SNAT规则(可选) 52 | 53 | 默认为不使用.使用前请确保服务器具有不变的静态公网ip,可提升防火墙对数据包的处理速度.如果服务器网络设置了NAT(如AWS的弹性ip机制),则填写网卡连接接口的ip地址(参见KinonC提供的方案:#36). 54 | 55 | ### 防火墙配置 56 | 57 | 默认配置iptables(如果使用的是firewall(如CentOS7)请选择yes自动配置firewall,将无视SNAT并跳过后续的补充网卡接口步骤).补充网卡接口信息,为空则使用默认值(Xen、KVM默认使用eth0,OpenVZ默认使用venet0).如果服务器使用其他公网接口需要在此指定接口名称,填写错误VPN连接后将无法访问外网) 58 | 59 | ### 看到install Complete字样即表示安装完成 60 | 61 | 默认用户名密码将以黄字显示,可根据提示自行修改配置文件中的用户名密码,多用户则在配置文件中按格式一行一个(多用户时用户名不能使用%any),保存并重启服务生效。 62 | 63 | 将提示信息中的证书文件ca.cert.pem拷贝到客户端,修改后缀名为.cer后导入。ios设备使用Ikev1无需导入证书,而是需要在连接时输入共享密钥,共享密钥即是提示信息中的黄字PSK. 64 | 65 | # 客户端配置说明 66 | 67 | * 连接的服务器地址和证书保持一致,即取决于签发证书ca.cert.pem时使用的是ip还是域名; 68 | 69 | * Android/iOS/OSX 可使用ikeV1,认证方式为用户名+密码+预共享密钥(PSK); 70 | 71 | * iOS/OSX/Windows7+/WindowsPhone8.1+/Linux 均可使用IkeV2,认证方式为用户名+密码。使用SSL证书则无需导入证书;使用自签名证书则需要先导入证书才能连接,可将ca.cert.pem更改后缀名作为邮件附件发送给客户端,手机端也可通过浏览器导入,其中: 72 | 73 | * iOS/OSX 的远程ID和服务器地址保持一致,用户鉴定选择”用户名”.如果通过浏览器导入,将证书放在可访问的远程外链上,并在系统浏览器(Safari)中访问外链地址; 74 | 75 | * Windows PC 系统导入证书需要导入到“本地计算机”的”受信任的根证书颁发机构”,以”当前用户”的导入方式是无效的.推荐运行mmc添加本地计算机的证书管理单元来操作; 76 | * WindowsPhone8.1 登录时的用户名需要带上域信息,即wp”关于”页面的设备名称\用户名,也可以使用%any %any : EAP “密码”进行任意用户名登录,但指定了就不能添加其他用户名了. 77 | * WindowsPhone10 的vpn还存在bug(截至10586.164),ikeV2方式可连接但系统流量不会走vpn,只能等微软解决. (截至14393.5 ,此bug已经得到修复,现在WP10已经可以正常使用IkeV2.) 78 | * Windows10 也存在此bug,部分Win10系统连接后ip不变,没有自动添加路由表,使用以下方法可解决(本方法由 bigbigfish 童鞋提供): 79 | * 手动关闭vpn的split tunneling功能(在远程网络上使用默认网关); 80 | * 也可使用powershell修改,进入CMD窗口,运行如下命令: 81 | 82 | 83 | 84 | powershell #进入ps控制台 85 | get-vpnconnection #检查vpn连接的设置(包括vpn连接的名称) 86 | set-vpnconnection "vpn连接名称" -splittunneling $false #关闭split tunneling 87 | get-vpnconnection #检查修改结果 88 | exit #退出ps控制台 89 | 90 | 91 | # 卸载方式 92 | 93 | * 进入脚本所在目录的strongswan文件夹执行`make uninstall` 94 | 95 | * 删除脚本所在目录的相关文件(one-key-ikev2.sh,strongswan.tar.gz,strongswan文件夹,my_key文件夹). 96 | 97 | * 卸载后记得检查iptables配置. 
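针对上一条“卸载后记得检查iptables配置”,补充一个最小的检查示意(原文未给出具体命令;以下命令与参数仅为通用假设,实际残留的规则取决于安装脚本当时写入的内容):

    # 查看 filter 表,确认没有残留与 VPN 相关的放行/转发规则
    sudo iptables -L -n -v --line-numbers
    # 查看 nat 表,确认没有残留 MASQUERADE/SNAT 规则
    sudo iptables -t nat -S
    # 确认无误后,可按“链名 + 编号”删除残留规则(编号 1 仅为示例)
    sudo iptables -t nat -D POSTROUTING 1

清理前可先用 `iptables-save > rules.bak` 备份当前规则,出现问题时便于恢复。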
98 | 99 | 参考资料 100 | 101 | [CentOS/Ubuntu一键安装IPSEC/IKEV2 VPN服务器](https://quericy.me/blog/699/) 102 | 103 | 104 | [comment]: <tags> (vpn,ubuntu) 105 | [comment]: <description> (安装vpn) 106 | [comment]: <title> (ubuntu安装vpn) 107 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/ubuntu配置网络.md: -------------------------------------------------------------------------------- 1 | 参考资料 2 | 3 | [为VMware虚拟机内安装的Ubuntu 16.04设置静态IP地址](http://www.linuxdiyf.com/linux/20707.html) 4 | 5 | [Ubuntu通过修改配置文件进行网络配置 ](http://blog.chinaunix.net/uid-22117508-id-157758.html) 6 | 7 | [ubuntu网络重启后或主机重启后,/etc/resolv.conf恢复原样的解决办法](http://blog.csdn.net/bytxl/article/details/44201347) 8 | [comment]: <tags> (ubuntu) 9 | [comment]: <description> (ubuntu网络配置) 10 | [comment]: <title> (ubuntu配置网络) 11 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/uwsgi相关问题.md: -------------------------------------------------------------------------------- 1 | uwsgi下的web页面发送包含较大请求体的post请求时出现ERR_CONTENT_LENGTH_MISMATCH 2 | nginx中的错误如下: 3 | ``` 4 | 2018/02/22 01:02:46 [error] 31556#31556: *7657 readv() failed (104: Connection reset by peer) while reading upstream, client: 112.231.57.20, server: localhost, request: "POST /modify HTTP/1.1", upstream: "uwsgi://127.0.0.1:3031", host: "mashichao.com", referrer: "http://mashichao.com/?path=/me?code=QQLB8Z" 5 | ``` 6 | uwsgi中没有发现错误 7 | 其实是因为请求过大,导致request头没有被uwsgi读完导致的,调整uwsgi的buffer-size参数就可以,默认是4096,调整到65535。 8 | 9 | 参考资料[Nginx uwsgi (104: Connection reset by peer) while reading response header from upstream](https://stackoverflow.com/questions/22697584/nginx-uwsgi-104-connection-reset-by-peer-while-reading-response-header-from-u) 10 | [comment]: <tags> (uwsgi) 11 | [comment]: <description> (uwsgi web程序ERR_CONTENT_LENGTH_MISMATCH) 12 | [comment]: <title> (uwsgi相关问题) 13 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/werkzeug.local.LocalPorxy源码引出的关于python私有属性的知识点.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://werkzeug.pocoo.org/static/werkzeug.png)) 2 | 3 | 源码中有如下代码 4 | ``` 5 | class LocalProxy(object): 6 | 7 | #__slots__ = ('__local', '__dict__', '__name__') 8 | 9 | def __init__(self, local, name=None): 10 | object.__setattr__(self, '_LocalProxy__local', local) 11 | print(self.__local) 12 | object.__setattr__(self, '__name__', name) 13 | ` 14 | ``` 15 | print是我后加去的,通过object.\_\_setattr\_\_(self, '_LocalProxy__local', local),可以设置为LocalProxy设置__local私有属性,私有属性通过self.__local在类内部是可以访问的,但是如果通过外部实例的方式,则无法访问,比如 16 | ``` 17 | In [1]: class A: 18 | ...: def __init__(self): 19 | ...: self.__a = 0 20 | ...: 21 | 22 | In [2]: A().__a 23 | --------------------------------------------------------------------------- 24 | AttributeError Traceback (most recent call last) 25 | <ipython-input-2-ae9317e12fa9> in <module>() 26 | ----> 1 A().__a 27 | 28 | AttributeError: A instance has no attribute '__a' 29 | 30 | In [3]: A()._A__a 31 | Out[3]: 0 32 | 33 | In [4]: dir(A()) 34 | Out[4]: ['_A__a', '__doc__', '__init__', '__module__'] 35 | 36 | In [5]: class B: 37 | ...: def __init__(self): 38 | ...: self._B__b = 0 39 | ...: print self.__b 40 | ...: 41 | 42 | In [6]: B() 43 | 0 44 | Out[6]: <__main__.B instance at 0x028C56C0> 45 | 46 | # 由上可知,如果通过self._LocalProxy__local,也可以实现对私有属性的访问。 47 | # 至于为什么LocalProxy要使用object.__setattr__(self, '_LocalProxy__local', 
local)实现属性赋值而不是使用self.__local=local呢。 48 | # 因为,通过self.__local的方式会触发LocalProxy 重写的方法__setattr__,这显然是不可以的。 49 | ``` 50 | [comment]: <tags> (werkzeug,python) 51 | [comment]: <description> (werkzeug源码阅读有感) 52 | [comment]: <title> (werkzeug.local.LocalPorxy源码引出的关于python私有属性的知识点) 53 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/windows 安装scrapy.md: -------------------------------------------------------------------------------- 1 | 去下载 2 | [comment]: <tags> (windows,scrapy,python) 3 | [comment]: <description> (windows下安装scrapy) 4 | [comment]: <title> (windows 安装scrapy) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/xpath中text()和string()的本质区别.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://www.w3.org/Consortium/Offices/Presentations/XSLT_XPATH/images/xpath.png)) 2 | ## 使用要点 3 | 4 | XML例子: 5 | 6 | 7 | 8 | <book> 9 | <author>Tom <em>John</em> cat</author> 10 | <pricing> 11 | <price>20</price> 12 | <discount>0.8</discount> 13 | </pricing> 14 | </book> 15 | 16 | 17 | ### text() 18 | 19 | 经常在XPath表达式的最后看到text(),它仅仅返回所指元素的文本内容。 20 | 21 | 22 | 23 | text = $x('book/author/text()') 24 | 25 | 26 | 返回的结果是Tom cat,其中的John不属于author直接的节点内容。 27 | 28 | ### string() 29 | 30 | string()函数会得到所指元素的所有节点文本内容,这些文本讲会被拼接成一个字符串。 31 | 32 | 33 | 34 | text = $x('book/author/string()') 35 | 36 | 37 | 返回的内容是”Tom John cat” 38 | 39 | ### data() 40 | 41 | 大多数时候,data()函数和string()函数通用,而且不建议经常使用data()函数,有数据表明,该函数会影响XPath的性能。 42 | 43 | 44 | 45 | text = $x('book/pricing/string()') 46 | 47 | 48 | 返回的是200.8 49 | 50 | 51 | 52 | text = $x('book/pricing/data()') 53 | 54 | 55 | 这样将返回分开的20和0.8。 56 | 57 | ## 总结 58 | 59 | text()不是函数,XML结构的细微变化,可能会使得结果与预期不符,应该尽量少用,data()作为特殊用途的函数,可能会出现性能问题,如无特殊需要尽量不用,string()函数可以满足大部分的需求。 60 | 61 | text()是一个node test,而string()是一个函数,data()是一个函数且可以保留数据类型。此外,还有点号(.)表示当前节点。 62 | 63 | 64 | [comment]: <tags> (xpath) 65 | [comment]: <description> (text和string的本质区别) 66 | [comment]: <title> (xpath中text()和string()的本质区别) 67 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/xpath相关.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://dreamix.eu/blog/wp-content/uploads/2015/03/xpath_logo1-1508x706_c.jpg)) 2 | ### 选择包含div的a标签 3 | 4 | 5 | 6 | '//div[@class="col"]/article[@class="product-col"]/a/div/parent::a' 7 | 或 8 | '//div[@class="col"]/article[@class="product-col"]/a/div/../../a' 9 | 或 10 | '//div[@class="col"]/article[@class="product-col"]/a[div]' 11 | 12 | 13 | ### 选择包含href属性的标签 14 | 15 | 16 | 17 | '//div[@class="col"]/article[@class="product-col"]/a[@href]' 18 | 19 | 20 | 21 | [comment]: <tags> (xpath) 22 | [comment]: <description> (比较难的xpath) 23 | [comment]: <title> (xpath相关) 24 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/xss攻击.md: -------------------------------------------------------------------------------- 1 | 参考资料: 2 | 3 | [WiFi流量劫持—— JS脚本缓存投毒](https://www.cnblogs.com/index-html/p/wifi_hijack_3.html) 4 | 5 | [前端安全之XSS攻击](http://www.cnblogs.com/lovesong/p/5199623.html) 6 | 7 | [XSS攻击及防御](http://blog.csdn.net/ghsau/article/details/17027893) 8 | [comment]: <tags> (xss) 9 | [comment]: <description> (xss攻击详解) 10 | [comment]: <title> (xss攻击) 11 | [comment]: <author> (夏洛之枫) 
-------------------------------------------------------------------------------- /doc/一个文件被两个进程同时写入的情况.md: -------------------------------------------------------------------------------- 1 | # 测试1 2 | ## 两个进程时同时打开一个文件 3 | # 进程1 4 | f = open("a.txt", "w") 5 | 6 | # 进程2 7 | f = open("a.txt", "w") 8 | 9 | ## 1号进程写入文件 10 | #进程1 11 | f.write("aaaaa") 12 | # 此时文件中并没有数据 13 | ## 2号进程写入文件 14 | #进程2 15 | f.write("bbbbb") 16 | # 此时文件中并没有数据 17 | ## 1号进程flush 18 | # 进程1 19 | f.flush() 20 | # 此时文件中已有数据 21 | ## 2号进程flush 22 | # 进程2 23 | f.flush() 24 | # 此时文件中原来1号进程写入的数据被覆盖 25 | ### 注: close()相关于先flush再关闭 26 | # 测试2 27 | ## 1号进程先打开文件 28 | # 进程1 29 | f.open('a.txt', "w") 30 | ## 1号进程写入文件 31 | # f.write("aaaaa") 32 | # 此时文件中并没有数据 33 | ## 2号进程打开文件 34 | # 进程2 35 | f.open("a.txt", "w") 36 | ## 2号进程写入文件 37 | # 进程2 38 | f.write("bbbbb") 39 | # 此时文件中并没有数据 40 | ## 1号进程flush 41 | # 进程1 42 | f.flush() 43 | # 此时文件中已有数据 44 | ## 2号进程flush 45 | # 进程2 46 | f.flush() 47 | # 此时文件中原来1号进程写入的数据被覆盖 48 | # 测试3 49 | ## 1号进程先打开文件 50 | # 进程1 51 | f.open('a.txt', "w") 52 | ## 1号进程写入文件 53 | # f.write("aaaaa") 54 | # 此时文件中并没有数据 55 | ## 1号进程flush 56 | # 进程1 57 | f.flush() 58 | # 此时文件中已有数据 59 | ## 2号进程打开文件 60 | # 进程2 61 | f.open("a.txt", "w") 62 | # 此时文件中并没有数据 63 | ## 2号进程写入文件 64 | # 进程2 65 | f.write("bbbbb") 66 | # 此时文件中并没有数据 67 | ## 2号进程flush 68 | # 进程2 69 | f.flush() 70 | # 此时文件中原来1号进程写入的数据被覆盖 71 | ## 1号进程写入文件 72 | # f.write("aaaaa") 73 | # 此时文件中的数据为bbbbb 74 | ## 1号进程flush 75 | # 进程1 76 | f.flush() 77 | # 此时文件中的数据为bbbbbaaaa 78 | # 测试4 79 | ## 1号进程先打开文件 80 | # 进程1 81 | f.open('a.txt', "w") 82 | ## 1号进程写入文件 83 | # f.write("aaaaa") 84 | # 此时文件中并没有数据 85 | ## 1号进程flush 86 | # 进程1 87 | f.flush() 88 | # 此时文件中已有数据 89 | ## 2号进程打开文件 90 | # 进程2 91 | f.open("a.txt", "w") 92 | # 此时文件中并没有数据 93 | ## 1号进程写入文件 94 | # f.write("aaaaa") 95 | # 此时文件中的数据为bbbbb 96 | ## 1号进程flush 97 | # 进程1 98 | f.flush() 99 | # 此时文件中的数据为\00\00\00\00\00aaaaa 100 | # 测试5 101 | ## 1号进程先打开文件 102 | # 进程1 103 | f.open('a.txt', "w") 104 | ## 1号进程写入文件 105 | # f.write("aaaaa") 106 | # 此时文件中并没有数据 107 | ## 1号进程flush 108 | # 进程1 109 | f.flush() 110 | # 此时文件中已有数据 111 | ## 2号进程以追加的方式打开文件 112 | # 进程2 113 | f.open("a.txt", "a") 114 | # 此时文件中已有数据aaaaa 115 | ## 2号进程写入文件 116 | # 进程2 117 | f.write("bbbbb") 118 | # 此时文件中并没有数据 119 | ## 2号进程flush 120 | # 进程2 121 | f.flush() 122 | # 此时文件中的数据为aaaabbbbb 123 | 124 | # 结论 125 | - 以w的方式打开文件,会清除文件中的全部内容 126 | - 没有flush的数据不会被存储到文件中 127 | - 字节按顺序写入,游标会一直向前移动,不会关注之前的数据。如测试4,以追加的方式写入数据,写w方式相比,仅仅相当于把初始游标放到的最后,而不是最初的位置 128 | - 如果有两个进程或者线程操作同一个文件,最好还是加上锁,不然可能会造成数据错误 129 | [comment]: <tags> (进程,文件) 130 | [comment]: <description> (一个文件被两个进程同时写入) 131 | [comment]: <title> (一个文件被两个进程同时写入的情况) 132 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/云计算是什么.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1514460813085&di=8152bb4426ffcbfaef15cc4b88947345&imgtype=0&src=http%3A%2F%2Fimgsrc.baidu.com%2Fimgad%2Fpic%2Fitem%2F0b7b02087bf40ad16d84f8d95c2c11dfa9eccea3.jpg)) 2 | 参考资料: [云计算是什么](http://www.chinacloud.cn/show.aspx?id=15917&cid=17) 3 | 4 | 5 | [comment]: <tags> (云计算) 6 | [comment]: <description> (云计算科普) 7 | [comment]: <title> (云计算是什么) 8 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/关于__getattribute__.md: -------------------------------------------------------------------------------- 1 | [comment]: <> 
(![](http://www.mashichao.com/static/img/pretty/0.jpg)) 2 | 在阅读流畅的python这本书时,发现p509页在介绍__getattribute__时说到,寻找属性是特殊属性或特殊方法时除外。 3 | 4 | 5 | 6 | In [6]: def q(name): 7 | ...: def q_g(instance): 8 | ...: print(4444444444444) 9 | ...: return instance.__dict__[name] 10 | ...: def q_s(instance, value): 11 | ...: if value > 0: 12 | ...: instance.__dict__[name] = value 13 | ...: else: 14 | ...: raise ValueError("value must be > 0") 15 | ...: return property(q_g, q_s) 16 | ...: 17 | 18 | 19 | 20 | 21 | 22 | 23 | In [4]: class A: 24 | ...: w = q("w") 25 | ...: def __init__(self, description, w): 26 | ...: self.d = description 27 | ...: self.w = w 28 | ...: def __getattribute__(self, name): 29 | ...: print(3333333333) 30 | ...: return super(A, self).__getattribute__(name) 31 | ...: 32 | In [8]: a = A(33, 44) 33 | 3333333333 34 | 35 | In [9]: a.w 36 | 3333333333 37 | 4444444444444 38 | 3333333333 39 | Out[9]: 44 40 | 41 | 42 | 通过以上的测试得出结果,当属性是描述符或__dict__,双下属性或方法时,同样会调用__getattribute__,因此可以猜测__getattribute__底层实现调用顺序为描述符, 43 | self.\_\_dict\_\_.\_\_getitem\_\_, 类属性, 44 | \_\_getattr\_\_方法。对于添加属性,__setattr__底层的调用为描述符,self.\_\_dict\_\_.\_\_setitem\_\_,如果描述符没有提供\_\_set\_\_方法,那么描述符会被覆盖。 45 | 46 | 47 | [comment]: <tags> (__getattribute__,python) 48 | [comment]: <description> (流畅的python中的错误) 49 | [comment]: <title> (关于__getattribute__) 50 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/分布式系统选举算法bully.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://www.lanceyan.com/wp-content/uploads/2013/12/mongorep3.png)) 2 | [分布式系统理论基础 - 选举、多数派和租约](http://www.cnblogs.com/bangerlee/p/5767845.html) 3 | 4 | ## raft算法举例说明 5 | 6 | - 现有小红小明小刚3个人,要选出一个leader 7 | - 初次启动,没有leader,所以发起选举 8 | - term = 1,小红,小明,小刚作为follower等待leader发来的heartbeat 9 | - 由于所有follower的的定时器时长是不同的,必定有部分follower依次变成candidate,为什么说依次,因为假设小红先变成了candidate,在向小明和小刚发送vote for me 时,网络比较慢,与此同时小刚的定时器也到时间了变成了candidate,所以就存在2个candidate。 10 | - 按假设进行,此时,小红和小刚term=2,小明保持term = 1,小刚和小红分别收到了对方的vote for me,均不予理会,小明先收到了小红的vote for me,小明term同步到了2,并投票给小红,小红现在有两票(自己的+小明的),小刚等待定时器超时后(假设这期间没有收到小红的heartbeat),重新发起了选举term变成3,而这个时候小红收到了小刚的vote for me 发现自己的term=2小于小刚的term=3,则恢复成了跟随者的状态,[如果一个候选人或者领导者发现自己的任期号过期了,那么他会立即恢复成跟随者状态],并向小刚投票,则小刚成为了新的leader 11 | 12 | 13 | 参考[寻找一种易于理解的一致性算法(扩展版)](https://github.com/maemual/raft-zh_cn/blob/master/raft-zh_cn.md) 14 | 15 | [comment]: <tags> (分布式,bully,算法) 16 | [comment]: <description> (分布式系统选举算法) 17 | [comment]: <title> (分布式系统选举算法bully) 18 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/初探Node.js的异步实现.md: -------------------------------------------------------------------------------- 1 | [深入浅出Node.js(五):初探Node.js的异步I/O实现 2 | ](http://www.infoq.com/cn/articles/nodejs-asynchronous-io/) 3 | [comment]: <tags> (node,js,异步) 4 | [comment]: <description> (node.js异步io的实现) 5 | [comment]: <title> (初探Node.js的异步实现) 6 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/前中后缀表达式.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://ss0.bdstatic.com/70cFvHSh_Q1YnxGkpoWK1HF6hhy/it/u=396961987,2170478939&fm=27&gp=0.jpg)) 2 | 参考资料: 3 | [前缀、中缀、后缀表达式](http://blog.csdn.net/antineutrino/article/details/6763722/) 4 | 5 | 6 | [comment]: <tags> (前缀,中缀,后缀) 7 | [comment]: <description> (前中后缀表达式是什么,如何转换) 8 | [comment]: <title> (前中后缀表达式) 9 | [comment]: <author> 
(夏洛之枫) -------------------------------------------------------------------------------- /doc/在mac上解压rar压缩包.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1506771469861&di=9d8060746c999dea15db06bb3d93e453&imgtype=0&src=http%3A%2F%2Fwww.xunzai.com%2Fdata%2Fupload%2F201506%2F201506%2F14343615147677.png)) 2 | * 去 [WinRAR and RAR archiver downloads](http://www.rarlab.com/download.htm)下载 rarosx 3 | * 在Mac OS X系统中默认不支持 RAR 文件的解压缩。下面演示如何在Mac OS X系统中使用 rar 命令行操作。 4 | * 首先从rarlab 网站下载 rar/unrar 工具; 5 | * 解压缩下载的 tar.gz 压缩包`tar xvf rarosx-5.2.0.tar.gz`,在下载目录Downloads下自动创建一个rar的目录,其中有rar / unrar 文件; 6 | * 进入终端(命令窗口 `control+空格`) 7 | * 进入刚刚解压缩的rar 目录,使用 `cd Downloads/rar` 进入; 8 | * 使用如下命令分别安装 `unrar` 和 `rar` 命令; 9 | * 安装unrar命令:`sudo install -c -o $USER unrar /bin`;安装rar命令:`sudo install -c -o $USER rar /bin` 10 | * 测试 `unrar` 和 `rar` 命令; 11 | * 解压命令:`unrar x compressed-package.rar` 12 | * 解压缩 `xxx.rar` 压缩包,如果文件名有空格,则需要使用单引号包起来 如:`'xxx xx.rar'` 13 | 14 | 15 | [comment]: <tags> (mac,rar) 16 | [comment]: <description> (在mac上解压rar压缩包) 17 | [comment]: <title> (在mac上解压rar压缩包) 18 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/多版本python pyenv.md: -------------------------------------------------------------------------------- 1 | [ubuntu下安装多版本Python](http://www.cnblogs.com/ningvsban/p/4384995.html) 2 | [comment]: <tags> (python,pyenv) 3 | [comment]: <description> (python多版本管理) 4 | [comment]: <title> (多版本python pyenv) 5 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/如何在Docker容器内外互相拷贝数据.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://www.fimvisual.com/wp-content/uploads/2016/10/1003956-20160929094610156-2054520507.png)) 2 | #### 从容器内拷贝文件到主机上 3 | 4 | 5 | 6 | [root@oegw1 soft]# docker ps 7 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 8 | 8d418a7b6021 postgres "/docker-entrypoint.
7 hours ago Up 7 hours test1 9 | [root@oegw1 soft]# docker exec -t -i 8d418a7b6021 /bin/bash 10 | root@oegw1:/var/lib/postgresql# pwd 11 | /var/lib/postgresql 12 | root@oegw1:/var/lib/postgresql# ls 13 | data 14 | root@oegw1:/var/lib/postgresql# exit 15 | exit 16 | [root@oegw1 soft]# docker cp 8d418a7b6021:/var/lib/postgresql/data /opt/soft/ 17 | 18 | 19 | **完成拷贝** 20 | 21 | #### 从主机上拷贝文件到容器内 22 | 23 | 24 | 25 | docker run -v /opt/soft:/mnt 8d418a7b6021 26 | 27 | 28 | #### 用-v挂载主机数据卷到容器内,通过-v参数,冒号前为宿主机目录,必须为绝对路径,冒号后为镜像内挂载的路径。 29 | 30 | 31 | 32 | [root@oegw1 soft]# docker run -it -v /opt/soft:/mnt postgres /bin/bash 33 | 34 | 35 | **这种方式的缺点是只能在容器刚刚启动的情况下进行挂载** 36 | 37 | 38 | [comment]: <tags> (docker,copy) 39 | [comment]: <description> (从docker中拷贝数据) 40 | [comment]: <title> (如何在Docker容器内外互相拷贝数据) 41 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/子进程与信号.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](http://s4.51cto.com/wyfs02/M00/7F/D8/wKiom1cv8WOhTWUKAACGK8bZoZ4009.png)) 2 | ### 关于父进程和子进程的关系 3 | 4 | ctrl +c发出的信号,发送到父进程及其所有子进和程,kill -2 则只发送信号到特定进程 5 | 6 | 7 | [comment]: <tags> (进程,信号) 8 | [comment]: <description> (子进程与信号) 9 | [comment]: <title> (子进程与信号) 10 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/常用ubuntu指令.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://gss3.bdstatic.com/-Po3dSag_xI4khGkpoWK1HF6hhy/baike/c0%3Dbaike116%2C5%2C5%2C116%2C38/sign=e2eeaaa6b4fd5266b3263446ca71fc4e/024f78f0f736afc31a149928b119ebc4b7451266.jpg) 2 | ) 3 | ## ubuntu 安装 imagemagick 4 | ### 树莓派 安装 imagemagick 5 | - 更换源 6 | ``` 7 | sudo cp /etc/apt/sources.list /etc/apt/sources.list.bak 8 | sudo nano /etc/apt/sources.list 9 | # 注释掉之前的,添加 10 | deb http://mirrors.aliyun.com/raspbian/raspbian/ wheezy main non-free contrib 11 | deb-src http://mirrors.aliyun.com/raspbian/raspbian/ wheezy main non-free contrib 12 | # CTRL+X Y enter保存 13 | sudo apt-get update && apt-get upgrade -y 14 | ``` 15 | - 安装 16 | ``` 17 | sudo apt-get install imagemagick 18 | ``` 19 | - 使用convert进行图片转换 20 | 21 | ## 获取ip 22 | ``` 23 | ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/' 24 | ``` 25 | ## 挂nas 26 | ``` 27 | sudo apt-get install nfs-common 28 | sudo mount -t nfs -o rw 192.168.200.89:/volume1/LINUXNFS /mnt/nas 29 | ``` 30 | ## 开启vpn 31 | ``` 32 | sudo apt-get install pptp-linux 33 | sudo pptpsetup --create vpn1 --server 58.96.182.136 --username derek --password luoding123 --encrypt --start 34 | sudo route add default dev ppp0 35 | ``` 36 | ## 查看dns解析 37 | ``` 38 | nslookup 39 | ``` 40 | 41 | ## 查看端口占用情况 42 | ``` 43 | netstat -tln|grep 9999 44 | lsof -i:9999 45 | ``` 46 | ## 查看服务状态 47 | ``` 48 | sudo systemctl status 49 | sudo systemctl status postgresql 50 | # or 51 | sudo service postgresql status 52 | ``` 53 | ## 搜索目录包含指定字符的行 54 | ``` 55 | find . -name "*.py"|xargs grep Logger 56 | ``` 57 | ## 高级搜索 58 | ``` 59 | # 先搜索当前目录下的2级目标,将结果排序后,使用parallel并行10个进程执行find搜索 60 | find . 
-type d -maxdepth 2 | sort | parallel -t -P +10 "find {} -type f -regex '[^_]*\.jpg' > /Users/derek/nfs/{//}_{/}.list" 61 | 62 | ``` 63 | 64 | ## 查看文件行数 65 | ``` 66 | wc -l 67 | # 多文件 68 | wc -ml 69 | ``` 70 | 71 | ## top 查看linux性能 72 | 参考资料:[Linux Top 命令解析 比较详细](http://www.jb51.net/LINUXjishu/34604.html) 73 | 74 | ## systemctl使用 75 | ``` 76 | systemctl restart postgresql 77 | # 查看所有服务 78 | systemctl list-unit-files 79 | ``` 80 | 81 | ## tar 82 | ``` 83 | -c: 建立压缩档案 84 | -x:解压 85 | -t:查看内容 86 | -r:向压缩归档文件末尾追加文件 87 | -u:更新原压缩包中的文件 88 | ``` 89 | 这五个是独立的命令,压缩解压都要用到其中一个,可以和别的命令连用但只能用其中一个。下面的参数是根据需要在压缩或解压档案时可选的。 90 | ``` 91 | -z:有gzip属性的 92 | -j:有bz2属性的 93 | -Z:有compress属性的 94 | -v:显示所有过程 95 | -O:将文件解开到标准输出 96 | ``` 97 | 下面的参数-f是必须的 98 | ``` 99 | -f: 使用档案名字,切记,这个参数是最后一个参数,后面只能接档案名。 100 | 101 | # 这条命令是将所有.jpg的文件打成一个名为all.tar的包。-c是表示产生新的包,-f指定包的文件名。 102 | tar -cf all.tar *.jpg 103 | # 这条命令是将所有.gif的文件增加到all.tar的包里面去。-r是表示增加文件的意思。 104 | tar -rf all.tar *.gif 105 | # 这条命令是更新原来tar包all.tar中logo.gif文件,-u是表示更新文件的意思。 106 | tar -uf all.tar logo.gif 107 | # 这条命令是列出all.tar包中所有文件,-t是列出文件的意思 108 | tar -tf all.tar 109 | # 这条命令是解出all.tar包中所有文件,-t是解开的意思 110 | tar -xf all.tar 111 | ``` 112 | ## 压缩 113 | ``` 114 | tar -cvf jpg.tar *.jpg //将目录里所有jpg文件打包成tar.jpg 115 | 116 | tar -czf jpg.tar.gz *.jpg //将目录里所有jpg文件打包成jpg.tar后,并且将其用gzip压缩,生成一个gzip压缩过的包,命名为jpg.tar.gz 117 | 118 | tar -cjf jpg.tar.bz2 *.jpg //将目录里所有jpg文件打包成jpg.tar后,并且将其用bzip2压缩,生成一个bzip2压缩过的包,命名为jpg.tar.bz2 119 | 120 | tar -cZf jpg.tar.Z *.jpg //将目录里所有jpg文件打包成jpg.tar后,并且将其用compress压缩,生成一个umcompress压缩过的包,命名为jpg.tar.Z 121 | 122 | rar a jpg.rar *.jpg //rar格式的压缩,需要先下载rar for linux 123 | 124 | zip jpg.zip *.jpg //zip格式的压缩,需要先下载zip for linux 125 | ``` 126 | ## 解压 127 | ``` 128 | tar -xvf file.tar //解压 tar包 129 | 130 | tar -xzvf file.tar.gz //解压tar.gz 131 | 132 | tar -xjvf file.tar.bz2 //解压 tar.bz2 133 | 134 | tar -xZvf file.tar.Z //解压tar.Z 135 | 136 | unrar e file.rar //解压rar 137 | 138 | unzip file.zip //解压zip 139 | ``` 140 | ## 总结 141 | ``` 142 | 1、*.tar 用 tar -xvf 解压 143 | 144 | 2、*.gz 用 gzip -d或者gunzip 解压 145 | 146 | 3、*.tar.gz和*.tgz 用 tar -xzf 解压 147 | 148 | 4、*.bz2 用 bzip2 -d或者用bunzip2 解压 149 | 150 | 5、*.tar.bz2用tar -xjf 解压 151 | 152 | 6、*.Z 用 uncompress 解压 153 | 154 | 7、*.tar.Z 用tar -xZf 解压 155 | 156 | 8、*.rar 用 unrar e解压 157 | 158 | 9、*.zip 用 unzip 解压 159 | 160 | ``` 161 | 162 | ## 使用curl通过Get请求获取响应体同时获取响应码 163 | 164 | ``` 165 | curl -o - -s -w "%{http_code}\n" http://www.example.com/ 166 | ``` 167 | [comment]: <tags> (ubuntu) 168 | [comment]: <description> (常用命令) 169 | [comment]: <title> (常用ubuntu指令) 170 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/我开发的比较前价值的项目和模块方法.md: -------------------------------------------------------------------------------- 1 | ## 我的项目介绍 2 | ### 框架 3 | 1. apistellar: 这个一个基于apistar的异步框架,其功能完善易用,使用广泛。细节请参见github。 4 | 2. structure-spider: 这一个基于scrapy的结构化爬虫,这个项目的优势在于抓取结构复杂的嵌套信息。细节请参见github。 5 | ### 工具 6 | 1. pyaop: 一个非常有用的面向切面编程的基础包,在设计底层架构时非常有用,它的实现非常简单,只有一个文件不到百行代码。它使用了代理模式,其可代理一个对象,并自由地为其做生意方法调用前后增加切面,来实现各种调用效果。这个包兼容python全版本。 7 | 2. pytest-apistellar: 这个包最初是作为apistellar的插件存在的,其后来被设计成适用于有mock需求的所有单元测试编写场景。它的主要亮点是支持多作用域,级连mock,绝大多数情况下每次mock只需要一个装饰器就能完成,此外,它还能用来测试apistellar的接口,原理是使用子线程启用一个server并返回其port用来在测试方法中验证响应信息。同时它对异步程序单元测试的编写非常友好。 8 | 3. proxy-factory: 一个代理工厂程序,可自动获取网上的免费代理,并验证其有效性,这个项目完成于两年前,后来没有再维护,因此它自带的代理网站抓取规则可能已失效。但它的各个组件完全解耦,在稍微调整抓取规则后可以快速投入生产中使用。 9 | 4. 
translate_html: 这是一个翻译程序,其通过爬虫技术实现了自动从各大翻译网站获取翻译结果的功能。这个项目同样完成于两年前,后面没有再维护,它实现的翻译网站可能由于规则改变而失效,但它的各个组件完全解耦,在稍微调整抓取规则后可快速投入生产中使用。 10 | 6. dictionary_walker: 一个可以遍历目录的程序,适合遍历超多的文件。可断点续遍。 11 | 5. toolkit:这是一个工具包,其中包含了我工作以来大部分的通用积累,有些工具类、方法目前来说还是非常有用的,如: 12 | - cache_property: 类似于property,不过使用该装饰器后,该属性被缓存了一下来,只会计算一次,适合幂等方法使用。 13 | - cache_for: 类似于cache_property,不过其可传入一个整型来控制缓存时间。 14 | - cache_method: cache_property和cache_for都是基于property实现的,其用法仅限于将不存在参数的方法转换成属性,但有时我们可能还需要缓存多参数方法的调用,cache_method可用于缓存方法不同实参调用的结果,并持续一段时间。 15 | - cache_method_for_update: 其与cache_method的区别在于,这个方法会等当前对象的updated被置为true时重新更新缓存。这个装饰非常有用,我们可以实现一个updated property,用来控制什么时候该更新缓存了。 16 | - load: 这个函数可以用来返回字符串表示的模块、函数、类、及类的属性等。 17 | - classproperty: property只能用于实例方法到实例属性的转换,使用classproperty来支持类方法到类属性的转换,其经常用于懒加载。 18 | - cache_classproperty: 使用该装饰器后,该属性被缓存了下来,只会计算一次,适合幂等类方法使用。 19 | - global_cache_classproperty: 与cache_classproperty的区别在于子类和父类共用同一份缓存。 20 | - async_context: contextlib中定义了一个有用的工具contextmanager可以将生成器转换成上下文管理器,但是只支持同步生成器转换成同步上下文管理器,async_context.contextmanager支持任意转换,即同步转同步,同步转异步,异步转异步,异步转同步的模式,在异步编程时有非常高的使用价值。 21 | - monitors: 里面定义了一些基类如:Service,适合在编写可执行脚本时作为通用基类使用。 22 | - markdown_helper: 用来将整个markdown文件夹递归的渲染成html。 23 | - processor: 用来监控管理一段业务逻辑的执行进度。 24 | - settings: 配置模块。 25 | - consoler: 继承之后可以切入到正在运行的程序内部,开启一个交互式命令,用于调试。 26 | - redis_tool: redis的一些通用实现,如分布式锁。 27 | - package_control: 用来构建python包的工具。 28 | - .... 29 | 30 | ### 玩具 31 | 1. custom_redis: 这是一个redis的简单实现,是我学习元编程时的作品,其存在的意义在于今后在我项目中使用元编程遇到坑时可以快速翻阅其代码寻找解决方案。 32 | 2. async-downloader: 这是一个练手项目,其存在的意义在于让我可以知道所有的异步魔法方法如何使用,利于我快速回忆异步编程的基础知识,并投入生产开发。 33 | 3. coroutine: 这是一个协程的python全版本通用实现,其存在的意义在于当我忘记协程的实现时,可以快速翻阅其代码来回忆。 -------------------------------------------------------------------------------- /doc/数据库索引.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://gss0.bdstatic.com/-4o3dSag_xI4khGkpoWK1HF6hhy/baike/c0%3Dbaike80%2C5%2C5%2C80%2C26/sign=043737e12ff5e0fefa1581533d095fcd/cefc1e178a82b901beb8fbcb718da9773912ef35.jpg)) 2 | ## 创建索引可以大大提高系统的性能 3 | - 通过创建唯一性索引,可以保证数据库表中每一行数据的唯一性。 4 | - 可以大大加快数据的检索速度,这也是创建索引的最主要的原因。 5 | - 可以加速表和表之间的连接,特别是在实现数据的参考完整性方面特别有意义。 6 | - 在使用分组和排序子句进行数据检索时,同样可以显著减少查询中分组和排序的时间。 7 | - 通过使用索引,可以在查询的过程中,使用优化隐藏器,提高系统的性能。 8 | ## 数据库索引的缺点 9 | 10 | - 创建索引和维护索引要耗费时间,这种时间随着数据量的增加而增加。 11 | - 索引需要占物理空间,除了数据表占数据空间之外,每一个索引还要占一定的物理空间,如果要建立聚簇索引,那么需要的空间就会更大。 12 | - 当对表中的数据进行增加、删除和修改的时候,索引也要动态的维护,这样就降低了数据的维护速度。 13 | ## 数据库索引创建位置 14 | - 在经常需要搜索的列上,可以加快搜索的速度。 15 | - 在作为主键的列上,强制该列的唯一性和组织表中数据的排列结构。 16 | - 在经常用在连接的列上,这些列主要是一些外键,可以加快连接的速度。 17 | - 在经常需要根据范围进行搜索的列上创建索引,因为索引已经排序,其指定的范围是连续的。 18 | - 在经常需要排序的列上创建索引,因为索引已经排序,这样查询可以利用索引的排序,加快排序查询时间。 19 | - 在经常使用在WHERE子句中的列上面创建索引,加快条件的判断速度。 20 | ## 不应该创建索引的的这些列具有下列特点 21 | - 对于那些在查询中很少使用或者参考的列不应该创建索引。这是因为,既然这些列很少使用到,因此有索引或者无索引,并不能提高查询速度。相反,由于增加了索引,反而降低了系统的维护速度和增大了空间需求。 22 | - 对于那些只有很少数据值的列也不应该增加索引。这是因为,由于这些列的取值很少,例如人事表的性别列,在查询的结果中,结果集的数据行占了表中数据行的很大比例,即需要在表中搜索的数据行的比例很大。增加索引,并不能明显加快检索速度。 23 | - 对于那些定义为text,image和bit数据类型的列不应该增加索引。这是因为,这些列的数据量要么相当大,要么取值很少。 24 | - 当修改性能远远大于检索性能时,不应该创建索引。这是因为,修改性能和检索性能是互相矛盾的。当增加索引时,会提高检索性能,但是会降低修改性能。当减少索引时,会提高修改性能,降低检索性能。因此,当修改性能远远大于检索性能时,不应该创建索引。 25 | 26 | 27 | ## 根据数据库的功能,可以在数据库设计器中创建三种索引:唯一索引、主键索引和聚集索引。 28 | ### 唯一索引 29 |        唯一索引是不允许其中任何两行具有相同索引值的索引。 30 | 31 |        当现有数据中存在重复的键值时,大多数数据库不允许将新创建的唯一索引与表一起保存。数据库还可能防止添加将在表中创建重复键值的新数据。例如,如果在employee表中职员的姓(lname)上创建了唯一索引,则任何两个员工都不能同姓。 32 | ### 主键索引 33 |        数据库表经常有一列或列组合,其值 34 | 唯一标识表中的每一行。该列称为表的主键。 35 | 36 |        
在数据库关系图中为表定义主键将自动创建主键索引,主键索引是唯一索引的特定类型。该索引要求主键中的每个值都唯一。当在查询中使用主键索引时,它还允许对数据的快速访问。 37 | ### 聚集索引 38 |        在聚集索引中,表中行的物理顺序与键值的逻辑(索引)顺序相同。一个表只能包含一个聚集索引。 39 | 40 |        如果某索引不是聚集索引,则表中行的物理顺序与键值的逻辑顺序不匹配。与非聚集索引相比,聚集索引通常提供更快的数据访问速度。 41 | 42 | 参考资料[数据库索引的实现原理](http://blog.csdn.net/kennyrose/article/details/7532032) 43 | 44 | [comment]: <tags> (数据库,index) 45 | [comment]: <description> (数据索引相关知识) 46 | [comment]: <title> (数据库索引) 47 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/树的旋转.md: -------------------------------------------------------------------------------- 1 | [comment]: <image> (![](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1518346000523&di=868c4dd56f772a2c104980475b024dbb&imgtype=0&src=http%3A%2F%2Fpic2.ooopic.com%2F11%2F83%2F59%2F44b2OOOPIC2d_1024.jpg)) 2 | ### 左旋 3 | 4 | 5 | 6 | //Rotate Left 7 | private void rotateLeft(Entry<K,V> p) { 8 | if (p != null) { 9 | Entry<K,V> r = p.right; 10 | p.right = r.left; 11 | if (r.left != null) 12 | r.left.parent = p; 13 | r.parent = p.parent; 14 | if (p.parent == null) 15 | root = r; 16 | else if (p.parent.left == p) 17 | p.parent.left = r; 18 | else 19 | p.parent.right = r; 20 | r.left = p; 21 | p.parent = r; 22 | } 23 | } 24 | 25 | 26 | ### 右旋 27 | 28 | 29 | 30 | //Rotate Right 31 | private void rotateRight(Entry<K,V> p) { 32 | if (p != null) { 33 | Entry<K,V> l = p.left; 34 | p.left = l.right; 35 | if (l.right != null) l.right.parent = p; 36 | l.parent = p.parent; 37 | if (p.parent == null) 38 | root = l; 39 | else if (p.parent.right == p) 40 | p.parent.right = l; 41 | else p.parent.left = l; 42 | l.right = p; 43 | p.parent = l; 44 | } 45 | } 46 | 47 | 48 | ## 总结树的旋转规律 49 | 50 | 1. 拿到一个树,首先树获取其中一个子树的子树(孙树)变成自己的第三个子树(中间子树),左旋树获取右子树的左孙树,右旋树获取左子树的右孙树,同时建立好父子关系。 51 | 2. 被操作的子树获取树的父亲,并确定树是父亲(排除null的情况)的哪个子树,建立好父子关系。 52 | 3. 
树和被操作的子树关系调换。 53 | 54 | 参考资料 55 | 56 | [红黑树并没有我们想象的那么难(上)](http://daoluan.net/%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84/%E7%AE%97%E6%B3%95/2013/09/25/rbtree- 57 | is-not-difficult.html) 58 | 59 | [史上最清晰的红黑树讲解(上)](https://www.cnblogs.com/CarpenterLee/p/5503882.html) 60 | 61 | 62 | [comment]: <tags> (树) 63 | [comment]: <description> (树) 64 | [comment]: <title> (树的旋转) 65 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/树莓派手动指定静态IP和DNS.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://core-electronics.com.au/media/kbase/raspberry-pi-workshop-cover.png)) 2 | ### 使用 vi 编辑文件,增加下列配置项`vi /etc/dhcpcd.conf` 3 | 4 | 5 | 6 | # 指定接口 eth0 7 | interface eth0 8 | # 指定静态IP,/24表示子网掩码为 255.255.255.0 9 | static ip_address=192.168.1.20/24 10 | # 路由器/网关IP地址 11 | static routers=192.168.1.1 12 | # 手动自定义DNS服务器 13 | static domain_name_servers=114.114.114.114 14 | 15 | 16 | ### 修改完成后,按esc键后输入 :wq 保存。重启树莓派就生效了 17 | 18 | 19 | 20 | sudo reboot 21 | 22 | 23 | 24 | [comment]: <tags> (raspberry) 25 | [comment]: <description> (树莓派配置静态ip) 26 | [comment]: <title> (树莓派手动指定静态IP和DNS) 27 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/正向断言备忘.md: -------------------------------------------------------------------------------- 1 | ``` 2 | In [14]: a = "abc" 3 | # 正向后行断言放在前面,当前面有a时,匹配成功,a不参与匹配的结果。 4 | In [22]: re.search(r"(?<=a)bc", a) 5 | Out[22]: <_sre.SRE_Match object; span=(1, 3), match='bc'> 6 | # 正向后行断言放在后面,意义不大,b被匹配了两次,b参与匹配的结果。 7 | In [23]: re.search(r"ab(?<=b)", a) 8 | Out[23]: <_sre.SRE_Match object; span=(0, 2), match='ab'> 9 | # 正向先行断言放在前面,意义不大,a被配置了两次,a参与匹配的结果。 10 | In [24]: re.search(r"(?=a)abc", a) 11 | Out[24]: <_sre.SRE_Match object; span=(0, 3), match='abc'> 12 | # 正向先行断言放在后面,当后面有b时,匹配成功,b不参与匹配的结果。 13 | In [21]: re.search(r"a(?=b)", a) 14 | Out[21]: <_sre.SRE_Match object; span=(0, 1), match='a'> 15 | ``` 16 | -------------------------------------------------------------------------------- /doc/获取https免费证书及配置.md: -------------------------------------------------------------------------------- 1 | [comment]: <> (![](https://letsencrypt.org/images/howitworks_authorization.png)) 2 | # 获取免费证书 3 | 4 | * 克隆letsencrypt`git clone https://github.com/letsencrypt/letsencrypt` 5 | * 执行`cd letsencrypt &./letsencrypt-auto` 6 | * 按提示输入邮箱,域名,出现如下信息即成功生成证明和私钥 7 | 8 | 9 | 10 | IMPORTANT NOTES: 11 | - Unable to install the certificate 12 | - Congratulations! Your certificate and chain have been saved at: 13 | /etc/letsencrypt/live/mashichao.com/fullchain.pem 14 | Your key file has been saved at: 15 | /etc/letsencrypt/live/mashichao.com/privkey.pem 16 | Your cert will expire on 2018-05-01. To obtain a new or tweaked 17 | version of this certificate in the future, simply run 18 | letsencrypt-auto again with the "certonly" option. To 19 | non-interactively renew *all* of your certificates, run 20 | "letsencrypt-auto renew" 21 | - Your account credentials have been saved in your Certbot 22 | configuration directory at /etc/letsencrypt. You should make a 23 | secure backup of this folder now. This configuration directory will 24 | also contain certificates and private keys obtained by Certbot so 25 | making regular backups of this folder is ideal. 
26 | 27 | 28 | # 配置nginx 29 | 30 | * 修改nginx配置文件,添加如下信息 31 | 32 | 33 | 34 | server { 35 | listen 443 ssl; 36 | server_name mashichao.com; 37 | 38 | ssl_certificate /etc/letsencrypt/live/mashichao.com/fullchain.pem; 39 | ssl_certificate_key /etc/letsencrypt/live/mashichao.com/privkey.pem; 40 | 41 | ssl_session_cache shared:SSL:1m; 42 | ssl_session_timeout 5m; 43 | 44 | ssl_ciphers HIGH:!aNULL:!MD5; 45 | ssl_prefer_server_ciphers on; 46 | 47 | #静态文件,nginx自己处理 48 | location /static { 49 | alias /root/blog/static; 50 | #过期30天,静态文件不怎么更新,过期可以设大一点,如果频繁更新,则可以设置得小一点。 51 | expires 30d; 52 | } 53 | location / { 54 | include uwsgi_params; 55 | uwsgi_pass 127.0.0.1:3031; 56 | } 57 | 58 | } 59 | 60 | 61 | 其中ssl_certificate指向证书,ssl_certificate_key指向私钥 \- 重启nginx完成 62 | 63 | 64 | [comment]: <tags> (https,LetsEncrypt,nginx) 65 | [comment]: <description> (使用Let’s Encrypt https的免费证书搭建nginx服务器) 66 | [comment]: <title> (获取https免费证书及配置) 67 | [comment]: <author> (夏洛之枫) -------------------------------------------------------------------------------- /doc/解决ERR_CONTENT_LENGTH_MISMATCH.md: -------------------------------------------------------------------------------- 1 | uwsgi下的web页面发送包含较大请求体的post请求时出现ERR_CONTENT_LENGTH_MISMATCH 2 | nginx中的错误如下: 3 | ``` 4 | 2018/02/22 01:02:46 [error] 31556#31556: *7657 readv() failed (104: Connection reset by peer) while reading upstream, client: 112.231.57.20, server: localhost, request: "POST /modify HTTP/1.1", upstream: "uwsgi://127.0.0.1:3031", host: "mashichao.com", referrer: "http://mashichao.com/?path=/me?code=QQLB8Z" 5 | ``` 6 | uwsgi中没有发现错误 7 | 其实是因为请求过大,导致request头没有被uwsgi读完导致的,调整uwsgi的buffer-size参数就可以,默认是4096,调整到65535。 8 | 9 | 参考资料[Nginx uwsgi (104: Connection reset by peer) while reading response header from upstream](https://stackoverflow.com/questions/22697584/nginx-uwsgi-104-connection-reset-by-peer-while-reading-response-header-from-u) -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = toolkity 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/toolkit.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/doctrees/toolkit.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/toolkit/processor.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/doctrees/toolkit/processor.doctree -------------------------------------------------------------------------------- /docs/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: e45b3e99bee8dbb6c0bffe0f1520d7e1 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/_build/html/_modules/index.html: -------------------------------------------------------------------------------- 1 | 2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" 3 | "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> 4 | 5 | <html xmlns="http://www.w3.org/1999/xhtml" lang="zh-CN"> 6 | <head> 7 | <meta http-equiv="X-UA-Compatible" content="IE=Edge" /> 8 | <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> 9 | <title>Overview: module code — toolkity 1.7.5 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 |
29 |
30 |
31 | 32 |

All modules for which code is available

33 | 37 | 38 |
39 |
40 |
41 | 63 |
64 |
65 | 73 | 74 | 75 | 76 | 77 | 78 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. toolkity documentation master file, created by 2 | sphinx-quickstart on Sat May 19 14:54:57 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to toolkity's documentation! 7 | ==================================== 8 | 9 | .. toctree:: 10 | :caption: 模块文档 11 | 12 | toolkit 13 | toolkit/processor 14 | 15 | 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/toolkit.rst.txt: -------------------------------------------------------------------------------- 1 | .. _module-toolkit: 2 | 3 | ======== 4 | 基础模块 5 | ======== 6 | .. automodule:: toolkit -------------------------------------------------------------------------------- /docs/_build/html/_sources/toolkit/processor.rst.txt: -------------------------------------------------------------------------------- 1 | .. _module-processor: 2 | 3 | ============ 4 | 进度控制模块 5 | ============ 6 | .. automodule:: toolkit.processor -------------------------------------------------------------------------------- /docs/_build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/comment-bright.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/comment-close.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/comment.png -------------------------------------------------------------------------------- /docs/_build/html/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* This file intentionally left blank. 
*/ 2 | -------------------------------------------------------------------------------- /docs/_build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: '', 3 | VERSION: '1.7.5', 4 | LANGUAGE: 'zh-CN', 5 | COLLAPSE_INDEX: false, 6 | FILE_SUFFIX: '.html', 7 | HAS_SOURCE: true, 8 | SOURCELINK_SUFFIX: '.txt' 9 | }; -------------------------------------------------------------------------------- /docs/_build/html/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/down-pressed.png -------------------------------------------------------------------------------- /docs/_build/html/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/down.png -------------------------------------------------------------------------------- /docs/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/file.png -------------------------------------------------------------------------------- /docs/_build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/minus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/plus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ 8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 9 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ 11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 14 | .highlight .ge { font-style: italic } /* Generic.Emph */ 15 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 18 | .highlight .go { color: #333333 } /* Generic.Output */ 19 | .highlight .gp { color: #c65d09; font-weight: bold } /* 
Generic.Prompt */ 20 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 28 | .highlight .kt { color: #902000 } /* Keyword.Type */ 29 | .highlight .m { color: #208050 } /* Literal.Number */ 30 | .highlight .s { color: #4070a0 } /* Literal.String */ 31 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 32 | .highlight .nb { color: #007020 } /* Name.Builtin */ 33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 34 | .highlight .no { color: #60add5 } /* Name.Constant */ 35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 37 | .highlight .ne { color: #007020 } /* Name.Exception */ 38 | .highlight .nf { color: #06287e } /* Name.Function */ 39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 42 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 50 | .highlight .sa { color: #4070a0 } /* Literal.String.Affix */ 51 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 52 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 53 | .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ 54 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 55 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 56 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 57 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 58 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 59 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 60 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 61 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 62 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 63 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 64 | .highlight .fm { color: #06287e } /* Name.Function.Magic */ 65 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 66 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 67 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 68 | .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ 69 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- 
/docs/_build/html/_static/up-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/up-pressed.png -------------------------------------------------------------------------------- /docs/_build/html/_static/up.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/_static/up.png -------------------------------------------------------------------------------- /docs/_build/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | Welcome to toolkity’s documentation! — toolkity 1.7.5 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
Welcome to toolkity's documentation!
模块文档
Indices and tables
94 | 105 | 106 | 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /docs/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/docs/_build/html/objects.inv -------------------------------------------------------------------------------- /docs/_build/html/py-modindex.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | Python Module Index — toolkity 1.7.5 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 |
Python Module Index
toolkit
toolkit.processor
87 | 95 | 96 | 97 | 98 | 99 | 100 | -------------------------------------------------------------------------------- /docs/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | Search — toolkity 1.7.5 documentation 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
79 | 87 | 88 | 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. toolkity documentation master file, created by 2 | sphinx-quickstart on Sat May 19 14:54:57 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to toolkity's documentation! 7 | ==================================== 8 | 9 | .. toctree:: 10 | :caption: 模块文档 11 | 12 | toolkit 13 | toolkit/processor 14 | 15 | 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=toolkity 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/toolkit.rst: -------------------------------------------------------------------------------- 1 | .. _module-toolkit: 2 | 3 | ======== 4 | 基础模块 5 | ======== 6 | .. automodule:: toolkit -------------------------------------------------------------------------------- /docs/toolkit/processor.rst: -------------------------------------------------------------------------------- 1 | .. _module-processor: 2 | 3 | ============ 4 | 进度控制模块 5 | ============ 6 | .. 
automodule:: toolkit.processor -------------------------------------------------------------------------------- /gulpfile.js: -------------------------------------------------------------------------------- 1 | var gulp = require('gulp'); 2 | var browserSync = require('browser-sync').create(); 3 | var reload = browserSync.reload; 4 | 5 | 6 | gulp.task('default', function () { 7 | browserSync.init({ 8 | browser: ["google chrome"], 9 | server: { 10 | baseDir: "./htmlcov" 11 | } 12 | }); 13 | 14 | gulp.watch('./htmlcov/**/*.{html,markdown,md,yml,json,txt,xml}') 15 | .on('change', reload); 16 | }); 17 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gulp-dev", 3 | "version": "1.0.0", 4 | "description": "基于Gulp.js的开发调试工具", 5 | "main": "index.js", 6 | "directories": { 7 | "doc": "docs", 8 | "test": "tests" 9 | }, 10 | "dependencies": {}, 11 | "devDependencies": { 12 | "browser-sync": "^2.24.7", 13 | "gulp": "^4.0.0" 14 | }, 15 | "scripts": { 16 | "gulp": "node node_modules/gulp/bin/gulp.js" 17 | }, 18 | "author": "Moore.Huang", 19 | "license": "ISC" 20 | } 21 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | python-json-logger 2 | redis 3 | kafka-python 4 | requests 5 | future 6 | markdown 7 | ipdb -------------------------------------------------------------------------------- /resources/monitors.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/resources/monitors.jpg -------------------------------------------------------------------------------- /resources/toolkit.graffle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/resources/toolkit.graffle -------------------------------------------------------------------------------- /resources/translator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/resources/translator.jpg -------------------------------------------------------------------------------- /setup.cfg.tpl: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | 4 | [tool:pytest] 5 | ;when run pytest command, add --roodir here will be effective, but 6 | ;the printed message of rootdir is only can be changed by command 7 | ;line, so never mind! 8 | addopts = --rootdir=${pwd}/tests --cov-report=html:${pwd}/htmlcov --cov-branch --cov=${pwd}/toolkit/ -vv --disable-warnings 9 | usefixtures = 10 | mock 11 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | import os 3 | import re 4 | import string 5 | 6 | from contextlib import contextmanager 7 | from setuptools import setup, find_packages 8 | 9 | 10 | def get_version(package): 11 | """ 12 | Return package version as listed in `__version__` in `__init__.py`. 
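
Regarding setup.cfg.tpl above: its `${pwd}` placeholders are filled in by setup.py's `cfg_manage` context manager (defined just below) using `string.Template`. A small, hedged illustration of that substitution with a trimmed-down template line and a made-up project path:

```python
import string

# shortened version of the addopts line from setup.cfg.tpl; the path is illustrative
tpl = "addopts = --rootdir=${pwd}/tests --cov=${pwd}/toolkit/"
print(string.Template(tpl).substitute(pwd="/home/user/toolkit"))
# -> addopts = --rootdir=/home/user/toolkit/tests --cov=/home/user/toolkit/toolkit/
```
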
13 | """ 14 | init_py = open(os.path.join(package, '__init__.py')).read() 15 | mth = re.search("__version__\s?=\s?['\"]([^'\"]+)['\"]", init_py) 16 | if mth: 17 | return mth.group(1) 18 | else: 19 | raise RuntimeError("Cannot find version!") 20 | 21 | 22 | def install_requires(): 23 | """ 24 | Return requires in requirements.txt 25 | :return: 26 | """ 27 | try: 28 | with open("requirements.txt") as f: 29 | return [line.strip() for line in f.readlines() if line.strip()] 30 | except OSError: 31 | return [] 32 | 33 | 34 | VERSION = get_version("toolkit") 35 | 36 | AUTHOR = "cn" 37 | 38 | AUTHOR_EMAIL = "cnaafhvk@foxmail.com" 39 | 40 | URL = "https://www.github.com/ShichaoMa/toolkit" 41 | 42 | NAME = "toolkity" 43 | 44 | DESCRIPTION = "simple function tools. " 45 | 46 | try: 47 | LONG_DESCRIPTION = open("README.md").read() 48 | except UnicodeDecodeError: 49 | LONG_DESCRIPTION = open("README.md", encoding="utf-8").read() 50 | 51 | KEYWORDS = "tools function" 52 | 53 | LICENSE = "MIT" 54 | 55 | 56 | @contextmanager 57 | def cfg_manage(cfg_tpl_filename): 58 | if os.path.exists(cfg_tpl_filename): 59 | cfg_file_tpl = open(cfg_tpl_filename) 60 | buffer = cfg_file_tpl.read() 61 | try: 62 | with open(cfg_tpl_filename.rstrip(".tpl"), "w") as cfg_file: 63 | cfg_file.write(string.Template(buffer).substitute( 64 | pwd=os.path.abspath(os.path.dirname(__file__)))) 65 | yield 66 | finally: 67 | cfg_file_tpl.close() 68 | else: 69 | yield 70 | 71 | 72 | with cfg_manage(__file__.replace(".py", ".cfg.tpl")): 73 | setup( 74 | name=NAME, 75 | version=VERSION, 76 | description=DESCRIPTION, 77 | long_description=LONG_DESCRIPTION, 78 | long_description_content_type="text/markdown", 79 | classifiers=[ 80 | 'License :: OSI Approved :: MIT License', 81 | 'Programming Language :: Python', 82 | 'Intended Audience :: Developers', 83 | 'Operating System :: OS Independent', 84 | ], 85 | entry_points={ 86 | "console_scripts": [ 87 | "ver-inc = toolkit.tools.package_control:change_version", 88 | "mk-html = toolkit.tools.markdown_helper:main", 89 | ] 90 | }, 91 | keywords=KEYWORDS, 92 | author=AUTHOR, 93 | author_email=AUTHOR_EMAIL, 94 | url=URL, 95 | license=LICENSE, 96 | packages=find_packages(exclude=("tests*",)), 97 | install_requires=install_requires(), 98 | include_package_data=True, 99 | zip_safe=True, 100 | setup_requires=["pytest-runner"], 101 | tests_require=["pytest-apistellar", "pytest-asyncio", "pytest-cov"] 102 | ) -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShichaoMa/toolkit/e98d51961e045652f9df514034e51ad06345d95a/tests/conftest.py -------------------------------------------------------------------------------- /tests/test_cache.py: -------------------------------------------------------------------------------- 1 | from toolkit import _property_cache, cache_property, \ 2 | cache_classproperty, classproperty, global_cache_classproperty 3 | 4 | 5 | class A(object): 6 | 7 | c = 4 8 | b = 3 9 | 10 | @property 11 | @_property_cache 12 | def a(self): 13 | return self.b 14 | 15 | @cache_property 16 | def a_cache(self): 17 | return self.b 18 | 19 | @classproperty 20 | def d(cls): 21 | return cls.c 22 | 23 | @cache_classproperty 24 | def d_cache(cls): 25 | return cls.c 26 | 27 | @global_cache_classproperty 28 | def d_global_cache(cls): 29 | return cls.c 30 | 31 | 32 | class B(A): 33 | pass 34 | 35 | 36 | class TestCache(object): 37 | 38 | def 
test_cache_property(self): 39 | A.b = 3 40 | a = A() 41 | assert a.a == 3 42 | assert a.a_cache == 3 43 | global b 44 | A.b = 4 45 | assert a.a == 3 46 | assert a.a_cache == 3 47 | assert A.b == 4 48 | 49 | def test_classproperty(self): 50 | A.c = 4 51 | a = A() 52 | assert a.d == 4 53 | A.c = 5 54 | assert a.d == 5 55 | 56 | def test_cache_class_property(self): 57 | A.c = 4 58 | a = A() 59 | assert a.d_cache == 4 60 | A.c = 5 61 | assert a.d_cache == 4 62 | 63 | def test_global_cache_class_property(self): 64 | A.c = 4 65 | a = A() 66 | b = B() 67 | assert b.d_global_cache == 4 68 | A.c = 6 69 | assert b.d_global_cache == 4 70 | assert a.d_global_cache == 4 -------------------------------------------------------------------------------- /tests/test_components/test_processor.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from toolkit.components.processor import Processor, AsyncProcessor 4 | 5 | 6 | @pytest.fixture(scope="module", params=[[183, 432, 1023], [11, 43, 59]]) 7 | def weight(request): 8 | return request.param 9 | 10 | 11 | class TestProcesser(object): 12 | 13 | def test_split(self, weight): 14 | x, y, z = Processor.split(weight, 0, 100) 15 | assert weight[0] // x == (weight[0] + weight[1]) // y ==\ 16 | (weight[0] + weight[1] + weight[2])// z 17 | 18 | def test_processor_with_same_from_to(self, capsys): 19 | updated = list() 20 | 21 | def fun(process): 22 | updated.append(process) 23 | processor = Processor(2, fun, _from=11, to=11) 24 | assert processor.processes == [11, 11] 25 | processor.update() 26 | assert updated == [] 27 | processor.update() 28 | assert updated == [] 29 | 30 | def test_processor_with_weight_zero(self): 31 | updated = list() 32 | 33 | def fun(process): 34 | updated.append(process) 35 | processor = Processor(2, fun) 36 | assert processor.processes == [50, 100] 37 | processor.update() 38 | with processor.hand_out(0) as child: 39 | pass 40 | assert updated == [50, 100] 41 | 42 | @pytest.mark.asyncio 43 | async def test_async_processor(self): 44 | updated = list() 45 | messages = list() 46 | 47 | async def fun(process, message): 48 | updated.append(process) 49 | messages.append(message) 50 | 51 | processor = AsyncProcessor(2, fun) 52 | assert processor.processes == [50, 100] 53 | await processor.update("1") 54 | async with processor.hand_out(0) as child: 55 | pass 56 | assert updated == [50, 100] 57 | assert messages == ["1", ""] 58 | -------------------------------------------------------------------------------- /tests/test_service/test_combine.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from toolkit.service.combine import combine 3 | 4 | 5 | class A: 6 | a = 1 7 | 8 | def __init__(self): 9 | print("__init__ in A. self:", self) 10 | 11 | def fun(self): 12 | print("fun in A. self:", self) 13 | return 2 14 | 15 | def __getattr__(self, item): 16 | if item == "d": 17 | return 33 18 | raise AttributeError 19 | 20 | 21 | class B: 22 | a = 2 23 | 24 | def __init__(self): 25 | self.c = 11 26 | self.d = 55 27 | print("__init__ in B. self:", self) 28 | 29 | def fun(self): 30 | print("fun in B. 
self:", self) 31 | return 3 32 | 33 | 34 | def test_combine1(): 35 | """ 36 | 默认组合方式,所有方法各自调各自的 37 | """ 38 | @combine(B) 39 | class C(A): 40 | pass 41 | 42 | c = C() 43 | assert c.a == 2 44 | assert c.fun() == 2 45 | 46 | 47 | def test_combine2(): 48 | """ 49 | 默认组合方式,所有方法各自调各自的,但除__init__的方法(fun)调用顺序变了 50 | """ 51 | pass 52 | 53 | @combine(B, after=False) 54 | class D(A): 55 | pass 56 | 57 | d = D() 58 | assert d.fun() == 3 59 | 60 | 61 | def test_combine3(): 62 | """ 63 | 继承方式,除了__init__方法以外,使用E的实例来调用 64 | """ 65 | @combine(B, extend=True) 66 | class E(A): 67 | pass 68 | 69 | e = E() 70 | print(e.fun()) 71 | 72 | 73 | def test_combine4(): 74 | """ 75 | 在__init__中调用普通方法,无论是继承还是组合的方式,全部使用G的实例调用。 76 | """ 77 | @combine(B) 78 | class G(A): 79 | 80 | def __init__(self): 81 | print("__init__ in G. self:", self) 82 | super(G, self).__init__() 83 | self.fun() 84 | 85 | 86 | g = G() 87 | print(g.c) 88 | # 89 | # 90 | # @combine(B) 91 | # class H(A): 92 | # """ 93 | # B中的实例属性cc被代理了 94 | # """ 95 | # pass 96 | # 97 | # h = H() 98 | # print(h.c) 99 | # 100 | # 101 | # @combine(B) 102 | # class I(A): 103 | # """ 104 | # I的类属性c存在,所以没有用使用B的实例属性 105 | # """ 106 | # c = 22 107 | # 108 | # 109 | # i = I() 110 | # print(i.c) 111 | # 112 | # 113 | # @combine(B) 114 | # class J(A): 115 | # """ 116 | # d使用__getattr__获取,没有使用B中的实例属性 117 | # """ 118 | # pass 119 | # 120 | # j = J() 121 | # print(j.d) 122 | # 123 | # 124 | -------------------------------------------------------------------------------- /tests/test_service/test_monitors.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from toolkit.service.monitors import ParallelMonitor 4 | 5 | 6 | class ParallelMonitorTest(unittest.TestCase): 7 | 8 | def setUp(self): 9 | self.pl = ParallelMonitor() 10 | 11 | def test_logger(self): 12 | self.assertIsNotNone(self.pl.logger) 13 | self.pl.logger.info("成功生成logger实例!") -------------------------------------------------------------------------------- /tests/test_settings/test_frozen.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from toolkit.settings.frozen import Frozen 4 | 5 | 6 | class TestFrozen(object): 7 | 8 | def test_dict_get(self): 9 | a = Frozen({"a": 1}) 10 | assert a["a"] == 1 11 | assert a.a == 1 12 | 13 | def test_list_dict_get(self): 14 | a = Frozen([{"b": 3}]) 15 | assert a[0].b == 3 16 | 17 | def test_normalize(self): 18 | a = Frozen([{"b": 3}, {"a", "b"}, ["c", ["b", "d"]]]) 19 | assert isinstance(a.normalize(), list) 20 | assert isinstance(a[0], Frozen) 21 | assert isinstance(a.normalize()[0], dict) 22 | assert isinstance(a[1], Frozen) 23 | assert any(isinstance(i, Frozen) for i in a[2]) 24 | assert any(isinstance(i, list) for i in a[2].normalize()) 25 | 26 | def test_readonly(self): 27 | a = Frozen({"a": 1}) 28 | with pytest.raises(NotImplementedError): 29 | a.a = 3 30 | 31 | with pytest.raises(NotImplementedError): 32 | a["a"] = 3 33 | 34 | with pytest.raises(NotImplementedError): 35 | del a["a"] 36 | 37 | b = Frozen([]) 38 | with pytest.raises(NotImplementedError): 39 | b.insert(0, "1") 40 | -------------------------------------------------------------------------------- /tests/test_settings/test_settings.py: -------------------------------------------------------------------------------- 1 | from toolkit.settings import SettingsLoader 2 | 3 | 4 | def test_settings(): 5 | sw = SettingsLoader() 6 | set = sw.load({"a": 1, "b": "d", "c": {"d": [3,4,5,6], "e": {"a", 4, 5, 
6}}}) 7 | print(set) 8 | print(set.c.e) 9 | -------------------------------------------------------------------------------- /tests/test_singleton.py: -------------------------------------------------------------------------------- 1 | from toolkit.singleton import Singleton 2 | 3 | 4 | class A(metaclass=Singleton): 5 | pass 6 | 7 | 8 | class TestSingleton: 9 | 10 | def test_singleton(self): 11 | assert A() is A() 12 | -------------------------------------------------------------------------------- /tests/test_structures/test_linked_list.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from toolkit.structures.linked_list import LinkedList 4 | 5 | 6 | class TestLinkList(object): 7 | 8 | def test_init(self): 9 | lst = LinkedList([1, 2, 3, 4]) 10 | assert list(lst) == [1, 2, 3, 4] 11 | 12 | def test_reverse_iter(self): 13 | lst = LinkedList([1, 2, 3, 4]) 14 | assert list(lst.reverse_iter()) == [4, 3, 2, 1] 15 | 16 | def test_pop(self): 17 | lst = LinkedList([1, 2, 3, 4]) 18 | for i in range(len(lst)): 19 | assert lst.pop(0) == i + 1 20 | 21 | def test_pop_reverse(self): 22 | lst = LinkedList([1, 2, 3, 4]) 23 | for i in range(len(lst)-1, -1, -1): 24 | assert lst.pop(-1) == i + 1 25 | 26 | def test_get(self): 27 | lst = LinkedList([1, 2, 3, 4]) 28 | assert lst[0] == 1 29 | assert lst[1] == 2 30 | assert lst[2] == 3 31 | assert lst[3] == 4 32 | 33 | def test_remove(self): 34 | lst = LinkedList([1, 2, 3, 4]) 35 | assert lst.remove(3) 36 | assert list(lst) == [1, 2, 4] 37 | with pytest.raises(ValueError): 38 | lst.remove(3) 39 | 40 | def test_find(self): 41 | lst = LinkedList([1, 2, 3, 4]) 42 | assert lst.find(3) == 2 43 | 44 | with pytest.raises(ValueError): 45 | lst.find(3, 0, 2) 46 | 47 | def test_in(self): 48 | lst = LinkedList([1, 2, 3, 4]) 49 | assert 3 in lst 50 | 51 | def test_slice(self): 52 | lst = LinkedList([1, 2, 3, 4, 5]) 53 | assert lst[:] == lst 54 | assert lst[1:2] == LinkedList([2]) 55 | assert lst[::2] == LinkedList([1, 3, 5]) 56 | assert lst[::-2] == LinkedList([5, 3, 1]) 57 | assert lst[::-1] == LinkedList(lst.reverse_iter()) 58 | lst[1:2] = [3, 4, 5] 59 | assert lst == LinkedList([1, 3, 4, 5, 3, 4, 5]) 60 | lst[3:2] = [3, 4, 5] 61 | assert lst == LinkedList([1, 3, 4, 3, 4, 5, 5, 3, 4, 5]) 62 | lst[:] = [1, 2, 3, 4] 63 | assert lst == LinkedList([1, 2, 3, 4]) 64 | lst[4: 10] = [6, 7, 8] 65 | assert lst == LinkedList([1, 2, 3, 4, 6, 7, 8]) 66 | 67 | def test_insert(self): 68 | lst = LinkedList([1, 2, 3, 4, 5]) 69 | lst.insert(0, 10) 70 | assert list(lst) == [10, 1, 2, 3, 4, 5] 71 | 72 | def test_iadd(self): 73 | default = LinkedList([1, 2, 3, 4, 5]) 74 | lst = default 75 | lst += [3, 4, 5, 6] 76 | assert lst == LinkedList([1, 2, 3, 4, 5, 3, 4, 5, 6]) 77 | assert lst is default 78 | 79 | def test_add(self): 80 | lst = LinkedList([1, 2, 3, 4, 5]) 81 | new_list = lst + [4, 5, 6] 82 | assert new_list is not lst 83 | assert new_list == LinkedList([1, 2, 3, 4, 5, 4, 5, 6]) 84 | new_list2 = [4, 5, 6] + lst 85 | assert new_list2 == LinkedList([4, 5, 6, 1, 2, 3, 4, 5]) 86 | -------------------------------------------------------------------------------- /tests/test_structures/test_queue.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import unittest 3 | 4 | from redis import Redis 5 | from toolkit.structures.queues import FifoDiskQueue, RedisQueue 6 | 7 | 8 | class FifoDiskQueueTest(unittest.TestCase): 9 | 10 | def setUp(self): 11 | self.queue = FifoDiskQueue("queue") 
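
For reference, a minimal usage sketch of FifoDiskQueue as exercised by the surrounding tests; push()/rid()/clear() mirror the calls made in the test methods, and the "demo_queue" path is purely illustrative:

```python
from toolkit.structures.queues import FifoDiskQueue

queue = FifoDiskQueue("demo_queue")  # illustrative on-disk path
queue.push(b"first")
queue.push(b"second")
assert queue.rid(2) == [b"first", b"second"]  # rid(n) pops up to n items in FIFO order
queue.clear()  # drop the remaining on-disk data
```
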
12 | 13 | def test_rid_3(self): 14 | self.queue.push(b"aaaaa") 15 | self.queue.push(b"bbbbb") 16 | self.queue.push(b"ccccc") 17 | self.assertListEqual([b"aaaaa", b"bbbbb", b"ccccc"], self.queue.rid(3)) 18 | 19 | def test_rid_2(self): 20 | self.queue.push(b"aaaaa") 21 | self.queue.push(b"bbbbb") 22 | self.assertListEqual([b"aaaaa", b"bbbbb"], self.queue.rid(3)) 23 | 24 | def test_rid_1(self): 25 | self.queue.push(b"aaaaa") 26 | self.queue.push(b"bbbbb") 27 | self.queue.push(b"ccccc") 28 | self.assertListEqual([b"aaaaa"], self.queue.rid(1)) 29 | 30 | def tearDown(self): 31 | self.queue.clear() 32 | 33 | 34 | @pytest.mark.skip(reason="May not have redis server. ") 35 | class RedisQueueTest(unittest.TestCase): 36 | 37 | def setUp(self): 38 | self.queue = RedisQueue(Redis(), "test_queue") 39 | 40 | def test_rid_3(self): 41 | self.queue.push(b"aaaaa") 42 | self.queue.push(b"bbbbb") 43 | self.queue.push(b"ccccc") 44 | self.assertListEqual([b"aaaaa", b"bbbbb", b"ccccc"], self.queue.rid(3)) 45 | 46 | def test_rid_2(self): 47 | self.queue.push(b"aaaaa") 48 | self.queue.push(b"bbbbb") 49 | self.assertListEqual([b"aaaaa", b"bbbbb"], self.queue.rid(3)) 50 | 51 | def test_rid_1(self): 52 | self.queue.push(b"aaaaa") 53 | self.queue.push(b"bbbbb") 54 | self.queue.push(b"ccccc") 55 | self.assertListEqual([b"aaaaa"], self.queue.rid(1)) 56 | 57 | def tearDown(self): 58 | self.queue.clear() 59 | -------------------------------------------------------------------------------- /tests/test_toolkit.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from toolkit import cache_classproperty, cache_property, clear_cache, global_cache_classproperty 4 | 5 | 6 | class A(object): 7 | 8 | @cache_classproperty 9 | def fun(self): 10 | return time.time() 11 | 12 | @global_cache_classproperty 13 | def zoo(self): 14 | return time.time() 15 | 16 | @cache_property 17 | def bar(self): 18 | return time.time() 19 | 20 | 21 | class B(A): 22 | pass 23 | 24 | 25 | def test_clear_cache(): 26 | a = A() 27 | fun = a.fun 28 | bar = a.bar 29 | zoo = a.zoo 30 | time.sleep(1) 31 | assert a.fun == fun 32 | assert a.bar == bar 33 | assert a.zoo == zoo 34 | clear_cache(A, "fun") 35 | clear_cache(A, "zoo") 36 | clear_cache(a, "bar") 37 | assert a.fun != fun 38 | assert a.bar != bar 39 | assert a.zoo != zoo 40 | assert A.fun != B.fun 41 | assert A.zoo == B.zoo 42 | -------------------------------------------------------------------------------- /tests/test_tools/test_except_context.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from toolkit.tools.managers import ExceptContext 4 | 5 | 6 | def test_raise(): 7 | with pytest.raises(Exception): 8 | with ExceptContext(Exception, errback=lambda name, *args: print(name)): 9 | raise Exception("test. ..") 10 | 11 | 12 | def test_no_raise(): 13 | with ExceptContext(Exception, errback=lambda name, *args: print(name) is None): 14 | raise Exception("test. 
..") 15 | -------------------------------------------------------------------------------- /tests/test_tools/test_manager.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from toolkit.tools.managers import Blocker 4 | 5 | 6 | def test_blocker(): 7 | start_time = time.time() 8 | assert Blocker(3).wait_timeout_or_notify(lambda: time.time() - start_time > 2) 9 | assert not Blocker(3).wait_timeout_or_notify(lambda: time.time() - start_time > 10) -------------------------------------------------------------------------------- /tests/test_tools/test_redis_tools.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import pytest 4 | 5 | from toolkit.tools.redis_tools import DistributedLock 6 | 7 | 8 | @pytest.mark.skip(reason="May not have redis server. ") 9 | class TestRedisLock(object): 10 | 11 | def test_lock_sync(self): 12 | from redis import Redis 13 | redis_conn = Redis(host="127.0.0.1", port=7777) 14 | lock = DistributedLock(redis_conn, "test_key", 10) 15 | with lock: 16 | time.sleep(int(sys.argv[1])) 17 | print(11111111111, sys.argv[1]) 18 | 19 | @pytest.mark.asyncio 20 | def test_lock_async(self): 21 | async def lock(): 22 | from aredis import StrictRedis 23 | redis_conn = StrictRedis(host="127.0.0.1", port=7777) 24 | lock = DistributedLock(redis_conn, "test_key", 10) 25 | async with lock: 26 | time.sleep(int(sys.argv[1])) 27 | print(11111111111, sys.argv[1]) 28 | import asyncio 29 | loop = asyncio.get_event_loop() 30 | loop.run_until_complete(lock()) 31 | 32 | -------------------------------------------------------------------------------- /tests/test_tools/test_timer.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from toolkit.tools.managers import Timer 4 | 5 | 6 | def test_timer(): 7 | with Timer() as timer: 8 | time.sleep(1) 9 | assert timer.cost > 1 10 | -------------------------------------------------------------------------------- /tests/test_translate.py: -------------------------------------------------------------------------------- 1 | import time 2 | import pytest 3 | import unittest 4 | from toolkit.translator import Translator 5 | 6 | 7 | @pytest.mark.skip 8 | class TranslateTest(unittest.TestCase): 9 | 10 | def assertContainEqual(self, first, second, msg=None): 11 | if not first.count(second): 12 | msg = self._formatMessage(msg, "%s is not contain %s" % (first, second)) 13 | self.fail(msg) 14 | 15 | def trans_thread(self, site): 16 | with Translator({"WEBSITE": site}) as translator: 17 | for i in range(5): 18 | result = translator.translate("what %s fuck day it is!" 
% i) 19 | translator.logger.info(result) 20 | self.assertContainEqual(result, "天") 21 | time.sleep(0.1) 22 | 23 | # def test_baidu(self): 24 | # threads = list() 25 | # for i in range(10): 26 | # th = Thread(target=self.trans_thread, args=("baidu", )) 27 | # th.start() 28 | # threads.append(th) 29 | # for th in threads: 30 | # th.join() 31 | 32 | # def test_google(self): 33 | # threads = list() 34 | # for i in range(10): 35 | # th = Thread(target=self.trans_thread, args=("google", )) 36 | # th.start() 37 | # threads.append(th) 38 | # for th in threads: 39 | # th.join() 40 | 41 | # def test_bing(self): 42 | # threads = list() 43 | # for i in range(10): 44 | # th = Thread(target=self.trans_thread, args=("bing", )) 45 | # th.start() 46 | # threads.append(th) 47 | # for th in threads: 48 | # th.join() 49 | 50 | def test_qq(self): 51 | with Translator({"WEBSITE": "qq"}) as t: 52 | rs = t.translate("what a fuck day it is!") 53 | t.logger.info(rs) 54 | self.assertContainEqual(rs, "天") -------------------------------------------------------------------------------- /tests/testdata/__init__.py: -------------------------------------------------------------------------------- 1 | import abc 2 | ___version__ = '0.1d19.1d3' 3 | 4 | # 测试 -------------------------------------------------------------------------------- /tests/testdata/settings.py: -------------------------------------------------------------------------------- 1 | REDIS_HOST = "0.0.0.0" 2 | REDIS_PORT = 6379 3 | 4 | LOG_LEVEL = 'DEBUG' 5 | LOG_MAX_BYTES = 1024*1024*10 6 | LOG_BACKUPS = 5 7 | LOG_DIR = "logs" 8 | LOG_STDOUT = True 9 | LOG_JSON = False 10 | 11 | PROXY_ACCOUNT_PASSWORD = "longen:jinanlongen2016" 12 | 13 | HEADERS = { 14 | 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0', 15 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 16 | "Accept-Language": "en-US,en;q=0.5", 17 | "Accept-Encoding": "gzip, deflate", 18 | } -------------------------------------------------------------------------------- /toolkit/components/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | """ 3 | @Created on 2019/4/17 4 | @Modify on 2019/4/17 5 | @author cnaafhvk888@gmail.com 6 | """ -------------------------------------------------------------------------------- /toolkit/components/multi_worker.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | import time 3 | 4 | from threading import Thread 5 | from queue import Queue as ThreadQueue, Empty 6 | from multiprocessing import Process, Queue as ProcessQueue, cpu_count 7 | 8 | 9 | class MultiWorker(object): 10 | """ 11 | 多任务处理器 12 | """ 13 | queue_type = ThreadQueue 14 | worker_type = Thread 15 | max_worker_count = 10 16 | 17 | @property 18 | def queue(self): 19 | if self._queue is None: 20 | self._queue = self.queue_type(maxsize=self.max_worker_count) 21 | return self._queue 22 | 23 | def __init__(self, generator, target, worker_count): 24 | """ 25 | 提供一个任务输入生成器及任务函数,生成指定数量的worker,来并行处理任务 26 | 27 | @param generator: 任务生成器,每次迭代返回任务输入所需参数 28 | @param target: 任务处理函数,接收任务生成器传入的参数执行处理任务。 29 | @param worker_count: 希望的worker数量,最多不超过max_worker_count 30 | """ 31 | if worker_count is not None: 32 | self.max_worker_count = min(worker_count, self.max_worker_count) 33 | self.generator = generator 34 | self.target = target 35 | self.start = False 36 | self._queue = None 37 | 38 | def run(self): 39 | """ 40 | 
任务调度开始 41 | 42 | @return: 43 | """ 44 | self.start = True 45 | workers = list() 46 | 47 | # 创建指定数量的worker, 设置为守护模式 48 | for i in range(self.max_worker_count): 49 | worker = self.worker_type(target=self.process) 50 | worker.daemon = True 51 | workers.append(worker) 52 | worker.start() 53 | # 投放任务 54 | for task in self.generator: 55 | self.queue.put(task) 56 | # 等待任务完成 57 | while not self.queue.empty() and any(w.is_alive() for w in workers): 58 | time.sleep(1) 59 | # 等待最后一波任务完成。 60 | self.start = False 61 | for worker in workers: 62 | if worker.is_alive(): 63 | worker.join() 64 | 65 | def process(self): 66 | while self.start: 67 | try: 68 | self.target(*self.queue.get_nowait()) 69 | except Empty: 70 | time.sleep(0.1) 71 | continue 72 | 73 | 74 | class MultiThreadWorker(MultiWorker): 75 | pass 76 | 77 | 78 | class MultiProcessWorker(MultiWorker): 79 | max_worker_count = cpu_count() 80 | queue_type = ProcessQueue() 81 | worker_type = Process 82 | -------------------------------------------------------------------------------- /toolkit/service/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | """ 3 | @Created on 2019/4/17 4 | @Modify on 2019/4/17 5 | @author cnaafhvk888@gmail.com 6 | """ -------------------------------------------------------------------------------- /toolkit/service/console.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | import sys 3 | import time 4 | 5 | from socket import socket 6 | from threading import Thread, local 7 | from code import InteractiveInterpreter 8 | 9 | 10 | __all__ = ["CustomInteractiveInterpreter", "_local"] 11 | _local = local() 12 | _displayhook = sys.displayhook 13 | 14 | 15 | class StringO(object): 16 | """ 17 | 替代标准输出 18 | """ 19 | def __init__(self): 20 | self._buffer = [] 21 | 22 | def isatty(self): 23 | return False 24 | 25 | def close(self): 26 | pass 27 | 28 | def flush(self): 29 | pass 30 | 31 | def seek(self, n, mode=0): 32 | pass 33 | 34 | def readline(self): 35 | if len(self._buffer) == 0: 36 | return '' 37 | ret = self._buffer[0] 38 | del self._buffer[0] 39 | return ret 40 | 41 | def reset(self): 42 | val = ''.join(self._buffer) 43 | del self._buffer[:] 44 | return val 45 | 46 | def _write(self, x): 47 | if isinstance(x, bytes): 48 | x = x.decode('utf-8', 'replace') 49 | self._buffer.append(str(x)) 50 | 51 | def write(self, x): 52 | self._write(x) 53 | 54 | def writelines(self, x): 55 | self._write(''.join(x)) 56 | 57 | 58 | class ThreadedStream(object): 59 | 60 | """Thread-local wrapper for sys.stdout for the interactive console.""" 61 | 62 | def push(): 63 | if not isinstance(sys.stdout, ThreadedStream): 64 | sys.stdout = ThreadedStream() 65 | _local.stream = StringO() 66 | push = staticmethod(push) 67 | 68 | def fetch(): 69 | try: 70 | stream = _local.stream 71 | except AttributeError: 72 | return '' 73 | return stream.reset() 74 | fetch = staticmethod(fetch) 75 | 76 | def displayhook(obj): 77 | try: 78 | stream = _local.stream 79 | except AttributeError: 80 | return _displayhook(obj) 81 | # stream._write bypasses escaping as debug_repr is 82 | # already generating HTML for us. 
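
Stepping back to multi_worker.py above: its docstrings describe a generator that yields argument tuples, a target callable that consumes them, and up to worker_count parallel workers. A minimal sketch of that pattern (the task generator and handler names below are illustrative, not part of the repository):

```python
from toolkit.components.multi_worker import MultiThreadWorker

def tasks():
    for i in range(20):
        yield (i,)  # each yielded item is the argument tuple passed to the target

def handle(i):
    print("processed", i)

# run() spawns daemon worker threads, feeds them from tasks() and blocks until done
MultiThreadWorker(tasks(), handle, worker_count=4).run()
```
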
83 | if obj is not None: 84 | _local._current_ipy.locals['_'] = obj 85 | stream._write(obj) 86 | displayhook = staticmethod(displayhook) 87 | 88 | def __setattr__(self, name, value): 89 | raise AttributeError('read only attribute %s' % name) 90 | 91 | def __dir__(self): 92 | return dir(sys.__stdout__) 93 | 94 | def __getattribute__(self, name): 95 | if name == '__members__': 96 | return dir(sys.__stdout__) 97 | try: 98 | stream = _local.stream 99 | except AttributeError: 100 | stream = sys.__stdout__ 101 | return getattr(stream, name) 102 | 103 | def __repr__(self): 104 | return repr(sys.__stdout__) 105 | 106 | 107 | sys.displayhook = ThreadedStream.displayhook 108 | 109 | 110 | class CustomInteractiveInterpreter(InteractiveInterpreter): 111 | 112 | def __init__(self, locals): 113 | super(CustomInteractiveInterpreter, self).__init__(locals) 114 | self.buffer = [] 115 | self.stdout = sys.stdout 116 | self.more = False 117 | 118 | def runsource(self, source): 119 | source = source.rstrip() + '\n' 120 | try: 121 | ThreadedStream.push() 122 | source_to_eval = ''.join(self.buffer + [source]) 123 | result = super( 124 | CustomInteractiveInterpreter, self).runsource(source_to_eval) 125 | if (self.more or result) and source != "\n": 126 | self.buffer.append(source) 127 | self.more = True 128 | else: 129 | del self.buffer[:] 130 | self.more = False 131 | finally: 132 | output = ThreadedStream.fetch() 133 | sys.stdout = self.stdout 134 | prompt = self.more and '... ' or '>>> ' 135 | return prompt + output 136 | 137 | def write(self, data): 138 | sys.stdout.write(data) -------------------------------------------------------------------------------- /toolkit/service/monitors.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import signal 4 | import logging 5 | 6 | from .. import cache_property 7 | from ..singleton import Singleton 8 | 9 | __all__ = ["ParallelMonitor"] 10 | 11 | 12 | class ParallelMonitor(object, metaclass=Singleton): 13 | """ 14 | 支持多线程多进程统一管理 15 | """ 16 | alive = True 17 | name = "parallel_monitor" 18 | children = [] 19 | int_signal_count = 1 20 | 21 | def __init__(self): 22 | self.open() 23 | super().__init__() 24 | 25 | @cache_property 26 | def logger(self): 27 | logger = logging.getLogger(self.name) 28 | logger.setLevel(10) 29 | logger.addHandler(logging.StreamHandler(sys.stdout)) 30 | return logger 31 | 32 | def stop(self, sig=None, frame=None): 33 | if self.int_signal_count > 1: 34 | self.logger.info("Force to terminate...") 35 | for th in self.children[:]: 36 | self.stop_child(th) 37 | pid = os.getpid() 38 | os.kill(pid, 9) 39 | 40 | else: 41 | self.alive = False 42 | self.logger.info("Close process %s..." 
% self.name) 43 | self.int_signal_count += 1 44 | 45 | def open(self): 46 | signal.signal(signal.SIGINT, self.stop) 47 | signal.signal(signal.SIGTERM, self.stop) 48 | 49 | def stop_child(self, child): 50 | pass 51 | 52 | -------------------------------------------------------------------------------- /toolkit/settings/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | """ 3 | @Created on 2019/4/17 4 | @Modify on 2019/4/17 5 | @author cnaafhvk888@gmail.com 6 | """ 7 | from .settings import Settings, SettingsLoader, FrozenSettings 8 | 9 | __all__ = ["Settings", "SettingsLoader", "FrozenSettings"] 10 | -------------------------------------------------------------------------------- /toolkit/settings/frozen.py: -------------------------------------------------------------------------------- 1 | from collections.abc import MutableSequence, MutableMapping, MutableSet 2 | 3 | __all__ = ["Frozen", "FrozenSettings"] 4 | 5 | 6 | class Frozen(MutableSequence, MutableMapping): 7 | """ 8 | 替换json取数由[]变成.的形式 9 | """ 10 | def __new__(cls, json): 11 | if isinstance(json, (MutableSequence, MutableSet)): 12 | instance = super().__new__(cls) 13 | object.__setattr__(instance, "_json", 14 | json.__class__(cls(val) for val in json)) 15 | elif isinstance(json, MutableMapping): 16 | instance = super().__new__(cls) 17 | object.__setattr__(instance, "_json", json) 18 | else: 19 | instance = json 20 | return instance 21 | 22 | def __getattr__(self, item): 23 | if hasattr(self._json, item): 24 | return getattr(self._json, item) 25 | else: 26 | prop_val = self._json.get(item) 27 | if prop_val is None: 28 | raise AttributeError(item) 29 | elif isinstance(prop_val, (list, dict)): 30 | return self.__class__(prop_val) 31 | else: 32 | return prop_val 33 | 34 | def __setattr__(self, key, value): 35 | raise NotImplementedError 36 | 37 | def __getitem__(self, item): 38 | return self._json[item] 39 | 40 | def __iter__(self): 41 | return iter(self._json) 42 | 43 | def __bool__(self): 44 | return bool(self._json) 45 | 46 | def __len__(self): 47 | return len(self._json) 48 | 49 | def __delitem__(self, item): 50 | raise NotImplementedError 51 | 52 | def __setitem__(self, key, value): 53 | raise NotImplementedError 54 | 55 | def insert(self, index, value): 56 | raise NotImplementedError 57 | 58 | def __str__(self): 59 | return str(self._json) 60 | 61 | __repr__ = __str__ 62 | 63 | def normalize(self): 64 | """ 65 | 使用Frozen对象获取列表或字典时,为了支持级联`.`操作, 66 | 字典和列表对象被转换成了Frozen对象,如果直接使用,有时可能会引发错误。 67 | 使用此方法得到真实的列表或字典。 68 | :return: 69 | """ 70 | val = self._json 71 | if isinstance(val, (MutableSequence, MutableSet)): 72 | return val.__class__( 73 | i.normalize() if hasattr(i, "normalize") else i for i in val) 74 | else: 75 | return val 76 | 77 | 78 | class FrozenSettings(Frozen): 79 | pass 80 | 81 | 82 | if __name__ == "__main__": 83 | json = {"aa": [1, 2, 3, {"b": 3, "c": [4, 5, {"d": 33}]}]} 84 | f = Frozen(json) 85 | print(f.aa[3].c[2].d) 86 | print(f) 87 | -------------------------------------------------------------------------------- /toolkit/singleton.py: -------------------------------------------------------------------------------- 1 | """ 2 | 实例创建时,调用顺序 3 | 元类的__call__ 4 | 类的__new__ 5 | 类的__init__ 6 | 子类的元类(除type以外)必须是父类元类的子类 7 | 使用了元类的类的子类也会使用该元类创建 8 | """ 9 | from threading import RLock 10 | 11 | 12 | __all__ = ["Singleton"] 13 | 14 | 15 | class Singleton(type): 16 | """ 17 | 单例类实现 18 | """ 19 | lock = RLock() 20 | 21 | def __new__(mcs, *args, 
**kwargs): 22 | """ 23 | 元类msc通过__new__组建类对象,其中msc指Singleton 24 | :param args: 可以包含类构建所需要三元素,类名,父类,命名空间, 其中 25 | 命名空间中__qualname__和函数的__qualname__均含有classname做为前缀, 26 | 在这里,如果想替换类名,需要把以上全部替换才可以。 27 | :param kwargs: 可以自定义传递一些参数 28 | :return: 返回类对象,通过super(Singleton, mcs).__new__此时已经组装好了类 29 | """ 30 | class_name, bases, dict = args 31 | dict["_instance"] = None 32 | cls = super(Singleton, mcs).__new__( 33 | mcs, class_name, bases, dict, **kwargs) 34 | return cls 35 | 36 | def __call__(cls, *args, **kwargs): 37 | with cls.lock: 38 | cls._instance = cls._instance or super(Singleton, cls).__call__( 39 | *args, **kwargs) 40 | return cls._instance 41 | 42 | 43 | if __name__ == "__main__": 44 | 45 | class A(metaclass=Singleton): 46 | pass 47 | 48 | print(A() is A()) -------------------------------------------------------------------------------- /toolkit/structures/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | """ 3 | @Created on 2019/4/17 4 | @Modify on 2019/4/17 5 | @author cnaafhvk888@gmail.com 6 | """ -------------------------------------------------------------------------------- /toolkit/structures/thread_safe_collections.py: -------------------------------------------------------------------------------- 1 | from threading import RLock 2 | from collections import UserDict 3 | from collections.abc import MutableSet 4 | 5 | from .. import thread_safe_for_method 6 | 7 | 8 | class ThreadSafeSet(MutableSet): 9 | def __init__(self, *args, **kwargs): 10 | self._data = set(*args, **kwargs) 11 | self.lock = RLock() 12 | 13 | @thread_safe_for_method 14 | def add(self, value): 15 | return self._data.add(value) 16 | 17 | @thread_safe_for_method 18 | def discard(self, value): 19 | return self._data.discard(value) 20 | 21 | @thread_safe_for_method 22 | def pop_all(self): 23 | while len(self._data): 24 | yield self.pop() 25 | 26 | @thread_safe_for_method 27 | def update(self, seq): 28 | self._data.update(seq) 29 | 30 | @thread_safe_for_method 31 | def __contains__(self, item): 32 | return item in self._data 33 | 34 | @thread_safe_for_method 35 | def __iter__(self): 36 | return iter(self._data) 37 | 38 | @thread_safe_for_method 39 | def __len__(self): 40 | return len(self._data) 41 | 42 | 43 | class TreadSafeDict(UserDict): 44 | def __init__(self, *args, **kwargs): 45 | super(TreadSafeDict, self).__init__(*args, **kwargs) 46 | self.lock = RLock() 47 | 48 | @thread_safe_for_method 49 | def update(*args, **kwds): 50 | super(TreadSafeDict, args[0]).update(*args[1:], **kwds) 51 | 52 | @thread_safe_for_method 53 | def pop_all(self): 54 | while len(self): 55 | yield self.popitem() 56 | -------------------------------------------------------------------------------- /toolkit/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | """ 3 | @Created on 2019/4/17 4 | @Modify on 2019/4/17 5 | @author cnaafhvk888@gmail.com 6 | """ -------------------------------------------------------------------------------- /toolkit/tools/file_buffalo.py: -------------------------------------------------------------------------------- 1 | import time 2 | import asyncio 3 | 4 | from threading import RLock 5 | 6 | 7 | class FileBuffalo(object): 8 | """ 9 | 文件流管道, 同步写,异步读 10 | """ 11 | def __init__(self, max_size=1024*1024): 12 | self._max_size = max_size 13 | self._datas = list() 14 | self._size = 0 15 | self._finished = False 16 | self.name = "tmp" 17 | self.lock = RLock() 18 | 19 
| @property 20 | def finished(self): 21 | return not self._finished and not self._datas 22 | 23 | def finish(self): 24 | self._finished = True 25 | 26 | def write(self, data): 27 | """ 28 | 同步写接口 29 | :param data: 30 | :return: 31 | """ 32 | while not self._finished: 33 | with self.lock: 34 | if self._size < self._max_size: 35 | self._datas.append(data) 36 | self._size += len(data) 37 | break 38 | time.sleep(0.1) 39 | 40 | async def read(self, size=1024000): 41 | """ 42 | 异步读接口 43 | :param size: 44 | :return: 45 | """ 46 | buffer = b"" 47 | while not self.finished and len(buffer) < size: 48 | if self._datas: 49 | with self.lock: 50 | data = self._datas.pop(0) 51 | buffer += data 52 | self._size -= len(data) 53 | else: 54 | await asyncio.sleep(0.1) 55 | return buffer 56 | -------------------------------------------------------------------------------- /toolkit/tools/managers.py: -------------------------------------------------------------------------------- 1 | import time 2 | import traceback 3 | 4 | from .. import find_caller_name 5 | 6 | __all__ = ["Blocker", "ExceptContext", "Timer"] 7 | 8 | 9 | class Blocker(object): 10 | """ 11 | 有的时候我们需要将线程停止一段时间,通常我们选择调用time.sleep(..), 12 | 当我们需要sleep很长一段时间,比如一分钟以上时,如果这时我们选择关闭程序, 13 | 而我们通过singal注册了关闭信号的监听器,用来改变当时程序的状态, 14 | 如果置self.alive = False,由于time.sleep阻塞导致我们的程序当前线程无法获知alive状态, 15 | 难以被关闭,通过使用Blocker,我们可以避免上述情况发生。 16 | eg: 17 | if Blocker(sleep_time).wait_timeout_or_notify(notify=lambda: time.time() > 1000000): 18 | `返回true, 我们知道是被唤醒了,而不是时间到了` 19 | .... 20 | """ 21 | def __init__(self, block_time): 22 | """ 23 | :param block_time: 需要阻塞的时长 24 | 这个对象会被传递给notify回调函数 25 | """ 26 | self.block_time = block_time 27 | self.interval = 0.5 28 | 29 | def wait_timeout_or_notify(self, notify=lambda: False): 30 | start_time = time.time() 31 | is_notified = False 32 | while time.time() - start_time < self.block_time: 33 | is_notified = notify() 34 | if is_notified: 35 | break 36 | time.sleep(self.interval) 37 | return is_notified 38 | 39 | 40 | class ExceptContext(object): 41 | """ 42 | 异常捕获上下文 43 | eg: 44 | def test(): 45 | with ExceptContext(Exception, errback=lambda name, *args:print(name)): 46 | raise Exception("test. ..") 47 | """ 48 | def __init__(self, exception=Exception, func_name=None, 49 | errback=lambda func_name, *args: 50 | traceback.print_exception(*args) is None, 51 | finalback=lambda got_err: got_err): 52 | """ 53 | :param exception: 指定要监控的异常 54 | :param func_name: 可以选择提供当前所在函数的名称,回调函数会提交到函数,用于跟踪 55 | :param errback: 提供一个回调函数,如果发生了指定异常, 56 | 就调用该函数,该函数的返回值为True时不会继续抛出异常 57 | :param finalback: finally要做的操作 58 | """ 59 | self.errback = errback 60 | self.finalback = finalback 61 | self.exception = exception 62 | self.got_err = False 63 | self.err_info = None 64 | self.func_name = func_name or find_caller_name(is_func=True) 65 | 66 | def __enter__(self): 67 | return self 68 | 69 | def __exit__(self, exc_type, exc_val, exc_tb): 70 | return_code = False 71 | if isinstance(exc_val, self.exception): 72 | self.got_err = True 73 | self.err_info = (exc_type, exc_val, exc_tb) 74 | return_code = self.errback(self.func_name, exc_type, exc_val, exc_tb) 75 | self.finalback(self.got_err) 76 | return return_code 77 | 78 | 79 | class Timer(object): 80 | """ 81 | 计时器,对于需要计时的代码进行with操作: 82 | with Timer() as timer: 83 | ... 84 | ... 85 | print(timer.cost) 86 | ... 
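
Relating to thread_safe_collections.py above: a short sketch of the lock-guarded set, whose distinctive method is the draining generator pop_all(); the values below are illustrative:

```python
from toolkit.structures.thread_safe_collections import ThreadSafeSet

safe_set = ThreadSafeSet({1, 2})
safe_set.add(3)
safe_set.update({4, 5})
assert 3 in safe_set
print(sorted(safe_set.pop_all()))  # drains the set item by item via its lock-guarded methods
```
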
87 | """ 88 | def __init__(self, start=None): 89 | self.start = start if start is not None else time.time() 90 | 91 | def __enter__(self): 92 | return self 93 | 94 | def __exit__(self, exc_type, exc_val, exc_tb): 95 | self.stop = time.time() 96 | self.cost = self.stop - self.start 97 | return exc_type is None -------------------------------------------------------------------------------- /toolkit/tools/markdown_helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from markdown import markdown 4 | from argparse import ArgumentParser 5 | 6 | from ..service.monitors import ParallelMonitor 7 | 8 | 9 | class MarkDownRender(object): 10 | tpl = """ 11 | 12 | 13 | 14 | 29 |
30 | %s 31 |
32 | 33 | """ 34 | 35 | def __init__(self, css, file_path): 36 | self.css = css 37 | self.file_path = file_path 38 | 39 | def __enter__(self): 40 | return self 41 | 42 | @staticmethod 43 | def _walk(path): 44 | for root, dir, filenames in os.walk(path): 45 | for fn in filenames: 46 | if fn.endswith(".md"): 47 | fp = os.path.join(root, fn) 48 | # 返回文件名和文件目录深度,深度用来设置css相对路径 49 | yield fp, fp.replace(path, "").strip("/").count("/") 50 | 51 | def render(self): 52 | """ 53 | 用来渲染file_path下的md,将其输出为html格式 54 | :return: 55 | """ 56 | if os.path.isfile(self.file_path): 57 | files = [(self.file_path, 0)] 58 | else: 59 | files = self._walk(self.file_path) 60 | 61 | first_page = None 62 | for input, deep in files: 63 | buffer = markdown(open(input).read(), 64 | extensions=['markdown.extensions.extra']) 65 | output = input + ".html" 66 | first_page = first_page or output 67 | 68 | with open(output, "w") as f: 69 | f.write(self.tpl % ("../" * deep + self.css, buffer)) 70 | return first_page 71 | 72 | def render_css(self): 73 | if os.path.isfile(self.file_path): 74 | self.file_path = os.path.dirname(self.file_path) 75 | 76 | with open(os.path.join(self.file_path, self.css), "w") as f: 77 | f.write(open(os.path.join(os.path.dirname(__file__), self.css)).read()) 78 | 79 | def __exit__(self, exc_type, exc_val, exc_tb): 80 | self.render_css() 81 | 82 | 83 | class MarkDownHelper(ParallelMonitor): 84 | """ 85 | 将markdown文件转换成html文件并打开 86 | """ 87 | def run(self): 88 | parser = ArgumentParser() 89 | parser.add_argument("input", help="markdown file or path.") 90 | parser.add_argument( 91 | "-c", "--css", help="css file name.", default="github-markdown.css") 92 | args = parser.parse_args() 93 | mk_render = MarkDownRender(args.css, args.input) 94 | 95 | with mk_render: 96 | output = mk_render.render() 97 | 98 | if output: 99 | self.logger.debug(f"打开{output}.") 100 | os.system(f"open {output}") 101 | else: 102 | self.logger.info("未发现可转换的文件!") 103 | 104 | 105 | def main(): 106 | MarkDownHelper().run() 107 | -------------------------------------------------------------------------------- /toolkit/tools/package_control.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | 4 | from functools import partial 5 | from argparse import ArgumentParser 6 | 7 | 8 | def get_version(package): 9 | """ 10 | Return package version as listed in `__version__` in `__init__.py`. 
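
A usage sketch for MarkDownRender from markdown_helper.py above, mirroring what the mk-html entry point does; the notes/ directory is an assumption and must already exist:

```python
from toolkit.tools.markdown_helper import MarkDownRender

# renders every .md file under notes/ into a styled .html next to it;
# leaving the with-block also copies github-markdown.css into notes/
with MarkDownRender("github-markdown.css", "notes") as renderer:
    first_page = renderer.render()
print(first_page)  # path of the first generated .html, or None if nothing matched
```
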
11 | """ 12 | init_py = open(os.path.join(package, '__init__.py')).read() 13 | mth = re.search(r"__version__\s?=\s?['\"]([^'\"]+)['\"]", init_py) 14 | if mth: 15 | return mth.group(1) 16 | else: 17 | raise RuntimeError("Cannot find version!") 18 | 19 | 20 | def install_requires(): 21 | """ 22 | Return requires in requirements.txt 23 | :return: 24 | """ 25 | try: 26 | with open("requirements.txt") as f: 27 | return [line.strip() for line in f.readlines() if line.strip()] 28 | except OSError: 29 | return [] 30 | 31 | 32 | def change_version(index=None, dev=False, package_name=None): 33 | 34 | if not index: 35 | parser = ArgumentParser() 36 | parser.add_argument("-i", "--index", type=int, help="版本顺位", default=3) 37 | parser.add_argument("-d", "--dev", action="store_true", help="是否是开发模式") 38 | args = parser.parse_args() 39 | index = args.index 40 | dev = args.dev 41 | 42 | package = package_name or os.path.basename(os.path.abspath(os.getcwd())).replace("-", "_") 43 | with open(os.path.join(package, '__init__.py'), "r+", encoding="utf-8") as f: 44 | init_py = f.read() 45 | f.seek(0) 46 | buf = re.sub( 47 | r"(__version__\s?=\s?['\"])([^'\"]+)(['\"])", 48 | partial(_repl, index=int(index), dev=dev), init_py) 49 | f.write(buf) 50 | 51 | 52 | def _repl(mth, index, dev): 53 | versions = mth.group(2).split(".") 54 | vs = versions[index - 1] 55 | length = len(vs) 56 | 57 | if vs.isdigit(): 58 | new_vs = str(int(vs) + 1) + ("dev1" if dev else "") 59 | else: 60 | def _rep(mth, dev): 61 | string = mth.group(1) 62 | # 如果是版本开始,如:1d1中的第一个1,那么无论是否dev,1都可以保持不变。 63 | # 否则,则是第二个1或者d,当是dev=False时,这些都是要被舍弃的(正式版不含dev) 64 | # 只dev=True且是第二个1时,才需要自增1,如果是d,那么保持不变就可以。 65 | if mth.start(): 66 | if dev: 67 | if string.isdigit(): 68 | return str(int(string) + 1) 69 | else: 70 | return "" 71 | 72 | return string 73 | 74 | new_vs = re.sub(r"(\d+|[a-zA-Z]+)", partial(_rep, dev=dev), vs) 75 | if not new_vs[-1].isdigit(): 76 | new_vs += "1" 77 | versions[index - 1] = new_vs 78 | blank = (length - len(new_vs)) * " " 79 | 80 | # 如果不是第三位+1。而是第二位或得第一位,则后续的位数应该清0 81 | for i in range(index, len(versions)): 82 | blank += " " * (len(versions[i]) - 1) 83 | versions[i] = "0" 84 | 85 | return mth.group(1) + ".".join(versions) + mth.group(3) + blank -------------------------------------------------------------------------------- /toolkit/tools/redis_tools.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import time 3 | 4 | 5 | class DistributedLock(object): 6 | """ 7 | Redis分布式锁 8 | """ 9 | SCRIPT = "if redis.call('get', KEYS[1]) == ARGV[1] then " \ 10 | "return redis.call('del', KEYS[1]) else return 0 end" 11 | 12 | def __init__(self, redis_conn, lock_key, expire, roll_interval=0.1): 13 | self.lock_key = lock_key 14 | self.redis_conn = redis_conn 15 | self.req_id = uuid.uuid4() 16 | self.expire = expire 17 | self.roll_interval = roll_interval 18 | 19 | def __enter__(self): 20 | self.lock() 21 | return self 22 | 23 | def __exit__(self, exc_type, exc_val, exc_tb): 24 | self.unlock() 25 | 26 | async def __aenter__(self): 27 | return await self.alock() 28 | 29 | async def __aexit__(self, exc_type, exc_val, exc_tb): 30 | await self.aunlock() 31 | 32 | def _lock(self): 33 | return self.redis_conn.set( 34 | self.lock_key, self.req_id, ex=self.expire, nx=True) 35 | 36 | def lock(self): 37 | while not self._lock(): 38 | time.sleep(self.roll_interval) 39 | 40 | def unlock(self): 41 | script = self.redis_conn.register_script(self.SCRIPT) 42 | return script(keys=[self.lock_key], 
args=[self.req_id]) 43 | 44 | async def _alock(self): 45 | return await self.redis_conn.set( 46 | self.lock_key, self.req_id, ex=self.expire, nx=True) 47 | 48 | async def alock(self): 49 | while not await self._alock(): 50 | time.sleep(self.roll_interval) 51 | 52 | async def aunlock(self): 53 | script = self.redis_conn.register_script(self.SCRIPT) 54 | return await script.execute(keys=[self.lock_key], args=[self.req_id]) 55 | -------------------------------------------------------------------------------- /toolkit/translator/__init__.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from redis import Redis 4 | from itertools import repeat 5 | 6 | from .translate_adapter import TranslateAdapter 7 | 8 | __all__ = ["Translator"] 9 | 10 | 11 | class Translator(TranslateAdapter): 12 | """ 13 | 基于代理池的翻译类 14 | """ 15 | name = "translator" 16 | web_site = None 17 | translate_timeout = None 18 | retry_times = None 19 | 20 | def __init__(self, settings): 21 | super(Translator, self).__init__() 22 | self.web_site = settings.get( 23 | "WEBSITE", "baidu,qq,google").split(",") 24 | self.retry_times = settings.get("TRANSLATE_RETRY_TIMES", 10) 25 | self.translate_timeout = settings.get("TRANSLATE_TIMEOUT", 10) 26 | self.headers = settings.get("HEADERS") or self.headers 27 | self.protocols = settings.get("PROTOCOLS", "http,https").split( 28 | ",") 29 | self.redis_conn = Redis( 30 | settings.get("REDIS_HOST", "0.0.0.0"), 31 | settings.get_int("REDIS_PORT", 6379)) 32 | self.proxy_sets = settings.get("PROXY_SETS", "proxy_set").split(",") 33 | self.account_password = settings.get("PROXY_ACCOUNT_PASSWORD", "") 34 | 35 | def proxy_choice(self): 36 | """ 37 | 随机选取代理 38 | :return: 代理 39 | """ 40 | proxy = self.redis_conn.srandmember(random.choice(self.proxy_sets)) 41 | if proxy: 42 | proxy_str = "http://%s%s" % ( 43 | self.account_password+"@" if self.account_password else "", 44 | proxy.decode()) 45 | self.proxy = dict(zip(self.protocols, repeat(proxy_str))) 46 | return self.proxy -------------------------------------------------------------------------------- /tools/bubble_sort.py: -------------------------------------------------------------------------------- 1 | 2 | def bubble_sort(arr): 3 | for i in range(len(arr)-1): 4 | for j in range(1, len(arr)-i): 5 | if arr[j-1] > arr[j]: 6 | arr[j-1], arr[j] = arr[j], arr[j-1] 7 | 8 | 9 | if __name__ == "__main__": 10 | arr = [5, 3, 1, 7, 3, 7, 0, 4, 8] 11 | bubble_sort(arr) 12 | print(arr) 13 | -------------------------------------------------------------------------------- /tools/coroutine.py: -------------------------------------------------------------------------------- 1 | import tornado 2 | from collections import deque 3 | from select import epoll 4 | from tornado.platform.auto import Waker 5 | 6 | from toolkit.singleton import Singleton 7 | 8 | 9 | class EPollIOLoop(metaclass=Singleton): 10 | _EPOLLIN = 0x001 11 | _EPOLLPRI = 0x002 12 | _EPOLLOUT = 0x004 13 | _EPOLLERR = 0x008 14 | _EPOLLHUP = 0x010 15 | _EPOLLRDHUP = 0x2000 16 | _EPOLLONESHOT = (1 << 30) 17 | _EPOLLET = (1 << 31) 18 | 19 | # Our events map exactly to the epoll events 20 | NONE = 0 21 | READ = _EPOLLIN 22 | WRITE = _EPOLLOUT 23 | ERROR = _EPOLLERR | _EPOLLHUP 24 | 25 | def __init__(self): 26 | self._impl = epoll() 27 | self._waker = Waker() 28 | self.add_handler(self._waker.fileno(), 29 | lambda fd, events: self._waker.consume(), 30 | self.READ) 31 | self._handlers = {} 32 | self._events = {} 33 | self.coroutine_futures = dict() 34 
| self.waits = deque() 35 | self.callbacks = deque() 36 | 37 | def add_handler(self, fd, handler, events): 38 | fd, obj = self.split_fd(fd) 39 | self._handlers[fd] = (obj, handler) 40 | self._impl.register(fd, events | self.ERROR) 41 | 42 | def update_handler(self, fd, events): 43 | fd, obj = self.split_fd(fd) 44 | self._impl.modify(fd, events | self.ERROR) 45 | 46 | def remove_handler(self, fd): 47 | fd, obj = self.split_fd(fd) 48 | self._handlers.pop(fd, None) 49 | self._events.pop(fd, None) 50 | try: 51 | self._impl.unregister(fd) 52 | except Exception: 53 | print("Error deleting fd from IOLoop") 54 | 55 | def split_fd(self, fd): 56 | try: 57 | return fd.fileno(), fd 58 | except AttributeError: 59 | return fd, fd 60 | 61 | def start(self): 62 | while True: 63 | for i in range(len(self.waits)): 64 | coroutine = self.waits.popleft() 65 | if self.coroutine_futures[coroutine].is_ok(): 66 | self.callbacks.append(coroutine) 67 | else: 68 | self.waits.append(coroutine) 69 | 70 | for i in range(len(self.callbacks)): 71 | coroutine = self.callbacks.popleft() 72 | future = self.coroutine_futures[coroutine] 73 | coroutine.send(future) 74 | if future.is_ok(): 75 | self.callbacks.append(future) 76 | else: 77 | self.waits.append(future) 78 | event_pairs = self._impl.poll(10) 79 | 80 | self._events.update(event_pairs) 81 | while self._events: 82 | fd, events = self._events.popitem() 83 | fd_obj, handler_func = self._handlers[fd] 84 | handler_func(fd_obj, events) 85 | 86 | 87 | class Future(object): 88 | def __init__(self, ok_callback=lambda: True): 89 | self._done = False 90 | self._result = None 91 | self._callbacks = [] 92 | self.ok_callback = ok_callback 93 | 94 | def is_ok(self): 95 | return self.ok_callback() 96 | 97 | def set_ok_callback(self, ok_callback): 98 | self.ok_callback = ok_callback 99 | 100 | def cancel(self): 101 | return False 102 | 103 | def cancelled(self): 104 | return False 105 | 106 | def running(self): 107 | return not self._done 108 | 109 | def done(self): 110 | return self._done 111 | 112 | def result(self, timeout=None): 113 | if self._result is not None: 114 | return self._result 115 | return self._result 116 | 117 | def add_done_callback(self, fn): 118 | 119 | if self._done: 120 | fn(self) 121 | else: 122 | self._callbacks.append(fn) 123 | 124 | def set_result(self, result): 125 | self._result = result 126 | self._set_done() 127 | 128 | def _set_done(self): 129 | self._done = True 130 | for cb in self._callbacks: 131 | cb(self) 132 | self._callbacks = None 133 | 134 | import time 135 | 136 | def sleep(seconds): 137 | f = Future() 138 | f.set_ok_callback(lambda start_time=time.time(): time.time() >= start_time + seconds) 139 | yield f 140 | 141 | def main(): 142 | future = Future() 143 | def sum(a, b): 144 | yield future 145 | yield from sleep(2) 146 | return a + b 147 | result = yield from sum(1, 2) 148 | print(result) 149 | 150 | if __name__ == "__main__": 151 | main() -------------------------------------------------------------------------------- /tools/curl_to_requests.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | from toolkit import re_search 4 | 5 | 6 | def repl(mth): 7 | return mth.group().encode('utf-8').decode('unicode_escape') 8 | 9 | 10 | def cur_to_requests(curl_cmd, filename): 11 | tmpl = """import requests 12 | 13 | 14 | def main(): 15 | url = "{}" 16 | headers = {} 17 | form = {} 18 | json = {} 19 | resp = requests.{}(url, headers=headers, json=json, data=form) 20 | print(resp.text) 21 
| 22 | 23 | main() 24 | """ 25 | url = re.search(r"'(http.*?)'", curl_cmd).group(1) 26 | headers = json.dumps( 27 | dict(tuple(v.strip() for v in header.split(":", 1)) for header in re.findall(r"-H '(.*?)'", curl_cmd)), indent=4) 28 | form = re_search(r"--data ?'(.*?)'", curl_cmd, default=None) 29 | 30 | json_data = re_search(r"--data-binary \$?'(.*?)'", curl_cmd, default=None) 31 | if form: 32 | form = json.dumps(dict(tuple(param.split("=", 1)) for param in form.replace("+", " ").split("&")), indent=4) 33 | if json_data: 34 | json_data = re.sub(r"\\u\w{4}", repl, json.dumps(json.loads(json_data), indent=4).replace("false", "False").replace("true", "True").replace("null", "None")) 35 | 36 | with open(filename, "w") as f: 37 | f.write(tmpl.format( 38 | url, 39 | headers, 40 | form, 41 | json_data, 42 | "post" if form or json_data else "get")) 43 | 44 | import sys 45 | cur_to_requests(sys.argv[1], sys.argv[2]) -------------------------------------------------------------------------------- /tools/get_charged.py: -------------------------------------------------------------------------------- 1 | def charged(pay_num, money_num): 2 | if pay_num == 0: 3 | return 0 4 | return min(get_min(pay_num, money_num)) + 1 5 | 6 | 7 | def get_min(pay_num, money_num): 8 | for money in money_num: 9 | last_num = pay_num - money 10 | if last_num < 0: 11 | continue 12 | try: 13 | yield charged(last_num, money_num) 14 | except ValueError: 15 | continue 16 | 17 | 18 | def money_count(pay_num, money_num): 19 | try: 20 | return charged(pay_num, money_num) 21 | except ValueError: 22 | print("无法找零") 23 | 24 | 25 | if __name__ == "__main__": 26 | print(money_count(13, [1, 3, 5])) -------------------------------------------------------------------------------- /tools/hash_set.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | from collections.abc import MutableSet 4 | 5 | __all__ = ["HashSet"] 6 | 7 | 8 | class HashSet(MutableSet): 9 | """ 10 | 自实现hashset,采用多维数组的方式进行存储数据,读写的性能比set差,但内存使用量比set少 11 | """ 12 | data_tmp = [None] * 10 13 | 14 | def __init__(self): 15 | self.data = copy.copy(self.data_tmp) 16 | self.size = 0 17 | 18 | def add(self, value): 19 | self.dive(hash(value), self.data, value) 20 | 21 | def dive(self, hash_val, current_list, value, remove=False, contains=False): 22 | div, mod = divmod(hash_val, 10) 23 | if current_list[mod] is None: 24 | current_list[mod] = copy.copy(self.data_tmp) 25 | elif not isinstance(current_list[mod], list): 26 | val = current_list[mod] 27 | current_list[mod] = copy.copy(self.data_tmp) 28 | current_list[mod][0] = val 29 | if div > 9: 30 | result = self.dive(div, current_list[mod], value, remove, contains) 31 | if result: 32 | if not filter(None, current_list[mod]): 33 | current_list[mod] = None 34 | return True 35 | return result 36 | elif remove: 37 | cur_li, index, current_node = self._first(current_list[mod][div]) 38 | cur_li = cur_li or current_list[mod] 39 | index = index if index is not None else div 40 | if current_node is not None: 41 | if not contains: 42 | cur_li[index] = None 43 | self.size -= 1 44 | return True 45 | else: 46 | if not contains: 47 | raise KeyError(value) 48 | return False 49 | else: 50 | if current_list[mod][div] is None: 51 | current_list[mod][div] = value 52 | self.size += 1 53 | else: 54 | return False 55 | 56 | def _first(self, ls): 57 | if isinstance(ls, list): 58 | return ls, 0, self._first(ls[0])[2] 59 | else: 60 | return None, None, ls 61 | 62 | def __iter__(self): 63 | return 
self.next(self.data)
64 | 
65 |     def next(self, ls):
66 |         for i in ls:
67 |             if isinstance(i, list):
68 |                 yield from self.next(i)
69 |             else:
70 |                 if i is not None:
71 |                     yield i
72 | 
73 |     def __len__(self):
74 |         return self.size
75 | 
76 |     def __contains__(self, value):
77 |         return self.dive(hash(value), self.data, value, True, True)
78 | 
79 |     def remove(self, value):
80 |         return self.dive(hash(value), self.data, value, True)
81 | 
82 |     discard = remove
83 | 
84 |     def join_str(self, li):
85 |         if isinstance(li, list):
86 |             total = ""
87 |             for l in li:
88 |                 total += self.join_str(l)
89 |             return total
90 |         else:
91 |             if li is not None:
92 |                 return str(li) + ","
93 |             else:
94 |                 return ""
95 | 
96 |     def __str__(self):
97 |         return "{%s}" % self.join_str(self.data).strip(",")
98 | 
99 |     __repr__ = __str__
100 | -------------------------------------------------------------------------------- /tools/heap_sort.py: --------------------------------------------------------------------------------
1 | def adjust_heap(arr, start, length):
2 |     # 从start开始向下调整,使arr[0:length]满足最大堆性质
3 |     while start * 2 + 1 < length:
4 |         left = start * 2 + 1
5 |         right = start * 2 + 2
6 |         # 选出较大的孩子(右孩子可能不存在)
7 |         largest = left
8 |         if right < length and arr[right] > arr[left]:
9 |             largest = right
10 |         if arr[largest] <= arr[start]:
11 |             break
12 |         arr[largest], arr[start] = arr[start], arr[largest]
13 |         start = largest
14 | 
15 | 
16 | def heap_sort(arr):
17 |     """
18 |     堆排序使用最大堆来排序,将排好的堆的最大值放到最后,然后继续将arr[:-1]排成堆,以此类推
19 |     :param arr:
20 |     :return:
21 |     """
22 |     l = len(arr)
23 |     # 自底向上建堆
24 |     for i in range(int(l/2)-1, -1, -1):
25 |         adjust_heap(arr, i, l)
26 |     # 将数组依次减少长度传入堆中进行调整,再把堆顶的最大值交换到当前末尾
27 |     for i in range(l, 0, -1):
28 |         adjust_heap(arr, 0, i)
29 |         arr[i-1], arr[0] = arr[0], arr[i-1]
30 | 
31 | 
32 | a = [5, 3, 1, 7, 3, 7, 0, 4, 8]
33 | heap_sort(a)
34 | print(a)
35 | -------------------------------------------------------------------------------- /tools/insert_sort.py: --------------------------------------------------------------------------------
1 | 
2 | 
3 | def insert_sort(arr):
4 |     # 将数组从i处分成两部分[0:i]和[i:],第一部分已经排好序,遍历第二部分,取每个数和第一部分比较
5 |     for i in range(1, len(arr)):
6 |         tmp = arr[i]
7 |         # 遍历第一部分,找出该数的位置
8 |         for j in range(0, i):
9 |             # 交换该数与大于等于该数的元素,同时遍历进行,继续交换,相当于一个插入操作
10 |             if arr[j] >= tmp:
11 |                 arr[j], tmp = tmp, arr[j]
12 |         # 最后遍历完第一部分别忘了将数组扩张一位(j最大是i-1,j+1相当于将长度为i的第一部分扩张到了i+1)
13 |         arr[j+1] = tmp
14 | 
15 | 
16 | def insert_sort2(arr):
17 |     # 将数组从i处分成两部分[0:i]和[i:],第一部分已经排好序
18 |     for i in range(1, len(arr)):
19 |         # 依次从后面取出元素
20 |         ele = arr.pop(-1)
21 |         # 遍历第一部分的元素,当最后的元素小于当前元素时,插入
22 |         for j in range(i):
23 |             if ele < arr[j]:
24 |                 arr.insert(j, ele)
25 |                 break
26 |         # 没有找到比最后的元素更小的元素,则插到第一部分最后
27 |         else:
28 |             arr.insert(j+1, ele)
29 |     return arr
30 | 
31 | 
32 | a = [5,3,1,7,3,7,0,4,8]
33 | insert_sort2(a)
34 | print(a) -------------------------------------------------------------------------------- /tools/k_word_sub_string.py: --------------------------------------------------------------------------------
1 | def run(string, k):
2 |     """
3 |     给定字符串S和整数K.
4 | 计算长度为K且包含K个不同字符的子串数 5 | 6 | :param string: s 7 | :param k: 8 | :return: 9 | """ 10 | 所有的子串数 = 0 11 | 子串尾部对应S的索引 = -1 12 | 子串头部对应S的索引 = 0 13 | for 游标 in range(len(string)): 14 | 存在于子串中的字符索引 = string[子串头部对应S的索引: 子串尾部对应S的索引+1].find( 15 | string[游标]) 16 | if 存在于子串中的字符索引 != -1: 17 | 子串头部对应S的索引 = 子串头部对应S的索引 + 存在于子串中的字符索引 + 1 18 | else: 19 | 子串尾部对应S的索引 = 游标 20 | if 子串尾部对应S的索引 - 子串头部对应S的索引 + 1 == k: 21 | 所有的子串数 += 1 22 | 子串头部对应S的索引 += 1 23 | 24 | return 所有的子串数 25 | 26 | 27 | print(run("abdaeesdaegabas", 3)) 28 | -------------------------------------------------------------------------------- /tools/lru.py: -------------------------------------------------------------------------------- 1 | class Node(object): 2 | 3 | def __init__(self, val): 4 | self.val = val 5 | self.next = None 6 | 7 | 8 | class Lru(object): 9 | 10 | def __init__(self, vals): 11 | self._head = None 12 | for val in vals: 13 | node = Node(val) 14 | if self._head: 15 | self._head.next = node 16 | else: 17 | self._head = node 18 | 19 | def hit(self, val): 20 | cur = self._head 21 | while cur: 22 | if cur.val == val and cur is not self._head: 23 | next = cur.next 24 | if next: 25 | cur.next = next.next 26 | cur.next.next = cur 27 | next.next = self._head 28 | self._head = next 29 | return True 30 | elif cur is not self._head: 31 | return True 32 | 33 | return False 34 | -------------------------------------------------------------------------------- /tools/merge_link.py: -------------------------------------------------------------------------------- 1 | class ListNode: 2 | def __init__(self, x): 3 | self.val = x 4 | self.next = None 5 | 6 | def __iter__(self): 7 | cur = self 8 | while cur: 9 | yield cur.val 10 | cur = cur.next 11 | 12 | def __str__(self): 13 | string = "[" 14 | for i in self: 15 | string += str(i) 16 | string += "," 17 | string += "]" 18 | return string 19 | 20 | __repr__ = __str__ 21 | 22 | 23 | class Solution: 24 | # 返回合并后列表 25 | def Merge(self, pHead1, pHead2): 26 | # write code here 27 | pre = None 28 | one = pHead1 29 | two = pHead2 30 | while pHead1 and pHead2: 31 | while pHead1 and pHead2 and pHead1.val < pHead2.val: 32 | pre = pHead1 33 | pHead1 = pHead1.next 34 | if pre: 35 | pre.next = pHead2 36 | pHead2 = pre 37 | while pHead1 and pHead2 and pHead1.val >= pHead2.val: 38 | pre = pHead2 39 | pHead2 = pHead2.next 40 | if pre and pHead1: 41 | pre.next = pHead1 42 | pHead1 = pre 43 | 44 | if pHead1: 45 | return two 46 | else: 47 | return one 48 | 49 | 50 | def create(arr): 51 | pre = None 52 | cur = None 53 | 54 | for i in arr: 55 | node = ListNode(i) 56 | if pre: 57 | pre.next = node 58 | else: 59 | cur = node 60 | pre = node 61 | 62 | return cur 63 | 64 | 65 | if __name__ == "__main__": 66 | print(Solution().Merge(create([1, 3, 5]), create([1, 3, 5]))) 67 | -------------------------------------------------------------------------------- /tools/merge_sort.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | def sort(arr, left=0, right=None, temp=list()): 4 | # 1 首先创建头和尾的下标,并创建一个临时数组用来保存数据 5 | if right is None: 6 | right = len(arr) - 1 7 | # 2 递归对子序列排序 8 | if left < right: 9 | mid = int((left + right)/2) 10 | sort(arr, left, mid) 11 | sort(arr, mid + 1, right) 12 | merge(arr, left, mid, right, temp) 13 | 14 | 15 | def merge(arr, left, mid, right, temp): 16 | i = left 17 | j = mid + 1 18 | # 3 此时两个子序列是排好的,所以从两个子序列的开始进行遍历,将更小元素值的放到临时数组,直到其中一个子序列变空 19 | while i <= mid and j <= right: 20 | if arr[i] < arr[j]: 21 | temp.append(arr[i]) 22 | i += 1 23 | else: 24 | 
temp.append(arr[j]) 25 | j += 1 26 | # 4 将不为空的子序列所有值转移到临时数组 27 | while i <= mid: 28 | temp.append(arr[i]) 29 | i += 1 30 | 31 | while j <= right: 32 | temp.append(arr[j]) 33 | j += 1 34 | # 5 将临时数组的值转移回来 35 | arr[left: left+len(temp)] = temp 36 | temp.clear() 37 | 38 | 39 | a = [5,3,1,7,3,7,0,4,8] 40 | sort(a) 41 | print(a) 42 | -------------------------------------------------------------------------------- /tools/merge_sort2.py: -------------------------------------------------------------------------------- 1 | def merge(arr, left, mid, right, temp): 2 | i = left 3 | j = mid + 1 4 | while i <= mid and j <=right: 5 | if arr[i] <= arr[j]: 6 | temp.append(arr[i]) 7 | i += 1 8 | else: 9 | temp.append(arr[j]) 10 | j += 1 11 | 12 | 13 | while i <= mid: 14 | temp.append(arr[i]) 15 | i += 1 16 | 17 | while j <= right: 18 | temp.append(arr[j]) 19 | j += 1 20 | 21 | arr[left: left+len(temp)] = temp 22 | temp.clear() 23 | 24 | 25 | def sort(arr, left=0, right=None, temp=list()): 26 | if right is None: 27 | right = len(arr) - 1 28 | 29 | if left < right: 30 | mid = (left + right) // 2 31 | sort(arr, left, mid, temp) 32 | sort(arr, mid+1, right, temp) 33 | merge(arr, left, mid, right, temp) 34 | 35 | a = [5,3,1,7,3,7,0,4,8] 36 | sort(a) 37 | print(a) -------------------------------------------------------------------------------- /tools/odd_even.py: -------------------------------------------------------------------------------- 1 | class Solution: 2 | def reOrderArray(self, arr): 3 | # write code here 4 | if not arr: 5 | return arr 6 | for i in range(0, len(arr)): 7 | if arr[i] % 2 == 0: 8 | for j in range(i+1, len(arr)): 9 | if arr[j] % 2: 10 | for k in range(i, j): 11 | arr[k], arr[k+1] = arr[k+1], arr[k] 12 | else: 13 | break 14 | 15 | return arr 16 | 17 | 18 | if __name__ == "__main__": 19 | Solution().reOrderArray([1, 2, 3, 4, 5, 6, 7]) -------------------------------------------------------------------------------- /tools/operator_tree.py: -------------------------------------------------------------------------------- 1 | class Tree(object): 2 | 3 | def __init__(self, data, left, right): 4 | self.data = data 5 | self.left = left 6 | self.right = right 7 | 8 | def __iter__(self): 9 | if self.left: 10 | yield from iter(self.left) 11 | yield self.data 12 | if self.right: 13 | yield from iter(self.right) 14 | 15 | 16 | def make_operator_tree(operators): 17 | stack = [] 18 | for operator in operators: 19 | tree = Tree(operator, None, None) 20 | if not operator.isalpha(): 21 | tree.right = stack.pop() 22 | tree.left = stack.pop() 23 | stack.append(tree) 24 | return stack.pop() 25 | 26 | 27 | if __name__ == "__main__": 28 | for data in make_operator_tree("ab+cde+**"): 29 | print(data, end="") -------------------------------------------------------------------------------- /tools/package_problen.py: -------------------------------------------------------------------------------- 1 | def package_problem(max_weight, values, weights, count): 2 | if count == 0: 3 | return 0 4 | if weights[count-1] > max_weight: 5 | return 0 6 | va1 = package_problem(max_weight-weights[count-1], values, weights, count-1) + values[count-1] 7 | va2 = package_problem(max_weight, values, weights, count-1) 8 | return max(va1, va2) 9 | 10 | # 11 | # def package_problem2(max_weight, values, weights, count): 12 | # 13 | 14 | 15 | print(package_problem(10, [6, 3, 5, 4, 6], [2, 2, 6, 5, 4], 5)) -------------------------------------------------------------------------------- /tools/quick_sort.py: 
-------------------------------------------------------------------------------- 1 | def quick(arr, start, end): 2 | """ 3 | 快排知识点总结 4 | :param arr: 5 | :param start: 6 | :param end: 7 | :return: 8 | """ 9 | # 1 基准情形,两个指针相遇,开头不能大于等于结尾 10 | if start >= end: 11 | return 12 | i = start 13 | j = end 14 | # 2 使用一个变量保存中间值,使用一个bool值来决定是从后向前扫描还是相反。 15 | target = arr[i] 16 | from_end = True 17 | while i < j: 18 | if from_end: 19 | # 3 从后向前扫描,当中间值大于后值时交换 20 | if target > arr[j]: 21 | arr[i], arr[j] = arr[j], arr[i] 22 | i += 1 23 | from_end = False 24 | else: 25 | j -= 1 26 | else: 27 | # 3 从前向后扫描,当中间值小于等于前值时交换 28 | # 第3条必须保存中间值等于目标值时的情况被覆盖到 29 | if target <= arr[i]: 30 | arr[i], arr[j] = arr[j], arr[i] 31 | j -= 1 32 | from_end = True 33 | else: 34 | i += 1 35 | # 4 将数组从两个指针相遇的地方将数组分成两部分。 36 | quick(arr, start, i) 37 | quick(arr, i+1, end) 38 | 39 | 40 | def quick_sort2(arr, left=0, right=None): 41 | if right is None: 42 | right = len(arr) - 1 43 | if left >= right: 44 | return 45 | i = left 46 | j = right 47 | key = arr[i] 48 | while i < j: 49 | while i < j and arr[j] >= key: 50 | j -= 1 51 | arr[i] = arr[j] 52 | while i < j and arr[i] <= key: 53 | i += 1 54 | arr[j] = arr[i] 55 | arr[i] = key 56 | quick_sort2(arr, left, i-1) 57 | quick_sort2(arr, i+1, right) 58 | 59 | 60 | if __name__ == "__main__": 61 | arr = [6,3,8,1,4,6,9,2] 62 | #quick(arr, 0, len(arr)-1) 63 | quick_sort2(arr) 64 | print(arr) -------------------------------------------------------------------------------- /tools/quick_sort2.py: -------------------------------------------------------------------------------- 1 | def quick_sort(arr, left=0, right=None): 2 | if right is None: 3 | right = len(arr) - 1 4 | 5 | if left >= right: 6 | return 7 | 8 | l = left 9 | r = right 10 | key = arr[left] 11 | while left < right: 12 | while left < right and arr[right] >= key: 13 | right -= 1 14 | arr[left] = arr[right] 15 | 16 | while left < right and arr[left] <= key: 17 | left += 1 18 | arr[right] = arr[left] 19 | 20 | arr[right] = key 21 | quick_sort(arr, l, right-1) 22 | quick_sort(arr, right+1, r) 23 | 24 | 25 | if __name__ == "__main__": 26 | arr = [6,3,8,1,4,6,9,2] 27 | #quick(arr, 0, len(arr)-1) 28 | quick_sort(arr) 29 | print(arr) -------------------------------------------------------------------------------- /tools/rm_images_and_container.py: -------------------------------------------------------------------------------- 1 | 2 | # -*- coding:utf-8 -*- 3 | import os 4 | import re 5 | import sys 6 | from pdb import Pdb 7 | 8 | regx = re.compile("(\S+)") 9 | 10 | 11 | def fun(columns): 12 | return columns[1] == "" 13 | 14 | 15 | def rm_container(): 16 | for line in os.popen("docker ps -a").readlines(): 17 | columns = regx.findall(line) 18 | os.system("docker rm %s"%columns[0].strip()) 19 | 20 | 21 | def rm_image(): 22 | 23 | def bar(columns): 24 | return columns[2] 25 | 26 | def rm(id): 27 | os.system("docker rmi %s"%id) 28 | 29 | [rm(bar(regx.findall(line))) for line in reversed(os.popen("docker images").readlines()) if fun(regx.findall(line))] 30 | 31 | 32 | if __name__ == "__main__": 33 | eval("rm_%s()"%sys.argv[1]) 34 | -------------------------------------------------------------------------------- /tools/scrapy_header_parser.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | def parse(headers): 5 | print(json.dumps(dict((k.decode(), b"".join(v).decode())for k, v in headers.items()), indent=1)) 6 | 7 | 8 | if __name__ == "__main__": 9 | parse({b'X-Vol-App-Claims': 
[b'dT4DuhD38QDBERWJshp5PLGR09ny2Lks2sfN5WFe6xzPW1iQcAQHNnV6FwGwV9xkgg0z0WK5RSAmFKYMVLsOF5yO0ulF8SQlfGsPXoh/NTtWe6zrZqsmg1t2SAmZDdjweiZFMhHephAQWz9oNa+f4vRtYrMRcp4FE9BpUipu4/YAGDNWFiPxnuztvE/dRT1ETOZxCX4vU6hRCU7z0DMbQeNAkbpcOYwnoLmJP2WcVNYcggebjb99dLnS9HVjRS2ptYvT4vGVPZhMt+Ajn2qPJw=='], b'X-Vol-Currency': [b'USD'], b'X-Vol-Locale': [b'en-US'], b'X-Vol-Site': [b'16829'], b'X-Vol-Master-Catalog': [b'1'], b'X-Vol-Catalog': [b'1'], b'X-Vol-Tenant': [b'12106'], b'X-Vol-Purchase-Location': [b''], b'X-Vol-User-Claims': [b'I0pZxUNGUe9Pjl1IqCMXQDq3zUHISxQ1lhqAOYHCtok1cFgZngPpXws0G3nwKTJTXwyIYDkktzdl8I3ieVbs5XUKIJV7CF7AnxY8L1moNXleklZPiG5ayocmqKOA58dWCAx3Ta4ukUZ7bNvIcekAsUn8QE616FaYz83UCxLaEUVg4haT0FyVHgNDnBBJyFfOs1orC86OZQca8tPhWqM3IJdHx1htMSHIKmkUHAQDBohxGA0OoSWiMRtLR0W48hKt/q1Mn3OKscOdvsVJJBqkVVOc4vFusDQZhe4fFAwcHMBLQB+vHrRJzKXSyzfQ15L/yR7MBq+vKQodWhyS/bwmjBc7pd3BKanKwenGIQk65iy3R6AT9jUbjQuhWTXAbaZF'], b'Content-Type': [b'application/json'], b'Content-Length': [b'73'], b'Referer': [b'https://www.bluefly.com/adidas-adidas-mens-eqt-basketball-adv-originals-basketball-shoe/p/484094401'], b'Accept': [b'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'], b'Accept-Language': [b'en'], b'Accept-Encoding': [b'gzip, deflate'], b'User-Agent': [b'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36'], b'Cookie': [b'sb-sf-at-Prod-s=pt=&at=V7/CL/J1W4z5L5vKaue5zwx1145lm42WhHwBCrCGqL3cBgY+eb9bWI05jGs3LtJvfEvrDB7P0v/ZLQt1Ph/oIkNEWtABws087nRgK5WIr4cMfx1I0JL8zb9XWgWkdF5J+wisveUphd2hsusms0KuP3cfQp2AZQtXr7dbRLtprOr3FbEfGdHPlapXj8kZ9+thwuVE8G6CiiOgGIaa8fVHN8gDGD5fOGN+w+EYJSpSBY+38Sn8CdF0s43IJ72HngbKFd3tO7aQT859DK9+WkIQM+SaG8uEQt1enZs4aTBEzqYr7B1/oDB6Lg9FiP/pxxIm&dt=2018-03-06T03:32:23.2880226Z; sb-sf-at-Prod=pt=&at=V7/CL/J1W4z5L5vKaue5zwx1145lm42WhHwBCrCGqL3cBgY+eb9bWI05jGs3LtJvfEvrDB7P0v/ZLQt1Ph/oIkNEWtABws087nRgK5WIr4cMfx1I0JL8zb9XWgWkdF5J+wisveUphd2hsusms0KuP3cfQp2AZQtXr7dbRLtprOr3FbEfGdHPlapXj8kZ9+thwuVE8G6CiiOgGIaa8fVHN8gDGD5fOGN+w+EYJSpSBY+38Sn8CdF0s43IJ72HngbKFd3tO7aQT859DK9+WkIQM+SaG8uEQt1enZs4aTBEzqYr7B1/oDB6Lg9FiP/pxxIm; _mzvr=vexT6f4vGkuklAISIT0Clg; _mzvs=nn; _mzvt=DxFd1gpAOUW1w0uQ7r6DXQ'], b'Proxy-Authorization': [b'Basic bG9uZ2VuOmppbmFubG9uZ2VuMjAxNg==']}) -------------------------------------------------------------------------------- /tools/secret_word.py: -------------------------------------------------------------------------------- 1 | 2 | def parser(secret): 3 | """ 4 | 找到一个密码中, 后续的字符对应前面出现过的字符的下标 5 | :param secret: "abceeab" 6 | :return: {0: None, 1: None, 2: None, 3: None, 4: 3, 5: 1} 7 | """ 8 | vals_mapping = dict() 9 | indices_mapping = dict() 10 | for index, i in enumerate(secret): 11 | if i in vals_mapping: 12 | indices_mapping[index] = vals_mapping[i] 13 | else: 14 | indices_mapping[index] = None 15 | vals_mapping[i] = index 16 | return indices_mapping 17 | 18 | 19 | def run(long_str, secret): 20 | """ 21 | 判断超长的字符串中是否包含secret 22 | :param long_str: 23 | :param secret: 24 | :return: 25 | """ 26 | sec_len = len(secret) 27 | sec_rs = parser(secret) 28 | for i in range(len(long_str) - sec_len): 29 | rs = parser(long_str[i: i+ sec_len]) 30 | if rs == sec_rs: 31 | return "yes" 32 | return "no" 33 | 34 | 35 | print(run("deabceeeab", "xyzddd")) 36 | 37 | -------------------------------------------------------------------------------- /tools/send_request.py: -------------------------------------------------------------------------------- 1 | def post(): 2 | 3 | import requests 4 | 5 | resp = 
requests.post("http://127.0.0.1:8000/a/1", json={ 6 | "title": "aaaa", "tags": ["python"], 7 | "description": "111"}) 8 | 9 | print(resp.json()) 10 | 11 | post() 12 | -------------------------------------------------------------------------------- /tools/tree.py: -------------------------------------------------------------------------------- 1 | class Node(object): 2 | 3 | def __init__(self, data, left=None, right=None): 4 | self.data = data 5 | self.left = left 6 | self.right = right 7 | 8 | def insert(self, data): 9 | if data > self.data: 10 | if self.right: 11 | self.right.insert(data) 12 | else: 13 | self.right = Node(data) 14 | elif data < self.data: 15 | if self.left: 16 | self.left.insert(data) 17 | else: 18 | self.left = Node(data) 19 | 20 | def find_max(self): 21 | if not self.right: 22 | return self.data 23 | else: 24 | return self.right.find_max() 25 | 26 | def find_min(self): 27 | if not self.left: 28 | return self.data 29 | else: 30 | return self.left.find_max() 31 | 32 | def remove(self, data): 33 | if data == self.data: 34 | if self.left: 35 | self.data = self.left.find_max() 36 | self.left.remove(self.data) 37 | elif self.right: 38 | self.data = self.right.find_min() 39 | self.right.remove(self.data) 40 | else: 41 | self.data = None 42 | return True 43 | elif data < self.data: 44 | if self.left: 45 | return self.left.remove(data) 46 | else: 47 | return False 48 | else: 49 | if self.right: 50 | return self.right.remove(data) 51 | else: 52 | return False 53 | 54 | def __contains__(self, item): 55 | if self.data == item: 56 | return True 57 | elif item < self.data: 58 | return item in self.left 59 | else: 60 | return item in self.right 61 | 62 | def __len__(self): 63 | l = 1 64 | if self.right: 65 | l += len(self.right) 66 | if self.left: 67 | l += len(self.left) 68 | return l 69 | 70 | 71 | class Tree(object): 72 | 73 | def __init__(self): 74 | self.root = None 75 | 76 | def __len__(self): 77 | if self.root: 78 | return len(self.root) 79 | else: 80 | return 0 81 | 82 | def __contains__(self, item): 83 | if self.root: 84 | return item in self.root 85 | else: 86 | return False 87 | 88 | def insert(self, data): 89 | node = Node(data) 90 | if self.root: 91 | self._insert(self.root, node) 92 | else: 93 | self.root = node 94 | 95 | def _insert(self, root, node): 96 | if node > root: 97 | if root.right: 98 | self._insert(root.right, node) 99 | else: 100 | root.right = node 101 | elif node < root: 102 | if root.left: 103 | self._insert(root.left, node) 104 | else: 105 | root.left = node 106 | 107 | def find_max(self): 108 | if not self.root: 109 | raise Exception("No data.") 110 | else: 111 | self.root.find_max() 112 | 113 | def remove(self, data): 114 | 115 | 116 | def _remove(self, node, data): 117 | if node.data == data: 118 | 119 | if self.root: 120 | result = self.root.remove(data) 121 | if self.root.data is None: 122 | self.root = None 123 | return result 124 | else: 125 | return False -------------------------------------------------------------------------------- /tools/wine.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | from fractions import Fraction 3 | from toolkit import cache_property 4 | from functools import reduce 5 | 6 | 7 | class Cup(object): 8 | """ 9 | 倒酒案例 10 | 往一个二维酒杯塔里面倒酒,上层杯子满了之后,会流入下层,不考虑酒会丢失的情况。结构为 11 | cap 12 | cap cap 13 | cap cap cap 14 | ...... 
15 | 求:倒入count杯酒时,第line行第col列杯中的酒量。 16 | """ 17 | _total = set() 18 | 19 | def __init__(self, left_parent, right_parent, capacity): 20 | self.left_parent = left_parent 21 | self.right_parent = right_parent 22 | self.capacity = capacity 23 | 24 | @cache_property 25 | def right_child(self): 26 | return Cup(self, self.right_sibling, 0) 27 | 28 | @cache_property 29 | def left_child(self): 30 | if self.left_sibling: 31 | return self.left_sibling.right_child 32 | else: 33 | return Cup(self.left_sibling, self, 0) 34 | 35 | @cache_property 36 | def left_sibling(self): 37 | if self.left_parent: 38 | return self.left_parent.left_child 39 | else: 40 | return None 41 | 42 | @cache_property 43 | def right_sibling(self): 44 | if self.right_parent: 45 | return self.right_parent.right_child 46 | else: 47 | return None 48 | 49 | def pour(self, count): 50 | self._total.add(self) 51 | remainder = self.capacity + count - 1 52 | if remainder > 0: 53 | self.capacity = 1 54 | self.left_child.pour(remainder/2) 55 | self.right_child.pour(remainder/2) 56 | else: 57 | self.capacity += count 58 | 59 | def find(self, line, col): 60 | that = self 61 | for i in range(line-1): 62 | that = that.left_child 63 | 64 | for j in range(col-1): 65 | that = that.right_sibling 66 | 67 | return that 68 | 69 | def total(self): 70 | return reduce(lambda x, y: x+y, [i.capacity for i in self._total]) 71 | 72 | def __str__(self): 73 | return str(Fraction(self.capacity)) 74 | 75 | __repr__ = __str__ 76 | 77 | def print(self, line, col): 78 | find = None 79 | blunk_f = "{:^%s}" % (line * 15) 80 | ele_f = " {:-^13} " 81 | this = self 82 | 83 | for i in range(line): 84 | that = this 85 | string = ele_f.format(str(that)) 86 | count = 1 87 | 88 | while that.right_sibling: 89 | count += 1 90 | that = that.right_sibling 91 | 92 | if i == line - 1 and count == col: 93 | string += ele_f.replace("-", "=").format(str(that)) 94 | find = that 95 | else: 96 | string += ele_f.format(str(that)) 97 | 98 | print(blunk_f.format(string)) 99 | this = this.left_child 100 | 101 | if find: 102 | print(f"\n第{line}行,第{col}列杯子中的酒量为{find.capacity}。") 103 | 104 | 105 | if __name__ == "__main__": 106 | import sys 107 | # 创建一个酒杯(塔) 108 | cup = Cup(None, None, 0) 109 | # 倒入酒 110 | cup.pour(int(sys.argv[1])) 111 | # 输出酒塔 112 | cup.print(int(sys.argv[2]), int(sys.argv[3])) 113 | --------------------------------------------------------------------------------