├── .gitignore ├── .idea ├── misc.xml ├── modules.xml ├── programming_vocabulary.iml ├── vcs.xml └── workspace.xml ├── README.md ├── analysis_book.py ├── db2csv.py ├── files ├── CONTRIBUTING.md.txt ├── autobahn-python.txt ├── awesome-deep-learning.txt ├── awesome-django.txt ├── awesome-flask.txt ├── awesome-sphinxdoc.txt ├── awesome-sqlalchemy.txt ├── awesome.txt ├── aws-cli.txt ├── bashplotlib.txt ├── bcbb.txt ├── bcbio-nextgen.txt ├── caffe.txt ├── caniusepython3.txt ├── cartridge.txt ├── code2flow.txt ├── cola.txt ├── cookiecutter.txt ├── cornice.txt ├── dh-virtualenv.txt ├── diesel.txt ├── django-activity-stream.txt ├── django-allauth.txt ├── django-crispy-forms.txt ├── django-debug-toolbar.txt ├── django-devserver.txt ├── django-elastic-transcoder.txt └── xxx │ ├── awesome-ciandcd#online-build-system.txt │ ├── boto3.txt │ ├── bpython.txt │ ├── butterdb.txt │ ├── cpython.txt │ ├── dejavu.txt │ ├── demiurge.txt │ ├── django-guardian.txt │ └── django-haystack.txt ├── fortest.db ├── models_exp.py ├── python-words.xlsx ├── requirements.txt ├── settings.py ├── shanbay ├── README.md ├── __init__.py ├── add_to_shanbay.py ├── creat_word_list.py ├── shanbeisettings.py ├── workbook_id.txt └── workbook_id_test.txt ├── spiders ├── README.md ├── downloadPdf.py ├── github.py ├── onlinedocs.py ├── stackoverflow.py └── utils.py ├── t.log ├── translate.py ├── voca.db └── work.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | _pycache__ 3 | *.csv 4 | test.py 5 | *.pyc -------------------------------------------------------------------------------- /.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.idea/programming_vocabulary.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 11 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## 单词分析 2 | 3 | #### 文件结构 4 | 5 | - settings.py 一些基本的设置 6 | - analysis_book.py 分析文件,提取单词 7 | - models_exp.py 使用 peewee 接口处理数据库设置 8 | - test.py 一些测试/与项目本身无关 9 | - translate.py 翻译接口 10 | - work.py 启动项目 11 | - spiders 收集书籍资料、文档、github、stackoverflow 的爬虫程序 12 | - shanbay 批量导入单词到扇贝 13 | 14 | #### 工作流程 15 | 16 | 1、 设置 settings.py 中的一些基本项目 17 | 18 | 2、 使用 spiders 文件夹下的爬虫收集资料 19 | 20 | 3、 运行 work.py 分析收集资料,提取单词 21 | 22 | 4、 运行 translate.py 调用翻译接口,完成翻译 23 | 24 | 5、 设置 shanbay 文件夹下的程序上传单词 25 | 26 | 6、 运行 db2csv.py 从数据库提取所有词汇到 csv 27 | 28 | #### 技术细节 29 | 30 | ##### 1.使用 peewee 库处理数据库文件 31 | 32 | 作为一名怕麻烦的程序员,写原生 sql 语句简直要命,一直希望能有 django-orm 那样的神器,用 python 的方式操作数据库,经过一番了解发现 peewee 和 SQLAlchemy 可以做到, 前者偏轻后者偏重。最后选择了 peewee 来开发。 33 | 34 | peewee 文档:[peewee](https://peewee.readthedocs.io/en/latest/) 35 | 36 | 示例代码: 37 | 38 | ```python 39 | 40 | from peewee import * 41 | 42 | # 连接到数据库 43 | new_db = SqliteDatabase('voca.db') 44 | 45 | # 建表 46 | class NewBook(Model): 47 | name = CharField() 48 | # 总词汇 49 | total = IntegerField(default=0) 50 | # 是否已经统计 51 | is_analyzed = BooleanField(default=False) 52 | # reserved columns 53 | # 保留字段,便于之后扩展 54 | re1 = CharField(default='') 55 | re2 = IntegerField(default=0) 56 | 57 | class Meta: 58 | database = new_db 59 | 60 | ``` 61 | 62 | 以上截取了项目中部分代码,使用过 django 的同学应该很熟悉这种写法。 63 | 64 | 65 | #### 2. 提取分析单词 66 | 67 | 这部分是整个项目的核心,但代码并不多,使用正则匹配出所有的单词,然后再使用 collections 下的 Counter 函数统计并输出词频较高的单词。 68 | 69 | 代码示例: 70 | 71 | ```python 72 | 73 | import re 74 | from collections import Counter 75 | # 打开文件,读取数据 76 | with open(filename, 'r', encoding='utf-8')as f: 77 | raw_words = f.read() 78 | 79 | # 正则匹配所有单词 80 | words = re.findall('[a-z]+', raw_words.lower()) 81 | 82 | # 统计单词 83 | c = Counter(words) 84 | # 提取出前词频前 100 的单词 85 | c.most_common(100) 86 | 87 | ``` 88 | 89 | #### 3. 翻译接口 90 | 91 | 项目一共尝试了 3 种翻译接口, 百度/金山/扇贝 , 最后确定使用金山家的接口,百度家的接口音标和单词意思是分开的请求,扇贝家的接口在返回信息方面不如其他两家完整,金山家的接口既有音标也有详细的解释。 92 | 93 | 接口的示例代码如下: 94 | 95 | ```python 96 | 97 | import requests 98 | 99 | 100 | def trans_ici(word): 101 | 102 | url = 'http://www.iciba.com/index.php?a=getWordMean&c=search&word=' + word 103 | try: 104 | req = requests.get(url) 105 | req.raise_for_status() 106 | info = req.json() 107 | data = info['baesInfo']['symbols'][0] 108 | assert info['baesInfo']['symbols'][0] 109 | # 去除没有音标的单词 110 | assert data['ph_am'] and data['ph_en'] 111 | # 去除没有词性的单词 112 | assert data['parts'][0]['part'] 113 | 114 | except: 115 | return 116 | 117 | # 分别获取美/英 音标 118 | ph_en = '英 [' + data['ph_en'] + ']' 119 | ph_am = '美 [' + data['ph_am'] + ']' 120 | # 获取单词解释 121 | ex = '' 122 | for part in data['parts']: 123 | ex += part['part'] + ';'.join(part['means']) + ';' 124 | 125 | return ph_en+ph_am, ex 126 | 127 | # 示例 128 | res = trans_ici('hello') 129 | print(res) 130 | # >>> ("英 [hə'ləʊ]美 [həˈloʊ]", 'int.哈喽,喂;你好,您好;表示问候;打招呼;n.“喂”的招呼声或问候声;vi.喊“喂”;') 131 | ``` 132 | 133 | ##### 4. spiders 134 | 135 | spiders 文件夹下包含了 4 种不同形式的爬虫,分别是 pdf下载爬虫/github爬虫/stackoverflow爬虫/readthedocs爬虫。在资料收集阶段根据自己的需要,输入相应的需要被爬取的地址,然后运行文件即可。 136 | 137 | 以 github 爬虫为例: 138 | 139 | 首先输入需要被爬取地址 140 | 141 | ```python 142 | # 资源整合项目地址 143 | # 类似 awesome-python 144 | # 爬虫会爬取链接下的所有链接 145 | # 接着请求获取的链接, 获取 readme 文档内容 146 | self.projectsPool = ['https://github.com/vinta/awesome-python'] 147 | 148 | # 独立的项目 149 | # 类似 django 150 | # 爬虫会直接获取该项目下的 readme 内容 151 | self.projectsUrl = [] 152 | 153 | ``` 154 | 155 | 156 | -------------------------------------------------------------------------------- /analysis_book.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.10 4 | # description 5 | # 导入某文件夹下的所有 txt 文件,逐一分析,提取出词汇,存入数据库 6 | 7 | 8 | from collections import Counter 9 | import re 10 | 11 | # 引入排除词汇 12 | from settings import exclude_list, NUMBERS 13 | # 数据库操作 14 | # from models import Word, Book 15 | from models_exp import NewBook, NewWord 16 | 17 | 18 | class AnlysisBook(): 19 | 20 | # open file and return all english words 21 | def _open_file(self, filename): 22 | 23 | with open(filename, 'r', encoding='utf-8')as f: 24 | raw_words = f.read() 25 | 26 | words = re.findall('[a-z]+', raw_words.lower()) 27 | 28 | return words 29 | 30 | # insert the information of the book into database 31 | def new_book(self, path, words): 32 | 33 | bookname = path.split('/')[-1] 34 | query_book = NewBook.select().where((NewBook.name == bookname) & (NewBook.is_analyzed == True)) 35 | if query_book: 36 | return 37 | 38 | newbook = NewBook.create( 39 | name=bookname, 40 | total=len(words) 41 | 42 | ) 43 | return newbook 44 | 45 | # filter valid words 46 | def _filter_words(self, raw_words, count=NUMBERS): 47 | 48 | new_words = [] 49 | for word in raw_words: 50 | if word not in exclude_list and len(word) > 1: 51 | new_words.append(word) 52 | 53 | # 根据书籍字数确定从该书取多少单词 54 | ct = 10 55 | for i, j in NUMBERS: 56 | if len(new_words) < i: 57 | ct = j 58 | break 59 | 60 | # print(ct) 61 | c = Counter(new_words) 62 | return c.most_common(ct) 63 | 64 | # insert words into database 65 | # firstly, it will check out if the book exist in the database 66 | # if not, the words will be inserted into database 67 | # or , it will return None cus the words has been handled 68 | # last, the book will be marked as analyzed 69 | def _insert_book(self, book, words): 70 | 71 | # 检查数据库内是否有该书籍 72 | if not book: 73 | return 74 | 75 | # 向数据库内插入数据 76 | for word, fre in words: 77 | query = NewWord.select().where(NewWord.name == word) 78 | if query: 79 | word_ins = query[0] 80 | word_ins.frequency += fre 81 | word_ins.save() 82 | else: 83 | word_ins = NewWord.create( 84 | name=word, 85 | frequency=fre, 86 | ) 87 | 88 | # print('处理了 {} 个单词'.format(len(words))) 89 | # 标记该书已经被处理 90 | book.is_analyzed = True 91 | book.save() 92 | 93 | # 对外接口 94 | def analysis(self, lst_files): 95 | 96 | # filename = 'Data+Structures+and+Algorithms+Using+Python.txt' 97 | for i in lst_files: 98 | raw_words = self._open_file(i) 99 | bookins = self.new_book(i, raw_words) 100 | filter_words = self._filter_words(raw_words) 101 | self._insert_book(bookins, filter_words) 102 | -------------------------------------------------------------------------------- /db2csv.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.17 4 | # description 5 | # extract all valid words to a csv file 6 | # 提取所有有效单词到 csv 文件 7 | 8 | 9 | from models_exp import NewWord 10 | import csv 11 | import chardet 12 | 13 | def extract(): 14 | 15 | query = NewWord.select().where((NewWord.is_valid == True) & (NewWord.re1 == 'added')).order_by(-NewWord.frequency) 16 | # print(len(query)) 17 | for word in query: 18 | # print(chardet.detect(word.name)) 19 | res = [] 20 | for i in [word.name, word.phonogram, word.explanation]: 21 | res.append(i) 22 | 23 | yield res 24 | 25 | def save(res): 26 | 27 | with open('python-words.csv', 'a+', errors='ignore', newline='')as f: 28 | csv_writer = csv.writer(f) 29 | csv_writer.writerow(res) 30 | 31 | 32 | def main(): 33 | 34 | row = extract() 35 | count = 1 36 | while True: 37 | try: 38 | row_data = next(row) 39 | except: 40 | break 41 | save(row_data) 42 | count += 1 43 | 44 | if __name__ == '__main__': 45 | main() 46 | # res = extract() 47 | # print(next(res)) 48 | # print(next(res)) 49 | # print(next(res)) 50 | -------------------------------------------------------------------------------- /files/CONTRIBUTING.md.txt: -------------------------------------------------------------------------------- 1 | Contributing 2 | Your contributions are always welcome! 3 | Guidelines 4 | 5 | Add one link per Pull Request. 6 | Add the link: * [project-name](http://example.com/) - A short description ends with a period. 7 | 8 | Keep descriptions concise. 9 | 10 | 11 | Add a section if needed. 12 | 13 | Add the section description. 14 | Add the section title to Table of Contents. 15 | 16 | 17 | Search previous suggestions before making a new one, as yours may be a duplicate. 18 | Don't mention Python in the description as it's implied. 19 | Check your spelling and grammar. 20 | Remove any trailing whitespace. 21 | Send a Pull Request with the reason why the library is awesome. 22 | 23 | -------------------------------------------------------------------------------- /files/autobahn-python.txt: -------------------------------------------------------------------------------- 1 | Autobahn|Python 2 | WebSocket & WAMP for Python on Twisted and asyncio. 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | Quick Links : Source Code - Documentation - WebSocket Examples - WAMP Examples 11 | Community : Mailing list - StackOverflow - Twitter - IRC #autobahn/chat.freenode.net 12 | Companion Projects : Autobahn|JS - Autobahn|Cpp - Autobahn|Testsuite - Crossbar.io - WAMP 13 | 14 | 15 | Introduction 16 | Autobahn|Python is a subproject of Autobahn and provides open-source 17 | implementations of 18 | 19 | The WebSocket Protocol 20 | The Web Application Messaging Protocol (WAMP) 21 | 22 | for Python 2 and 3, and running on Twisted and asyncio . 23 | You can use Autobahn|Python to create clients and servers in Python speaking just plain WebSocket or WAMP. 24 | WebSocket allows bidirectional real-time messaging on the Web and beyond, while WAMP adds real-time application communication on top of WebSocket. 25 | WAMP provides asynchronous Remote Procedure Calls and Publish & Subscribe for applications in one protocol running over WebSocket . WAMP is a routed protocol, so you need a WAMP Router to connect your Autobahn|Python based clients. We provide Crossbar.io , but there are other options as well. 26 | 27 | Features 28 | 29 | framework for WebSocket and WAMP clients and servers 30 | compatible with Python 2.7 and 3.3 or later 31 | runs on CPython , PyPy and Jython 32 | runs under Twisted and asyncio - implements WebSocket 33 | RFC6455 and Draft Hybi-10+ 34 | implements WebSocket compression 35 | implements WAMP , the Web Application Messaging Protocol 36 | high-performance, fully asynchronous implementation 37 | best-in-class standards conformance (100% strict passes with Autobahn Testsuite : Client Server ) 38 | message-, frame- and streaming-APIs for WebSocket 39 | supports TLS (secure WebSocket) and proxies 40 | Open-source ( MIT license ) 41 | 42 | 43 | Show me some code 44 | To give you a first impression, here are two examples. We have lot more in the repo . 45 | 46 | WebSocket Echo Server 47 | Here is a simple WebSocket Echo Server that will echo back any WebSocket 48 | message received: 49 | from autobahn.twisted.websocket import WebSocketServerProtocol 50 | # or: from autobahn.asyncio.websocket import WebSocketServerProtocol 51 | 52 | class MyServerProtocol ( WebSocketServerProtocol ): 53 | 54 | def onConnect ( self , request ): 55 | print ( " Client connecting: {} " .format(request.peer)) 56 | 57 | def onOpen ( self ): 58 | print ( " WebSocket connection open. " ) 59 | 60 | def onMessage ( self , payload , isBinary ): 61 | if isBinary: 62 | print ( " Binary message received: {} bytes " .format( len (payload))) 63 | else : 64 | print ( " Text message received: {} " .format(payload.decode( ' utf8 ' ))) 65 | 66 | # echo back message verbatim 67 | self .sendMessage(payload, isBinary) 68 | 69 | def onClose ( self , wasClean , code , reason ): 70 | print ( " WebSocket connection closed: {} " .format(reason)) 71 | To actually run above server protocol, you need some lines of boilerplate . 72 | 73 | WAMP Application Component 74 | Here is a WAMP Application Component that performs all four types of 75 | actions that WAMP provides: 76 | 77 | subscribe to a topic 78 | publish an event 79 | register a procedure 80 | call a procedure 81 | 82 | from autobahn.twisted.wamp import ApplicationSession 83 | # or: from autobahn.asyncio.wamp import ApplicationSession 84 | 85 | class MyComponent ( ApplicationSession ): 86 | 87 | @inlineCallbacks 88 | def onJoin ( self , details ): 89 | 90 | # 1. subscribe to a topic so we receive events 91 | def onevent ( msg ): 92 | print ( " Got event: {} " .format(msg)) 93 | 94 | yield self .subscribe(onevent, ' com.myapp.hello ' ) 95 | 96 | # 2. publish an event to a topic 97 | self .publish( ' com.myapp.hello ' , ' Hello, world! ' ) 98 | 99 | # 3. register a procedure for remote calling 100 | def add2 ( x , y ): 101 | return x + y 102 | 103 | self .register(add2, ' com.myapp.add2 ' ) ; 104 | 105 | # 4. call a remote procedure 106 | res = yield self .call( ' com.myapp.add2 ' , 2 , 3 ) 107 | print ( " Got result: {} " .format(res)) 108 | Above code will work on Twisted and asyncio by changing a single line 109 | (the base class of MyComponent ). To actually run above application component, you need some lines of boilerplate and a WAMP Router . 110 | -------------------------------------------------------------------------------- /files/awesome-flask.txt: -------------------------------------------------------------------------------- 1 | Awesome Flask 2 | 3 | A curated list of awesome Flask resources and plugins 4 | 5 | Resources 6 | 7 | Tutorial 8 | 9 | How to build a news app that never goes down and costs you practically nothing (by NPR) 10 | Building websites in Python with Flask 11 | The Flask Mega-Tutorial 12 | Implementing a RESTful Web API with Python & Flask 13 | Discover Flask - Full Stack Web Development with Flask 14 | Flaskr - Intro to Flask, Test Driven Development, and jQuery 15 | Real Python: Flask Category 16 | Flask-SQLAlchemy Tutorials and Examples 17 | Flask by Example 18 | 19 | 20 | Course: 21 | 22 | Full Stack Foundations 23 | Designing RESTful APIs 24 | 25 | 26 | Books 27 | 28 | Explore Flask 29 | Flask Web Development 30 | Real Python 31 | Learning Flask Framework 32 | Flask Blueprints 33 | Flask Framework Cookbook 34 | Mastering Flask 35 | Building Web Applications with Flask 36 | 37 | 38 | Slides 39 | 40 | Creating beautiful REST APIs with Flask 41 | Advanced Flask Patterns 42 | Flasky Goodness 43 | Domain Driven Design (... with Flask) 44 | In Flask we Trust 45 | 46 | 47 | Videos 48 | 49 | Flask by Example 50 | Writing RESTful web services with Flask 51 | Practical Flask Web Development Tutorials 52 | 53 | 54 | Build with Flask 55 | 56 | zmusic-ng - ZX2C4 Music provides a web interface for playing and downloading music files using metadata. 57 | GuitarFan - guitar tab 58 | June - python-china.org 59 | Zerqu - ZERQU is a content-focused API-based platform. eg: Python-China 60 | motiky 61 | missing - a list service called missing 62 | thenewsmeme.com 63 | overholt - Example Flask application illustrating common practices 64 | pypress - flask team blog 65 | thepast.me 66 | redispapa - another redis monitor by using flask, angular, socket.io 67 | flaskblog - a simple blog system based on flask 68 | cleanblog - a clean blog system based on flask and mongoengine 69 | Quokka CMS - CMS made with Flask and MongoDB 70 | chat - a live chat built with python (flask + gevent + apscheduler) + redis 71 | chatapp - Flask and Angular.js Chat Application using Socket.io 72 | Frozen-Flask - Freezes a Flask application into a set of static files 73 | mcflyin - A small timeseries transformation API built on Flask and Pandas 74 | Skylines - Live tracking, flight database and competition framework 75 | airflow - Airflow is a system to programmatically author, schedule and monitor data pipelines. 76 | timesketch - Collaborative forensics timeline analysis 77 | changes - A dashboard for your code. A build system. 78 | security_monkey - monitors policy changes and alerts on insecure configurations in an AWS account. 79 | securedrop - an open-source whistleblower submission system that media organizations can use to securely accept documents from and communicate with anonymous sources. 80 | sync_engine - IMAP/SMTP sync system with modern APIs 81 | cleansweep - Volunteer & Campaign Management System 82 | indico - a general-purpose event management web-based solution. It includes a full-blown conference organization workflow as well as tools for meeting management and room booking. It provides as well integration with video-conferencing solutions. 83 | 84 | 85 | App template/bootstrap/boilerplate 86 | 87 | fbone 88 | flask-base 89 | cookiecutter-flask 90 | cookiecutter-flask-pythonic 91 | Flask-Foundation 92 | Flask-Empty 93 | flask-rest-template 94 | gae-init - Flask boilerplate running on Google App Engine 95 | GAE Starter Kit - Flask, Flask-Login, WTForms, UIKit, and more, running on Google App Engine 96 | 97 | 98 | 99 | Plugins 100 | 101 | Framework 102 | 103 | Connexion - Swagger/OpenAPI First framework for Python on top of Flask with automatic endpoint validation and OAuth2 support 104 | Flask Kit - Flexible microkit for Flask microframework 105 | flask-peewee - flask integration for peewee, including admin, authentication, rest api and more 106 | Flask-MongoRest - Restful API framework wrapped around MongoEngine 107 | Eve - REST API framework powered by Flask, MongoDB and good intentions 108 | Flask-Restless - A Flask extension for creating simple ReSTful APIs from SQLAlchemy models 109 | Flask-RESTful - Simple framework for creating REST APIs 110 | Flask-RestPlus - syntaxic sugar, helpers and automatically generated Swagger documentation on top of Flask-Restful. 111 | Flask-Potion - RESTful API framework for Flask and SQLAlchemy 112 | enferno - A Flask-based Framework for the Next Decade 113 | Flasgger - Create API documentation for Flask views using Swagger 2.0 specs 114 | flask-restful-swagger - A wrapper for flask-restful which enables swagger support 115 | 116 | 117 | Admin 118 | 119 | Flask-Admin - Simple and extensible administrative interface framework for Flask 120 | Flask-SuperAdmin - The best admin interface framework for Flask. With scaffolding for MongoEngine, Django and SQLAlchemy 121 | 122 | 123 | Authentication 124 | 125 | Flask-Security - Quick and simple security for Flask applications 126 | Flask-Login - Flask user session management 127 | Authomatic - Authomatic provides out of the box support for a number of providers using OAuth 1.0a (Twitter, Tumblr and more) and OAuth 2.0 (Facebook, Foursquare, GitHub, Google, LinkedIn, PayPal and more) 128 | flask-googlefed - Google Federated Logins for Flask 129 | Flask-OpenID - Flask-OpenID adds openid support to flask applications 130 | Flask-Social - OAuth Provider Integration for Flask-Security 131 | Flask-OAuthlib - OAuthlib implementation for Flask 132 | GitHub-Flask - Flask extension for authenticating users with GitHub and making requests to the API 133 | Python-Social-Auth - Social auth made simple 134 | Flask-social-blueprint - OAuth OOP way 135 | Flask-OAuth - OAuth Support for Flask 136 | Flask-HTTPAuth - Simple extension that provides Basic and Digest HTTP authentication for Flask routes 137 | Flask-User - Customizable user account management for Flask 138 | 139 | 140 | Authorization 141 | 142 | Flask-Pundit - Extension based on Rails' Pundit gem that provides easy way to organize access control for your models 143 | 144 | 145 | Database/ORM/ODM 146 | 147 | Flask-MongoEngine - MongoEngine flask extension with WTF model forms support 148 | Flask-PyMongo - PyMongo support for Flask applications 149 | Flask-MongoAlchemy - Flask support for MongoDB using MongoAlchemy 150 | Flask-MongoKit - Flask-MongoKit simplifies the use of MongoKit (a powerful MongoDB ORM for Python) within Flask applications 151 | Flask-SQLAlchemy - Adds SQLAlchemy support to Flask 152 | Flask-Redis - Redis support for Flask. Not much else to say. 153 | Flask-And-Redis - Simple as dead support of Redis database for Flask apps 154 | Flask-CouchDBKit - Flask extension that provides integration with CouchDBKit 155 | Flask-OrientDB - Flask extension for OrientDB Graph&Document Database 156 | Flask-Z3950 - Z39.50 integration for Flask applications. 157 | Flask-Orator - Adds Orator ORM support to Flask applications. 158 | 159 | 160 | Database Migrations 161 | 162 | Flask-Migrate - SQLAlchemy database migrations for Flask applications using Alembic 163 | Flask-Alembic - A Flask Extension to provide Alembic integration with Flask 164 | 165 | 166 | Session 167 | 168 | Flask-Session - Session extension for Flask 169 | 170 | 171 | Cache 172 | 173 | flask-webcache - A Flask extension that adds HTTP based caching to Flask apps 174 | flask-heroku-cacheify - Automatic Flask cache configuration on Heroku 175 | Flask-Dogpile-Cache - Flask wrapper for dogpile.cache 176 | 177 | 178 | Data Validation 179 | 180 | Flask-WTF - Simple integration of Flask and WTForms, including CSRF, file upload and Recaptcha integration. 181 | Flask-SeaSurf - SeaSurf is a Flask extension for preventing cross-site request forgery (CSRF). 182 | Colander - A serialization/deserialization/validation library for strings, mappings and lists 183 | Flask-Validator - Data validator for SQLAlchemy using ORM events 184 | 185 | 186 | Email 187 | 188 | Flask-Mail - Flask-Mail adds SMTP mail sending to your Flask applications 189 | 190 | 191 | i18n 192 | 193 | flask-babel - i18n and l10n support for Flask based on Babel and pytz 194 | 195 | 196 | Full-text searching 197 | 198 | Flask-WhooshAlchemy - Whoosh indexing capabilities for Flask-SQLAlchemy 199 | SQLAlchemy-Searchable - Full-text searching for Flask-SQLAlchemy (Postgres only) 200 | 201 | 202 | Rate Limiting 203 | 204 | Flask-Limiter - Flask-Limiter provides rate limiting features to flask routes 205 | 206 | 207 | Queue 208 | 209 | Flask-RQ - RQ (Redis Queue) integration for Flask applications 210 | celery - Distributed Task Queue 211 | 212 | 213 | Payment 214 | 215 | PagSeguro - Brazil - Example to integrate Flask to PagSeguro Brazilian payment gateway 216 | alipay_python - not a flask plugin now but you can integrate easily 217 | flask-paypal - PayPal integration example with Flask 218 | Flask-WePay - A Flask wrapper for WePay's Python API 219 | 220 | 221 | Exception tracking 222 | 223 | Raven - Raven is a Python client for Sentry . 224 | 225 | 226 | Metrics Tracking 227 | 228 | flask-track-usage - Basic metrics tracking for the Flask framework 229 | Flask-Analytics - A Flask extention that provide user navigation tracking 230 | Flask-StatHat - StatHat extension for Flask 231 | scales - Metrics for Python 232 | flask-tracking - Tracking app for Flask that logs HTTP request and response information in a capped MongoDB collection 233 | Flask-Analytics - Analytics snippets generator extension for the Flask framework. 234 | 235 | 236 | Other SDK 237 | 238 | Flask-GoogleMaps - Build and embed google maps in our Flask templates 239 | Flask-Dropbox - Dropbox Python SDK support for Flask applications 240 | Flask-Heroku - Heroku environment variable configurations for Flask 241 | Flask-Gravatar - Small and simple gravatar usage in Flask 242 | Flask-WeasyPrint - Make PDF in your Flask app with WeasyPrint 243 | Flask-WeRoBot - Adds WeRoBot support to Flask 244 | Flask-Pusher - Pusher integration for Flask 245 | flask-pusher - Pusher support for your Flask application 246 | 247 | 248 | Frontend (Asset Pipeline/CDN) 249 | 250 | Flask-Mustache - Mustache template integration with Flask 251 | Flask-Genshi - Genshi templating for Flask 252 | flask-mako - Provides support for Mako Templates in Flask 253 | Flask-Markdown - Markdown jinja2 extension for Flask 254 | Flask-Sijax - An extension for the Flask microframework that adds Sijax support 255 | flask-apispec - simple self-documenting APIs with flask 256 | flask-assets - Flask webassets integration 257 | flask-funnel - Better asset management for Flask 258 | flask-marshmallow Flask + marshmallow for beautiful APIs 259 | jinja-assets-compressor - A Jinja extension (compatible with Flask and other frameworks) to compile and/or compress your assets 260 | flask_util_js - flask's util in javascript. such as url_for etc 261 | flask-s3 - Seamlessly serve your static assets of your Flask app from Amazon S3 262 | Flask-SSLify - Force SSL on your Flask app 263 | Flask-gzip - Gzip flask responses 264 | Flask-HTMLmin - Flask html minifier 265 | Flask-Azure-Storage - Flask extension that provides integration with Azure Storage 266 | 267 | 268 | Development (Debugging/Testing/AB Testing) 269 | 270 | flask_profiler - endpoint analyzer/profiler for Flask 271 | Flask-Testing - Unittest extensions for Flask 272 | flask_debugtoolbar_lineprofilerpanel - Line Profiler Panel for Flask Debug Toolbar 273 | Flask-DebugToolbar - A port of the django debug toolbar to flask 274 | flask-debug-toolbar-mongo - MongoDB panel for the Flask Debug Toolbar 275 | pytest-flask - A set of pytest fixtures to test Flask applications 276 | flask2postman - Generate a Postman collection from your Flask application 277 | 278 | 279 | Utils 280 | 281 | Flask-Script - Flask extension to help writing external scripts for Flask applications 282 | Flask-Split - A/B testing for your Flask application 283 | flask-jsonrpc - A basic JSON-RPC implementation for your Flask-powered sites 284 | Flask-Bcrypt - Flask-Bcrypt is a Flask extension that provides bcrypt hashing utilities for your application 285 | Flask-AppBuilder - Simple and rapid application builder framework, built on top of Flask. includes detailed security, auto form generation, google charts and much more 286 | Mixer - Mixer is application to generate instances of Django or SQLAlchemy models 287 | FlaskEx - Predefined help libraries for Flask 288 | mimerender - Python module for RESTful HTTP Content Negotiation 289 | Flask-Classy - Class based views for Flask 290 | Flask-Principal - Identity management for Flask applications 291 | Flask-ShortUrl - Short URL generator for Flask 292 | Flask-FeatureFlags - A Flask extension that enables or disables features based on configuration 293 | Flask-UUID - UUID url converter for Flask routes 294 | Flask-Reggie - Regex Converter for Flask URL Routes 295 | Flask-SocketIO - Socket.IO integration for Flask applications 296 | Flask-Moment - Formatting of dates and times in Flask templates using moment.js 297 | Flask-Paginate - Pagination support for Flask 298 | Flask-CORS - Flask extension for handling Cross Origin Resource Sharing (CORS), making cross-origin AJAX possible 299 | Flask-Styleguide - A living Styleguide for your Flask application 300 | 301 | 302 | 303 | -------------------------------------------------------------------------------- /files/awesome-sphinxdoc.txt: -------------------------------------------------------------------------------- 1 | Awesome Sphinx (Python Documentation Generator) 2 | A curated list of awesome extra libraries, software and resources for 3 | Sphinx (Python Documentation Generator). Inspired by 4 | awesome-sqlalchemy . (See also other awesome lists !) 5 | Licensed under a Creative Commons Attribution-ShareAlike 4.0 International 6 | License . 7 | Your contributions are welcome. 8 | 9 | Table of Contents 10 | 11 | Sphinx 12 | Articles & Presentations 13 | Extensions 14 | Internationalizations 15 | Miscellaneous 16 | Themes 17 | Publication 18 | 19 | 20 | 21 | Sphinx 22 | 23 | Wikipedia: https://en.wikipedia.org/wiki/Sphinx_(documentation_generator) 24 | Homepage: http://sphinx-doc.org/ 25 | Source: git https://github.com/sphinx-doc/sphinx 26 | 27 | 28 | Docs: http://sphinx-doc.org/contents.html 29 | Docs: http://sphinx-doc.org/develop.html 30 | Docs: http://sphinx-doc.org/rest.html 31 | Docs: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html 32 | Docs: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#line-blocks 33 | Docs: http://docutils.sourceforge.net/docs/ref/rst/directives.html 34 | Docs: https://docs.python.org/devguide/documenting.html#sections 35 | Docs: http://sphinx-doc.org/markup/index.html 36 | Docs: http://sphinx-doc.org/glossary.html 37 | 38 | 39 | Articles & Presentations 40 | 41 | Documenting Your Project With Sphinx 42 | Exercises for the Sphinx Tutorial that Brandon Rhodes often gives at PyCon. 43 | 44 | 45 | Extensions 46 | 47 | Source: git https://github.com/sphinx-doc/sphinx/tree/master/sphinx/ext 48 | Source: hg https://bitbucket.org/birkenfeld/sphinx-contrib 49 | Docs: http://sphinx-doc.org/extensions.html 50 | Docs: http://sphinx-doc.org/extdev/index.html 51 | Docs: http://sphinx-doc.org/extdev/appapi.html 52 | 53 | 54 | breathe 55 | ReStructuredText and Sphinx bridge to Doxygen. 56 | javasphinx 57 | Sphinx extension for documenting Java projects. 58 | numpydoc 59 | NumPy 's Sphinx extensions. 60 | Releases 61 | A Sphinx changelog-generating extension. 62 | sphinx-autodoc-annotation 63 | Use Python 3 annotations in sphinx-enabled docstrings. 64 | sphinx-autodoc-typehints 65 | Type hints support for the Sphinx autodoc extension 66 | sphinx-autodoc-napoleon-typehints 67 | Type hints support for the Sphinx autodoc extension with support for NumPy 68 | and Google style docstrings (see sphinxcontrib-napoleon ). 69 | sphinx-fortran-extension 70 | A Fortran domain and autodocumentation module for Sphinx. 71 | sphinx-git 72 | git Changelog for Sphinx. 73 | Sphinx-prompt 74 | Sphinx directive to add unselectable prompt. 75 | Sphinx-pyreverse 76 | Simple sphinx wrapper around pyreverse (from pylint suit) to generate 77 | UML diagramms from modules. 78 | sphinxcontrib-autojs 79 | The auto JavaScript documentation Sphinx extension. 80 | sphinxcontrib-autoprogram 81 | Provides an automated way to document CLI programs. 82 | sphinxcontrib-blockdiag 83 | Sphinx extension for embedding blockdiag diagrams. 84 | sphinxcontrib-cldomain 85 | Common Lisp domain for Sphinx. 86 | sphinxcontrib-docbookrestapi 87 | Sphinx extension that generates documentation for api-site from RST files. 88 | sphinxcontrib-fulltoc 89 | Extension for Sphinx to make the sidebar show a full table of contents 90 | instead of just the local headings. 91 | sphinxcontrib-httpdomain 92 | Provides a Sphinx domain for describing RESTful HTTP APIs. 93 | Also supports reflection for Flask, Bottle, and Tornado apps. 94 | cornice.ext.sphinxext 95 | Sphinx extension to build RESTful HTTP API documentation from 96 | Pyramid Cornice docstrings. 97 | sphinxcontrib-programoutput 98 | Sphinx extension to include program output into documents. 99 | sphinxcontrib-napoleon 100 | Napoleon is a pre-processor that parses NumPy and Google style docstrings. 101 | Tut 102 | Tut is a tool that helps you write tutorial style documentation using 103 | Sphinx. 104 | Hieroglyph 105 | Hieroglyph is an extension for Sphinx which builds HTML slides from 106 | ReStructuredText documents. 107 | 108 | 109 | Internationalizations 110 | 111 | sphinx-intl 112 | Sphinx utility that make it easy to translate and to apply translation. 113 | 114 | 115 | Miscellaneous 116 | 117 | django-sphinxdoc 118 | Integrate Sphinx documentation into a Django-powered website. Allows you to 119 | use your sites templates, auth and so on. Offers search via Haystack. 120 | ome-documentation 121 | Sphinx-based documentation for the Open Microscopy Environment. 122 | riv.vim 123 | ReStructured text editing extensions for Vim, GVim, MacVim; 124 | optionally with InstantRst gevent live reload. 125 | sphinx-gui 126 | Desktop GUI for editing Sphinx docs. 127 | sphinx-markdown-sample 128 | Markdown based sphinx the documentation generator sample. 129 | tinkerer 130 | Blog engine static HTML5 generator 131 | with categories, tags, landing page, nav sidebar, 132 | RSS powered by Sphinx and responsive Jinja templates. 133 | ablog 134 | ABlog is a Sphinx extension that converts any documentation 135 | or personal website project into a full-fledged blog with 136 | atom feeds, archive pages, blog sidebars, Disqus integration, 137 | Font-Awesome integration and easy GitHub Pages deploys 138 | 139 | 140 | Themes 141 | 142 | Docs: http://sphinx-doc.org/theming.html 143 | Docs: http://sphinx-doc.org/templating.html 144 | Source: git https://github.com/sphinx-doc/sphinx/tree/master/sphinx/themes 145 | 146 | 147 | Alabaster 148 | Modified Kr Sphinx doc theme. 149 | flask-sphinx-themes 150 | Sphinx Themes for Flask related projects and Flask itself. 151 | krTheme Sphinx Style 152 | Sphinx theme Kenneth Reitz uses for most projects e.g. Requests . 153 | Sphinx Readable Theme 154 | A clean and readable Sphinx theme with focus on autodoc – documentation 155 | from docstrings. 156 | sphinx-better-theme 157 | A theme for Sphinx that looks nice, is easy to style with CSS, works well 158 | on small screens and mobile devices, and organizes the page better. 159 | sphinx_rtd_theme 160 | Sphinx theme for readthedocs.io . 161 | sphinx-theme-graphite 162 | A slightly muted light-on-dark theme for the Sphinx document generator 163 | using only CSS. 164 | sphinxjp.themes.basicstrap 165 | Sphinx theme built with responsive Bootstrap . 166 | sublee-sphinx-themes 167 | Sphinx themes Heungsub Lee uses for his projects e.g. Energy , Korean , 168 | Ranking , TrueSkill . 169 | sphinx-py3doc-enhanced-theme 170 | A theme based on the theme of https://docs.python.org/3/ with some responsive enhancements. 171 | sphinx-bootstrap-theme 172 | Integrates Bootstrap CSS/Javascript framework responsive design with any Bootswatch CSS themes. 173 | sphinx-foundation-theme 174 | Theme based on the Foundation 4 CSS framework. 175 | sphinx-nameko-theme 176 | Forked from Sphinx Readable Theme, combined with elements of the Read The Docs theme. 177 | sphinx-guzzle-theme 178 | Sphinx theme used by Guzzle: http://guzzlephp.org 179 | sphinx-hbp-theme 180 | HumanBrainProject Collaboratory Sphinx Theme. 181 | crate-docs-theme 182 | This project provides a Sphinx theme for Crate's documentation that is compatible with ReadTheDocs. 183 | solar-theme 184 | Solar is an attempt to create a theme for the Python Sphinx documentation generator based on the Solarized color scheme. 185 | sphinxtrap-theme 186 | Sphinxtrap is a minimalist bootstrap2-based + fontawesome sphinx theme. 187 | mdn-sphinx-theme 188 | This is a version of the Mozilla Developer Network theme, for the Sphinx documentation engine. 189 | sphinx_adc_theme 190 | The Apple Developer Connection theme for sphinx 191 | 192 | 193 | Publication 194 | 195 | Read the Docs 196 | Read the Docs hosts documentation, making it fully searchable and easy to 197 | find. You can import your docs using any major version control system, 198 | including Mercurial, Git, Subversion, and Bazaar. It supports webhooks 199 | so your docs get built when you commit code. There's also support for 200 | versioning so you can build docs from tags and branches of your code in 201 | your repository. A full list of features is available. 202 | Okydoky 203 | Automated docs builder using Sphinx/GitHub/Distribute for private use. 204 | sphinx-autobuild 205 | Watch a Sphinx directory and rebuild the documentation when a change is 206 | detected. 207 | sphinx-server 208 | A universal Sphinx Server based on sphinx-autobuild with Docker support 209 | that can be used in production (self-hosted) and for documentation development, 210 | bundled with PlantUML , Graphviz and HTTP authentication . 211 | sphinx-me 212 | Wraps your README-only projects in a dynamic Sphinx shell for hosting on 213 | Read the Docs . 214 | Sphinx to GitHub 215 | Script to prepare Sphinx html output for GitHub Pages . 216 | ghp-import 217 | Script to overwrite a gh-pages / master branch with a .nojekyll file for GitHub Pages . 218 | 219 | -------------------------------------------------------------------------------- /files/awesome-sqlalchemy.txt: -------------------------------------------------------------------------------- 1 | Awesome SQLAlchemy 2 | 3 | 4 | A curated list of awesome extra libraries and resources for SQLAlchemy . Inspired by 5 | awesome-python . (See also other awesome lists !) 6 | Licensed under a Creative Commons Attribution-ShareAlike 4.0 International 7 | License . 8 | 9 | Table of Contents 10 | 11 | Data Structures 12 | Data Types 13 | Database Migration Tools 14 | Dialects 15 | Documentation 16 | File and Image Attachments 17 | Forms and Data Validations 18 | Full-text Searching 19 | GIS and Spatial Databases 20 | Internationalizations 21 | Profilers 22 | Query helpers 23 | Recipes 24 | Serialization and deserialization 25 | Testing 26 | Thin Abstractions 27 | Vendor-specific Extensions 28 | PostgreSQL 29 | 30 | 31 | Visualizations 32 | Web 33 | Framework Integrations 34 | Other 35 | 36 | 37 | 38 | 39 | 40 | Data Structures 41 | 42 | SQLAlchemy-Continuum 43 | Versioning and auditing extension for SQLAlchemy. 44 | 45 | Creates versions for inserts, deletes and updates. 46 | Does not store updates which don't change anything. 47 | Supports alembic migrations. 48 | Can revert objects data as well as all object relations at given 49 | transaction even if the object was deleted. 50 | Transactions can be queried afterwards using SQLAlchemy query syntax. 51 | Query for changed records at given transaction. 52 | Temporal relationship reflection. Version object's relationship show 53 | the parent objects relationships as they where in that point in time. 54 | Supports native versioning for PostgreSQL database (trigger based 55 | versioning). 56 | 57 | 58 | sqlalchemy_mptt 59 | Library for implementing MPTT (modified preorder tree traversal) with 60 | SQLAlchemy models and working with trees of model instances, 61 | like django-mptt . 62 | SQLAlchemy-ORM-tree 63 | An implementation for SQLAlchemy-based applications of 64 | the nested-sets/modified-pre-order-tree-traversal technique for 65 | storing hierarchical data in a relational database. 66 | vdm 67 | Versioned domain model. Python library for revisioning/versioning of databases. 68 | 69 | 70 | Data Types 71 | 72 | SQLAlchemy-Enum34 73 | SQLAlchemy type to store standard enum.Enum values. 74 | SQLAlchemy-Utc 75 | SQLAlchemy type to store aware datetime.datetime values. 76 | SQLAlchemy-Utils 77 | Various utility functions, new data types and helpers for SQLAlchemy 78 | 79 | Listeners 80 | Data types: {..., ChoiceType, CountryType, JSONType, URLType, UUIDType, ...} 81 | Range data types 82 | Aggregated attributes 83 | Generates decorator 84 | Generic relationships 85 | Database helpers: create_database, drop_database 86 | Foreign key helpers 87 | ORM helpers 88 | Utility classes 89 | Model mixins: Timestamp (created, updated times) 90 | 91 | 92 | 93 | 94 | Database Migration Tools 95 | 96 | Alembic 97 | Alembic is a lightweight database migration tool for usage with the 98 | SQLAlchemy Database Toolkit for Python. 99 | sqlalchemy-migrate 100 | Inspired by Ruby on Rails' migrations, SQLAlchemy Migrate provides 101 | a way to deal with database schema changes in SQLAlchemy projects. 102 | 103 | 104 | Dialects 105 | http://docs.sqlalchemy.org/en/latest/dialects/ 106 | 107 | redshift_sqlalchemy 108 | Amazon Redshift dialect for SQLAlchemy. 109 | sphinxalchemy 110 | SQLAlchemy dialect for iterfacing with Sphinx (search engine) via 111 | SphinxQL. 112 | 113 | 114 | Documentation 115 | 116 | http://docs.sqlalchemy.org/en/latest/ 117 | http://docs.sqlalchemy.org/en/latest/intro.html 118 | http://docs.sqlalchemy.org/en/latest/core/tutorial.html 119 | http://docs.sqlalchemy.org/en/latest/orm/tutorial.html 120 | http://docs.sqlalchemy.org/en/latest/glossary.html 121 | 122 | 123 | File and Image Attachments 124 | 125 | filedepot 126 | DEPOT is a framework for easily storing and serving files in web 127 | applications. Depot features simple integration with SQLAlchemy by providing 128 | customized model field types for storing files attached to your ORM 129 | document. 130 | SQLAlchemy-ImageAttach 131 | SQLAlchemy-ImageAttach is a SQLAlchemy extension for attaching images 132 | to entity objects. 133 | sqlalchemy-media 134 | Based on SQLAlchemy-ImageAttach but using JSON type instead of relation, 135 | and SqlAlchemy's mutable facility, Also supports multiple stores per context. 136 | 137 | 138 | Forms and Data Validations 139 | 140 | ColanderAlchemy 141 | ColanderAlchemy helps you to auto-generate Colander schemas that are based 142 | on SQLAlchemy mapped classes. 143 | Such Colander schemas can be used with libraries like Deform and helps 144 | remove the need for duplication of schema definitions. 145 | 146 | Flask-Validator 147 | Data validator for Flask and SQL-Alchemy, working at Model component 148 | with events, preventing invalid data in the columns. 149 | The extension works with event listeners from SQLAlchemy. 150 | FormAlchemy 151 | FormAlchemy eliminates boilerplate by autogenerating HTML input fields from a 152 | given model. FormAlchemy will try to figure out what kind of HTML code should 153 | be returned by introspecting the model's properties and generate ready-to-use 154 | HTML code that will fit the developer's application. 155 | WTForms-Alchemy 156 | WTForms-Alchemy is a WTForms extension toolkit for easier creation of 157 | model based forms. Strongly influenced by Django ModelForm. 158 | Sprox 159 | Sprox provides an easy way to create forms for web content which are: 160 | automatically generated, easy to customize, and validated. Sprox also 161 | has powerful tools to help you display your content the way you want 162 | to with table and record viewers. Sprox provides a way to fill your 163 | widgets, whether they are forms or other content with customizable data. 164 | 165 | 166 | Full-text Searching 167 | 168 | SQLAlchemy-Searchable 169 | Full-text searchable models for SQLAlchemy. Only supports PostgreSQL. 170 | 171 | 172 | SQLAlchemy-FullText-Search 173 | Fulltext search support with MySQL & SQLAlchemy. 174 | 175 | 176 | GIS and Spatial Databases 177 | 178 | GeoAlchemy 179 | GeoAlchemy provides extensions to SQLAlchemy to work with spatial databases. 180 | The current supported spatial database systems are PostGIS , Spatialite , 181 | MySQL, Oracle, and MS SQL Server 2008. 182 | 183 | GeoAlchemy 2 184 | GeoAlchemy 2 provides extensions to SQLAlchemy for working with 185 | spatial databases. 186 | GeoAlchemy 2 focuses on PostGIS . PostGIS 1.5 and PostGIS 2 are supported. 187 | GeoAlchemy 2 aims to be simpler than its predecessor, GeoAlchemy . 188 | Simpler to use, and simpler to maintain. 189 | 190 | 191 | 192 | Internationalizations 193 | 194 | SQLAlchemy-i18n 195 | Internationalization extension for SQLAlchemy models. 196 | 197 | Stores translations in separate tables. 198 | Reflects translation table structures based on 199 | parent model table structure. 200 | Supports forcing of given locale. 201 | Good performance (uses proxy dicts and other advanced SQLAlchemy 202 | concepts for performance optimization). 203 | 204 | 205 | 206 | 207 | Profilers 208 | 209 | flask_debugtoolbar 210 | Debug toolbar with SQLAlchemy query information for Flask. 211 | pyramid_debugtoolbar 212 | Debug toolbar with SQLAlchemy query information for Pyramid. 213 | SQLTap 214 | SQLTap is a library that allows you to profile and introspect the queries 215 | that your application makes using SQLAlchemy. 216 | SQLTap helps you understand: 217 | 218 | how many times a sql query is executed 219 | how much time your sql queries take 220 | where your application is issuing sql queries from 221 | 222 | 223 | nplusone 224 | Auto-detect the n+1 queries problem in SQLAlchemy (and other Python ORMs) 225 | nplusone detects unnecessary queries caused by lazy loading and unused eager loading. 226 | Integrates with Flask-SQLAlchemy. 227 | 228 | 229 | 230 | Query helpers 231 | 232 | sqlakeyset 233 | This library implements keyset-based paging for SQLAlchemy (both ORM and core). 234 | This library has been tested with PostgreSQL and MariaDB/MySQL. 235 | It should work with other SQLAlchemy-supported databases to provided they support row( syntax. 236 | 237 | 238 | 239 | Recipes 240 | 241 | https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes 242 | 243 | 244 | Serialization and deserialization 245 | 246 | marshmallow-sqlalchemy 247 | SQLAlchemy integration with the marshmallow (de)serialization library. 248 | 249 | 250 | Testing 251 | 252 | charlatan 253 | Fixtures management for SQLAlchemy and other systems. 254 | factory_boy 255 | Generate fake data and create random fixtures for testing in SQLAlchemy 256 | and many other Python ORM systems. 257 | mixer 258 | Generate fake data and create random fixtures for testing in SQLAlchemy 259 | and many other Python ORM systems. 260 | 261 | 262 | Thin Abstractions 263 | 264 | Dataset 265 | Easy-to-use data handling for SQL data stores in Python with support for 266 | implicit table creation, bulk loading, and transaction. Dataset also 267 | includes support for freezing data to CSV and JSON flat files. 268 | rdflib-sqlalchemy 269 | RDFLib store using SQLAlchemy dbapi as back-end. 270 | SQLSoup 271 | SQLSoup provides a convenient way to map Python objects to 272 | relational database tables, with no declarative code of any kind. 273 | It's built on top of the SQLAlchemy ORM and provides a super-minimalistic 274 | interface to an existing database. 275 | 276 | 277 | Vendor-specific Extensions 278 | 279 | PostgreSQL 280 | 281 | sqlalchemy-crosstab-postgresql 282 | New grammar for SQLAlchemy to make handling the crosstab() tablefunc 283 | (i.e. pivot tables) in PostgreSQL easy peasy. 284 | sqlalchemy-postgres-copy 285 | Wrapper for using PostgreSQL COPY with SQLAlchemy for efficient bulk data 286 | imports and exports. 287 | 288 | 289 | Visualizations 290 | 291 | sadisplay 292 | Simple package for describing SQLAlchemy schema and display raw database tables by reflecting feature. 293 | sqlalchemy_schemadisplay 294 | This module generates images from SQLAlchemy models. 295 | eralchemy 296 | ERAlchemy generates Entity Relation (ER) diagram from databases or from SQLAlchemy models. 297 | 298 | 299 | Web 300 | 301 | Framework Integrations 302 | 303 | bottle-sqlalchemy 304 | A Bottle plugin to manage SQLAlchemy session to your application. 305 | filteralchemy 306 | Declarative query builder that auto-generates filter parameters from 307 | models and parses request parameters using marshmallow-sqlalchemy 308 | and webargs . 309 | Flask-SQLAlchemy 310 | Flask-SQLAlchemy is an extension for Flask that adds support for 311 | SQLAlchemy to your application. 312 | Flask-Admin 313 | The admin interface framework for Flask . 314 | With scaffolding for SQLAlchemy, MongoEngine, pymongo and Peewee. 315 | pyramid_sqlalchemy 316 | pyramid_sqlalchemy provides everything needed to use SQLAlchemy in 317 | Pyramid applications. 318 | pyramid_restler 319 | pyramid_restler is a somewhat-opinionated toolkit for building 320 | RESTful Web services and applications on top of the 321 | Pyramid framework (with SQLAlchemy models). 322 | sacrud 323 | SACRUD will solve your problem of CRUD interface for SQLAlchemy, 324 | by providing extension for Pyramid (yet) or use it in pure form. 325 | Unlike classical CRUD interface, pyramid_sacrud allows override and 326 | flexibly customize interface (that is closer to django.contrib.admin ). 327 | SQLAlchemy-Wrapper 328 | A light and framework-independent wrapper for SQLAlchemy that makes 329 | it really easy to setup and use. 330 | 331 | Doesn't change the SQLAlchemy syntax. 332 | Can paginate the results of the queries. 333 | Support for muliple databases at the same time. 334 | 335 | 336 | zope.sqlalchemy 337 | The aim of this package is to unify the plethora of existing packages 338 | integrating SQLAlchemy with Zope 's transaction management. 339 | As such it seeks only to provide a data manager and makes no attempt 340 | to define a zopeish way to configure engines. 341 | 342 | 343 | Other 344 | 345 | paginate_sqlalchemy 346 | This module helps dividing large lists of items into pages. 347 | The user is shown one page at a time and can navigate to other pages. 348 | sandman2 349 | Generate a curl-able REST HTTP API with searching and filtering 350 | for all tables in a database and an admin UI with Flask-SQLAlchemy 351 | and HTTP Basic Authentication. 352 | sqlalchemy_mixins 353 | A set of well-tested mixins that brings Active Record, Django-like queries, nested eager load and beauty __repr__ to your SQLAlchemy. 354 | 355 | -------------------------------------------------------------------------------- /files/awesome.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | ✨ Prelaunching the Awesome Weekly newsletter! ✨ 10 | Vote it up on Product Hunt 11 | 12 | 13 | 14 | 15 | What is an awesome list?     16 | Contribution guide     17 | Creating a list     18 | Sticker 19 | 20 | 21 | 22 | Just type awesome.re to go here. Check out my blog and follow me on Twitter . 23 | 24 | Contents 25 | 26 | Platforms 27 | Programming Languages 28 | Front-End Development 29 | Back-End Development 30 | Computer Science 31 | Big Data 32 | Theory 33 | Books 34 | Editors 35 | Gaming 36 | Development Environment 37 | Entertainment 38 | Databases 39 | Media 40 | Learn 41 | Security 42 | Content Management Systems 43 | Hardware 44 | Business 45 | Work 46 | Networking 47 | Miscellaneous 48 | 49 | Platforms 50 | 51 | Node.js - JavaScript runtime built on Chrome's V8 JavaScript engine. 52 | Frontend Development 53 | iOS 54 | Android 55 | IoT & Hybrid Apps 56 | Electron 57 | Cordova 58 | React Native 59 | Xamarin 60 | Linux 61 | 62 | Containers 63 | 64 | 65 | macOS 66 | 67 | Command-Line 68 | Screensavers 69 | 70 | 71 | watchOS 72 | JVM 73 | Salesforce 74 | Amazon Web Services 75 | Windows 76 | IPFS 77 | Fuse 78 | Heroku 79 | Raspberry Pi - Credit card-sized computer aimed at teaching kids programming, but capable of a lot more. 80 | Qt - Cross-platform GUI app framework. 81 | WebExtensions - Cross-browser extension system. 82 | RubyMotion - Write cross-platform native apps for iOS, Android, macOS, tvOS, and watchOS in Ruby. 83 | 84 | Programming Languages 85 | 86 | JavaScript 87 | 88 | Promises 89 | Standard Style 90 | Must Watch Talks 91 | Tips 92 | Network Layer 93 | Micro npm Packages 94 | Mad Science npm Packages 95 | Maintenance Modules - For npm packages. 96 | npm - Package manager. 97 | AVA - Test runner. 98 | ESLint - Linter. 99 | Functional Programming 100 | Observables 101 | npm scripts - Task runner. 102 | 103 | 104 | Swift 105 | 106 | Education 107 | Playgrounds 108 | 109 | 110 | Python 111 | 112 | Asyncio - Asynchronous I/O in Python 3. 113 | Scientific Audio - Scientific research in audio/music. 114 | 115 | 116 | Rust 117 | Haskell 118 | PureScript 119 | Go 120 | Scala 121 | Ruby 122 | 123 | Events 124 | 125 | 126 | Clojure 127 | ClojureScript 128 | Elixir 129 | Elm 130 | Erlang 131 | Julia 132 | Lua 133 | C 134 | C/C++ 135 | R 136 | D 137 | Common Lisp 138 | Perl 139 | Groovy 140 | Dart 141 | Java 142 | 143 | RxJava 144 | 145 | 146 | Kotlin 147 | OCaml 148 | ColdFusion 149 | Fortran 150 | .NET 151 | 152 | Core 153 | 154 | 155 | PHP 156 | 157 | Composer - Package manager. 158 | 159 | 160 | Delphi 161 | Assembler 162 | AutoHotkey 163 | AutoIt 164 | Crystal 165 | TypeScript 166 | Frege - Haskell for the JVM. 167 | CMake - Build, test, and package software. 168 | 169 | Front-End Development 170 | 171 | ES6 Tools 172 | Web Performance Optimization 173 | Web Tools 174 | CSS 175 | 176 | Critical-Path Tools 177 | Scalability 178 | Must-Watch Talks 179 | Protips 180 | 181 | 182 | React - App framework. 183 | 184 | Relay - Framework for building data-driven React apps. 185 | 186 | 187 | Web Components 188 | Polymer 189 | Angular - App framework. 190 | Backbone - App framework. 191 | HTML5 192 | SVG 193 | Canvas 194 | KnockoutJS 195 | Dojo Toolkit 196 | Inspiration 197 | Ember - App framework. 198 | Android UI 199 | iOS UI 200 | Meteor 201 | BEM 202 | Flexbox 203 | Web Typography 204 | Web Accessibility 205 | Material Design 206 | D3 - Library for producing dynamic, interactive data visualizations. 207 | Emails 208 | jQuery 209 | 210 | Tips 211 | 212 | 213 | Web Audio 214 | Offline-First 215 | Static Website Services 216 | A-Frame VR - Virtual reality. 217 | Cycle.js - Functional and reactive JavaScript framework. 218 | Text Editing 219 | Motion UI Design 220 | Vue.js - App framework. 221 | Marionette.js - App framework. 222 | Aurelia - App framework. 223 | Charting 224 | Ionic Framework 2 225 | Chrome DevTools 226 | PostCSS - CSS preprocessor. 227 | Draft.js - Rich text editor framework for React. 228 | Service Workers 229 | Progressive Web Apps 230 | choo - App framework. 231 | Redux - State container for JavaScript apps. 232 | webpack - Module bundler. 233 | Browserify - Module bundler. 234 | Sass - CSS preprocessor. 235 | Ant Design - Enterprise-class UI design language. 236 | Less - CSS preprocessor. 237 | WebGL - JavaScript API for rendering 3D graphics. 238 | Preact - App framework. 239 | Progressive Enhancement 240 | 241 | Back-End Development 242 | 243 | Django 244 | Flask 245 | Docker 246 | Vagrant 247 | Pyramid 248 | Play1 Framework 249 | CakePHP 250 | Symfony 251 | 252 | Education 253 | 254 | 255 | Laravel 256 | 257 | Education 258 | 259 | 260 | Rails 261 | 262 | Gems - Packages. 263 | 264 | 265 | Phalcon 266 | Useful .htaccess Snippets 267 | nginx 268 | Dropwizard 269 | Kubernetes 270 | Lumen 271 | Serverless Framework 272 | Apache Wicket - Java web app framework. 273 | Vert.x - Toolkit for building reactive apps on the JVM. 274 | 275 | Computer Science 276 | 277 | University Courses 278 | Data Science 279 | Machine Learning 280 | 281 | Tutorials 282 | ML with Ruby - Learning, implementing, and applying Machine Learning using Ruby. 283 | 284 | 285 | Speech and Natural Language Processing 286 | 287 | Spanish 288 | NLP with Ruby 289 | 290 | 291 | Linguistics 292 | Cryptography 293 | Computer Vision 294 | Deep Learning - Neural networks. 295 | 296 | TensorFlow - Library for machine intelligence. 297 | Papers - The most cited deep learning papers. 298 | 299 | 300 | Deep Vision 301 | Open Source Society University 302 | Functional Programming 303 | Static Analysis & Code Quality 304 | Information Retrieval - Learn to develop your own search engine. 305 | 306 | Big Data 307 | 308 | Big Data 309 | Public Datasets 310 | Hadoop - Framework for distributed storage and processing of very large data sets. 311 | Data Engineering 312 | Streaming 313 | Apache Spark - Unified engine for large-scale data processing. 314 | 315 | Theory 316 | 317 | Papers We Love 318 | Talks 319 | Algorithms 320 | Algorithm Visualizations 321 | Artificial Intelligence 322 | Search Engine Optimization 323 | Competitive Programming 324 | Math 325 | Recursion Schemes - Traversing nested data structures. 326 | 327 | Books 328 | 329 | Free Programming Books 330 | Free Software Testing Books 331 | Go Books 332 | R Books 333 | Mind Expanding Books 334 | Book Authoring 335 | Elixir Books 336 | 337 | Editors 338 | 339 | Sublime Text 340 | Vim 341 | Emacs 342 | Atom - Open-source and hackable text editor. 343 | Visual Studio Code 344 | 345 | Gaming 346 | 347 | Game Development 348 | Game Talks 349 | Godot - Game engine. 350 | Open Source Games 351 | Unity - Game engine. 352 | Chess 353 | LÖVE - Game engine. 354 | PICO-8 - Fantasy console. 355 | Game Boy Development 356 | Construct 2 - Game engine. 357 | 358 | Development Environment 359 | 360 | Quick Look Plugins - For macOS. 361 | Dev Env 362 | Dotfiles 363 | Shell 364 | Fish - User-friendly shell. 365 | Command-Line Apps 366 | ZSH Plugins 367 | GitHub 368 | 369 | Browser Extensions 370 | Cheat Sheet 371 | 372 | 373 | Git Cheat Sheet & Git Flow 374 | Git Tips 375 | Git Add-ons - Enhance the git CLI. 376 | SSH 377 | FOSS for Developers 378 | Hyper - Cross-platform terminal app built on web technologies. 379 | PowerShell - Cross-platform object-oriented shell. 380 | Alfred Workflows - Productivity app for macOS. 381 | Terminals Are Sexy 382 | 383 | Entertainment 384 | 385 | Science Fiction - Scifi. 386 | Fantasy 387 | Podcasts 388 | Email Newsletters 389 | IT Quotes 390 | 391 | Databases 392 | 393 | Database 394 | MySQL 395 | SQLAlchemy 396 | InfluxDB 397 | Neo4j 398 | MongoDB - NoSQL database. 399 | RethinkDB 400 | TinkerPop - Graph computing framework. 401 | PostgreSQL - Object-relational database. 402 | CouchDB - Document-oriented NoSQL database. 403 | 404 | Media 405 | 406 | Creative Commons Media 407 | Fonts 408 | Codeface - Text editor fonts. 409 | Stock Resources 410 | GIF 411 | Music 412 | Open Source Documents 413 | Audio Visualization 414 | Broadcasting 415 | 416 | Learn 417 | 418 | CLI Workshoppers - Interactive tutorials. 419 | Learn to Program 420 | Speaking 421 | Tech Videos 422 | Dive into Machine Learning 423 | Computer History 424 | Programming for Kids 425 | Educational Games - Learn while playing. 426 | JavaScript Learning 427 | 428 | Security 429 | 430 | Application Security 431 | Security 432 | CTF - Capture The Flag. 433 | Malware Analysis 434 | Android Security 435 | Hacking 436 | Honeypots - Deception trap, designed to entice an attacker into attempting to compromise the information systems in an organization. 437 | Incident Response 438 | Vehicle Security and Car Hacking 439 | Web Security - Security of web apps & services. 440 | 441 | Content Management Systems 442 | 443 | Umbraco 444 | Refinery CMS - Ruby on Rails CMS. 445 | Wagtail - Django CMS focused on flexibility and user experience. 446 | Textpattern - Lightweight PHP-based CMS. 447 | Drupal - Extensible PHP-based CMS. 448 | 449 | Hardware 450 | 451 | Robotics 452 | Internet of Things 453 | Electronics - For electronic engineers and hobbyists. 454 | Bluetooth Beacons 455 | Electric Guitar Specifications - Checklist for building your own electric guitar. 456 | 457 | Business 458 | 459 | Open Companies 460 | Places to Post Your Startup 461 | OKR Methodology - Goal setting & communication best practices. 462 | 463 | Work 464 | 465 | Slack - Team collaboration. 466 | 467 | Communities 468 | 469 | 470 | Remote Jobs 471 | Productivity 472 | Niche Job Boards 473 | Programming Interviews 474 | 475 | Networking 476 | 477 | Software-Defined Networking 478 | Ripple - Open source distributed settlement network. 479 | Network Analysis 480 | PCAPTools 481 | 482 | Miscellaneous 483 | 484 | JSON 485 | 486 | GeoJSON 487 | Datasets 488 | 489 | 490 | Discounts for Student Developers 491 | Conferences 492 | Sysadmin 493 | Radio 494 | Awesome - Recursion illustrated. 495 | Analytics 496 | REST 497 | Selenium 498 | Appium - Test automation tool for apps. 499 | Continuous Delivery 500 | Services Engineering 501 | Free for Developers 502 | Bitcoin 503 | Answers - Stack Overflow, Quora, etc. 504 | Sketch - Design app for macOS. 505 | Boilerplate Projects 506 | Readme 507 | Tools 508 | Styleguides 509 | Design and Development Guides 510 | Software Engineering Blogs 511 | Self Hosted 512 | FOSS Production Apps 513 | Gulp - Task runner. 514 | AMA - Ask Me Anything. 515 | 516 | Answers 517 | 518 | 519 | Open Source Photography 520 | OpenGL - Cross-platform API for rendering 2D and 3D graphics. 521 | GraphQL 522 | Transit 523 | Research Tools 524 | Data Visualization 525 | Social Media Share Links 526 | Microservices 527 | Unicode - Unicode standards, quirks, packages and resources. 528 | 529 | Code Points 530 | 531 | 532 | Beginner-Friendly Projects 533 | Katas 534 | Tools for Activism 535 | Citizen Science - For community-based and non-institutional scientists. 536 | TAP - Test Anything Protocol. 537 | MQTT - "Internet of Things" connectivity protocol. 538 | Hacking Spots 539 | For Girls 540 | Vorpal - Node.js CLI framework. 541 | Vulkan - Low-overhead, cross-platform 3D graphics and compute API. 542 | LaTeX - Typesetting language. 543 | Economics - An economist's starter kit. 544 | Funny Markov Chains 545 | Bioinformatics 546 | Colorful - Choose your next color scheme. 547 | Steam - Digital distribution platform. 548 | Bots - Building bots. 549 | Site Reliability Engineering 550 | Empathy in Engineering - Building and promoting more compassionate engineering cultures. 551 | DTrace - Dynamic tracing framework. 552 | Userscripts - Enhance your browsing experience. 553 | Pokémon - Pokémon and Pokémon GO. 554 | ChatOps - Managing technical and business operations through a chat. 555 | Falsehood - Falsehoods programmers believe in. 556 | Domain-Driven Design - Software development approach for complex needs by connecting the implementation to an evolving model. 557 | Quantified Self - Self-tracking through technology. 558 | SaltStack - Python-based config management system. 559 | Web Design - For digital designers. 560 | JMeter - Load testing and performance measurement tool. 561 | Creative Coding - Programming something expressive instead of something functional. 562 | No-Login Web Apps - Web apps that work without login. 563 | Testing - Software testing. 564 | Free Software - Free as in freedom. 565 | Framer - Prototyping interactive UI designs. 566 | Markdown - Markup language. 567 | Dev Fun - Funny developer projects. 568 | Events in the Netherlands - Tech-related events in the Netherlands. 569 | Healthcare - Open source healthcare software for facilities, providers, developers, policy experts, and researchers. 570 | Magento 2 - Open Source eCommerce built with PHP. 571 | TikZ - Graph drawing packages for TeX/LaTeX/ConTeXt. 572 | Neuroscience - Study of the nervous system and brain. 573 | 574 | License 575 | 576 | To the extent possible under law, Sindre Sorhus has waived all copyright and related or neighboring rights to this work. 577 | -------------------------------------------------------------------------------- /files/aws-cli.txt: -------------------------------------------------------------------------------- 1 | aws-cli 2 | 3 | 4 | 5 | This package provides a unified command line interface to Amazon Web Services. 6 | The aws-cli package works on Python versions: 7 | 8 | 2.6.5 and greater 9 | 2.7.x and greater 10 | 3.3.x and greater 11 | 3.4.x and greater 12 | 3.5.x and greater 13 | 3.6.x and greater 14 | 15 | 16 | Attention! 17 | We recommend that all customers regularly monitor the 18 | Amazon Web Services Security Bulletins website for any important security bulletins related to 19 | aws-cli. 20 | 21 | 22 | Installation 23 | The easiest way to install aws-cli is to use pip : 24 | $ pip install awscli 25 | 26 | or, if you are not installing in a virtualenv : 27 | $ sudo pip install awscli 28 | 29 | If you have the aws-cli installed and want to upgrade to the latest version 30 | you can run: 31 | $ pip install --upgrade awscli 32 | 33 | 34 | Note 35 | On OS X, if you see an error regarding the version of six that came with 36 | distutils in El Capitan, use the --ignore-installed option: 37 | $ sudo pip install awscli --ignore-installed six 38 | 39 | 40 | This will install the aws-cli package as well as all dependencies. You can 41 | also just download the tarball . Once you have the 42 | awscli directory structure on your workstation, you can just run: 43 | $ cd 44 | $ python setup.py install 45 | 46 | If you want to run the develop branch of the CLI, see the 47 | "CLI Dev Version" section below. 48 | 49 | CLI Releases 50 | The release notes for the AWS CLI can be found here . 51 | You can also find a CHANGELOG 52 | in the github repo. 53 | 54 | Command Completion 55 | The aws-cli package includes a very useful command completion feature. 56 | This feature is not automatically installed so you need to configure it manually. 57 | To enable tab completion for bash either use the built-in command complete : 58 | $ complete -C aws_completer aws 59 | 60 | Or add bin/aws_bash_completer file under /etc/bash_completion.d , 61 | /usr/local/etc/bash_completion.d or any other bash_completion.d location. 62 | For tcsh: 63 | $ complete aws 'p/*/`aws_completer`/' 64 | 65 | You should add this to your startup scripts to enable it for future sessions. 66 | For zsh please refer to bin/aws_zsh_completer.sh. Source that file, e.g. 67 | from your ~/.zshrc, and make sure you run compinit before: 68 | $ source bin/aws_zsh_completer.sh 69 | 70 | For now the bash compatibility auto completion (bashcompinit) is used. 71 | For further details please refer to the top of bin/aws_zsh_completer.sh. 72 | 73 | Getting Started 74 | Before using aws-cli, you need to tell it about your AWS credentials. You 75 | can do this in several ways: 76 | 77 | Environment variables 78 | Shared credentials file 79 | Config file 80 | IAM Role 81 | 82 | The quickest way to get started is to run the aws configure command: 83 | $ aws configure 84 | AWS Access Key ID: foo 85 | AWS Secret Access Key: bar 86 | Default region name [us-west-2]: us-west-2 87 | Default output format [None]: json 88 | 89 | To use environment variables, do the following: 90 | $ export AWS_ACCESS_KEY_ID= 91 | $ export AWS_SECRET_ACCESS_KEY= 92 | 93 | To use the shared credentials file, create an INI formatted file like this: 94 | [default] 95 | aws_access_key_id=foo 96 | aws_secret_access_key=bar 97 | 98 | [testing] 99 | aws_access_key_id=foo 100 | aws_secret_access_key=bar 101 | 102 | and place it in ~/.aws/credentials (or in 103 | %UserProfile%\.aws/credentials on Windows). If you wish to place the 104 | shared credentials file in a different location than the one specified above, 105 | you need to tell aws-cli where to find it. Do this by setting 106 | the appropriate environment variable: 107 | $ export AWS_SHARED_CREDENTIALS_FILE=/path/to/shared_credentials_file 108 | 109 | To use a config file, create a configuration file like this: 110 | [default] 111 | aws_access_key_id= 112 | aws_secret_access_key= 113 | # Optional, to define default region for this profile. 114 | region=us-west-1 115 | 116 | [profile testing] 117 | aws_access_key_id= 118 | aws_secret_access_key= 119 | region=us-west-2 120 | 121 | and place it in ~/.aws/config (or in %UserProfile%\.aws\config on Windows). If you wish to place the config file in a different location than the one 122 | specified above, you need to tell aws-cli where to find it. Do this by setting 123 | the appropriate environment variable: 124 | $ export AWS_CONFIG_FILE=/path/to/config_file 125 | 126 | As you can see, you can have multiple profiles defined in both the shared 127 | credentials file and the configuration file. You can then specify which 128 | profile to use by using the --profile option. If no profile is specified 129 | the default profile is used. 130 | In the config file, except for the default profile, you 131 | must prefix each config section of a profile group with profile . 132 | For example, if you have a profile named "testing" the section header would 133 | be [profile testing] . 134 | The final option for credentials is highly recommended if you are 135 | using aws-cli on an EC2 instance. IAM Roles are 136 | a great way to have credentials installed automatically on your 137 | instance. If you are using IAM Roles, aws-cli will find them and use 138 | them automatically. 139 | 140 | Other Configurable Variables 141 | In addition to credentials, a number of other variables can be 142 | configured either with environment variables, configuration file 143 | entries or both. The following table documents these. 144 | 145 | 146 | Variable 147 | Option 148 | Config Entry 149 | Environment Variable 150 | Description 151 | 152 | 153 | 154 | profile 155 | --profile 156 | profile 157 | AWS_PROFILE 158 | Default profile name 159 | 160 | region 161 | --region 162 | region 163 | AWS_DEFAULT_REGION 164 | Default AWS Region 165 | 166 | config_file 167 |   168 |   169 | AWS_CONFIG_FILE 170 | Alternate location of config 171 | 172 | credentials_file 173 |   174 |   175 | AWS_SHARED_CREDENTIALS_FILE 176 | Alternate location of credentials 177 | 178 | output 179 | --output 180 | output 181 | AWS_DEFAULT_OUTPUT 182 | Default output style 183 | 184 | ca_bundle 185 | --ca-bundle 186 | ca_bundle 187 | AWS_CA_BUNDLE 188 | CA Certificate Bundle 189 | 190 | access_key 191 |   192 | aws_access_key_id 193 | AWS_ACCESS_KEY_ID 194 | AWS Access Key 195 | 196 | secret_key 197 |   198 | aws_secret_access_key 199 | AWS_SECRET_ACCESS_KEY 200 | AWS Secret Key 201 | 202 | token 203 |   204 | aws_session_token 205 | AWS_SESSION_TOKEN 206 | AWS Token (temp credentials) 207 | 208 | cli_timestamp_format 209 |   210 | cli_timestamp_format 211 |   212 | Output format of timestamps 213 | 214 | metadata_service_timeout 215 |   216 | metadata_service_timeout 217 | AWS_METADATA_SERVICE_TIMEOUT 218 | EC2 metadata timeout 219 | 220 | metadata_service_num_attempts 221 |   222 | metadata_service_num_attempts 223 | AWS_METADATA_SERVICE_NUM_ATTEMPTS 224 | EC2 metadata retry count 225 | 226 | parameter_validation 227 |   228 | parameter_validation 229 |   230 | Toggles local parameter validation 231 | 232 | 233 | 234 | 235 | Examples 236 | If you get tired of specifying a --region option on the command line 237 | all of the time, you can specify a default region to use whenever no 238 | explicit --region option is included using the region variable. 239 | To specify this using an environment variable: 240 | $ export AWS_DEFAULT_REGION=us-west-2 241 | 242 | To include it in your config file: 243 | [default] 244 | aws_access_key_id= 245 | aws_secret_access_key= 246 | region=us-west-1 247 | 248 | Similarly, the profile variable can be used to specify which profile to use 249 | if one is not explicitly specified on the command line via the 250 | --profile option. To set this via environment variable: 251 | $ export AWS_PROFILE=testing 252 | 253 | The profile variable can not be specified in the configuration file 254 | since it would have to be associated with a profile and would defeat the 255 | purpose. 256 | 257 | Further Information 258 | For more information about configuration options, please refer the 259 | AWS CLI Configuration Variables topic . You can access this topic 260 | from the CLI as well by running aws help config-vars . 261 | 262 | Accessing Services With Global Endpoints 263 | Some services, such as AWS Identity and Access Management (IAM) 264 | have a single, global endpoint rather than different endpoints for 265 | each region. 266 | To make access to these services simpler, aws-cli will automatically 267 | use the global endpoint unless you explicitly supply a region (using 268 | the --region option) or a profile (using the --profile option). 269 | Therefore, the following: 270 | $ aws iam list-users 271 | 272 | Will automatically use the global endpoint for the IAM service 273 | regardless of the value of the AWS_DEFAULT_REGION environment 274 | variable or the region variable specified in your profile. 275 | 276 | JSON Parameter Input 277 | Many options that need to be provided are simple string or numeric 278 | values. However, some operations require JSON data structures 279 | as input parameters either on the command line or in files. 280 | For example, consider the command to authorize access to an EC2 281 | security group. In this case, we will add ingress access to port 22 282 | for all IP addresses: 283 | $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ 284 | --ip-permissions '{"FromPort":22,"ToPort":22,"IpProtocol":"tcp","IpRanges":[{"CidrIp": "0.0.0.0/0"}]}' 285 | 286 | 287 | File-based Parameter Input 288 | Some parameter values are so large or so complex that it would be easier 289 | to place the parameter value in a file and refer to that file rather than 290 | entering the value directly on the command line. 291 | Let's use the authorize-security-group-ingress command shown above. 292 | Rather than provide the value of the --ip-permissions parameter directly 293 | in the command, you could first store the values in a file. Let's call 294 | the file ip_perms.json: 295 | {"FromPort":22, 296 | "ToPort":22, 297 | "IpProtocol":"tcp", 298 | "IpRanges":[{"CidrIp":"0.0.0.0/0"}]} 299 | 300 | Then, we could make the same call as above like this: 301 | $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ 302 | --ip-permissions file://ip_perms.json 303 | 304 | The file:// prefix on the parameter value signals that the parameter value 305 | is actually a reference to a file that contains the actual parameter value. 306 | aws-cli will open the file, read the value and pass use that value as the 307 | parameter value. 308 | This is also useful when the parameter is really referring to file-based 309 | data. For example, the --user-data option of the aws ec2 run-instances 310 | command or the --public-key-material parameter of the 311 | aws ec2 import-key-pair command. 312 | 313 | URI-based Parameter Input 314 | Similar to the file-based input described above, aws-cli also includes a 315 | way to use data from a URI as the value of a parameter. The idea is exactly 316 | the same except the prefix used is https:// or http:// : 317 | $ aws ec2 authorize-security-group-ingress --group-name MySecurityGroup \ 318 | --ip-permissions http://mybucket.s3.amazonaws.com/ip_perms.json 319 | 320 | 321 | Command Output 322 | The default output for commands is currently JSON. You can use the 323 | --query option to extract the output elements from this JSON document. 324 | For more information on the expression language used for the --query 325 | argument, you can read the 326 | JMESPath Tutorial . 327 | 328 | Examples 329 | Get a list of IAM user names: 330 | $ aws iam list-users --query Users[].UserName 331 | 332 | Get a list of key names and their sizes in an S3 bucket: 333 | $ aws s3api list-objects --bucket b --query Contents[].[Key,Size] 334 | 335 | Get a list of all EC2 instances and include their Instance ID, State Name, 336 | and their Name (if they've been tagged with a Name): 337 | $ aws ec2 describe-instances --query \ 338 | 'Reservations[].Instances[].[InstanceId,State.Name,Tags[?Key==`Name`] | [0].Value]' 339 | 340 | You may also find the jq tool useful in 341 | processing the JSON output for other uses. 342 | There is also an ASCII table format available. You can select this style with 343 | the --output table option or you can make this style your default output 344 | style via environment variable or config file entry as described above. 345 | Try adding --output table to the above commands. 346 | 347 | CLI Dev Version 348 | If you are just interested in using the latest released version of the AWS CLI, 349 | please see the "Installation" section above. This section is for anyone that 350 | wants to install the development version of the CLI. You normally would not 351 | need to do this unless: 352 | 353 | You are developing a feature for the CLI and plan on submitting a Pull 354 | Request. 355 | You want to test the latest changes of the CLI before they make it into an 356 | official release. 357 | 358 | The latest changes to the CLI are in the develop branch on github. This is 359 | the default branch when you clone the git repository. 360 | Additionally, there are several other packages that are developed in tandem 361 | with the CLI. This includes: 362 | 363 | botocore 364 | jmespath 365 | 366 | If you just want to install a snapshot of the latest development version of 367 | the CLI, you can use the requirements.txt file included in this repo. 368 | This file points to the development version of the above packages: 369 | cd 370 | pip install -r requirements.txt 371 | pip install -e . 372 | 373 | However, to keep up to date, you will continually have to run the 374 | pip install -r requirements.txt file to pull in the latest changes 375 | from the develop branches of botocore, jmespath, etc. 376 | You can optionally clone each of those repositories and run "pip install -e ." 377 | for each repository: 378 | git clone && cd jmespath/ 379 | pip install -e . && cd .. 380 | git clone && cd botocore/ 381 | pip install -e . && cd .. 382 | git clone && cd aws-cli/ 383 | pip install -e . 384 | 385 | 386 | Getting Help 387 | We use GitHub issues for tracking bugs and feature requests and have limited 388 | bandwidth to address them. Please use these community resources for getting 389 | help: 390 | 391 | Ask a question on Stack Overflow and tag it with aws-cli 392 | Come join the AWS CLI community chat on gitter 393 | Open a support ticket with AWS Support 394 | If it turns out that you may have found a bug, please open an issue 395 | 396 | -------------------------------------------------------------------------------- /files/bashplotlib.txt: -------------------------------------------------------------------------------- 1 | bashplotlib 2 | plotting in the terminal 3 | 4 | what is it? 5 | bashplotlib is a python package and command line tool for making basic plots in the terminal. It's a quick way to visualize data when you don't have a GUI. It's written in pure python and can quickly be installed anywhere using pip. 6 | installation 7 | install with pip 8 | $ pip install bashplotlib 9 | 10 | install from source 11 | $ git clone git@github.com:glamp/bashplotlib.git 12 | $ cd bashplotlib 13 | $ python setup.py install 14 | 15 | Either method will install the bashplotlib python package and will also add hist and scatter 16 | to your python scripts folder. This folder should be on your path (add it if it's not). 17 | features 18 | 19 | quick plotting from the command line 20 | customize the color, size, title, and shape of plots 21 | pipe data into plots with stdin 22 | 23 | usage 24 | command line 25 | hist takes input from either stdin or specified using the -f parameter. Input should be a single column of numbers. 26 | 27 | scatter takes x and y coordinates as input form either a comma delimited file using -f or from 2 different files using -x and -y. 28 | 29 | in python 30 | If you want to use bashplotlib from python, just import histogram and scatterplot. 31 | from bashplotlib.scatterplot import plot_scatter 32 | 33 | 34 | ``` 35 | from bashplotlib.histogram import plot_hist 36 | ``` 37 | 38 | examples 39 | $ scatter --file data/texas.txt --pch . 40 | 41 | 42 | $ hist --file data/exp.txt 43 | 44 | 45 | $ scatter -x data/x_test.txt -y data/y_test.txt 46 | 47 | 48 | todo 49 | 50 | sideways numbers for x-axis of histograms 51 | colors for individual points 52 | line charts 53 | trendlines 54 | 55 | -------------------------------------------------------------------------------- /files/bcbb.txt: -------------------------------------------------------------------------------- 1 | Collection of useful code related to biological analysis. Much of this is 2 | discussed with examples at Blue collar bioinformatics . 3 | Some projects which may be especially interesting: 4 | 5 | CloudBioLinux -- An automated environment to install useful biological software and 6 | libraries. This is used to bootstrap blank machines, such as those you'd 7 | find on Cloud providers like Amazon, to ready to go analysis workstations. 8 | See the CloudBioLinux effort for more details. This project 9 | moved to its own repository at https://github.com/chapmanb/cloudbiolinux . 10 | gff -- A GFF parsing library in Python, aimed for inclusion into Biopython. 11 | nextgen -- A python toolkit providing best-practice pipelines for fully 12 | automated high throughput sequencing analysis. This project has 13 | moved into its own repository: https://github.com/chapmanb/bcbio-nextgen 14 | distblast -- A distributed BLAST analysis running for identifying best hits in 15 | a wide variety of organisms for downstream phylogenetic analyses. The code 16 | is generalized to run on local multi-processor and distributed Hadoop 17 | clusters. 18 | 19 | -------------------------------------------------------------------------------- /files/bcbio-nextgen.txt: -------------------------------------------------------------------------------- 1 | 2 | Validated, scalable, community developed variant calling, RNA-seq and small RNA 3 | analysis. You write a high level configuration file specifying your inputs and 4 | analysis parameters. This input drives a parallel run that handles distributed 5 | execution, idempotent processing restarts and safe transactional steps. bcbio 6 | provides a shared community resource that handles the data processing component 7 | of sequencing analysis, providing researchers with more time to focus on the 8 | downstream biology. 9 | 10 | 11 | Features 12 | 13 | Community developed: We welcome contributors with the goal of 14 | overcoming the biological, algorithmic and computational challenges 15 | that face individual developers working on complex pipelines in 16 | quickly changing research areas. See our users page for examples 17 | of bcbio-nextgen deployments, and the developer documentation for 18 | tips on contributing. 19 | Installation: A single installer script prepares all 20 | third party software, data libraries and system configuration files. 21 | Automated validation : Compare variant calls against common reference 22 | materials or sample specific SNP arrays to ensure call correctness. 23 | Incorporation of multiple approaches for alignment, preparation and 24 | variant calling enable unbiased comparisons of algorithms. 25 | Distributed: Focus on parallel analysis and scaling to handle 26 | large population studies and whole genome analysis. Runs on single 27 | multicore computers, in compute clusters using IPython parallel , 28 | or on the Amazon cloud. See the parallel documentation for full 29 | details. 30 | Multiple analysis algorithms: bcbio-nextgen provides configurable 31 | variant calling, RNA-seq and small RNA pipelines . 32 | 33 | 34 | Quick start 35 | 36 | Install bcbio-nextgen with all tool dependencies and data files: 37 | wget https://raw.github.com/chapmanb/bcbio-nextgen/master/scripts/bcbio_nextgen_install.py 38 | python bcbio_nextgen_install.py /usr/local/share/bcbio --tooldir=/usr/local \ 39 | --genomes GRCh37 --aligners bwa --aligners bowtie2 40 | 41 | producing an editable system configuration file referencing the installed 42 | software, data and system information. 43 | 44 | Automatically create a processing description of sample FASTQ and BAM files 45 | from your project, and a CSV file of sample metadata: 46 | bcbio_nextgen.py -w template freebayes-variant project1.csv sample1.bam sample2_1.fq sample2_2.fq 47 | 48 | This produces a sample description file containing pipeline configuration options . 49 | 50 | Run analysis, distributed across 8 local cores: 51 | cd project1/work 52 | bcbio_nextgen.py ../config/project1.yaml -n 8 53 | 54 | 55 | 56 | 57 | Documentation 58 | See the full documentation and longer analysis-based articles . We welcome enhancements or problem reports using GitHub 59 | and discussion on the biovalidation mailing list . 60 | 61 | Contributors 62 | 63 | Miika Ahdesmaki , AstraZeneca 64 | Luca Beltrame , IRCCS "Mario Negri" Institute for Pharmacological Research, Milan, Italy 65 | Christian Brueffer , Lund University, Lund, Sweden 66 | Alla Bushoy , AstraZeneca 67 | Guillermo Carrasco , Science for Life Laboratory, Stockholm 68 | Nick Carriero , Simons Foundation 69 | Brad Chapman , Harvard Chan Bioinformatics Core 70 | Saket Choudhary , University Of Southern California 71 | Peter Cock , The James Hutton Institute 72 | Matthias De Smet , Center for Medical Genetics, Ghent University Hospital, Belgium 73 | Matt Edwards , MIT 74 | Mario Giovacchini , Science for Life Laboratory, Stockholm 75 | Karl Gutwin , Biogen 76 | Jeff Hammerbacher , Icahn School of Medicine at Mount Sinai 77 | Oliver Hofmann , Wolfson Wohl Cancer Research Center 78 | John Kern 79 | Rory Kirchner , Harvard Chan Bioinformatics Core 80 | Tetiana Khotiainsteva , Ardigen 81 | Jakub Nowacki , AstraZeneca 82 | John Morrissey , Harvard Chan Bioinformatics Core 83 | Lorena Pantano , Harvard Chan Bioinformatics Core 84 | Brent Pedersen , University of Colorado Denver 85 | James Porter , The University of Chicago 86 | Valentine Svensson , Science for Life Laboratory, Stockholm 87 | Paul Tang , UCSF 88 | Stephen Turner , University of Virginia 89 | Roman Valls , Science for Life Laboratory, Stockholm 90 | Kevin Ying , Garvan Institute of Medical Research, Sydney, Australia 91 | Vlad Saveliev , Center for Algorithmic Biotechnology, St. Petersburg University 92 | 93 | 94 | License 95 | The code is freely available under the MIT license . 96 | -------------------------------------------------------------------------------- /files/caffe.txt: -------------------------------------------------------------------------------- 1 | Caffe 2 | 3 | 4 | Caffe is a deep learning framework made with expression, speed, and modularity in mind. 5 | It is developed by Berkeley AI Research ( BAIR )/The Berkeley Vision and Learning Center (BVLC) and community contributors. 6 | Check out the project site for all the details like 7 | 8 | DIY Deep Learning for Vision with Caffe 9 | Tutorial Documentation 10 | BAIR reference models and the community model zoo 11 | Installation instructions 12 | 13 | and step-by-step examples. 14 | Custom distributions 15 | 16 | Intel Caffe (Optimized for CPU and support for multi-node), in particular Xeon processors (HSW, BDW, Xeon Phi). 17 | OpenCL Caffe e.g. for AMD or Intel devices. 18 | Windows Caffe 19 | 20 | Community 21 | 22 | Please join the caffe-users group or gitter chat to ask questions and talk about methods and models. 23 | Framework development discussions and thorough bug reports are collected on Issues . 24 | Happy brewing! 25 | License and Citation 26 | Caffe is released under the BSD 2-Clause license . 27 | The BAIR/BVLC reference models are released for unrestricted use. 28 | Please cite Caffe in your publications if it helps your research: 29 | @article{jia2014caffe, 30 | Author = {Jia, Yangqing and Shelhamer, Evan and Donahue, Jeff and Karayev, Sergey and Long, Jonathan and Girshick, Ross and Guadarrama, Sergio and Darrell, Trevor}, 31 | Journal = {arXiv preprint arXiv:1408.5093}, 32 | Title = {Caffe: Convolutional Architecture for Fast Feature Embedding}, 33 | Year = {2014} 34 | } 35 | 36 | -------------------------------------------------------------------------------- /files/caniusepython3.txt: -------------------------------------------------------------------------------- 1 | Can I Use Python 3? 2 | 3 | You can read the documentation on how to use caniusepython3 from its 4 | PyPI page . A web interface 5 | is also available. 6 | How do you tell if a project has been ported to Python 3? 7 | On PyPI each project can specify various 8 | trove classifiers 9 | (typically in a project's setup.py through a classifier 10 | argument to setup() ). 11 | There are various classifiers related to what version of Python a project can 12 | run on. E.g.: 13 | Programming Language :: Python :: 3 14 | Programming Language :: Python :: 3.0 15 | Programming Language :: Python :: 3.1 16 | Programming Language :: Python :: 3.2 17 | Programming Language :: Python :: 3.3 18 | Programming Language :: Python :: 3.4 19 | 20 | As long as a trove classifier for some version of Python 3 is specified then the 21 | project is considered to support Python 3 (project owners: it is preferred you 22 | at least specify Programming Language :: Python :: 3 as that is how you 23 | end up listed on the Python 3 Packages list on PyPI ; 24 | you can represent Python 2 support with Programming Language :: Python ). 25 | The other way is through a manual override in 26 | caniusepython3 itself. Projects ends up on this list because: 27 | 28 | They are now part of Python's standard library in some release of Python 3 29 | Their Python 3 port is under a different name 30 | They are missing a Python 3 trove classifier but have actually been ported 31 | 32 | If any of these various requirements are met, then a project is considered to 33 | support Python 3 and thus will be added to the manual overrides list. You can 34 | see the list of overrides when you use caniusepython3's CLI with verbose output 35 | turned on. 36 | What if I know of a project that should be added to the overrides file? 37 | If a project has Python 3 support in a release on PyPI but they have not added the 38 | proper trove classifier, then either submit a 39 | pull request or file an 40 | issue with the name of the 41 | project and a link to some proof that a release available on PyPI has indeed been 42 | ported (e.g. PyPI page stating the support, tox.ini file showing tests being run 43 | against Python 3, etc.). Projects that have Python 3 support in their version control 44 | system but not yet available on PyPI will not be considered for inclusion in the 45 | overrides file. 46 | How can I get a project ported to Python 3? 47 | Typically projects which have not switched to Python 3 yet are waiting for: 48 | 49 | A dependency to be ported to Python 3 50 | Someone to volunteer to put in the time and effort to do the port 51 | 52 | Since caniusepython3 will tell you what dependencies are blocking a project 53 | that you depend on from being ported, you can try to port a project farther 54 | down your dependency graph to help a more direct dependency make the transition. 55 | Which brings up the second point: volunteering to do a port. Most projects 56 | happily accept help, they just have not done the port yet because they have 57 | not had the time ("volunteering" can also take the form of paying someone to do 58 | the port on your behalf). Some projects are simply waiting for people to ask for it, 59 | so even speaking up politely and requesting a port can get the process started. 60 | If you are looking for help to port a project, you can always search online for 61 | various sources of help. If you want a specific starting point there are 62 | HOWTOs in the Python documentation 63 | on porting pure Python modules 64 | and extension modules . 65 | Change Log 66 | 5.0.1 (in development) 67 | 68 | Fix "No handler found" output under Python 2.7 69 | (patch by arnuschky ) 70 | 71 | 5.0.0 72 | 73 | Return a 3 error code when a command completes successfully but there are 74 | found blockers (patch by pcattori ; 75 | accidentally left out of the 4.0.0 release) 76 | Officially support Python 3.6 77 | Usual overrides updates 78 | 79 | 4.0.0 80 | 81 | Stop using PyPI's XML-RPC API and move to its JSON one for better performance 82 | (and switch to https://pypi.org ) 83 | Load the overrides data from GitHub when possible, falling back to the data 84 | included with the package when necessary (thanks to 85 | shafrom for adding local, one-day caching) 86 | 87 | 3.4.1 88 | 89 | Update the URL used for PyPI to https://pypi.org 90 | (patch by Chris Fournier ) 91 | Usual override updates 92 | 93 | 3.4.0 94 | 95 | Fix a dict comprehension failure with the pylint checker 96 | (patch by Jeroen Oldenburger ) 97 | Usual override updates 98 | Python 3.5 support 99 | Tests have been made less flaky 100 | Use pypi.io instead of pypi.python.org 101 | Normalize project names to help guarantee lookup success 102 | 103 | 3.3.0 104 | 105 | Made tests more robust in the face of PyPI timing out 106 | Added Python 3.5 support 107 | Dropped Python 2.6 and 3.2 support 108 | Updated tests to not use Twisted as a Python 2-only project 109 | Fixed a bug where the pylint checker was incorrectly missing from __future__ import unicode_literals ( issue #103 ; reported by David Euresti ) 110 | Usual overrides updates 111 | 112 | 3.2.0 113 | 114 | Fix a failing test due to the assumed unported project being ported =) 115 | Work around distlib 0.2.0 bug (patch by @rawrgulmuffins) 116 | Usual override updates 117 | 118 | 3.1.0 119 | 120 | Log more details when running under -v (patch by @msabramo) 121 | Print a 🎉 -- it's a party popper in case you have mojibake for it -- when the 122 | terminal supports it and there are no blocking dependencies (patch by @msabramo) 123 | Fix compatibility with pip 6.1.0 (patch by @msabramo) 124 | Fix warning of missing logger when using setup.py integration 125 | (issue #80; patch by @msabramo) 126 | Remove checkers for filter , map , range , and zip as they have been 127 | improved upon and 128 | merged upstream in Pylint 129 | Updated outdated documentation 130 | Usual override updates 131 | 132 | 3.0.0 133 | 134 | Introduce caniusepython3.pylint_checker which extends pylint --py3k with 135 | very strict porting checks 136 | Work around a bug in distlib 137 | Compatibility fix for pip 6.0 ( issue #72 ) 138 | Usual override updates 139 | 140 | 2.2.0 141 | 142 | Suppress an xmlrpclib.Fault exception under Python 2.6 when trying to close 143 | an XML-RPC connection (can't close a connection under Python 2.6 anyway and 144 | the exception has only been seen on Travis ) 145 | Move to unittest2 as a developer 146 | dependency 147 | Move mock to a developer dependency 148 | Usual override tweaks 149 | 150 | 2.1.2 151 | 152 | Avoid infinite recursion when there is a circular dependency 153 | ( issue #60 ) 154 | Usual overrides tweaks 155 | 156 | 2.1.1 157 | 158 | Normalize the names of direct dependencies for proper Python 3 compatibility 159 | checking 160 | ( issue #55 ) 161 | Properly set the logging details when executed from the entry point 162 | Usual overrides tweaks 163 | 164 | 2.1.0 165 | 166 | Verbose output will print what manual overrides are used and why 167 | (when available) 168 | Fix logging to only be configured when running as a script as well as fix a 169 | format bug 170 | Usual override updates 171 | 172 | 2.0.3 173 | 174 | Fixed setup.py caniusepython3 to work with extras_require properly 175 | Fix various errors triggered from the moving of the just_name() function to 176 | a new module in 2.0.0 (patch by Vaibhav Sagar w/ input from Jannis Leidel) 177 | Usual overrides tweaks (thanks to CyrilRoelandteNovance for contributing) 178 | 179 | 2.0.2 180 | 181 | Fix lack of unicode usage in a test 182 | Make Python 2.6 happy again due to its distate of empty XML-RPC results 183 | 184 | 2.0.1 185 | 186 | Fix syntax error 187 | 188 | 2.0.0 189 | 190 | Tweak overrides 191 | -r , -m , and -p now take 1+ arguments instead of a single comma-separated 192 | list 193 | Unrecognized projects are considered ported to prevent the lack of info on 194 | the unrecognized project perpetually suggesting that it's never been ported 195 | Introduced icanusepython3.check() 196 | 197 | 1.2.1 198 | 199 | Fix -v to actually do something again 200 | Tweaked overrides 201 | 202 | 1.2.0 203 | 204 | -r accepts a comma-separated list of file paths 205 | 206 | 1.1.0 207 | 208 | Setuptools command support 209 | Various fixes 210 | 211 | 1.0 212 | Initial release. 213 | -------------------------------------------------------------------------------- /files/cartridge.txt: -------------------------------------------------------------------------------- 1 | 2 | Created by Stephen McDonald 3 | 4 | Overview 5 | Cartridge is a shopping cart application built using the Django 6 | framework. It is BSD licensed , and designed to provide a clean and 7 | simple base for developing e-commerce websites. It purposely does not 8 | include every conceivable feature of an e-commerce website; instead, 9 | Cartridge focuses on providing core features common to most e-commerce 10 | websites. 11 | This specific focus stems from the idea that every e-commerce website 12 | is different, is tailored to the particular business and products at 13 | hand, and should therefore be as easy to customize as possible. 14 | Cartridge achieves this goal with a code-base that is as simple as 15 | possible and implements only the core features of an e-commerce 16 | website. 17 | Cartridge extends the Mezzanine content management platform. A live 18 | demo of Cartridge can be found by visiting the Mezzanine live demo . 19 | 20 | Features 21 | 22 | Hierarchical categories 23 | Easily configurable product options (colours, sizes, etc.) 24 | Hooks for tax/shipping calculations and payment gateways 25 | Sale pricing 26 | Promotional discount codes 27 | PDF invoice generation (for packing slips) 28 | Stock control 29 | Product popularity 30 | Thumbnail generation 31 | Built-in test suite 32 | Separation of presentation (no embedded markup) 33 | Smart categories (by price range, colour, etc) 34 | Registered or anonymous checkout 35 | Configurable number of checkout steps 36 | Denormalised data for accessiblilty and performance 37 | Authenticated customer accounts with transaction history 38 | 39 | 40 | Dependencies 41 | Cartridge is designed as a plugin for the Mezzanine content 42 | management platform, and therefore requires Mezzanine to be 43 | installed. The integration of the two applications should occur 44 | automatically by following the installation instructions below. 45 | 46 | Installation 47 | The easiest method is to install directly from PyPI using pip by 48 | running the command below, which will also install the required 49 | dependencies mentioned above: 50 | $ pip install -U cartridge 51 | 52 | Otherwise, you can download Cartridge and install it directly from source: 53 | $ python setup.py install 54 | 55 | Once installed, the command mezzanine-project can be used to 56 | create a new Mezzanine project, with Cartridge installed, in similar 57 | fashion to django-admin.py : 58 | $ mezzanine-project -a cartridge project_name 59 | $ cd project_name 60 | $ python manage.py createdb --noinput 61 | $ python manage.py runserver 62 | 63 | Here we specify the -a switch for the mezzanine-project command, 64 | which tells it to use an alternative package (cartridge) for the project 65 | template to use. Both Mezzanine and Cartridge contain a project template 66 | package containing the settings.py and urls.py modules for an 67 | initial project. If you'd like to add Cartridge to an existing Mezzanine 68 | or Django project, you'll need to manually configure these yourself. See 69 | the FAQ section of the Mezzanine documentation for more information. 70 | 71 | Note 72 | The createdb command is a shortcut for using Django's 73 | migrate command, which will also install some demo content, 74 | such as a contact form, image gallery, and more. If you'd like to 75 | omit this step, use the --nodata option with createdb . 76 | 77 | You should then be able to browse to http://127.0.0.1:8000/admin/ and 78 | log in using the default account ( username: admin, password: 79 | default ). If you'd like to specify a different username and password 80 | during set up, simply exclude the --noinput option included above 81 | when running createdb . 82 | 83 | Contributing 84 | Cartridge is an open source project managed using both the Git and 85 | Mercurial version control systems. These repositories are hosted on 86 | both GitHub and Bitbucket respectively, so contributing is as 87 | easy as forking the project on either of these sites and committing 88 | back your enhancements. 89 | Please note the following guidelines for contributing: 90 | 91 | Contributed code must be written in the existing style. For Python 92 | (and to a decent extent, JavaScript as well), this is as simple as 93 | following the Django coding style and (most importantly) 94 | PEP 8 . Front-end CSS should adhere to the 95 | Bootstrap CSS guidelines . 96 | Contributions must be available on a separately named branch 97 | based on the latest version of the main branch. 98 | Run the tests before committing your changes. If your changes 99 | cause the tests to break, they won't be accepted. 100 | If you are adding new functionality, you must include basic tests 101 | and documentation. 102 | 103 | Here's a quick start to hacking on Cartridge after forking it on 104 | GitHub, by using the internal "project_template" as your current 105 | project: 106 | $ git clone https://github.com/your-github-username/cartridge/ 107 | $ cd cartridge 108 | $ git checkout -b your-new-branch-name 109 | $ cp cartridge/project_template/project_name/local_settings.py{.template,} 110 | $ python setup.py develop 111 | $ python cartridge/project_template/manage.py createdb --noinput 112 | $ python cartridge/project_template/manage.py runserver 113 | 114 | "hack hack hack" 115 | 116 | $ python setup.py test 117 | $ git commit -am "A message describing what you changed." 118 | $ git push origin your-new-branch-name 119 | 120 | 121 | Note 122 | Cartridge's development branch often relies on features that exist 123 | in Mezzanine's development branch, but haven't yet made it into an 124 | official release. To install Mezzanine's development version in your 125 | environment, run: 126 | $ pip install --upgrade git+https://github.com/stephenmcd/mezzanine.git#egg=Mezzanine 127 | 128 | 129 | 130 | Language Translations 131 | Cartridge makes full use of translation strings, which allow Cartridge 132 | to be translated into multiple languages using Django's 133 | internationalization methodology. Translations are managed on the 134 | Transiflex website but can also be submitted via GitHub or 135 | Bitbucket . Consult the documentation for Django's 136 | internationalization methodology for more information on creating 137 | translations and using them. 138 | 139 | Third-party Modules 140 | The following modules have been developed outside of Cartridge. If you 141 | have developed a module to integrate with Mezzanine or Cartridge, and 142 | would like it listed in the documentation, send an email to the 143 | mezzanine-users mailing list. You can also add modules to the 144 | Mezzanine Grid on djangopackages.com . 145 | 146 | cartridge_braintree - Payment processor for Braintree . 147 | cartridge-external-payment - Allows payment on an external 148 | provider platform. 149 | cartridge-tax - Implements a handful of sales tax models. 150 | cartridge-stripe - Alternative payment backend for Stripe . 151 | cartridge-pinpayments - PIN payments integration. 152 | 153 | 154 | Donating 155 | If you would like to make a donation to continue development of 156 | Cartridge, you can do so via the Mezzanine Project website. 157 | 158 | Support 159 | To report a security issue, please send an email privately to 160 | core-team@mezzaninecms.com . This gives us a chance to fix the issue and 161 | create an official release prior to the issue being made 162 | public. 163 | For all other Cartridge support, the primary channel is the 164 | mezzanine-users mailing list. Questions, comments, and all related 165 | discussions take place here amongst knowledgeable members of the 166 | community. 167 | If you're certain you've come across a bug, then please use the 168 | GitHub issue tracker . It's crucial that enough information is 169 | provided to reproduce the bug. This includes things such as the 170 | Python stack trace generated by error pages, as well as other aspects 171 | of the development environment used, such as operating system, 172 | database, Python version, etc. If you're not sure you've found a 173 | reproducable bug, then please try the mailing list first. 174 | Finally, feel free to drop by the #mezzanine IRC channel on 175 | Freenode , for a chat! 176 | Communications in all Cartridge and Mezzanine spaces are expected to 177 | conform to the Django Code of Conduct . 178 | 179 | Sites Using Cartridge 180 | 181 | Ripe Maternity 182 | Cotton On 183 | Coopers Store 184 | Sheer Ethic 185 | tindie.com 186 | Ross A. Laird 187 | Pink Twig 188 | Parfume Planet 189 | Life is Good 190 | Brooklyn Navy Yard 191 | Cotton On Asia 192 | Manai Glitter 193 | Tactical Bags 194 | Charles Koll Jewelry 195 | Puraforce Remedies 196 | Adrenaline 197 | The Peculiar Store 198 | KisanHub 199 | Kegbot 200 | Amblitec 201 | ZigZag Bags 202 | Justine & Katie's Bowtique 203 | The Art Rebellion 204 | Engineered Arts 205 | Lipman Art 206 | ZHackers 207 | Potrillo al Pie 208 | You Name It 209 | Warwick Friendly Society Pharmacies 210 | 211 | -------------------------------------------------------------------------------- /files/code2flow.txt: -------------------------------------------------------------------------------- 1 | Notes from 2017 2 | 3 | This is an older project which I am no longer working on. It was built before ES6 existed and before Python 3 had much usage. While it was always experimental and will probably still give you insights into your code, it will increasingly show signs of age. I am also unable to addess any issues, bugs, or pull requests. 4 | Like anyone who has been an engineer for more than 6 months, I am downright embarrassed by code I wrote when I was younger and this is no exception. Tabs!?!? What was I thinking???? 5 | The domain, code2flow.com is unrelated to this project and as far as I can tell through the internet archive, they launched their service after this repository was created. I've never heard anything from them and it doesn't appear like they use anything from here 6 | 7 | code2flow 8 | Turn your Python and Javascript source code into DOT flowcharts 9 | Code2flow will sweep through your project source code looking for function definitions. Then it will do another sweep looking for where those functions are called. Code2flow connects the dots and presents you with a flowchart estimating the functional structure of your program. 10 | In other words, code2flow generates callgraphs 11 | Code2flow is especially useful for untangling spaghetti code and getting new developers up to speed. 12 | Code2flow is EXPERIMENTAL and meant to provide a rough overview of the structure of simpler projects. There are many known limitations (see below). Expect MOST aspects of this application to change in future releases. 13 | Here is what happens when you run it on jquery 14 | 15 | On the python calendar module 16 | 17 | On code2flow/languages/python.py 18 | 19 | Installation 20 | Download , navigate to the directory, and run: 21 | sudo python setup.py install 22 | If you don't have it already, you will also have to install graphviz 23 | Using apt-get: 24 | sudo apt-get install graphviz 25 | Using port (for macs): 26 | sudo port install graphviz 27 | Usage 28 | To generate a DOT file run something like: 29 | code2flow mypythonfile.py 30 | Or, for javascript 31 | code2flow myjavascriptfile.js 32 | By default, code2flow will render a DOT file, out.gv and a PNG file, out.png. 33 | You can also render the flowchart in any of the formats that graphviz supports: 34 | bmp canon cgimage cmap cmapx cmapx_np dot eps exr fig gif gv imap imap_np ismap jp2 jpe jpeg jpg pct pdf pic pict plain plain-ext png pov ps ps2 psd sgi svg svgz tga tif tiff tk vml vmlz x11 xdot xlib 35 | For example: 36 | code2flow mypythonfile.py -o myflow.jpeg 37 | Specify multiple files, import directories, and even use * 38 | code2flow project/directory/ * .js 39 | code2flow project/directory --language js 40 | Limitations 41 | Code2flow is meant to provide a reasonable conjecture of the structure of simple projects and has many known limitations. 42 | 43 | Arrays of functions are not handled 44 | The logic for whether or not a function returns is simply looking for 'return' in that function 45 | Functions not declared in the initial class/object definitions (e.g. attached later) are mostly not handled 46 | Dynamically generated and lambda functions are mostly not handled 47 | In python, functions inherited from a parent class are not handled 48 | In python, import ... as ... is not handled correctly 49 | In javascript, prototypes will result in unpredictable results 50 | And many more 51 | 52 | Basically, code2flow may not diagram your sourcecode exactly as you might expect it to 53 | Feedback / Bugs / Contact 54 | Please do email! 55 | scottmrogowski@gmail.com 56 | How to contribute 57 | 58 | 59 | You can contribute code! The project is open source and is new so any reasonably useful feature would probably be helpful and accepted. New languages are especially appreciated! 60 | 61 | 62 | You can spread the word! A simple way to help is to share this project with others. If you have a blog, mention code2flow! Linking from relevant questions on StackOverflow or other programming forums also helps quite a bit. I would do it myself but it is unfortunately against the community guidelines. The more exposure this project gets, the more I can devote my time to building it! 63 | 64 | 65 | Feature / Language Requests 66 | There is a lot in the pipeline already but email me! Those requests which keep coming up repeatedly will get priority. 67 | To get the feature you want more quickly there are two options: 68 | A. The project is open source so it is easy to contribute. 69 | B. I am available for hire on contract and will happily build your request or just do headstands for you all day for the correct amount of money. For more about me, visit http://scottrogowski.com/about 70 | -------------------------------------------------------------------------------- /files/cola.txt: -------------------------------------------------------------------------------- 1 | Cola: high-level distributed crawling framework 2 | 3 | 4 | 5 | Overview 6 | Cola is a high-level distributed crawling framework, 7 | used to crawl pages and extract structured data from websites. 8 | It provides simple and fast yet flexible way to achieve your data acquisition objective. 9 | Users only need to write one piece of code which can run under both local and distributed mode. 10 | 11 | Requirements 12 | 13 | Python2.7 (Python3+ will be supported later) 14 | Work on Linux, Windows and Mac OSX 15 | 16 | 17 | Install 18 | The quick way: 19 | pip install cola 20 | 21 | Or, download source code, then run: 22 | python setup.py install 23 | 24 | 25 | Write applications 26 | Documents will update soon, now just refer to the 27 | wiki or 28 | weibo application. 29 | 30 | Run applications 31 | For the wiki or weibo app, please ensure the installation of dependencies, weibo as an example: 32 | pip install -r /path/to/cola/app/weibo/requirements.txt 33 | 34 | 35 | Local mode 36 | In order to let your application support local mode, just add code to the entrance as below. 37 | from cola.context import Context 38 | ctx = Context( local_mode = True ) 39 | ctx.run_job(os.path.dirname(os.path.abspath( __file__ ))) 40 | Then run the application: 41 | python __init__.py 42 | 43 | Stop the local job by CTRL+C . 44 | 45 | Distributed mode 46 | Start master: 47 | coca master -s [ip:port] 48 | 49 | Start one or more workers: 50 | coca worker -s -m [ip:port] 51 | 52 | Then run the application(weibo as an example): 53 | coca job -u /path/to/cola/app/weibo -r 54 | 55 | 56 | Coca command 57 | Coca is a convenient command-line tool for the whole cola environment. 58 | 59 | master 60 | Kill master to stop the whole cluster: 61 | coca master -k 62 | 63 | 64 | job 65 | List all jobs: 66 | coca job -m [ip:port] -l 67 | 68 | Example as: 69 | list jobs at master: 10.211.55.2:11103 70 | ====> job id: 8ZcGfAqHmzc, job description: sina weibo crawler, status: stopped 71 | 72 | You can run a job which shown in the list above: 73 | coca job -r 8ZcGfAqHmzc 74 | 75 | Actually, you don't have to input the complete job name: 76 | coca job -r 8Z 77 | 78 | Part of the job name is fine if there's no conflict. 79 | You can know the status of a running job by: 80 | coca job -t 8Z 81 | 82 | The status like counters during running and so on will be output 83 | to the terminal. 84 | You can kill a job by the kill command: 85 | coca job -k 8Z 86 | 87 | 88 | startproject 89 | You can create an application by this command: 90 | coca startproject colatest 91 | 92 | Remember, help command will always be helpful: 93 | coca -h 94 | 95 | or 96 | coca master -h 97 | 98 | 99 | Notes 100 | Chinese docs(wiki) . 101 | 102 | Donation 103 | Cola is a non-profit project and by now maintained by myself, 104 | thus any donation will be encouragement for the further improvements of cola project. 105 | Alipay & Paypal: qinxuye@gmail.com 106 | -------------------------------------------------------------------------------- /files/cornice.txt: -------------------------------------------------------------------------------- 1 | Cornice 2 | 3 | 4 | Cornice provides helpers to build & document Web Services with Pyramid. 5 | The full documentation is available at: https://cornice.readthedocs.io 6 | -------------------------------------------------------------------------------- /files/dh-virtualenv.txt: -------------------------------------------------------------------------------- 1 | dh-virtualenv 2 | 3 | 4 | Contents 5 | 6 | Overview 7 | Using dh-virtualenv 8 | How does it work? 9 | Running tests 10 | Building the documentation locally 11 | Code of conduct 12 | License 13 | 14 | Overview 15 | dh-virtualenv is a tool that aims to combine Debian packaging with 16 | self-contained virtualenv based Python deployments. 17 | The idea behind dh-virtualenv is to be able to combine the power of 18 | Debian packaging with the sandboxed nature of virtualenvs. In addition 19 | to this, using virtualenv enables installing requirements via 20 | Python Package Index instead of relying on 21 | the operating system provided Python packages. The only limiting 22 | factor is that you have to run the same Python interpreter as the 23 | operating system. 24 | For complete online documentation including installation instructions, see 25 | the online documentation . 26 | Using dh-virtualenv 27 | Using dh-virtualenv is fairly straightforward. First, you need to 28 | define the requirements of your package in requirements.txt file, in 29 | the format defined by pip . 30 | To build a package using dh-virtualenv, you need to add dh-virtualenv 31 | in to your build dependencies and write following debian/rules file: 32 | %: 33 | dh $@ --with python-virtualenv 34 | 35 | Note that you might need to provide 36 | additional build dependencies too, if your requirements require them. 37 | Also, you are able to define the root path for your source directory using 38 | --sourcedirectory or -D argument: 39 | %: 40 | dh $@ --with python-virtualenv --sourcedirectory=root/srv/application 41 | 42 | NOTE: Be aware that the configuration in debian/rules expects tabs instead of spaces! 43 | Once the package is built, you have a virtualenv contained in a Debian 44 | package and upon installation it gets placed, by default, under 45 | /opt/venvs/ . 46 | For more information and usage documentation, check the accompanying 47 | documentation in the doc folder, also available at 48 | Read the Docs . 49 | How does it work? 50 | To do the packaging, dh-virtualenv extends debhelper's sequence by 51 | inserting a new dh_virtualenv command, which effectively replaces 52 | the following commands in the original sequence: 53 | 54 | dh_auto_clean 55 | dh_auto_build 56 | dh_auto_test 57 | dh_auto_install 58 | dh_python2 59 | dh_pycentral 60 | dh_pysupport 61 | 62 | In the new sequence, dh_virtualenv is inserted right before dh_installinit . 63 | Running tests 64 | $ nosetests ./test/test_deployment.py 65 | 66 | Building the documentation locally 67 | If you execute the following commands in your clone of the repository, 68 | a virtualenv with all necessary tools is created. 69 | invoke docs then builds the documentation into doc/_build/ . 70 | command . .env --yes --develop 71 | invoke docs 72 | To start a watchdog that auto-rebuilds documentation and reloads the opened browser tab on any change, 73 | call invoke docs -w -b (stop the watchdog using the -k option). 74 | Code of conduct 75 | This project adheres to the Open Code of Conduct . 76 | By participating, you are expected to honor this code. 77 | License 78 | Copyright (c) 2013-2017 Spotify AB 79 | dh-virtualenv is licensed under GPL v2 or later. Full license is 80 | available in the LICENSE file. 81 | -------------------------------------------------------------------------------- /files/diesel.txt: -------------------------------------------------------------------------------- 1 | 2 | Why Diesel? 3 | You should write your next network application using diesel . 4 | Thanks to Python the syntax is clean and the development pace is rapid. Thanks 5 | to non-blocking I/O it's fast and scalable. Thanks to greenlets there's 6 | unwind(to(callbacks(no))). Thanks to nose it's trivial to test. Thanks to 7 | Flask you don't need to write a new web framework using it. 8 | It provides a clean API for writing network clients and servers. TCP and UDP 9 | supported. It bundles battle-tested clients for HTTP, DNS, Redis, Riak and 10 | MongoDB. It makes writing network applications fun. 11 | Read the documentation, browse the API and join the community in #diesel on 12 | freenode. 13 | 14 | Prerequisites 15 | You'll need the python-dev package as well as libffi-dev, or your 16 | platform's equivalents. 17 | 18 | Installation 19 | Diesel is an active project. Your best bet to stay up with the latest at this 20 | point is to clone from github.: 21 | git clone git://github.com/jamwt/diesel.git 22 | 23 | Once you have a clone, cd to the diesel directory and install it.: 24 | pip install . 25 | 26 | or: 27 | python setup.py install 28 | 29 | or: 30 | python setup.py develop 31 | 32 | 33 | For More Information 34 | Documentation and more can be found on the diesel website. 35 | 36 | Python 3? 37 | Not yet. Here are dependencies blocking the transition: 38 | 39 | 40 | -------------------------------------------------------------------------------- /files/django-activity-stream.txt: -------------------------------------------------------------------------------- 1 | Django Activity Stream 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Django Activity Stream is a way of creating activities generated by the actions on your site. 12 | It is designed for generating and displaying streams of interesting actions and can handle following and unfollowing of different activity sources. 13 | For example, it could be used to emulate the Github dashboard in which a user sees changes to projects they are watching and the actions of users they are following. 14 | Action events are categorized by four main components. 15 | 16 | 17 | Actor . The object that performed the activity. 18 | Verb . The verb phrase that identifies the action of the activity. 19 | Action Object . (Optional) The object linked to the action itself. 20 | Target . (Optional) The object to which the activity was performed. 21 | 22 | 23 | Actor , Action Object and Target are GenericForeignKeys to any arbitrary Django object and so can represent any Django model in your project. 24 | An action is a description of an action that was performed ( Verb ) at some instant in time by some Actor on some optional Target that results in an Action Object getting created/updated/deleted. 25 | For example: justquick (actor) closed (verb) issue 2 (object) on django-activity-stream (target) 12 hours ago 26 | Nomenclature of this specification is based on the Activity Streams Spec: http://activitystrea.ms/ 27 | For complete documentation see Django Activity Stream Documentation 28 | -------------------------------------------------------------------------------- /files/django-allauth.txt: -------------------------------------------------------------------------------- 1 | Welcome to django-allauth! 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | Integrated set of Django applications addressing authentication, 14 | registration, account management as well as 3rd party (social) account 15 | authentication. 16 | 17 | Home page 18 | http://www.intenct.nl/projects/django-allauth/ 19 | Source code 20 | http://github.com/pennersr/django-allauth 21 | Mailinglist 22 | http://groups.google.com/group/django-allauth 23 | Documentation 24 | https://django-allauth.readthedocs.io/en/latest/ 25 | Stack Overflow 26 | http://stackoverflow.com/questions/tagged/django-allauth 27 | 28 | 29 | Rationale 30 | Most existing Django apps that address the problem of social 31 | authentication focus on just that. You typically need to integrate 32 | another app in order to support authentication via a local 33 | account. 34 | This approach separates the worlds of local and social 35 | authentication. However, there are common scenarios to be dealt with 36 | in both worlds. For example, an e-mail address passed along by an 37 | OpenID provider is not guaranteed to be verified. So, before hooking 38 | an OpenID account up to a local account the e-mail address must be 39 | verified. So, e-mail verification needs to be present in both worlds. 40 | Integrating both worlds is quite a tedious process. It is definitely 41 | not a matter of simply adding one social authentication app, and one 42 | local account registration app to your INSTALLED_APPS list. 43 | This is the reason this project got started -- to offer a fully 44 | integrated authentication app that allows for both local and social 45 | authentication, with flows that just work. 46 | 47 | Commercial Support 48 | This project is sponsored by IntenCT . If you require assistance on 49 | your project(s), please contact us: info@intenct.nl . 50 | 51 | Cross-Selling 52 | If you like this, you may also like: 53 | 54 | django-trackstats: https://github.com/pennersr/django-trackstats 55 | netwell: https://github.com/pennersr/netwell 56 | 57 | -------------------------------------------------------------------------------- /files/django-crispy-forms.txt: -------------------------------------------------------------------------------- 1 | django-crispy-forms 2 | 3 | 4 | The best way to have Django DRY forms. Build programmatic reusable layouts out of components, having full control of the rendered HTML without writing HTML in templates. All this without breaking the standard way of doing things in Django, so it plays nice with any other form application. 5 | django-crispy-forms supports Python 2.7/Python 3.2+ and Django 1.8+ 6 | The application mainly provides: 7 | 8 | A filter named |crispy that will render elegant div based forms. Think of it as the built-in methods: as_table , as_ul and as_p . You cannot tune up the output, but it is easy to start using it. 9 | A tag named {% crispy %} that will render a form based on your configuration and specific layout setup. This gives you amazing power without much hassle, helping you save tons of time. 10 | 11 | Django-crispy-forms supports several frontend frameworks, such as Twitter Bootstrap (versions 2 and 3), Uni-form and Foundation. You can also easily adapt your custom company's one, creating your own, see the docs for more information. You can easily switch among them using CRISPY_TEMPLATE_PACK setting variable. 12 | 13 | Authors 14 | django-crispy-forms is the new django-uni-form. django-uni-form was an application created by Daniel Greenfeld that I led since version 0.8.0. The name change tries to better explain the purpose of the application, which changed in a significant way since its birth. 15 | If you are upgrading from django-uni-form, we have instructions for helping you. 16 | 17 | Lead developer: Miguel Araujo 18 | 19 | 20 | Example 21 | This is a teaser of what you can do with latest django-crispy-forms. Find here the gist for generating this form: 22 | 23 | 24 | Documentation 25 | For extensive documentation see the docs folder or read it on readthedocs 26 | 27 | Special thanks 28 | 29 | To Daniel Greenfeld ( @pydanny ) for his support, time and the opportunity given to me to do this. 30 | The name of the project was suggested by the fantastic Audrey Roy ( @audreyr ) 31 | To Kenneth Love ( @kennethlove ) for creating django-uni-form-contrib from which bootstrap template pack was started. 32 | 33 | -------------------------------------------------------------------------------- /files/django-debug-toolbar.txt: -------------------------------------------------------------------------------- 1 | Django Debug Toolbar 2 | 3 | 4 | 5 | 6 | 7 | 8 | The Django Debug Toolbar is a configurable set of panels that display various 9 | debug information about the current request/response and when clicked, display 10 | more details about the panel's content. 11 | Here's a screenshot of the toolbar in action: 12 | 13 | In addition to the built-in panels, a number of third-party panels are 14 | contributed by the community. 15 | The current version of the Debug Toolbar is 1.8. It works on Django ≥ 1.8. 16 | Documentation, including installation and configuration instructions, is 17 | available at https://django-debug-toolbar.readthedocs.io/ . 18 | The Django Debug Toolbar is released under the BSD license, like Django 19 | itself. If you like it, please consider contributing! 20 | The Django Debug Toolbar was originally created by Rob Hudson < rob@cogit8.org > 21 | in August 2008 and was further developed by many contributors . 22 | -------------------------------------------------------------------------------- /files/django-devserver.txt: -------------------------------------------------------------------------------- 1 | 2 | About 3 | A drop in replacement for Django's built-in runserver command. Features include: 4 | 5 | An extendable interface for handling things such as real-time logging. 6 | Integration with the werkzeug interactive debugger. 7 | Threaded (default) and multi-process development servers. 8 | Ability to specify a WSGI application as your target environment. 9 | 10 | 11 | Note 12 | django-devserver works on Django 1.3 and newer 13 | 14 | 15 | Installation 16 | To install the latest stable version: 17 | pip install git+git://github.com/dcramer/django-devserver#egg=django-devserver 18 | 19 | django-devserver has some optional dependancies, which we highly recommend installing. 20 | 21 | pip install sqlparse -- pretty SQL formatting 22 | pip install werkzeug -- interactive debugger 23 | pip install guppy -- tracks memory usage (required for MemoryUseModule) 24 | pip install line_profiler -- does line-by-line profiling (required for LineProfilerModule) 25 | 26 | You will need to include devserver in your INSTALLED_APPS : 27 | INSTALLED_APPS = ( 28 | ... 29 | 'devserver', 30 | ) 31 | 32 | If you're using django.contrib.staticfiles or any other apps with management 33 | command runserver , make sure to put devserver above any of them (or below , 34 | for Django<1.7 ). Otherwise devserver will log an error, but it will fail to work 35 | properly. 36 | 37 | Usage 38 | Once installed, using the new runserver replacement is easy. You must specify verbosity of 0 to disable real-time log output: 39 | python manage.py runserver 40 | 41 | Note: This will force settings.DEBUG to True . 42 | By default, devserver would bind itself to 127.0.0.1:8000. To change this default, DEVSERVER_DEFAULT_ADDR and DEVSERVER_DEFAULT_PORT settings are available. 43 | 44 | Additional CLI Options 45 | 46 | 47 | 48 | --werkzeug 49 | Tells Django to use the Werkzeug interactive debugger, instead of it's own. 50 | 51 | --forked 52 | Use a forking (multi-process) web server instead of threaded. 53 | 54 | --dozer 55 | Enable the dozer memory debugging middleware (at /_dozer) 56 | 57 | --wsgi-app 58 | Load the specified WSGI app as the server endpoint. 59 | 60 | 61 | Please see python manage.py runserver --help for more information additional options. 62 | Note: You may also use devserver's middleware outside of the management command: 63 | MIDDLEWARE_CLASSES = ( 64 | 'devserver.middleware.DevServerMiddleware', 65 | ) 66 | 67 | 68 | Configuration 69 | The following options may be configured via your settings.py : 70 | 71 | DEVSERVER_ARGS = [] 72 | Additional command line arguments to pass to the runserver command (as defaults). 73 | DEVSERVER_DEFAULT_ADDR = '127.0.0.1' 74 | The default address to bind to. 75 | DEVSERVER_DEFAULT_PORT = '8000' 76 | The default port to bind to. 77 | DEVSERVER_WSGI_MIDDLEWARE 78 | A list of additional WSGI middleware to apply to the runserver command. 79 | DEVSERVER_MODULES = [] 80 | A list of devserver modules to load. 81 | DEVSERVER_IGNORED_PREFIXES = ['/media', '/uploads'] 82 | A list of prefixes to surpress and skip process on. By default, ADMIN_MEDIA_PREFIX , MEDIA_URL and STATIC_URL (for Django >= 1.3) will be ignored (assuming MEDIA_URL and STATIC_URL is relative) 83 | 84 | 85 | Modules 86 | django-devserver includes several modules by default, but is also extendable by 3rd party modules. This is done via the DEVSERVER_MODULES setting: 87 | DEVSERVER_MODULES = ( 88 | 'devserver.modules.sql.SQLRealTimeModule', 89 | 'devserver.modules.sql.SQLSummaryModule', 90 | 'devserver.modules.profile.ProfileSummaryModule', 91 | 92 | # Modules not enabled by default 93 | 'devserver.modules.ajax.AjaxDumpModule', 94 | 'devserver.modules.profile.MemoryUseModule', 95 | 'devserver.modules.cache.CacheSummaryModule', 96 | 'devserver.modules.profile.LineProfilerModule', 97 | ) 98 | 99 | 100 | devserver.modules.sql.SQLRealTimeModule 101 | Outputs queries as they happen to the terminal, including time taken. 102 | Disable SQL query truncation (used in SQLRealTimeModule) with the DEVSERVER_TRUNCATE_SQL setting: 103 | DEVSERVER_TRUNCATE_SQL = False 104 | 105 | Filter SQL queries with the DEVSERVER_FILTER_SQL setting: 106 | DEVSERVER_FILTER_SQL = ( 107 | re.compile('djkombu_\w+'), # Filter all queries related to Celery 108 | ) 109 | 110 | 111 | devserver.modules.sql.SQLSummaryModule 112 | Outputs a summary of your SQL usage. 113 | 114 | devserver.modules.profile.ProfileSummaryModule 115 | Outputs a summary of the request performance. 116 | 117 | devserver.modules.profile.MemoryUseModule 118 | Outputs a notice when memory use is increased (at the end of a request cycle). 119 | 120 | devserver.modules.profile.LineProfilerModule 121 | Profiles view methods on a line by line basis. There are 2 ways to profile your view functions, by setting setting.DEVSERVER_AUTO_PROFILE = True or by decorating the view functions you want profiled with devserver.modules.profile.devserver_profile. The decoration takes an optional argument follow which is a sequence of functions that are called by your view function that you would also like profiled. 122 | An example of a decorated function: 123 | @devserver_profile(follow=[foo, bar]) 124 | def home(request): 125 | result['foo'] = foo() 126 | result['bar'] = bar() 127 | 128 | When using the decorator, we recommend that rather than import the decoration directly from devserver that you have code somewhere in your project like: 129 | try: 130 | if 'devserver' not in settings.INSTALLED_APPS: 131 | raise ImportError 132 | from devserver.modules.profile import devserver_profile 133 | except ImportError: 134 | from functools import wraps 135 | class devserver_profile(object): 136 | def __init__(self, *args, **kwargs): 137 | pass 138 | def __call__(self, func): 139 | def nothing(*args, **kwargs): 140 | return func(*args, **kwargs) 141 | return wraps(func)(nothing) 142 | 143 | By importing the decoration using this method, devserver_profile will be a pass through decoration if you aren't using devserver (eg in production) 144 | 145 | devserver.modules.cache.CacheSummaryModule 146 | Outputs a summary of your cache calls at the end of the request. 147 | 148 | devserver.modules.ajax.AjaxDumpModule 149 | Outputs the content of any AJAX responses 150 | Change the maximum response length to dump with the DEVSERVER_AJAX_CONTENT_LENGTH setting: 151 | DEVSERVER_AJAX_CONTENT_LENGTH = 300 152 | 153 | 154 | devserver.modules.request.SessionInfoModule 155 | Outputs information about the current session and user. 156 | 157 | Building Modules 158 | Building modules in devserver is quite simple. In fact, it resembles the middleware API almost identically. 159 | Let's take a sample module, which simple tells us when a request has started, and when it has finished: 160 | from devserver.modules import DevServerModule 161 | 162 | class UselessModule(DevServerModule): 163 | logger_name = 'useless' 164 | 165 | def process_request(self, request): 166 | self.logger.info('Request started') 167 | 168 | def process_response(self, request, response): 169 | self.logger.info('Request ended') 170 | 171 | There are additional arguments which may be sent to logger methods, such as duration : 172 | # duration is in milliseconds 173 | self.logger.info('message', duration=13.134) 174 | 175 | -------------------------------------------------------------------------------- /files/django-elastic-transcoder.txt: -------------------------------------------------------------------------------- 1 | Django Elastic Transcoder 2 | 3 | django-elastic-transcoder is an Django app, let you integrate AWS Elastic Transcoder in Django easily. 4 | What is provided in this package? 5 | 6 | Transcoder class 7 | URL endpoint for receive SNS notification 8 | Signals for PROGRESS, ERROR, COMPLETE 9 | EncodeJob model 10 | 11 | 12 | Workflow 13 | 14 | 15 | Install 16 | First, install dj_elastictranscode with pip 17 | $ pip install django-elastic-transcoder 18 | Then, add dj_elastictranscoder to INSTALLED_APPS 19 | INSTALLED_APPS = ( 20 | ... 21 | ' dj_elastictranscoder ' , 22 | ... 23 | ) 24 | Bind urls.py 25 | urlpatterns = patterns( ' ' , 26 | ... 27 | url( r ' ^ dj_elastictranscoder/ ' , include( ' dj_elastictranscoder.urls ' )), 28 | ... 29 | ) 30 | Migrate 31 | $ ./manage.py migrate 32 | 33 | Setting up AWS Elastic Transcoder 34 | 35 | Create a new Pipeline in AWS Elastic Transcoder. 36 | Hookup every Notification. 37 | Subscribe SNS Notification through HTTP 38 | You are ready to encode! 39 | 40 | 41 | Required Django settings 42 | Please settings up variables below to make this app works. 43 | AWS_ACCESS_KEY_ID = < your aws access key id > 44 | AWS_SECRET_ACCESS_KEY = < your aws secret access key > 45 | AWS_REGION = < aws region > 46 | 47 | Usage 48 | For instance, encode an mp3 49 | from dj_elastictranscoder.transcoder import Transcoder 50 | 51 | input = { 52 | ' Key ' : ' path/to/input.mp3 ' , 53 | } 54 | 55 | outputs = [{ 56 | ' Key ' : ' path/to/output.mp3 ' , 57 | ' PresetId ' : ' 1351620000001-300040 ' # for example: 128k mp3 audio preset 58 | }] 59 | 60 | pipeline_id = ' ' 61 | 62 | transcoder = Transcoder(pipeline_id) 63 | transcoder.encode( input , outputs) 64 | 65 | # your can also create a EncodeJob for object automatically 66 | transcoder.create_job_for_object(obj) 67 | 68 | 69 | # Transcoder can also work standalone without Django 70 | # just pass region and required aws key/secret to Transcoder, when initiate 71 | 72 | transcoder = Transcoder(pipeline_id, AWS_REGION , AWS_ACCESS_KEY_ID , AWS_SECRET_ACCESS_KEY ) 73 | 74 | Setting Up AWS SNS endpoint 75 | AWS Elastic Transcoder can send various SNS notification to notify your application, like PROGRESS , ERROR , WARNING and COMPLETE 76 | So this package provide a endpoint to receieve these notifications, for you to update transcode progress. without checking by your self. 77 | Go to SNS section in AWS WebConsole to choose topic and subscribe with the url below. 78 | http:///dj_elastictranscoder/endpoint/ 79 | Before notification get started to work, you have to activate SNS subscription, you will receive email with activation link. 80 | After subscribe is done, you will receive SNS notification. 81 | 82 | Signals 83 | This package provide various signals for you to get notification, and do more things in your application. you can check the signals usage in tests.py for more usage example. 84 | 85 | transcode_onprogress 86 | transcode_onerror 87 | transcode_oncomplete 88 | 89 | -------------------------------------------------------------------------------- /files/xxx/boto3.txt: -------------------------------------------------------------------------------- 1 | Boto 3 - The AWS SDK for Python 2 | 3 | 4 | Boto3 is the Amazon Web Services (AWS) Software Development Kit (SDK) for 5 | Python, which allows Python developers to write software that makes use 6 | of services like Amazon S3 and Amazon EC2. You can find the latest, most 7 | up to date, documentation at Read the Docs , including a list of 8 | services that are supported. To see only those features which have been 9 | released, check out the stable docs . 10 | 11 | Quick Start 12 | First, install the library and set a default region: 13 | $ pip install boto3 14 | Next, set up credentials (in e.g. ~/.aws/credentials ): 15 | [default] 16 | aws_access_key_id = YOUR_KEY 17 | aws_secret_access_key = YOUR_SECRET 18 | Then, set up a default region (in e.g. ~/.aws/config ): 19 | [default] 20 | region =us-east-1 21 | Then, from a Python interpreter: 22 | >> > import boto3 23 | >> > s3 = boto3.resource( ' s3 ' ) 24 | >> > for bucket in s3.buckets.all(): 25 | print (bucket.name) 26 | 27 | Development 28 | 29 | Getting Started 30 | Assuming that you have Python and virtualenv installed, set up your 31 | environment and install the required dependencies like this instead of 32 | the pip install boto3 defined above: 33 | $ git clone https://github.com/boto/boto3.git 34 | $ cd boto3 35 | $ virtualenv venv 36 | ... 37 | $ . venv/bin/activate 38 | $ pip install -r requirements.txt 39 | $ pip install -e . 40 | 41 | Running Tests 42 | You can run tests in all supported Python versions using tox . By default, 43 | it will run all of the unit and functional tests, but you can also specify your own 44 | nosetests options. Note that this requires that you have all supported 45 | versions of Python installed, otherwise you must pass -e or run the 46 | nosetests command directly: 47 | $ tox 48 | $ tox -- unit/test_session.py 49 | $ tox -e py26,py33 -- integration/ 50 | You can also run individual tests with your default Python version: 51 | $ nosetests tests/unit 52 | 53 | Generating Documentation 54 | Sphinx is used for documentation. You can generate HTML locally with the 55 | following: 56 | $ pip install -r requirements-docs.txt 57 | $ cd docs 58 | $ make html 59 | 60 | Getting Help 61 | We use GitHub issues for tracking bugs and feature requests and have limited 62 | bandwidth to address them. Please use these community resources for getting 63 | help: 64 | 65 | Ask a question on Stack Overflow and tag it with boto3 66 | Come join the AWS Python community chat on gitter 67 | Open a support ticket with AWS Support 68 | If it turns out that you may have found a bug, please open an issue 69 | 70 | -------------------------------------------------------------------------------- /files/xxx/bpython.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | bpython: A fancy curses interface to the Python interactive interpreter 4 | bpython is a lightweight Python interpreter that adds several features common 5 | to IDEs. These features include syntax highlighting , expected parameter 6 | list , auto-indentation , and autocompletion . (See below for example 7 | usage). 8 | 9 | bpython does not aim to be a complete IDE - the focus is on implementing a 10 | few ideas in a practical, useful, and lightweight manner. 11 | bpython is a great replacement to any occasion where you would normally use the 12 | vanilla Python interpreter - testing out solutions to people's problems on IRC, 13 | quickly testing a method of doing something without creating a temporary file, 14 | etc.. 15 | You can find more about bpython - including full documentation - at our 16 | homepage . 17 | 18 | Installation & Basic Usage 19 | If you have pip installed, you can simply run: 20 | $ pip install bpython 21 | Start bpython by typing bpython in your terminal. You can exit bpython by 22 | using the exit() command or by pressing control-D like regular interactive 23 | Python. 24 | 25 | Features & Examples 26 | 27 | Readline-like autocomplete, with suggestions displayed as you type. 28 | In-line syntax highlighting. This uses Pygments for lexing the code as you 29 | type, and colours appropriately. 30 | Expected parameter list. As in a lot of modern IDEs, bpython will attempt to 31 | display a list of parameters for any function you call. The inspect module is 32 | tried first, which works with any Python function, and then pydoc if that 33 | fails. 34 | Rewind. This isn't called "Undo" because it would be misleading, but "Rewind" 35 | is probably as bad. The idea is that the code entered is kept in memory and 36 | when the Rewind function is called, the last line is popped and the entire 37 | session is re-evaluated. Use to rewind. 38 | Edit the current line or your entire session in an editor. F7 opens the current 39 | session in a text editor, and if modifications are made, the session is rerun 40 | with these changes. 41 | Pastebin code/write to file. Use the key to upload the screen's contents 42 | to pastebin, with a URL returned. 43 | Reload imported Python modules. Use to clear sys.modules and rerun your 44 | session to test changes to code in a module you're working on. 45 | 46 | 47 | Configuration 48 | See the sample-config file for a list of available options. You should save 49 | your config file as ~/.config/bpython/config (i.e. 50 | $XDG_CONFIG_HOME/bpython/config ) or specify at the command line: 51 | bpython --config /path/to/bpython/config 52 | 53 | 54 | Dependencies 55 | 56 | Pygments 57 | requests 58 | curtsies >= 0.1.18 59 | greenlet 60 | six >= 1.5 61 | Sphinx != 1.1.2 (optional, for the documentation) 62 | mock (optional, for the testsuite) 63 | babel (optional, for internationalization) 64 | watchdog (optional, for monitoring imported modules for changes) 65 | jedi (optional, for experimental multiline completion) 66 | 67 | 68 | Python 2 before 2.7.7 69 | If you are using Python 2 before 2.7.7, the following dependency is also 70 | required: 71 | 72 | requests[security] 73 | 74 | 75 | cffi 76 | If you have problems installing cffi, which is needed by OpenSSL, please take a 77 | look at cffi docs . 78 | 79 | bpython-urwid 80 | bpython-urwid requires the following additional packages: 81 | 82 | urwid 83 | 84 | 85 | Known Bugs 86 | For known bugs please see bpython's known issues and FAQ page. 87 | 88 | Contact & Contributing 89 | I hope you find it useful and please feel free to submit any bugs/patches 90 | suggestions to Robert or place them on the GitHub 91 | issues tracker . 92 | For any other ways of communicating with bpython users and devs you can find us 93 | at the community page on the project homepage , or in the community . 94 | Hope to see you there! 95 | 96 | CLI Windows Support 97 | 98 | Dependencies 99 | Curses Use the appropriate version compiled by Christoph Gohlke. 100 | pyreadline Use the version in the cheeseshop. 101 | 102 | Recommended 103 | Obtain the less program from GnuUtils. This makes the pager work as intended. 104 | It can be obtained from cygwin or GnuWin32 or msys 105 | 106 | Current version is tested with 107 | 108 | Curses 2.2 109 | pyreadline 1.7 110 | 111 | 112 | Curses Notes 113 | The curses used has a bug where the colours are displayed incorrectly: 114 | 115 | red is swapped with blue 116 | cyan is swapped with yellow 117 | 118 | To correct this I have provided a windows.theme file. 119 | This curses implementation has 16 colors (dark and light versions of the 120 | colours) 121 | 122 | Alternatives 123 | ptpython 124 | IPython 125 | Feel free to get in touch if you know of any other alternatives that people 126 | may be interested to try. 127 | -------------------------------------------------------------------------------- /files/xxx/butterdb.txt: -------------------------------------------------------------------------------- 1 | 2 | butterdb 3 | 4 | Master: 5 | Develop: 6 | 7 | Documentation | butterdb on PyPi 8 | butterdb is a library to help you work with Google Spreadsheet data. It lets you model your data as Python objects, to be easily manipulated or created. 9 | 10 | How do I use it? 11 | 12 | import butterdb 13 | import json 14 | 15 | # For getting OAuth Credential JSON file see http://gspread.readthedocs.org/en/latest/oauth2.html 16 | # Ensure that the client_email has been granted privileges to any workbooks you wish to access. 17 | 18 | with open('SomeGoogleProject-2a31d827b2a9.json') as credentials_file: 19 | json_key = json.load(credentials_file) 20 | 21 | client_email = json_key['client_email'] 22 | private_key = str(json_key['private_key']).encode('utf-8') 23 | 24 | database = butterdb.Database(name="MyDatabaseSheet", client_email=client_email, private_key=private_key) 25 | 26 | @butterdb.register(database) 27 | class User(butterdb.Model): 28 | def __init__(self, name, password): 29 | self.name = self.field(name) 30 | self.password = self.field(password) 31 | 32 | users = User.get_instances() 33 | 34 | marianne = users[1] 35 | 36 | print(marianne.password) # rainbow_trout 37 | 38 | marianne.password = "hunter2" 39 | marianne.commit() 40 | 41 | 42 | How do I make instances? 43 | bob = User("bob", "BestPassword!") 44 | bob.commit() 45 | 46 | 47 | Where do I get it? 48 | pip install butterdb 49 | 50 | Simple as that? 51 | Yep! butterdb is a simple interface around gspread . Just .commit() your objects when you want to update the spreadsheet! 52 | 53 | How do I run the tests? 54 | nosetests 55 | 56 | What works? 57 | 58 | Store data in Google Spreadsheets (the cloud!!!) 59 | Models from classes 60 | Fields as attributes. decimals, ints and strings only (as far as I know) 61 | Commits 62 | Mocked unit tests, mock database 63 | Arbitrary cell execution with =blah() (free stored procedures?) 64 | Auto backup/bad patch control 65 | 66 | 67 | What's missing? 68 | 69 | Spreadsheets must exist before connecting 70 | References 71 | Collections 72 | Customizable fields 73 | Customizable table size (arbitrarily hardcoded) 74 | 75 | 76 | Feedback 77 | Comments, concerns, issues and pull requests welcomed. Reddit /u/Widdershiny or email me at ncwjohnstone@gmail.com . 78 | 79 | License 80 | MIT License. See LICENSE file for full text. 81 | -------------------------------------------------------------------------------- /files/xxx/cpython.txt: -------------------------------------------------------------------------------- 1 | This is Python version 3.7.0 alpha 1 2 | 3 | 4 | 5 | 6 | Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 7 | 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation. All rights 8 | reserved. 9 | See the end of this file for further copyright and license information. 10 | 11 | Contents 12 | 13 | General Information 14 | Contributing to CPython 15 | Using Python 16 | Build Instructions 17 | Profile Guided Optimization 18 | Link Time Optimization 19 | 20 | 21 | What's New 22 | Documentation 23 | Converting From Python 2.x to 3.x 24 | Testing 25 | Installing multiple versions 26 | Issue Tracker and Mailing List 27 | Proposals for enhancement 28 | Release Schedule 29 | Copyright and License Information 30 | 31 | 32 | 33 | General Information 34 | 35 | Website: https://www.python.org 36 | Source code: https://github.com/python/cpython 37 | Issue tracker: https://bugs.python.org 38 | Documentation: https://docs.python.org 39 | Developer's Guide: https://docs.python.org/devguide/ 40 | 41 | 42 | Contributing to CPython 43 | For more complete instructions on contributing to CPython development, 44 | see the Developer Guide . 45 | 46 | Using Python 47 | Installable Python kits, and information about using Python, are available at 48 | python.org . 49 | 50 | Build Instructions 51 | On Unix, Linux, BSD, macOS, and Cygwin: 52 | ./configure 53 | make 54 | make test 55 | sudo make install 56 | 57 | This will install Python as python3. 58 | You can pass many options to the configure script; run ./configure --help 59 | to find out more. On macOS and Cygwin, the executable is called python.exe ; 60 | elsewhere it's just python . 61 | On macOS, if you have configured Python with --enable-framework , you 62 | should use make frameworkinstall to do the installation. Note that this 63 | installs the Python executable in a place that is not normally on your PATH, 64 | you may want to set up a symlink in /usr/local/bin . 65 | On Windows, see PCbuild/readme.txt . 66 | If you wish, you can create a subdirectory and invoke configure from there. 67 | For example: 68 | mkdir debug 69 | cd debug 70 | ../configure --with-pydebug 71 | make 72 | make test 73 | 74 | (This will fail if you also built at the top-level directory. You should do 75 | a make clean at the toplevel first.) 76 | To get an optimized build of Python, configure --enable-optimizations 77 | before you run make . This sets the default make targets up to enable 78 | Profile Guided Optimization (PGO) and may be used to auto-enable Link Time 79 | Optimization (LTO) on some platforms. For more details, see the sections 80 | below. 81 | 82 | Profile Guided Optimization 83 | PGO takes advantage of recent versions of the GCC or Clang compilers. If ran, 84 | make profile-opt will do several steps. 85 | First, the entire Python directory is cleaned of temporary files that may have 86 | resulted in a previous compilation. 87 | Then, an instrumented version of the interpreter is built, using suitable 88 | compiler flags for each flavour. Note that this is just an intermediary step 89 | and the binary resulted after this step is not good for real life workloads, as 90 | it has profiling instructions embedded inside. 91 | After this instrumented version of the interpreter is built, the Makefile will 92 | automatically run a training workload. This is necessary in order to profile 93 | the interpreter execution. Note also that any output, both stdout and stderr, 94 | that may appear at this step is suppressed. 95 | Finally, the last step is to rebuild the interpreter, using the information 96 | collected in the previous one. The end result will be a Python binary that is 97 | optimized and suitable for distribution or production installation. 98 | 99 | Link Time Optimization 100 | Enabled via configure's --with-lto flag. LTO takes advantage of the 101 | ability of recent compiler toolchains to optimize across the otherwise 102 | arbitrary .o file boundary when building final executables or shared 103 | libraries for additional performance gains. 104 | 105 | What's New 106 | We have a comprehensive overview of the changes in the What's New in Python 107 | 3.7 document. For a more 108 | detailed change log, read Misc/NEWS , but a full 109 | accounting of changes can only be gleaned from the commit history . 110 | If you want to install multiple versions of Python see the section below 111 | entitled "Installing multiple versions". 112 | 113 | Documentation 114 | Documentation for Python 3.7 is online, 115 | updated daily. 116 | It can also be downloaded in many formats for faster access. The documentation 117 | is downloadable in HTML, PDF, and reStructuredText formats; the latter version 118 | is primarily for documentation authors, translators, and people with special 119 | formatting requirements. 120 | For information about building Python's documentation, refer to Doc/README.rst . 121 | 122 | Converting From Python 2.x to 3.x 123 | Significant backward incompatible changes were made for the release of Python 124 | 3.0, which may cause programs written for Python 2 to fail when run with Python 125 | 3. For more information about porting your code from Python 2 to Python 3, see 126 | the Porting HOWTO . 127 | 128 | Testing 129 | To test the interpreter, type make test in the top-level directory. The 130 | test set produces some output. You can generally ignore the messages about 131 | skipped tests due to optional features which can't be imported. If a message 132 | is printed about a failed test or a traceback or core dump is produced, 133 | something is wrong. 134 | By default, tests are prevented from overusing resources like disk space and 135 | memory. To enable these tests, run make testall . 136 | If any tests fail, you can re-run the failing test(s) in verbose mode: 137 | make test TESTOPTS="-v test_that_failed" 138 | 139 | If the failure persists and appears to be a problem with Python rather than 140 | your environment, you can file a bug report and 141 | include relevant output from that command to show the issue. 142 | 143 | Installing multiple versions 144 | On Unix and Mac systems if you intend to install multiple versions of Python 145 | using the same installation prefix ( --prefix argument to the configure 146 | script) you must take care that your primary python executable is not 147 | overwritten by the installation of a different version. All files and 148 | directories installed using make altinstall contain the major and minor 149 | version and can thus live side-by-side. make install also creates 150 | ${prefix}/bin/python3 which refers to ${prefix}/bin/pythonX.Y . If you 151 | intend to install multiple versions using the same prefix you must decide which 152 | version (if any) is your "primary" version. Install that version using make 153 | install . Install all other versions using make altinstall . 154 | For example, if you want to install Python 2.7, 3.6, and 3.7 with 3.7 being the 155 | primary version, you would execute make install in your 3.7 build directory 156 | and make altinstall in the others. 157 | 158 | Issue Tracker and Mailing List 159 | Bug reports are welcome! You can use the issue tracker to report bugs, and/or submit pull requests on 160 | GitHub . 161 | You can also follow development discussion on the python-dev mailing list . 162 | 163 | Proposals for enhancement 164 | If you have a proposal to change Python, you may want to send an email to the 165 | comp.lang.python or python-ideas mailing lists for initial feedback. A 166 | Python Enhancement Proposal (PEP) may be submitted if your idea gains ground. 167 | All current PEPs, as well as guidelines for submitting a new PEP, are listed at 168 | python.org/dev/peps/ . 169 | 170 | Release Schedule 171 | See PEP 537 for Python 3.7 release details. 172 | 173 | Copyright and License Information 174 | Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 175 | 2012, 2013, 2014, 2015, 2016 Python Software Foundation. All rights reserved. 176 | Copyright (c) 2000 BeOpen.com. All rights reserved. 177 | Copyright (c) 1995-2001 Corporation for National Research Initiatives. All 178 | rights reserved. 179 | Copyright (c) 1991-1995 Stichting Mathematisch Centrum. All rights reserved. 180 | See the file "LICENSE" for information on the history of this software, terms & 181 | conditions for usage, and a DISCLAIMER OF ALL WARRANTIES. 182 | This Python distribution contains no GNU General Public License (GPL) code, 183 | so it may be used in proprietary projects. There are interfaces to some GNU 184 | code but these are entirely optional. 185 | All trademarks referenced herein are property of their respective holders. 186 | -------------------------------------------------------------------------------- /files/xxx/dejavu.txt: -------------------------------------------------------------------------------- 1 | dejavu 2 | Audio fingerprinting and recognition algorithm implemented in Python, see the explanation here: 3 | How it works 4 | Dejavu can memorize audio by listening to it once and fingerprinting it. Then by playing a song and recording microphone input, Dejavu attempts to match the audio against the fingerprints held in the database, returning the song being played. 5 | Note that for voice recognition, Dejavu is not the right tool! Dejavu excels at recognition of exact signals with reasonable amounts of noise. 6 | Installation and Dependencies: 7 | Read INSTALLATION.md 8 | Setup 9 | First, install the above dependencies. 10 | Second, you'll need to create a MySQL database where Dejavu can store fingerprints. For example, on your local setup: 11 | $ mysql -u root -p 12 | Enter password: ********** 13 | mysql> CREATE DATABASE IF NOT EXISTS dejavu; 14 | 15 | Now you're ready to start fingerprinting your audio collection! 16 | Quickstart 17 | $ git clone https://github.com/worldveil/dejavu.git ./dejavu 18 | $ cd dejavu 19 | $ python example.py 20 | Fingerprinting 21 | Let's say we want to fingerprint all of July 2013's VA US Top 40 hits. 22 | Start by creating a Dejavu object with your configurations settings (Dejavu takes an ordinary Python dictionary for the settings). 23 | >> > from dejavu import Dejavu 24 | >> > config = { 25 | ... " database " : { 26 | ... " host " : " 127.0.0.1 " , 27 | ... " user " : " root " , 28 | ... " passwd " : < password above > , 29 | ... " db " : < name of the database you created above > , 30 | ... } 31 | ... } 32 | >> > djv = Dejavu(config) 33 | Next, give the fingerprint_directory method three arguments: 34 | 35 | input directory to look for audio files 36 | audio extensions to look for in the input directory 37 | number of processes (optional) 38 | 39 | >> > djv.fingerprint_directory( " va_us_top_40/mp3 " , [ " .mp3 " ], 3 ) 40 | For a large amount of files, this will take a while. However, Dejavu is robust enough you can kill and restart without affecting progress: Dejavu remembers which songs it fingerprinted and converted and which it didn't, and so won't repeat itself. 41 | You'll have a lot of fingerprints once it completes a large folder of mp3s: 42 | >> > print djv.db.get_num_fingerprints() 43 | 5442376 44 | Also, any subsequent calls to fingerprint_file or fingerprint_directory will fingerprint and add those songs to the database as well. It's meant to simulate a system where as new songs are released, they are fingerprinted and added to the database seemlessly without stopping the system. 45 | Configuration options 46 | The configuration object to the Dejavu constructor must be a dictionary. 47 | The following keys are mandatory: 48 | 49 | database , with a value as a dictionary with keys that the database you are using will accept. For example with MySQL, the keys must can be anything that the MySQLdb.connect() function will accept. 50 | 51 | The following keys are optional: 52 | 53 | fingerprint_limit : allows you to control how many seconds of each audio file to fingerprint. Leaving out this key, or alternatively using -1 and None will cause Dejavu to fingerprint the entire audio file. Default value is None . 54 | database_type : as of now, only mysql (the default value) is supported. If you'd like to subclass Database and add another, please fork and send a pull request! 55 | 56 | An example configuration is as follows: 57 | >> > from dejavu import Dejavu 58 | >> > config = { 59 | ... " database " : { 60 | ... " host " : " 127.0.0.1 " , 61 | ... " user " : " root " , 62 | ... " passwd " : " Password123 " , 63 | ... " db " : " dejavu_db " , 64 | ... }, 65 | ... " database_type " : " mysql " , 66 | ... " fingerprint_limit " : 10 67 | ... } 68 | >> > djv = Dejavu(config) 69 | Tuning 70 | Inside fingerprint.py , you may want to adjust following parameters (some values are given below). 71 | FINGERPRINT_REDUCTION = 30 72 | PEAK_SORT = False 73 | DEFAULT_OVERLAP_RATIO = 0.4 74 | DEFAULT_FAN_VALUE = 10 75 | DEFAULT_AMP_MIN = 15 76 | PEAK_NEIGHBORHOOD_SIZE = 30 77 | 78 | These parameters are described in the fingerprint.py in detail. Read that in-order to understand the impact of changing these values. 79 | Recognizing 80 | There are two ways to recognize audio using Dejavu. You can recognize by reading and processing files on disk, or through your computer's microphone. 81 | Recognizing: On Disk 82 | Through the terminal: 83 | $ python dejavu.py --recognize file sometrack.wav 84 | { ' song_id ' : 1, ' song_name ' : ' Taylor Swift - Shake It Off ' , ' confidence ' : 3948, ' offset_seconds ' : 30.00018, ' match_time ' : 0.7159781455993652, ' offset ' : 646L} 85 | or in scripting, assuming you've already instantiated a Dejavu object: 86 | >> > from dejavu.recognize import FileRecognizer 87 | >> > song = djv.recognize(FileRecognizer, " va_us_top_40/wav/Mirrors - Justin Timberlake.wav " ) 88 | Recognizing: Through a Microphone 89 | With scripting: 90 | >> > from dejavu.recognize import MicrophoneRecognizer 91 | >> > song = djv.recognize(MicrophoneRecognizer, seconds = 10 ) # Defaults to 10 seconds. 92 | and with the command line script, you specify the number of seconds to listen: 93 | $ python dejavu.py --recognize mic 10 94 | Testing 95 | Testing out different parameterizations of the fingerprinting algorithm is often useful as the corpus becomes larger and larger, and inevitable tradeoffs between speed and accuracy come into play. 96 | 97 | Test your Dejavu settings on a corpus of audio files on a number of different metrics: 98 | 99 | Confidence of match (number fingerprints aligned) 100 | Offset matching accuracy 101 | Song matching accuracy 102 | Time to match 103 | 104 | 105 | An example script is given in test_dejavu.sh , shown below: 106 | # #################################### 107 | # ## Dejavu example testing script ### 108 | # #################################### 109 | 110 | # ########## 111 | # Clear out previous results 112 | rm -rf ./results ./temp_audio 113 | 114 | # ########## 115 | # Fingerprint files of extension mp3 in the ./mp3 folder 116 | python dejavu.py --fingerprint ./mp3/ mp3 117 | 118 | # ######### 119 | # Run a test suite on the ./mp3 folder by extracting 1, 2, 3, 4, and 5 120 | # second clips sampled randomly from within each song 8 seconds 121 | # away from start or end, sampling offset with random seed = 42, and finally, 122 | # store results in ./results and log to ./results/dejavu-test.log 123 | python run_tests.py \ 124 | --secs 5 \ 125 | --temp ./temp_audio \ 126 | --log-file ./results/dejavu-test.log \ 127 | --padding 8 \ 128 | --seed 42 \ 129 | --results ./results \ 130 | ./mp3 131 | The testing scripts are as of now are a bit rough, and could certainly use some love and attention if you're interested in submitting a PR! For example, underscores in audio filenames currently breaks the test scripts. 132 | How does it work? 133 | The algorithm works off a fingerprint based system, much like: 134 | 135 | Shazam 136 | MusicRetrieval 137 | Chromaprint 138 | 139 | The "fingerprints" are locality sensitive hashes that are computed from the spectrogram of the audio. This is done by taking the FFT of the signal over overlapping windows of the song and identifying peaks. A very robust peak finding algorithm is needed, otherwise you'll have a terrible signal to noise ratio. 140 | Here I've taken the spectrogram over the first few seconds of "Blurred Lines". The spectrogram is a 2D plot and shows amplitude as a function of time (a particular window, actually) and frequency, binned logrithmically, just as the human ear percieves it. In the plot below you can see where local maxima occur in the amplitude space: 141 | 142 | Finding these local maxima is a combination of a high pass filter (a threshold in amplitude space) and some image processing techniques to find maxima. A concept of a "neighboorhood" is needed - a local maxima with only its directly adjacent pixels is a poor peak - one that will not survive the noise of coming through speakers and through a microphone. 143 | If we zoom in even closer, we can begin to imagine how to bin and discretize these peaks. Finding the peaks itself is the most computationally intensive part, but it's not the end. Peaks are combined using their discrete time and frequency bins to create a unique hash for that particular moment in the song - creating a fingerprint. 144 | 145 | For a more detailed look at the making of Dejavu, see my blog post here . 146 | How well it works 147 | To truly get the benefit of an audio fingerprinting system, it can't take a long time to fingerprint. It's a bad user experience, and furthermore, a user may only decide to try to match the song with only a few precious seconds of audio left before the radio station goes to a commercial break. 148 | To test Dejavu's speed and accuracy, I fingerprinted a list of 45 songs from the US VA Top 40 from July 2013 (I know, their counting is off somewhere). I tested in three ways: 149 | 150 | Reading from disk the raw mp3 -> wav data, and 151 | Playing the song over the speakers with Dejavu listening on the laptop microphone. 152 | Compressed streamed music played on my iPhone 153 | 154 | Below are the results. 155 | 1. Reading from Disk 156 | Reading from disk was an overwhelming 100% recall - no mistakes were made over the 45 songs I fingerprinted. Since Dejavu gets all of the samples from the song (without noise), it would be nasty surprise if reading the same file from disk didn't work every time! 157 | 2. Audio over laptop microphone 158 | Here I wrote a script to randomly chose n seconds of audio from the original mp3 file to play and have Dejavu listen over the microphone. To be fair I only allowed segments of audio that were more than 10 seconds from the starting/ending of the track to avoid listening to silence. 159 | Additionally my friend was even talking and I was humming along a bit during the whole process, just to throw in some noise. 160 | Here are the results for different values of listening time ( n ): 161 | 162 | This is pretty rad. For the percentages: 163 | 164 | 165 | 166 | Number of Seconds 167 | Number Correct 168 | Percentage Accuracy 169 | 170 | 171 | 172 | 173 | 1 174 | 27 / 45 175 | 60.0% 176 | 177 | 178 | 2 179 | 43 / 45 180 | 95.6% 181 | 182 | 183 | 3 184 | 44 / 45 185 | 97.8% 186 | 187 | 188 | 4 189 | 44 / 45 190 | 97.8% 191 | 192 | 193 | 5 194 | 45 / 45 195 | 100.0% 196 | 197 | 198 | 6 199 | 45 / 45 200 | 100.0% 201 | 202 | Even with only a single second, randomly chosen from anywhere in the song, Dejavu is getting 60%! One extra second to 2 seconds get us to around 96%, while getting perfect only took 5 seconds or more. Honestly when I was testing this myself, I found Dejavu beat me - listening to only 1-2 seconds of a song out of context to identify is pretty hard. I had even been listening to these same songs for two days straight while debugging... 203 | In conclusion, Dejavu works amazingly well, even with next to nothing to work with. 204 | 3. Compressed streamed music played on my iPhone 205 | Just to try it out, I tried playing music from my Spotify account (160 kbit/s compressed) through my iPhone's speakers with Dejavu again listening on my MacBook mic. I saw no degredation in performance; 1-2 seconds was enough to recognize any of the songs. 206 | Performance 207 | Speed 208 | On my MacBook Pro, matching was done at 3x listening speed with a small constant overhead. To test, I tried different recording times and plotted the recording time plus the time to match. Since the speed is mostly invariant of the particular song and more dependent on the length of the spectrogram created, I tested on a single song, "Get Lucky" by Daft Punk: 209 | 210 | As you can see, the relationship is quite linear. The line you see is a least-squares linear regression fit to the data, with the corresponding line equation: 211 | 1.364757 * record_time - 0.034373 = time_to_match 212 | 213 | Notice of course since the matching itself is single threaded, the matching time includes the recording time. This makes sense with the 3x speed in purely matching, as: 214 | 1 (recording) + 1/3 (matching) = 4/3 ~= 1.364757 215 | 216 | if we disregard the miniscule constant term. 217 | The overhead of peak finding is the bottleneck - I experimented with mutlithreading and realtime matching, and alas, it wasn't meant to be in Python. An equivalent Java or C/C++ implementation would most likely have little trouble keeping up, applying FFT and peakfinding in realtime. 218 | An important caveat is of course, the round trip time (RTT) for making matches. Since my MySQL instance was local, I didn't have to deal with the latency penalty of transfering fingerprint matches over the air. This would add RTT to the constant term in the overall calculation, but would not effect the matching process. 219 | Storage 220 | For the 45 songs I fingerprinted, the database used 377 MB of space for 5.4 million fingerprints. In comparison, the disk usage is given below: 221 | 222 | 223 | 224 | Audio Information Type 225 | Storage in MB 226 | 227 | 228 | 229 | 230 | mp3 231 | 339 232 | 233 | 234 | wav 235 | 1885 236 | 237 | 238 | fingerprints 239 | 377 240 | 241 | There's a pretty direct trade-off between the necessary record time and the amount of storage needed. Adjusting the amplitude threshold for peaks and the fan value for fingerprinting will add more fingerprints and bolster the accuracy at the expense of more space. 242 | -------------------------------------------------------------------------------- /files/xxx/demiurge.txt: -------------------------------------------------------------------------------- 1 | demiurge 2 | PyQuery-based scraping micro-framework. 3 | Supports Python 2.x and 3.x. 4 | 5 | Documentation: http://demiurge.readthedocs.org 6 | Installing demiurge 7 | $ pip install demiurge 8 | 9 | Quick start 10 | Define items to be scraped using a declarative (Django-inspired) syntax: 11 | import demiurge 12 | 13 | class TorrentDetails ( demiurge . Item ): 14 | label = demiurge.TextField( selector = ' strong ' ) 15 | value = demiurge.TextField() 16 | 17 | def clean_value ( self , value ): 18 | unlabel = value[value.find( ' : ' ) + 1 :] 19 | return unlabel.strip() 20 | 21 | class Meta : 22 | selector = ' div#specifications p ' 23 | 24 | class Torrent ( demiurge . Item ): 25 | url = demiurge.AttributeValueField( 26 | selector = ' td:eq(2) a:eq(1) ' , attr = ' href ' ) 27 | name = demiurge.TextField( selector = ' td:eq(2) a:eq(2) ' ) 28 | size = demiurge.TextField( selector = ' td:eq(3) ' ) 29 | details = demiurge.RelatedItem( 30 | TorrentDetails, selector = ' td:eq(2) a:eq(2) ' , attr = ' href ' ) 31 | 32 | class Meta : 33 | selector = ' table.maintable:gt(0) tr:gt(0) ' 34 | base_url = ' http://www.mininova.org ' 35 | 36 | 37 | >> > t = Torrent.one( ' /search/ubuntu/seeds ' ) 38 | >> > t.name 39 | ' Ubuntu 7.10 Desktop Live CD ' 40 | >> > t.size 41 | u ' 695.81 \xa0 MB ' 42 | >> > t.url 43 | ' /get/1053846 ' 44 | >> > t.html 45 | u ' 19 \xa0 Dec \xa0 07Software... ' 46 | 47 | >> > results = Torrent.all( ' /search/ubuntu/seeds ' ) 48 | >> > len (results) 49 | 116 50 | >> > for t in results[: 3 ]: 51 | ... print t.name, t.size 52 | ... 53 | Ubuntu 7.10 Desktop Live CD 695.81 MB 54 | Super Ubuntu 2008.09 - VMware image 871.95 MB 55 | Portable Ubuntu 9.10 for Windows 559.78 MB 56 | ... 57 | 58 | >> > t = Torrent.one( ' /search/ubuntu/seeds ' ) 59 | >> > for detail in t.details: 60 | ... print detail.label, detail.value 61 | ... 62 | Category: Software > GNU / Linux 63 | Total size: 695.81  megabyte 64 | Added: 2467 days ago by Distribution 65 | Share ratio: 17 seeds, 2 leechers 66 | Last updated: 35 minutes ago 67 | Downloads: 29 , 0 85 68 | See documentation for details: http://demiurge.readthedocs.org 69 | Why demiurge ? 70 | Plato, as the speaker Timaeus, refers to the Demiurge frequently in the Socratic 71 | dialogue Timaeus, c. 360 BC. The main character refers to the Demiurge as the 72 | entity who "fashioned and shaped" the material world. Timaeus describes the 73 | Demiurge as unreservedly benevolent, and hence desirous of a world as good as 74 | possible. The world remains imperfect, however, because the Demiurge created 75 | the world out of a chaotic, indeterminate non-being. 76 | http://en.wikipedia.org/wiki/Demiurge 77 | Contributors 78 | 79 | Martín Gaitán (@mgaitan) 80 | 81 | -------------------------------------------------------------------------------- /files/xxx/django-guardian.txt: -------------------------------------------------------------------------------- 1 | django-guardian 2 | 3 | django-guardian is an implementation of per object permissions [1] on top 4 | of Django's authorization backend 5 | 6 | Documentation 7 | Online documentation is available at https://django-guardian.readthedocs.io/ . 8 | 9 | Requirements 10 | 11 | Python 2.7 or 3.4+ 12 | A supported version of Django (currently 1.8+) 13 | 14 | Travis CI tests on Django version 1.8, 1.10, and 1.11. 15 | 16 | Installation 17 | To install django-guardian simply run: 18 | pip install django-guardian 19 | 20 | 21 | Configuration 22 | We need to hook django-guardian into our project. 23 | 24 | Put guardian into your INSTALLED_APPS at settings module: 25 | 26 | INSTALLED_APPS = ( 27 | ... 28 | ' guardian ' , 29 | ) 30 | 31 | Add extra authorization backend to your settings.py : 32 | 33 | AUTHENTICATION_BACKENDS = ( 34 | ' django.contrib.auth.backends.ModelBackend ' , # default 35 | ' guardian.backends.ObjectPermissionBackend ' , 36 | ) 37 | 38 | Create guardian database tables by running: 39 | python manage.py migrate 40 | 41 | 42 | 43 | 44 | Usage 45 | After installation and project hooks we can finally use object permissions 46 | with Django . 47 | Lets start really quickly: 48 | >> > from django.contrib.auth.models import User, Group 49 | >> > jack = User.objects.create_user( ' jack ' , ' jack@example.com ' , ' topsecretagentjack ' ) 50 | >> > admins = Group.objects.create( name = ' admins ' ) 51 | >> > jack.has_perm( ' change_group ' , admins) 52 | False 53 | >> > from guardian.models import UserObjectPermission 54 | >> > UserObjectPermission.objects.assign_perm( ' change_group ' , jack, obj = admins) 55 | < UserObjectPermission: admins | jack | change_group > 56 | >> > jack.has_perm( ' change_group ' , admins) 57 | True 58 | Of course our agent jack here would not be able to change_group globally: 59 | >> > jack.has_perm( ' change_group ' ) 60 | False 61 | 62 | Admin integration 63 | Replace admin.ModelAdmin with GuardedModelAdmin for those models 64 | which should have object permissions support within admin panel. 65 | For example: 66 | from django.contrib import admin 67 | from myapp.models import Author 68 | from guardian.admin import GuardedModelAdmin 69 | 70 | # Old way: 71 | # class AuthorAdmin(admin.ModelAdmin): 72 | # pass 73 | 74 | # With object permissions support 75 | class AuthorAdmin ( GuardedModelAdmin ): 76 | pass 77 | 78 | admin.site.register(Author, AuthorAdmin) 79 | 80 | 81 | [1] Great paper about this feature is available at djangoadvent articles . 82 | 83 | 84 | -------------------------------------------------------------------------------- /files/xxx/django-haystack.txt: -------------------------------------------------------------------------------- 1 | Haystack 2 | 3 | 4 | Author: 5 | Daniel Lindsley 6 | Date: 7 | 2013/07/28 8 | 9 | 10 | Haystack provides modular search for Django. It features a unified, familiar 11 | API that allows you to plug in different search backends (such as Solr , 12 | Elasticsearch , Whoosh , Xapian , etc.) without having to modify your code. 13 | Haystack is BSD licensed, plays nicely with third-party app without needing to 14 | modify the source and supports advanced features like faceting, More Like This, 15 | highlighting, spatial search and spelling suggestions. 16 | You can find more information at http://haystacksearch.org/ . 17 | 18 | Getting Help 19 | There is a mailing list ( http://groups.google.com/group/django-haystack/ ) 20 | available for general discussion and an IRC channel (#haystack on 21 | irc.freenode.net). 22 | 23 | Documentation 24 | 25 | Development version: http://docs.haystacksearch.org/ 26 | v2.6.X: https://django-haystack.readthedocs.io/en/v2.6.0/ 27 | v2.5.X: https://django-haystack.readthedocs.io/en/v2.5.0/ 28 | v2.4.X: https://django-haystack.readthedocs.io/en/v2.4.1/ 29 | v2.3.X: https://django-haystack.readthedocs.io/en/v2.3.0/ 30 | v2.2.X: https://django-haystack.readthedocs.io/en/v2.2.0/ 31 | v2.1.X: https://django-haystack.readthedocs.io/en/v2.1.0/ 32 | v2.0.X: https://django-haystack.readthedocs.io/en/v2.0.0/ 33 | v1.2.X: https://django-haystack.readthedocs.io/en/v1.2.7/ 34 | v1.1.X: https://django-haystack.readthedocs.io/en/v1.1/ 35 | 36 | See the changelog 37 | 38 | Build Status 39 | 40 | 41 | Requirements 42 | Haystack has a relatively easily-met set of requirements. 43 | 44 | Python 2.7+ or Python 3.3+ 45 | A supported version of Django: https://www.djangoproject.com/download/#supported-versions 46 | 47 | Additionally, each backend has its own requirements. You should refer to 48 | https://django-haystack.readthedocs.io/en/latest/installing_search_engines.html for more 49 | details. 50 | -------------------------------------------------------------------------------- /fortest.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zx576/programming_vocabulary/35de38621c03c1385f59008bb8296f67eb35bdf8/fortest.db -------------------------------------------------------------------------------- /models_exp.py: -------------------------------------------------------------------------------- 1 | # coding = utf-8 2 | # author = zhouxin 3 | # date = 2017.7.14 4 | # description 5 | # expand exsited database, add some nessary column, and reserve some columns 6 | # 使用 peewee 库操作 sqlite3 7 | # 建立两个 table: word-book 8 | # 以下为 newword 的原因是由于之前创建过一次,但需要扩展字段 9 | # 所有迁移了数据,重新建了表 10 | 11 | from settings import DATABASE 12 | from peewee import * 13 | 14 | new_db = SqliteDatabase(DATABASE) 15 | 16 | 17 | class NewBook(Model): 18 | 19 | name = CharField() 20 | # 总词汇 21 | total = IntegerField(default=0) 22 | # 是否已经统计 23 | is_analyzed = BooleanField(default=False) 24 | # reserved columns 25 | # 保留字段,便于之后扩展 26 | re1 = CharField(default='') 27 | re2 = CharField(default='') 28 | re3 = IntegerField(default=0) 29 | re4 = IntegerField(default=0) 30 | 31 | class Meta: 32 | database = new_db 33 | 34 | class NewWord(Model): 35 | # foreignkey , which books the word collect from 36 | # book = ForeignKeyField(Book) 37 | # 单词名 38 | name = CharField() 39 | # 解释 40 | explanation = TextField(default='') 41 | # 词频 42 | frequency = IntegerField(default=0) 43 | # 是否有效 44 | is_valid = BooleanField(default=True) 45 | # 音标 46 | phonogram = CharField(default='') 47 | # reserved columns 48 | # 保留字段,便于之后扩展 49 | re1 = CharField(default='') 50 | re2 = CharField(default='') 51 | re3 = IntegerField(default=0) 52 | re4 = IntegerField(default=0) 53 | 54 | class Meta: 55 | database = new_db -------------------------------------------------------------------------------- /python-words.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zx576/programming_vocabulary/35de38621c03c1385f59008bb8296f67eb35bdf8/python-words.xlsx -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | peewee 2 | requests 3 | bs4 4 | lxml 5 | -------------------------------------------------------------------------------- /settings.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.11 4 | # description 5 | # 该项目的一些设置 6 | 7 | import os 8 | 9 | _BASEDIR = os.path.dirname(__file__) 10 | 11 | # 数据库名 12 | DATABASE = os.path.join(_BASEDIR, 'fortest.db') 13 | 14 | 15 | # 需要遍历的文件夹 16 | DIRS = [ 17 | # 示例, 该文件夹在项目文件夹下,名为 'files' 18 | os.path.join(_BASEDIR, 'files'), 19 | ] 20 | 21 | 22 | # 文件也可以单独添加 23 | FILES = [ 24 | # 示例, 该文件在项目文件夹下, 名为 'python.txt' 25 | # os.path.join(_BASEDIR, 'fortest.txt') 26 | 27 | ] 28 | 29 | # 每本书抓取的词汇量 30 | NUMBERS = [ 31 | (100, 10), # 小于 100 取 10 个 32 | (1000, 100), # 100 - 1000 取 100 个 33 | (5000, 300), 34 | (10000, 500), 35 | (50000, 1000), 36 | (2**31, 1500) # 大于 50000 统一取 1500 37 | ] 38 | 39 | 40 | # 收集一些需要被排除的词汇 41 | exclude_list = [ 42 | # 代词 43 | 'i', 'you', 'he', 'she', 'it', 'we', 'they', # 主格 44 | 'me', 'him', 'her', 'us', 'them', # 宾格 45 | 'my', 'your', 'his', 'her', 'its', 'our', 'their', # 形容词性 46 | 'mine', 'yours', 'his', 'hers', 'ours', 'yours', 'theirs', # 名词性 47 | 'myself', 'yourself', 'himself', 'herself', 'itself', 'ourselves', 'yourselves', 'themselves', # 反身代词 48 | 'this', 'that', 'such', 'these', 'those', 'some', 49 | 'who', 'whom', 'whose', 'which', 'what', 'whoever', 'whichever', 'whatever', 'when', 50 | 'as', 'self', 51 | 'one', 'some', 'any', 'each', 'every', 'none', 'no', 'many', 'much', 'few', 'little', 52 | 'other', 'another', 'all', 'both', 'neither', 'either', 53 | # 冠词 54 | 'a', 'an', 'the', 55 | 56 | # 简单介词 57 | 'about', 'with', 58 | 'into', 'out', 'of' , 'without', 59 | 'at', 'in', 'on', 'by', 'to', 60 | 61 | # 简单连词 62 | 'and', 'also', 'too','not', 'but', 63 | 64 | # 简单量词 65 | 'one', 'two', 'three', 'four', 'five', 66 | # 简单动词 67 | 'is', 'am', 'are', 'was', 'were', 'be', 68 | # 其他 69 | 'or', 'if', 'else', 'for','have', 'must', 'has', 'new', 'time', 70 | 71 | ] 72 | 73 | -------------------------------------------------------------------------------- /shanbay/README.md: -------------------------------------------------------------------------------- 1 | ## 批量添加扇贝单词 2 | 3 | #### 文件说明 4 | 5 | - shanbeisettings.py 一些基本的设置 6 | - creat_word_list.py 在扇贝上创建单词章节 7 | - add_to_shanbay.py 提取出单词,逐一将其加入到单词章节中 8 | 9 | 10 | #### 使用说明 11 | 12 | 在使用之前最好亲自在扇贝上走一遍流程 13 | 14 | 1、更改 settings 设置 15 | 16 | - 设置创建多少个章节,章节名,描述 17 | - 手动 F12 将 headers 信息复制到 HEADER 18 | - 更改单词书 id 19 | 20 | 21 | 2、运行 creat_word_list.py 22 | 23 | 本程序的作用是在 扇贝 上创建设置好的 单词章节 24 | 25 | 3、运行 add_to_shanbay.py 26 | 27 | 本程序的作用是 将 单词逐一添加到单词章节中 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /shanbay/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zx576/programming_vocabulary/35de38621c03c1385f59008bb8296f67eb35bdf8/shanbay/__init__.py -------------------------------------------------------------------------------- /shanbay/add_to_shanbay.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.14 4 | 5 | # description 6 | # 将单词添加到扇贝,手动版 7 | 8 | import requests 9 | import bs4 10 | 11 | from models_exp import NewWord 12 | from shanbay.shanbeisettings import HEADER,WORKBOOK_PATH, WORKBOOKID 13 | 14 | 15 | class ShanBay: 16 | 17 | def __init__(self): 18 | 19 | # 带有登录信息的 header 20 | self.header = HEADER 21 | self.url = 'https://www.shanbay.com/api/v1/wordlist/vocabulary/' 22 | self.listid = [] 23 | self.book_url = 'https://www.shanbay.com/wordbook/{}/'.format(str(WORKBOOKID)) 24 | print(self.header) 25 | print(self.book_url) 26 | # 存储创建的所有单词章节 id 27 | def _parse_id(self): 28 | 29 | req = requests.get(self.book_url, headers=self.header, timeout=2) 30 | req.raise_for_status() 31 | soup = bs4.BeautifulSoup(req.text, 'lxml') 32 | soup_a = soup.find_all('a', attrs={'desc': True}) 33 | 34 | for a in soup_a: 35 | id = a['unit-id'] 36 | print(id) 37 | self._save_id(id) 38 | 39 | # 保存 单词章节 id 到制定的文件 40 | def _save_id(self, id): 41 | with open(WORKBOOK_PATH, 'a+')as f: 42 | f.write(str(id)) 43 | f.write('\n') 44 | 45 | # 读取单词章节 id 46 | def _open_bookid(self): 47 | with open(WORKBOOK_PATH, 'r')as f: 48 | for i in f.readlines(): 49 | self.listid.append(int(i)) 50 | 51 | # print(self.listid) 52 | 53 | # 将某个特定的单词添加到 指定的单词章节 54 | 55 | def _add_one(self, word, listid): 56 | 57 | dct = { 58 | 'id': listid, 59 | 'word': word 60 | } 61 | # print(dct) 62 | # 请求错误 63 | try: 64 | req = requests.post(self.url, dct, headers=self.header) 65 | req.raise_for_status() 66 | res = req.json() 67 | print('单词 {}'.format(word), res) 68 | # print(req.status_code) 69 | except: 70 | return '1' 71 | 72 | return res['msg'] 73 | 74 | # 添加单词 75 | # 如果扇贝反馈无此单词 - 跳过 76 | # 如果反馈 单词章节的单词已满 - 则更换单词章节添加 77 | def add(self): 78 | 79 | query = NewWord.select().where((NewWord.is_valid == True) & (NewWord.re1 == '')).order_by(-NewWord.frequency) 80 | iter_word = iter(query) 81 | self._open_bookid() 82 | iter_lst = iter(self.listid) 83 | id = next(iter_lst) 84 | while True: 85 | # 单词添加完毕,程序结束 86 | try: 87 | next_word = next(iter_word) 88 | except: 89 | break 90 | res = self._add_one(next_word.name, id) 91 | # 设置请求错误处理 92 | if res == '1': 93 | print('请求错误,稍后再试') 94 | break 95 | 96 | # 设置单词无效处理 97 | elif 'NOT' in res: 98 | next_word.re1 = 'invalid' 99 | next_word.save() 100 | continue 101 | 102 | # 换单词表 103 | elif '过上限' in res: 104 | id = next(iter_lst) 105 | self.list_count = 0 106 | self._add_one(next_word.name, id) 107 | 108 | # 标记该单词已经添加 109 | next_word.re1 = 'added' 110 | next_word.save() 111 | 112 | 113 | # 测试单个单词添加是否成功 114 | def test_add(self): 115 | self._add_one('define', 539857) 116 | 117 | if __name__ == '__main__': 118 | 119 | s = ShanBay() 120 | s._parse_id() 121 | s.add() 122 | # s._open_bookid() -------------------------------------------------------------------------------- /shanbay/creat_word_list.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.15 4 | # description 5 | # 创建单词书章节 6 | 7 | import requests 8 | import time 9 | 10 | from shanbay.shanbeisettings import CHAPTER_NAME, HEADER, WORKBOOKID 11 | 12 | class Create_list: 13 | 14 | def __init__(self): 15 | self.chapter = CHAPTER_NAME 16 | # self.cookie = COOKIE 17 | self.bookid = WORKBOOKID 18 | self.header = HEADER 19 | self.url = 'https://www.shanbay.com/api/v1/wordbook/wordlist/' 20 | 21 | def _create_lst(self, name, description): 22 | 23 | keywords = { 24 | 'name': name, 25 | 'description': description, 26 | 'wordbook_id': self.bookid 27 | } 28 | print(keywords) 29 | # self.header['cookie'] = self.cookie 30 | # print(self.header) 31 | try: 32 | req = requests.post(self.url, keywords, headers=self.header) 33 | req.raise_for_status() 34 | print(req.status_code) 35 | print(req.json()['data']['id']) 36 | assert req.json()['data'] 37 | except Exception as e: 38 | print(e) 39 | return 40 | 41 | return req.json()['data']['id'] 42 | 43 | 44 | def create(self): 45 | 46 | for key in self.chapter: 47 | # if '1' in key or '2' in key: 48 | # continue 49 | print('创建单词章节{0}, 描述为{1}'.format(key, self.chapter[key])) 50 | id = self._create_lst(key, self.chapter[key]) 51 | 52 | time.sleep(1) 53 | 54 | if __name__ == '__main__': 55 | 56 | c = Create_list() 57 | c.create() -------------------------------------------------------------------------------- /shanbay/shanbeisettings.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.15 4 | # description 5 | # batch upload words to shanbay.com 6 | # 7 | 8 | from collections import OrderedDict 9 | 10 | 11 | # 需创建的单词章节名称 12 | def create(): 13 | count = 1 14 | # 总共创建多少个 章节 15 | total = 2 16 | dct = OrderedDict() 17 | # 章节名 18 | fix_key = 'Chapter-' 19 | # 描述 20 | fix_value = '第 {} 单元' 21 | while True: 22 | if count == total: 23 | break 24 | 25 | dct[fix_key+str(count)] = fix_value.format(str(count)) 26 | 27 | count += 1 28 | 29 | # dct = tuple(dct) 30 | # print(dct) 31 | # for i in dct: 32 | # print(i, dct[i]) 33 | return dct 34 | 35 | CHAPTER_NAME = create() 36 | 37 | 38 | # 登录凭证 39 | # 扇贝账户与密码 - 未实现 40 | # 或者手动复制 cookie 信息 41 | 42 | HEADER = { 43 | # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 44 | # 'Accept-Encoding':'gzip, deflate, br', 45 | # 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.6', 46 | # 'Cache - Control': 'no-cache', 47 | # 'Connection': 'keep-alive', 48 | # 'Content-Length': '21', 49 | # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 50 | 'Cookie': """sessionid=".eJyrVopPLC3JiC8tTi2KT0pMzk7NS1GyUkrOz83Nz9MDS0FFi_WcE5MzUn3zU1JznKAKdZB1ZwI1mpgZmppYmtYCAJS6HyY:1dVqdm:lsde3ncYWWJnay2wgBTkLXjxcTk"; csrftoken=HQDY8b1bHbSyiJVD1RgG2qBTqwQL6VYw; _ga=GA1.2.1254111374.1497746577; __utmt=1; auth_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VybmFtZSI6Inp4NTc2IiwiZGV2aWNlIjowLCJpc19zdGFmZiI6ZmFsc2UsImlkIjo0NjE1NDk1LCJleHAiOjE1MDExNDkxMDB9.eo3vJ-ylhqCaGs3DKK0pV_ny8H8rq_NSHY3ei1Lfe70; userid=4615495; __utma=183787513.1254111374.1497746577.1500258334.1500285087.13; __utmb=183787513.5.10.1500285087; __utmc=183787513; __utmz=183787513.1499999350.5.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic""", 51 | # 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36', 52 | # 'X-Requested-With': 'XMLHttpRequest', 53 | # 'Host': 'www.shanbay.com', 54 | # 'Pragma': 'no-cache', 55 | # 'Origin': 'https://www.shanbay.com' 56 | } 57 | 58 | 59 | # 创建的单词书ID 60 | WORKBOOKID = 187633 61 | 62 | # workbook_id 输出文件地址 63 | WORKBOOK_PATH = 'workbook_id_test.txt' 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /shanbay/workbook_id.txt: -------------------------------------------------------------------------------- 1 | 569452 2 | 569455 3 | 569458 4 | 569461 5 | 569464 6 | 569467 7 | 569470 8 | 569473 9 | 569476 10 | 569479 11 | 569482 12 | 569485 13 | 569488 14 | 569491 15 | 569494 16 | 569497 17 | 569500 18 | 569503 19 | 569506 20 | 569509 21 | 569512 22 | 569515 23 | 569518 24 | 569521 25 | 569524 26 | 569527 27 | 569530 28 | 569533 29 | 569536 30 | 569539 31 | 569542 32 | 569545 33 | 569548 34 | 569551 35 | 569554 36 | 569557 37 | 569560 38 | 569563 39 | 569566 40 | 569569 41 | 569572 42 | 569575 43 | 569578 44 | 569581 45 | -------------------------------------------------------------------------------- /shanbay/workbook_id_test.txt: -------------------------------------------------------------------------------- 1 | 541513 2 | 541522 3 | 541513 4 | 541522 5 | 541513 6 | 541522 7 | -------------------------------------------------------------------------------- /spiders/README.md: -------------------------------------------------------------------------------- 1 | ## 下载文档或者网页内容的爬虫 2 | 3 | #### 文件说明 4 | 5 | - downloadPdf.py 下载 pdf 文件 6 | - github.py 下载 github 上的说明文档 7 | - onlinedocs.py 下载在线文档 8 | - stackoverflow.py 下载 st 上的一些方法 9 | - utils.py 抽象出的一些通用爬虫方法 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /spiders/downloadPdf.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # description 4 | # 下载 pdf 文件, 将 pdf 下载地址添加到 downlst ,运行程序即可 5 | 6 | import requests 7 | from spiders.utils import Utils 8 | 9 | PATH_DIR = 'download/' 10 | util = Utils() 11 | 12 | def download(url): 13 | 14 | util.checkpath(PATH_DIR) 15 | 16 | req = requests.get(url) 17 | c = req.content 18 | name = url.split('/')[-1] 19 | with open(PATH_DIR+name, 'wb')as f: 20 | f.write(c) 21 | 22 | 23 | downlst = [ 24 | # 'http://files2.syncfusion.com/Downloads/Ebooks/SciPy_Programming_Succinctly.pdf', 25 | # 'https://docs.google.com/file/d/0B8IUCMSuNpl7MnpaQ3hhN2R0Z1k/edit' 26 | # 'http://stock.ethop.org/pdf/python/Learning%20Python,%205th%20Edition.pdf', 27 | # 'http://slav0nic.org.ua/static/books/python/OReilly%20-%20Core%20Python%20Programming.pdf', 28 | # /////////// 29 | # 'http://www.oreilly.com/programming/free/files/functional-programming-python.pdf', 30 | # 'https://doc.lagout.org/programmation/python/Python%20Pocket%20Reference_%20Python%20in%20Your%20Pocket%20%285th%20ed.%29%20%5BLutz%202014-02-09%5D.pdf', 31 | # 'http://www.oreilly.com/programming/free/files/a-whirlwind-tour-of-python.pdf', 32 | # 'http://www.oreilly.com/programming/free/files/20-python-libraries-you-arent-using-but-should.pdf', 33 | # 'http://www.oreilly.com/programming/free/files/hadoop-with-python.pdf', 34 | # 'http://www.oreilly.com/programming/free/files/how-to-make-mistakes-in-python.pdf', 35 | # 'http://www.oreilly.com/programming/free/files/functional-programming-python.pdf', 36 | # 'http://www.oreilly.com/programming/free/files/python-in-education.pdf', 37 | # 'http://www.oreilly.com/programming/free/files/from-future-import-python.pdf' 38 | # 'http://trickntip.com/wp-content/uploads/2017/01/Head-First-Python-ora-2011.pdf' 39 | # '''''''''''''''' 40 | # 'http://victoria.lviv.ua/html/fl5/NaturalLanguageProcessingWithPython.pdf', 41 | # 'http://www3.canisius.edu/~yany/python/Python4DataAnalysis.pdf', 42 | # 'ftp://ftp.micronet-rostov.ru/linux-support/books/programming/Python/[O%60Reilly]%20-%20Programming%20Python,%204th%20ed.%20-%20[Lutz]/[O%60Reilly]%20-%20Programming%20Python,%204th%20ed.%20-%20[Lutz].pdf 43 | # ..for 44 | # 'https://media.readthedocs.org/pdf/requests/latest/requests.pdf', 45 | # 'http://gsl.mit.edu/media/programs/nigeria-summer-2012/materials/python/django.pdf', 46 | # 'https://media.readthedocs.org/pdf/beautiful-soup-4/latest/beautiful-soup-4.pdf', 47 | # 'https://media.readthedocs.org/pdf/flask/0.7/flask.pdf', 48 | 49 | # 'https://media.readthedocs.org/pdf/jinja2/latest/jinja2.pdf', 50 | # 'http://lxml.de/3.4/lxmldoc-3.4.4.pdf', 51 | # 'https://docs.scipy.org/doc/numpy-1.11.0/numpy-ref-1.11.0.pdf', 52 | # 'https://pandas.pydata.org/pandas-docs/stable/pandas.pdf', 53 | # 'https://media.readthedocs.org/pdf/peewee/latest/peewee.pdf', 54 | # 'https://media.readthedocs.org/pdf/pillow/latest/pillow.pdf', 55 | # 'https://media.readthedocs.org/pdf/scrapy/1.0/scrapy.pdf', 56 | 'https://media.readthedocs.org/pdf/xlwt/latest/xlwt.pdf' 57 | # 'http://1.droppdf.com/files/X06AR/fluent-python-2015-.pdf', 58 | # 'http://files.meetup.com/18552511/Learn%20Python%20The%20Hard%20Way%203rd%20Edition%20V413HAV.pdf', 59 | 60 | 61 | ] 62 | 63 | if __name__ == '__main__': 64 | 65 | for l in downlst: 66 | download(l) -------------------------------------------------------------------------------- /spiders/github.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.12 4 | # description 5 | # crawl README's words about most projects of awesome-python on the github.com 6 | # 爬取 github 上的资源集合, 或者某个独立的项目 7 | 8 | import bs4 9 | import re 10 | 11 | from spiders.utils import Utils 12 | 13 | 14 | PATH_DIR = 'github/' 15 | # add some github projects 16 | # return all projects' urls base on githubur 17 | # 添加一些 github 项目/仓库地址, 返回所有仓库地址 18 | class _Settings: 19 | 20 | def __init__(self): 21 | 22 | # github projects which contain many python directories 23 | # 资源集合 24 | self.projectsPool = [ 25 | # 'https://github.com/vinta/awesome-python' 26 | ] 27 | # dependent directories 28 | # 独立的仓库 29 | self.projectsUrl = [ 30 | 'https://github.com/zx576/scancode_backend' 31 | ] 32 | # invoke general class 33 | # 爬虫工具箱 34 | self.util = Utils() 35 | 36 | # parse projects(like awesome-python) 37 | # return all directories' url which domain url are github.com 38 | # 解析类似 awesome-python 的项目,返回所有项目的 github 地址,过滤掉指向站外的 url 39 | def _parse_pool(self): 40 | 41 | if not self.projectsPool: 42 | return [] 43 | 44 | links = [] 45 | for project in self.projectsPool: 46 | page = self.util.req(project) 47 | if not page: 48 | continue 49 | links += self._parse_html_get_links(page) 50 | 51 | return links 52 | 53 | # use bs4 parse html 54 | # return all links 55 | def _parse_html_get_links(self, page): 56 | 57 | soup = bs4.BeautifulSoup(page, 'lxml') 58 | soup_a = soup.find_all('a', href=re.compile('https://github.com/')) 59 | links = [] 60 | for a in soup_a: 61 | links.append(a['href']) 62 | 63 | return links 64 | 65 | 66 | def parse(self): 67 | 68 | # deduplicate urls 69 | return list(set(self.projectsUrl+self._parse_pool())) 70 | 71 | 72 | # 爬虫程序 73 | class GitSpider: 74 | 75 | def __init__(self): 76 | self.links = _Settings().parse() 77 | self.util = Utils() 78 | 79 | def _get_words(self, url): 80 | text = self.util.req(url) 81 | if not text: 82 | return 83 | 84 | soup = bs4.BeautifulSoup(text, 'lxml') 85 | soup_article = soup.find('article') 86 | 87 | return soup_article.get_text(' ') if soup_article else None 88 | 89 | 90 | def _save(self, url, words): 91 | 92 | self.util.checkpath(PATH_DIR) 93 | if not words: 94 | return 95 | title = url.split('/')[-1] 96 | with open(PATH_DIR+'{}.txt'.format(title), 'w')as f: 97 | f.write(words) 98 | 99 | def start(self): 100 | 101 | if not self.links: 102 | return 103 | 104 | for url in self.links: 105 | words = self._get_words(url) 106 | self._save(url, words) 107 | print('successfully get {0} '.format(url)) 108 | 109 | 110 | if __name__ == '__main__': 111 | gs = GitSpider() 112 | gs.start() -------------------------------------------------------------------------------- /spiders/onlinedocs.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.13 4 | # description 5 | # download online books, 6 | # 资源来自与 readthedocs.io 7 | # 下载某个在线的文档所有的文字 8 | 9 | import bs4 10 | import queue 11 | 12 | from spiders.utils import Utils 13 | 14 | PATH_DIR = 'docs/' 15 | 16 | class _Down: 17 | 18 | def __init__(self): 19 | self.util = Utils() 20 | 21 | def _save(self, title, words): 22 | 23 | self.util.checkpath(PATH_DIR) 24 | if not words: 25 | return 26 | with open(PATH_DIR+title, 'a+')as f: 27 | f.write(words) 28 | 29 | # 递归抓取某文档所有链接 30 | def _download(self, qu, domain, title,switch=True): 31 | # print(title) 32 | if qu.empty(): 33 | return 34 | 35 | url = qu.get() 36 | text = self.util.req(url) 37 | 38 | if not text: 39 | # qu.put(url) 40 | return self._download(qu,domain, title, False) 41 | 42 | if switch: 43 | res = self._download_links(domain, text) 44 | for i in res: 45 | qu.put(i) 46 | 47 | words = self._download_docs(text) 48 | self._save(title,words) 49 | 50 | return self._download(qu, domain, title,switch=False) 51 | 52 | def _download_docs(self, page): 53 | 54 | soup = bs4.BeautifulSoup(page, 'lxml') 55 | soup_body = soup.find('body') 56 | words = '' 57 | if soup_body: 58 | words += soup_body.get_text(' ') 59 | 60 | return words 61 | 62 | def _download_links(self, domain, page): 63 | 64 | lst = [] 65 | soup = bs4.BeautifulSoup(page, 'lxml') 66 | soup_link = soup.find_all('a') 67 | for link in soup_link: 68 | lst.append(domain+link['href']) 69 | 70 | return lst 71 | 72 | def download(self, url, domain, title): 73 | # title = 'Problem Solving with Algorithms and Data Structures using Python.pdf' 74 | qu = queue.Queue() 75 | qu.put(url) 76 | 77 | return self._download(qu, domain, title) 78 | 79 | 80 | class Pat1(_Down): 81 | 82 | def __init__(self): 83 | # super(_Down, self).__init__() 84 | self.util = Utils() 85 | # 某文档信息 86 | # self.url = 'https://interactivepython.org/courselib/static/pythonds/index.html' 87 | # self.domain = 'https://interactivepython.org/courselib/static/pythonds/' 88 | # self.title = 'Problem Solving with Algorithms and Data Structures using Python.txt' 89 | # self.url = 'http://chimera.labs.oreilly.com/books/1230000000393/index.html' 90 | # self.domain = 'http://chimera.labs.oreilly.com/books/1230000000393/' 91 | # self.title = 'Python Cookbook.txt' 92 | self.url = 'http://docs.peewee-orm.com/en/stable/' 93 | self.domain = self.url 94 | self.title = 'peewee.txt' 95 | 96 | def _download_links(self, domain, page): 97 | lst = [] 98 | soup = bs4.BeautifulSoup(page, 'lxml') 99 | soup_li = soup.find_all('li', class_="toctree-l1") 100 | for li in soup_li: 101 | lst.append(domain + li.a['href']) 102 | res = list(set(lst)) 103 | # print(len(res)) 104 | return res 105 | 106 | def get(self): 107 | 108 | return self.download(self.url, self.domain, self.title) 109 | 110 | if __name__ == '__main__': 111 | p1 = Pat1() 112 | p1.get() 113 | 114 | 115 | 116 | -------------------------------------------------------------------------------- /spiders/stackoverflow.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.12 4 | # description 5 | # crwal stacloverflow's topic 6 | 7 | import bs4 8 | 9 | from spiders.utils import Utils 10 | 11 | PATH_DIR = 'stack/' 12 | 13 | class _Settings(): 14 | 15 | def __init__(self): 16 | 17 | # 手动设置 18 | # topic links 19 | self.topic = [ 20 | # python topic 21 | # 'https://stackoverflow.com/documentation/python/topics' 22 | # 'https://stackoverflow.com/documentation/django/topics', 23 | # 'https://stackoverflow.com/documentation/algorithm/topics', 24 | 'https://stackoverflow.com/documentation/git/topics', 25 | # 'https://stackoverflow.com/documentation/design-patterns/topics', 26 | # 'https://stackoverflow.com/documentation/flask/topics' 27 | ] 28 | # question links 29 | self.res = [] 30 | # ======================= 31 | #  dont change anything below 32 | self.util = Utils() 33 | self.domain = 'https://stackoverflow.com' 34 | 35 | # 解析这个 topic 下的所有答案链接 36 | def _parse_topic(self): 37 | if not self.topic: 38 | return 39 | for url in self.topic: 40 | self._add_url(url) 41 | 42 | def _add_url(self, url): 43 | 44 | page = self.util.req(url) 45 | if not page: 46 | return 47 | soup = bs4.BeautifulSoup(page, 'lxml') 48 | soup_a = soup.find_all('a', class_='doc-topic-link') 49 | for a in soup_a: 50 | 51 | last = a.get('href', None) 52 | self.res.append(self.domain+last) 53 | 54 | soup_next = soup.find('a', attrs={'rel': 'next'}) 55 | # get next page 56 | if soup_next: 57 | 58 | next_url =self.domain + soup_next['href'] 59 | return self._add_url(next_url) 60 | 61 | 62 | def parse(self): 63 | 64 | self._parse_topic() 65 | return self.res 66 | 67 | 68 | class Stspider: 69 | 70 | def __init__(self): 71 | self.links = _Settings().parse() 72 | self.util = Utils() 73 | 74 | # 获取所有文字内容 75 | def _get_words(self, url): 76 | page = self.util.req(url) 77 | if not page: 78 | return 79 | soup = bs4.BeautifulSoup(page, 'lxml') 80 | body = soup.find('body') 81 | if not body: 82 | return 83 | else: 84 | words = body.get_text(' ') 85 | 86 | return words 87 | 88 | # 保存文字内容 89 | def _save(self, url, words): 90 | 91 | self.util.checkpath(PATH_DIR) 92 | if not words: 93 | return 94 | title = url.split('/')[-1] 95 | with open(PATH_DIR + '{}.txt'.format(title), 'w')as f: 96 | f.write(words) 97 | 98 | # 启动 99 | def start(self): 100 | 101 | if not self.links: 102 | return 103 | 104 | for url in self.links: 105 | words = self._get_words(url) 106 | self._save(url, words) 107 | print('successfully get {0} '.format(url)) 108 | 109 | 110 | if __name__ == '__main__': 111 | st = Stspider() 112 | st.start() 113 | 114 | 115 | -------------------------------------------------------------------------------- /spiders/utils.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.12 4 | 5 | # description 6 | # 一些通用方法, 目前只有请求网页的方法 7 | 8 | import requests 9 | import os 10 | 11 | class Utils: 12 | 13 | def __init__(self): 14 | # self.pr = _ProxyList() 15 | self.header = {'User-Agent': 'Mozilla/5.0 (Macintosh; U;' 16 | ' Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'} 17 | 18 | def _req_url(self, url, headers, proxies): 19 | 20 | try: 21 | req = requests.get(url, headers=headers, proxies=proxies, timeout=2) 22 | req.raise_for_status() 23 | return req.text 24 | 25 | except: 26 | return None 27 | 28 | def req(self, url, error=0): 29 | 30 | if error == 5: 31 | print('请求网页 {0} 失败'.format(url)) 32 | return None 33 | 34 | # proxies = self.pr.get_proxy() 35 | proxies = None 36 | return self._req_url(url, headers=self.header, proxies=proxies) or self.req(url, error=error + 1) 37 | 38 | def checkpath(self, path): 39 | 40 | created = os.path.exists(path) 41 | if not created: 42 | os.mkdir(path) 43 | -------------------------------------------------------------------------------- /translate.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # author=zhouxin 3 | # date=2017.7.10 4 | # description 5 | # 调用翻译接口,翻译数据库内词汇 6 | 7 | import requests 8 | import time 9 | 10 | from models_exp import NewWord 11 | 12 | 13 | class Translate: 14 | 15 | def __init__(self): 16 | # self.util = Utils() 17 | pass 18 | 19 | # translation api, tranlate a english word to chinese 20 | # return translation result 21 | # 百度翻译接口 22 | def _trans(self, word): 23 | # res = self.trans.translate('hello', dest='zh-CN') 24 | url = 'http://fanyi.baidu.com/sug' 25 | dct = {'kw': word} 26 | req = requests.post(url, dct) 27 | req.raise_for_status() 28 | res = req.json().get('data') 29 | if not res: 30 | return None 31 | return res[0].get('v', None) 32 | 33 | # iciba api / 金山词典 api 34 | # baidu api dont contain Phonogram , so change an api 35 | def _trans_ici(self, word): 36 | 37 | url = 'http://www.iciba.com/index.php?a=getWordMean&c=search&word=' + word 38 | try: 39 | req = requests.get(url) 40 | req.raise_for_status() 41 | info = req.json() 42 | data = info['baesInfo']['symbols'][0] 43 | assert info['baesInfo']['symbols'][0] 44 | # 去除没有音标的单词 45 | assert data['ph_am'] and data['ph_en'] 46 | # 去除没有词性的单词 47 | assert data['parts'][0]['part'] 48 | 49 | except: 50 | return 51 | 52 | ph_en = '英 [' + data['ph_en'] + ']' 53 | ph_am = '美 [' + data['ph_am'] + ']' 54 | ex = '' 55 | for part in data['parts']: 56 | ex += part['part'] + ';'.join(part['means']) + ';' 57 | 58 | return ph_en+ph_am, ex 59 | 60 | # 扇贝单词 api 61 | def _trans_shanbay(self, word): 62 | url = 'https://api.shanbay.com/bdc/search/?word=' + word 63 | req = requests.get(url) 64 | print(req.json()) 65 | 66 | 67 | # 使用 金山单词 翻译接口 68 | # 百度接口没有音标 69 | # 扇贝接口包含的信息不如其他两家 70 | def trans(self): 71 | 72 | query = NewWord.select().where(NewWord.explanation == '') 73 | # print(len(query)) 74 | if not query: 75 | return 76 | for word in query: 77 | 78 | res = self._trans_ici(word.name) 79 | # print(res) 80 | if res: 81 | word.phonogram = res[0] 82 | # word. 83 | word.explanation = res[1] 84 | 85 | else: 86 | word.is_valid = False 87 | word.save() 88 | # print('suc save word : {}'.format(word.name)) 89 | time.sleep(1) 90 | 91 | 92 | if __name__ == '__main__': 93 | 94 | t = Translate() 95 | # res = t._trans_shanbay('hello') 96 | # print(res) 97 | t.trans() 98 | # res = t._trans_ici('hello') 99 | # print(res) -------------------------------------------------------------------------------- /voca.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zx576/programming_vocabulary/35de38621c03c1385f59008bb8296f67eb35bdf8/voca.db -------------------------------------------------------------------------------- /work.py: -------------------------------------------------------------------------------- 1 | # conding=utf-8 2 | # author = zhouxin 3 | # date = 2017.7.11 4 | # description 5 | # 该项目启动文件 6 | 7 | import os 8 | 9 | from settings import DIRS,FILES,DATABASE 10 | from analysis_book import AnlysisBook 11 | from models_exp import new_db, NewWord, NewBook 12 | 13 | # 解析所有文件路径 14 | class ParseFile: 15 | 16 | # 解析 settings 中的文件夹地址 17 | # 程序只会将 txt 文件添加到待获取列表 18 | def _parse_dirs(self, dirs): 19 | 20 | assert isinstance(dirs, list), 'type(dirs) should be list ' 21 | if not dirs: 22 | return dirs 23 | 24 | files = [] 25 | for path in dirs: 26 | if not os.path.isdir(path): 27 | continue 28 | for pathname, dirname, filenames in os.walk(path): 29 | for filename in filenames: 30 | # 仅获取 txt 文件 31 | if '.txt' in filename: 32 | file_path = pathname + os.sep + filename 33 | files.append(file_path) 34 | 35 | return files 36 | 37 | # 解析单个文件 38 | def _parse_files(self, files): 39 | 40 | assert isinstance(files, list), 'type(files) should be list ' 41 | f = [] 42 | for path in files: 43 | if not os.path.isfile(path) or '.txt' not in path: 44 | continue 45 | f.append(path) 46 | 47 | return f 48 | 49 | def parse(self, dirs, files): 50 | # print(dirs, files) 51 | f1 = self._parse_dirs(dirs) 52 | f2 = self._parse_files(files) 53 | 54 | return f1 + f2 55 | 56 | 57 | # 创建数据库 58 | class Dt: 59 | 60 | def __init__(self): 61 | self.build() 62 | 63 | def build(self): 64 | 65 | created = os.path.exists(DATABASE) 66 | 67 | if not created: 68 | new_db.connect() 69 | new_db.create_tables([NewBook, NewWord]) 70 | 71 | 72 | if __name__ == '__main__': 73 | 74 | # 建表 75 | dt = Dt() 76 | # 解析文件路径 77 | s = ParseFile() 78 | res = s.parse(DIRS, FILES) 79 | # print(len(res)) 80 | # extract words from books 81 | ana = AnlysisBook() 82 | ana.analysis(res) 83 | # print(res) 84 | 85 | 86 | --------------------------------------------------------------------------------