├── baike
│   ├── __init__.py
│   ├── html_outputer.py
│   ├── url_manager.py
│   ├── html_downloader.py
│   ├── html_parser.py
│   └── main.py
├── fund
│   ├── __init__.py
│   └── main.py
├── meizi
│   ├── __init__.py
│   ├── main.py
│   └── Mezi.py
├── ocr
│   ├── __init__.py
│   └── main.py
├── config.ini
├── README.md
├── .idea
│   ├── vcs.xml
│   └── markdown-navigator.xml
├── .gitignore
└── LICENSE

--------------------------------------------------------------------------------
/baike/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/fund/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/meizi/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/ocr/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/config.ini:
--------------------------------------------------------------------------------
[jqdata]
name = 15010296975
password = zlk19930802

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Spiders
A collection of Python crawler demos.
1. baike - Baidu Baike crawler, sped up with concurrent crawling (thread pool)
2. meizi - multi-process downloader for images from the meizitu gallery site
3. fund - pulls JoinQuant (jqdatasdk) concept data and exports it to Excel
4. ocr - batch text recognition of local images via the Baidu OCR API, results written to Excel

--------------------------------------------------------------------------------
/baike/html_outputer.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhonglikui/Spiders/HEAD/baike/html_outputer.py

--------------------------------------------------------------------------------
/baike/url_manager.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhonglikui/Spiders/HEAD/baike/url_manager.py

--------------------------------------------------------------------------------
/baike/html_downloader.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhonglikui/Spiders/HEAD/baike/html_downloader.py
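
The three baike helper modules above are included only as links, so here is a minimal sketch of the interface that baike/main.py and baike/html_parser.py rely on (add_new_url/add_new_urls/get_new_url, download, collect_data/output_html, and the url/title/summary keys produced by the parser). This is reconstructed from those call sites as an assumption, not copied from the linked files, so the real implementations may differ in detail:

import urllib.request


class UrlManager(object):
    """Tracks urls that still need crawling versus urls already crawled."""

    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url and url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        for url in urls or []:
            self.add_new_url(url)

    def get_new_url(self):
        # raises KeyError when empty; baike/main.py catches that in task()
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url


class HtmlDownLoader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is not None:
            self.datas.append(data)

    def output_html(self):
        # "output.html" is a name chosen here for illustration; the real
        # html_outputer.py may write a different file or format.
        with open("output.html", "w", encoding="utf-8") as fout:
            fout.write("<html><body><table>")
            for data in self.datas:
                fout.write("<tr><td>%s</td><td>%s</td><td>%s</td></tr>"
                           % (data["url"], data["title"], data["summary"]))
            fout.write("</table></body></html>")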
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
(IntelliJ VCS mapping; the XML markup did not survive this text export)

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# local jqdata credentials
config.ini

--------------------------------------------------------------------------------
/fund/main.py:
--------------------------------------------------------------------------------
import configparser

import pandas as pd
from jqdatasdk import *
from pandas.core.frame import DataFrame

# read the JoinQuant credentials from the repo-level config.ini
config = configparser.ConfigParser()
config.sections()
config.read("../config.ini")
name = config.get("jqdata", "name")
password = config.get("jqdata", "password")
# print("账号信息{}:{}".format(name, password))
auth(name, password)
print("登录状态{},剩余查询条数{}".format(is_auth(), get_query_count()))

# export the full concept list, then keep only the concepts whose name contains the keyword
df = get_concepts()
df.to_excel("股票概念.xls")
print(df)
# type = "股票型"
key = "医疗"
# df = finance.run_query(
#     query(finance.FUND_MAIN_INFO).filter(finance.FUND_MAIN_INFO.underlying_asset_type == type).limit(2100))
dfK = df["name"].str.contains(key)
dfAll = df.loc[dfK]
print(dfAll)
# dfAll.to_excel("基金.xls")

# write one Excel file per matching concept with its constituent stocks
for row in dfAll.itertuples():
    code = getattr(row, "Index")
    name = getattr(row, "name")
    stock = get_concept_stocks(code, date=None)
    child = DataFrame(stock)
    print(child)
    fileName = "{}-{}.xls".format(name, code)
    child.to_excel(fileName)
    # q = query(finance.FUND_PORTFOLIO_STOCK).filter(finance.FUND_PORTFOLIO_STOCK.code == getattr(row, "main_code")).order_by(
    #     finance.FUND_PORTFOLIO_STOCK.pub_date.desc()).limit(500)
    # df = finance.run_query(q)
    # child = finance.run_query(q)
    # fileName = "{}-{}.xls".format(getattr(row, "name"), getattr(row, "main_code"))
    # child.to_excel(fileName)

--------------------------------------------------------------------------------
/meizi/main.py:
--------------------------------------------------------------------------------
import datetime
import multiprocessing
import time
import urllib.request
from multiprocessing import Pool

import chardet
from bs4 import BeautifulSoup


def downloadImage(imageUrl):
    print("开始下载%s" % imageUrl)
    req = urllib.request.Request(imageUrl)
    req.add_header("User-Agent", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)")
    # the Referer header gets around the site's hotlink protection
    req.add_header("Referer", "http://i.meizitu.net")
    f = open("%s.jpg" % str(time.time()), "wb")
    f.write(urllib.request.urlopen(req).read())
    f.close()


def getHtml(url):
    req = urllib.request.Request(url)
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
    data = urllib.request.urlopen(req).read()
    print(chardet.detect(data))
    return data.decode('GB2312', 'ignore')


if __name__ == "__main__":

    url = "http://www.meizitu.com/a/pure_1.html"
    result = getHtml(url)

    soup = BeautifulSoup(result, 'html.parser')
    all_a = soup.find('ul', class_='wp-list clearfix').find_all(
        "img")  # .find_all('a', target='_blank')  # , href=re.compile('limg.jpg')
    # one worker process per CPU core
    pool = Pool(multiprocessing.cpu_count())
    t1 = datetime.datetime.now()
    for a in all_a:
        src = a['src']
        pool.apply_async(downloadImage, args=(src,))
    pool.close()
    pool.join()

    t2 = datetime.datetime.now()
    print("耗时 %s 秒" % (t2 - t1).seconds)  # 23-21
--------------------------------------------------------------------------------
/baike/html_parser.py:
--------------------------------------------------------------------------------
# coding=gbk
'''

@author: zhong
'''

import re
import urllib.parse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /view/123.htm or /item/Python
        links = soup.find_all('a', href=re.compile(r"/item/"))
        for link in links:
            new_url = link['href']
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title">
        #     <h1>Python</h1>
        # </dd>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_content):
        if page_url is None or html_content is None:
            return None, None

        try:
            soup = BeautifulSoup(html_content, 'html.parser')
        except Exception as e:
            print(e)
            return None, None

        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

--------------------------------------------------------------------------------
/baike/main.py:
--------------------------------------------------------------------------------
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
import time
from urllib.parse import unquote

from baike import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownLoader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):

        # add the root url
        self.urls.add_new_url(root_url)
        # crawl the first page once to seed the url manager
        self.task(0)
        time.sleep(1)
        time1 = datetime.now()
        with ThreadPoolExecutor(max_workers=4) as executor:
            for i in range(999):
                executor.submit(self.task, i)
        # write out the collected data
        self.outputer.output_html()
        time2 = datetime.now()
        print("耗时 %s 秒" % (time2 - time1).seconds)  # 23-21

    def task(self, count):
        try:
            # take one url from the manager
            new_url = self.urls.get_new_url()
            print("%d : %s" % (count, unquote(new_url)))
            # download the page
            html_cont = self.downloader.download(new_url)
            # parse the page into new urls and the title/summary data
            new_urls, new_data = self.parser.parse(new_url, html_cont)
            # feed the new urls back into the manager
            self.urls.add_new_urls(new_urls)
            # collect the parsed data
            self.outputer.collect_data(new_data)
        except Exception as e:
            print("craw fail:%s" % e)


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)

--------------------------------------------------------------------------------
/meizi/Mezi.py:
--------------------------------------------------------------------------------
# coding=utf-8
# https://blog.csdn.net/Luenci379/article/details/90728048
import os

import requests
from bs4 import BeautifulSoup

all_url = 'http://www.mzitu.com'

# http request headers
Hostreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://www.mzitu.com'
}
Picreferer = {
    'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
    'Referer': 'http://i.meizitu.net'
}
# the Referer header above gets around the site's hotlink protection

start_html = requests.get(all_url, headers=Hostreferer)

# save directory
path = "D:\\mzitu\\"

# find the max page number
soup = BeautifulSoup(start_html.text, "html.parser")
page = soup.find_all('a', class_='page-numbers')
max_page = page[-2].text

same_url = 'http://www.mzitu.com/all/'
for n in range(1, int(max_page) + 1):
    ul = same_url + str(n)
    start_html = requests.get(ul, headers=Hostreferer)
    soup = BeautifulSoup(start_html.text, "html.parser")
    all_a = soup.find('div', class_='all').find_all('a', target='_blank')
    for a in all_a:
        title = a.get_text()  # extract the album title
        if (title != ''):
            print("准备扒取:" + title)

            # Windows cannot create a directory whose name contains '?'
            if (os.path.exists(path + title.strip().replace('?', ''))):
                # print('目录已存在')
                flag = 1
            else:
                os.makedirs(path + title.strip().replace('?', '').replace(':', ''))
                flag = 0
            os.chdir(path + title.strip().replace('?', '').replace(':', ''))
            href = a['href']
            html = requests.get(href, headers=Hostreferer)
            mess = BeautifulSoup(html.text, "html.parser")
            pic_max = mess.find_all('span')
            try:
                pic_max = pic_max[9].text  # number of pictures in the album
                if (flag == 1 and len(os.listdir(path + title.strip().replace('?', ''))) >= int(pic_max)):
                    print('已经保存完毕,跳过')
                    continue
                for num in range(1, int(pic_max) + 1):
                    pic = href + '/' + str(num)
                    html = requests.get(pic, headers=Hostreferer)
                    mess = BeautifulSoup(html.text, "html.parser")
                    pic_url = mess.find('img', alt=title)
                    print(pic_url['src'])
                    # exit(0)
                    html = requests.get(pic_url['src'], headers=Picreferer)
                    file_name = pic_url['src'].split(r'/')[-1]
                    f = open(file_name, 'wb')
                    f.write(html.content)
                    f.close()
            except Exception:
                pass
            print('完成 ')
    print('第', n, '页完成')

--------------------------------------------------------------------------------
/ocr/main.py:
--------------------------------------------------------------------------------
# encoding:utf-8
import os
import time

import requests
import base64
from openpyxl import Workbook

api_key = "GXYzfmt42Gb7DoTW4j9QgN5c"
secret_key = "wGgn9dCiI8zBBrq5nVtt2EfmU66enntV"
tokenUrl = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={}&client_secret={}"
# ocrUrl = "https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic?access_token={}"
ocrUrl = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic?access_token={}"
image_list = []


def getToken():
    try:
        response = requests.get(tokenUrl.format(api_key, secret_key))
        if response:
            print("getToken:{}".format(response.json()))
            return response.json().get("access_token")
    except Exception as e:
        print(e)


def getResult(filePath, token):
    try:
        with open(filePath, 'rb') as f:
            img = base64.b64encode(f.read())
        params = {"image": img}
        request_url = ocrUrl.format(token)
        headers = {'content-type': 'application/x-www-form-urlencoded'}
        response = requests.post(request_url, data=params, headers=headers)
        if response:
            return response.json()
    except Exception as e:
        print(e)


def getImages(folderPath):
    if os.path.exists(folderPath) and os.path.isdir(folderPath):
        # os.walk already descends into sub-directories
        for root, dirs, files in os.walk(folderPath):
            for f in files:
                ext = f.split(".")[-1]
                if ext == "jpg" or ext == "png":
                    absPath = os.path.join(root, f)
                    print("filePath:{}".format(absPath))
                    if absPath not in image_list:
                        image_list.append(absPath)


if __name__ == "__main__":
    folderPath = os.getcwd()
    getImages(folderPath)
    token = getToken()
    wb = Workbook()
    ws = wb.active
    c1 = 0
    c2 = 2
    c3 = 4
    c4 = 6
    c5 = 8
    c6 = 10
    c7 = 12
    c8 = 14
    ws.cell(1, 1).value = "姓名"
    ws.cell(1, 2).value = "别名"
    ws.cell(1, 3).value = "电子邮件"
    ws.cell(1, 4).value = "电话"
    ws.cell(1, 5).value = "移动电话"
    ws.cell(1, 6).value = "职务"
    ws.cell(1, 7).value = "部门"
    ws.cell(1, 8).value = "公司"
    for index, file in enumerate(image_list):
        print("开始请求第{}/{}条:{}".format(index, len(image_list), file))
        result = getResult(file, token)
        # print("result: {}".format(result))
        # id = result.get("log_id")
        # num = result.get("words_result_num")
        words_result = result.get("words_result")
        # print("结果:id:{} num:{}".format(id, num))
        # indices in words_result: 3 name, 6 department, 8 employee id, 10 department, 13 phone
        row = index + 2
        for indexColum, word in enumerate(words_result):
            print("{} : {}".format(indexColum, word.get("words")))
            continue  # debugging: remove this continue to actually fill the columns below
            value = word.get("words")
            if indexColum == c1:
                ws.cell(row, 1).value = value
            elif indexColum == c2:
                ws.cell(row, 2).value = value
            elif indexColum == c3:
                ws.cell(row, 3).value = value
            elif indexColum == c4:
                ws.cell(row, 4).value = value
            elif indexColum == c5:
                ws.cell(row, 5).value = value
            elif indexColum == c6:
                ws.cell(row, 6).value = value
            elif indexColum == c7:
                ws.cell(row, 7).value = value
            elif indexColum == c8:
                ws.cell(row, 8).value = value

        wb.save("地产开发-中电事业部.xlsx")
        print("解析完一张---------------------------------------------------------------------")
        time.sleep(1)
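
For orientation, the fixed column indices above (c1..c8) assume that the Baidu OCR endpoint returns one words_result entry per recognized line of text and that every scanned card yields the same line order. A small, purely illustrative snippet of the response shape the script consumes - the field names are the ones ocr/main.py actually reads, but the values are invented:

# Illustrative only: field names mirror what getResult()/main read; values are made up.
sample_response = {
    "log_id": 1234567890,
    "words_result_num": 3,
    "words_result": [
        {"words": "某某某"},               # index 0 matches c1 -> column 1 ("姓名")
        {"words": "市场部"},               # index 1 matches no cN and would be skipped
        {"words": "someone@example.com"},  # index 2 matches c2 -> column 2 ("别名")
    ],
}

for index, word in enumerate(sample_response["words_result"]):
    print("{} : {}".format(index, word.get("words")))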
--------------------------------------------------------------------------------
/.idea/markdown-navigator.xml:
--------------------------------------------------------------------------------
(IntelliJ Markdown Navigator plugin settings; the XML markup did not survive this text export)

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below).
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------