├── example_image
│   ├── img01.png
│   └── img02.png
├── requirements.txt
├── config.py
├── README.md
├── server
│   └── create_knowledge_base.py
├── webui.py
├── api.py
└── LICENSE
/example_image/img01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhouhaoJiang/PdfReader-LangChian-LLM/HEAD/example_image/img01.png
--------------------------------------------------------------------------------
/example_image/img02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZhouhaoJiang/PdfReader-LangChian-LLM/HEAD/example_image/img02.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
fastapi==0.101.1
gradio==3.40.1
langchain==0.0.268
pydantic==1.10.12
Requests==2.31.0
sse_starlette==1.6.1
starlette==0.31.0
torch==2.0.1+cu117
transformers==4.30.2
uvicorn==0.23.2
search_engine_parser
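# Note: the +cu117 build of torch is not published on PyPI; it comes from the
# PyTorch wheel index, e.g.: pip install torch==2.0.1+cu117 --index-url https://download.pytorch.org/whl/cu117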
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
import os

# Repo root directory
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# LLM_MODEL_PATH = r"D:\Project\ChatPdf\chatglm2-6b-int4"
LLM_MODEL_PATH = "THUDM/chatglm2-6b-int4"  # chat model: local path or Hugging Face repo

# EMBEDDING_MODEL_PATH = r'D:\Project\ChatPdf\text2vec-base-chinese'
EMBEDDING_MODEL_PATH = 'shibing624/text2vec-base-multilingual'  # embedding model: local path or Hugging Face repo

PDF_FILE_PATH = r"D:\Project\Langchain-LLM-PdfReader\pdf_file"
KNOWLEDGE_FILE_PATH = r"D:\Project\Langchain-LLM-PdfReader\knowledge_base"
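# Portability sketch (not part of the original config): the two absolute Windows
# paths above can be derived from the repo root instead. Uncomment BASE_DIR above
# and use:
# PDF_FILE_PATH = os.path.join(BASE_DIR, "pdf_file")
# KNOWLEDGE_FILE_PATH = os.path.join(BASE_DIR, "knowledge_base")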
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
There is plenty of room for improvement in this project.
By the end of it you will know how to deploy ChatGLM locally and use the basics of LangChain.

# PdfReader-LangChian-LLM
Implement PDF parsing and reading based on LangChain and an LLM language model.
#### Reference projects
- [LangChain](https://github.com/langchain-ai/langchain)
- [ChatGLM2-6B](https://github.com/THUDM/ChatGLM2-6B)
## 1. Project introduction
A simple PDF question-answering pipeline built on LangChain and an LLM: the input PDF is split and vectorized with LangChain embeddings, the user's question is matched against those vectors to retrieve the relevant passages, and the retrieved text is handed to the language model to produce the answer.
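The core of that flow, as a minimal sketch (the file name and query are illustrative; the real implementation lives in `server/create_knowledge_base.py` and `api.py`):

```python
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# 1. Load the PDF and split it into overlapping chunks.
docs = PyPDFLoader("example.pdf").load()
docs = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=200).split_documents(docs)

# 2. Embed the chunks into a persistent Chroma vector store.
embedding = HuggingFaceEmbeddings(model_name="shibing624/text2vec-base-multilingual")
vectordb = Chroma.from_documents(documents=docs, embedding=embedding,
                                 persist_directory="knowledge_base/demo")

# 3. Retrieve the passages closest to the question and hand them to the LLM.
query = "What is this document about?"
context = [d.page_content for d in vectordb.similarity_search(query, k=5)]
prompt = f"已知PDF内容:\n{context}\n根据已知信息回答问题:\n{query}"
# response, history = model.chat(tokenizer, prompt, history=[])  # ChatGLM2 call, see api.py
```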
__Screenshots__


__Models used__
- text2vec-base-multilingual
- chatglm2-6b-int4

## 2. Running the project

### 2.1 config.py
Check the model paths and the file storage paths.

### 2.2 Environment
Python 3.8

    pip install -r requirements.txt
### 2.3 Starting the project
Start the API server first, then the web UI:

    python api.py
    python webui.py
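With both processes up (the API on port 9999, the web UI on port 7788), the chat endpoint can be sanity-checked directly; a minimal sketch using `requests` (the query is illustrative):

```python
import json
import requests

# Stream an answer from the running api.py server.
resp = requests.post("http://127.0.0.1:9999/Stream_chat",
                     json={"query": "你好"}, stream=True)
for line in resp.iter_lines(decode_unicode=True):
    if line.startswith("data: "):              # SSE frames look like 'data: {...}'
        event = json.loads(line[len("data: "):])
        print(event.get("delta", ""), end="", flush=True)
```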
--------------------------------------------------------------------------------
/server/create_knowledge_base.py:
--------------------------------------------------------------------------------
import os
import time

from langchain.document_loaders import PyPDFLoader, UnstructuredFileLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
import config


def create_base(file_path, kb_name):
    """
    Build a vector store from a PDF file.
    :param file_path: path to the PDF file
    :param kb_name: knowledge-base name (used as the persistence sub-directory)
    :return: dict with a status code and a message
    """
    try:
        print(f'file: {file_path}')
        print("Start building vector database... %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        # loader = UnstructuredFileLoader(file_path, model="element")
        loader = PyPDFLoader(file_path)
        docs = loader.load()
        # print(f'docs: {docs}')

        # Split the text into overlapping chunks
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=200)
        docs = text_splitter.split_documents(docs)

        # Embedding model
        embedding = HuggingFaceEmbeddings(model_name=config.EMBEDDING_MODEL_PATH)
        # Persist the store under knowledge_base/<kb_name>
        persist_directory = os.path.join(config.KNOWLEDGE_FILE_PATH, kb_name)

        # Create the vector database
        vectordb = Chroma.from_documents(
            documents=docs,
            embedding=embedding,
            persist_directory=persist_directory
        )
        print("vectordb:", vectordb._collection.count())
        vectordb.persist()
        print("Vector database building finished. %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

        return {"status": 200, "message": "success"}

    except Exception as e:
        return {"status": 500, "message": str(e)}
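# Quick smoke test (a sketch; the sub-directory and file name below are
# hypothetical, not part of the original module): build a store from one local PDF.
if __name__ == "__main__":
    result = create_base(os.path.join(config.PDF_FILE_PATH, "demo", "demo.pdf"), "demo")
    print(result)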
--------------------------------------------------------------------------------
/webui.py:
--------------------------------------------------------------------------------
import base64
import json
import os
import random
import time
import requests
import config
import gradio as gr


# Reset the conversation
def clear():
    return '', None


def create_base(kb_name, file_name, pdf_file):
    """
    Create a knowledge base.
    :param kb_name: knowledge-base name
    :param file_name: name to store the uploaded PDF under
    :param pdf_file: raw PDF bytes from the upload widget
    :return: the knowledge-base name on success
    """
    # Encode the binary upload as base64 for the JSON API
    pdf_file = base64.b64encode(pdf_file)
    pdf_file = pdf_file.decode("utf-8")

    params = {
        "kb_name": kb_name,
        "file_name": file_name,
        "pdf_file": pdf_file,
    }
    gr.Info("Creating knowledge base...")
    response = requests.post("http://127.0.0.1:9999/create_knowledge_base", json=params)
    if response.status_code == 200:
        print("Connection established. Receiving data...")
        gr.Info("Knowledge base created")
        return kb_name
    else:
        print("Failed to connect. Status code:", response.status_code)
        raise gr.Error("Failed to create knowledge base")


# Call the chat API
def request_chatglm(kb_name, query, chat_history, chat_type):
    """
    Call the chat API.
    :param kb_name: knowledge-base name
    :param query: user question
    :param chat_history: list of [question, answer] pairs
    :param chat_type: "Knowledge-base chat" or "Model chat"
    :return: yields the updated chat history as the answer streams in
    """
    print(query)
    if chat_type == "Knowledge-base chat":
        params = {
            "kb_name": kb_name,
            "query": query,
        }
        print("params:", params)
        response = requests.post("http://127.0.0.1:9999/PdfReader", json=params, stream=True)
        if response.status_code == 200:
            print("Connection established. Receiving data...")
            chat_history = [[query, ""]]
            # chat_history.append([query, ""])
            for line in response.iter_lines(decode_unicode=True):
                try:
                    # Each SSE frame looks like 'data: {...}'; strip the 6-character prefix
                    data_dict = json.loads(line[6:])
                    chat_response = data_dict["response"]
                    print("chat_response:", chat_response)
                    print("chat_history:", chat_history)
                    chat_history[-1][1] = chat_response
                    yield chat_history
                except Exception as e:
                    print("Exception in handle_sse_response:", e)
                    # yield None
        else:
            print("Failed to connect. Status code:", response.status_code)
            raise gr.Error("Failed to connect")
    else:
        params = {
            "query": query,
        }
        response = requests.post("http://127.0.0.1:9999/Stream_chat", json=params, stream=True)
        if response.status_code == 200:
            print("Connection established. Receiving data...")
            print("chat_history:", chat_history)
            chat_history.append([query, ""])
            for line in response.iter_lines(decode_unicode=True):
                try:
                    # Each SSE frame looks like 'data: {...}'; strip the 6-character prefix
                    data_dict = json.loads(line[6:])
                    chat_response = data_dict["response"]
                    # print("chat_response:", chat_response)
                    # print("chat_history:", chat_history)
                    chat_history[-1][1] = chat_response
                    yield chat_history
                except Exception as e:
                    print("Exception in handle_sse_response:", e)
                    # yield None
        else:
            print("Failed to connect. Status code:", response.status_code)
            raise gr.Error(f"Failed to connect. Status code: {response.status_code}")


with gr.Blocks(title="PdfReader") as webui:
    gr.Markdown(
        """
        # PdfReader With LangChain
        """
    )
    with gr.Row():
        with gr.Column(scale=1):
            embedding_model = gr.Dropdown(label="Embedding Model", choices=["text2vec"], value="text2vec")
            llm_model = gr.Dropdown(label="LLM Model", choices=["chatglm2-6b-int4"], value="chatglm2-6b-int4")

            chat_type = gr.Radio(label="Chat mode", choices=["Knowledge-base chat", "Model chat"],
                                 value="Model chat")

            # kb_name = gr.Radio(choices=[name for name in os.listdir(config.KNOWLEDGE_FILE_PATH) if
            #                             os.path.isdir(os.path.join(config.KNOWLEDGE_FILE_PATH, name))],
            #                    label="Knowledge base", value="None", live=True)
            # kb_submit = gr.Button(value="Load knowledge base", variant="primary")

            pdf_file = gr.File(label="PDF File", file_types=["pdf"], type="binary")
            writer_kb_name = gr.Textbox(label="Knowledge-base name")
            create_button = gr.Button(value="Build knowledge base", variant="primary")

        with gr.Column(scale=3):
            chatbot = gr.Chatbot().style(height=500)
            query = gr.Textbox()
            with gr.Row():
                submit = gr.Button(value="Send", variant="primary")
                clear_btn = gr.Button(value="Clear", variant="secondary")

    # The knowledge-base name doubles as the stored PDF's file name
    create_button.click(create_base, [writer_kb_name, writer_kb_name, pdf_file], outputs=[writer_kb_name])
    submit.click(request_chatglm, inputs=[writer_kb_name, query, chatbot, chat_type], outputs=[chatbot])
    clear_btn.click(clear, outputs=[query, chatbot])
    # submit.click(test, inputs=[kb_name, query, chatbot], outputs=[chatbot])

if __name__ == '__main__':
    webui.launch(inline=False, share=True, debug=True, server_name="0.0.0.0", server_port=7788,
                 enable_queue=True, show_error=True)
--------------------------------------------------------------------------------
/api.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import base64
import json
import logging
import os
import sys
import time
from typing import List, Tuple

from fastapi import Body
from pydantic import BaseModel, Field

import config
import fastapi
import uvicorn
import torch
from sse_starlette import EventSourceResponse, ServerSentEvent
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from server.create_knowledge_base import create_base
from transformers import AutoTokenizer, AutoModel
from starlette.middleware.cors import CORSMiddleware


# Build a logger that writes to stdout and, optionally, to a file
def getLogger(name, file_name, use_formatter=True):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)
    logger.addHandler(console_handler)
    if file_name:
        handler = logging.FileHandler(file_name, encoding='utf8')
        handler.setLevel(logging.INFO)
        if use_formatter:
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
            handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger


class ChatGLM:
    def __init__(self) -> None:
        logger.info("Start initialize model...")
        self.tokenizer = AutoTokenizer.from_pretrained(config.LLM_MODEL_PATH, trust_remote_code=True)
        self.model = AutoModel.from_pretrained(config.LLM_MODEL_PATH, trust_remote_code=True).cuda()
        # Multi-GPU support: replace the line above with the two lines below,
        # setting num_gpus to the number of GPUs you actually have
        # from utils import load_model_on_gpus
        # self.model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
        self.model.eval()
        logger.info("Model initialization finished.")

    def clear(self) -> None:
        if torch.cuda.is_available():
            with torch.cuda.device(0):  # assumes the model sits on GPU 0
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()

    def answer(self, query: str, history, prompt):
        print("answer:", query, history, prompt)
        # ChatGLM2's chat() has no prompt parameter, so fold the prompt into the query
        response, history = self.model.chat(self.tokenizer, prompt + query, history=history, max_length=8192)
        history = [list(h) for h in history]
        return response, history

    def stream(self, query, history, page, prompt):
        if query is None:
            yield {"query": "", "response": "", "history": [], "finished": True}
            return
        history = history or []
        prompt = prompt or ""
        size = 0
        response = ""
        # ChatGLM2's stream_chat() has no prompt parameter either, so prepend it to the query
        for response, history in self.model.stream_chat(
                self.tokenizer,
                query=prompt + query,
                history=history,
                max_length=8192,
                top_p=0.9,
                temperature=0.9,
                past_key_values=None,
                return_past_key_values=False):
            this_response = response[size:]
            history = [list(h) for h in history]
            size = len(response)
            yield {"delta": this_response, "response": response, "finished": False}
        logger.info("Answer - {}".format(response))
        yield {"response": response, "query": query, "page": page, "delta": "[EOS]", "history": history,
               "finished": True}
        # yield {"query": query, "delta": "[EOS]", "response": response, "page": page, "finished": True}


logger = getLogger('ChatGLM', 'chatlog.log')
MAX_HISTORY = 3  # maximum number of history turns to keep


def start_server():
    env = os.environ
    app = fastapi.FastAPI()
    bot = ChatGLM()

    # Configure the CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # allow all origins; tighten as needed
        allow_credentials=True,
        allow_methods=["*"],  # allow all request methods
        allow_headers=["*"],  # allow all request headers
    )

    class CreateKnowledgeBaseRequest(BaseModel):
        kb_name: str = Field(..., description="knowledge-base name")
        file_name: str = Field(..., description="file name")
        pdf_file: str = Field(..., description="base64-encoded PDF file content")

    @app.post("/create_knowledge_base", tags=["Knowledge Base Management"], summary="Create a knowledge base")
    async def create_knowledgebase(
            kbRequest: CreateKnowledgeBaseRequest = Body(..., media_type="application/json",
                                                         description="create a knowledge base")):
        """
        Create a knowledge base.
        :param kbRequest: carries kb_name, file_name and the base64-encoded PDF
        :return: status dict from create_base
        """
        kb_name = kbRequest.kb_name
        file_name = kbRequest.file_name
        pdf_file = kbRequest.pdf_file

        # Save the file under pdf_file/<kb_name>
        file_path = os.path.join(config.PDF_FILE_PATH, kb_name)
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        file_path = os.path.join(file_path, f"{file_name}.pdf")
        # Decode the base64 payload and write it to disk
        with open(file_path, "wb") as f:
            f.write(base64.b64decode(pdf_file))

        # Build the vector store
        return create_base(file_path, kb_name)

    class StreamChatRequest(BaseModel):
        query: str = Field(..., description="chat question")
        history: List[Tuple[str, str]] = None

    @app.post("/Stream_chat", tags=["Chat"], summary="Chat with the LLM")
    async def stream_chat(
            chatRequest: StreamChatRequest = Body(..., media_type="application/json", description="chat request")):
        """
        Chat with ChatGLM.
        :param chatRequest: carries the query and optional history
        :return: SSE stream of incremental responses
        """
        query = chatRequest.query
        # JSON round-trip turns tuples into plain lists
        history = json.dumps(chatRequest.history)
        history = json.loads(history)

        def decorate(generator):
            print("generator", generator)
            for item in generator:
                yield ServerSentEvent(json.dumps(item, ensure_ascii=False), event='delta')

        try:
            text = query
            # ori_history = history
            page = []
            # System prompt (Chinese): "You are an assistant; chat in Chinese and solve the user's problems."
            prompt = "你是一个助手,负责用中文聊天和解决用户的问题。"
            logger.info("Query - {}".format(text))
            # if len(ori_history) > 0:
            #     logger.info("History - {}".format(ori_history))
            #     history = ori_history[-MAX_HISTORY:]
            #     history = [tuple(h) for h in history]
            # return EventSourceResponse(decorate(bot.stream(text, history)))

            return EventSourceResponse(decorate(bot.stream(query=query, history=history, page=page, prompt=prompt)))
        except Exception as e:
            logger.error(f"error: {e}")
            return EventSourceResponse(decorate(bot.stream(None, None, None, None)))

    class PdfReaderRequest(BaseModel):
        kb_name: str = Field(..., description="knowledge-base name")
        query: str = Field(..., description="chat question")
        history: List[Tuple[str, str]] = None

    @app.post("/PdfReader", tags=["Chat"], summary="Chat with the LLM about a PDF")
    async def chatpdf(
            PdfRequest: PdfReaderRequest = Body(..., media_type="application/json", description="chat request")):
        """
        Chat with ChatGLM about a PDF.
        :param PdfRequest: carries kb_name, query and optional history,
            where history is [[question, answer], ...] ([] when empty)
        :return: SSE stream of incremental responses
        """
        kb_name = PdfRequest.kb_name
        query = PdfRequest.query
        # JSON round-trip turns tuples into plain lists
        history = json.dumps(PdfRequest.history)
        history = json.loads(history)
        print("history:", history)

        persist_directory = os.path.join(config.KNOWLEDGE_FILE_PATH, kb_name)
        print(persist_directory)

        # Web search (disabled)
        # try:
        #     from search_engine_parser.core.engines.bing import Search as BingSearch
        #     bsearch = BingSearch()
        #     search_args = (query, 1)
        #     results = await bsearch.async_search(*search_args)
        #     web_content = results["description"][:5]
        #     logger.info("Web_Search - {}".format(web_content))
        # except Exception as e:
        #     logger.error("Web_Search - {}".format(e))
        #     web_content = ""
        web_content = ""

        # Load the vector store from disk
        logger.info("Start load vector database... %s", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        embedding = HuggingFaceEmbeddings(model_name=config.EMBEDDING_MODEL_PATH)
        vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
        print(vectordb._collection.count())
        logger.info("Load database building finished. %s", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

        # docs = vectordb.similarity_search(query, k=3)
        docs = vectordb.similarity_search(query, k=5)

        page = sorted(set(doc.metadata['page'] for doc in docs))

        context = [doc.page_content for doc in docs]
        # Prompt (Chinese): "Known PDF content: <context> / Answer the question from the
        # known information: <query> / Web search content: <web_content>"
        prompt = f"已知PDF内容:\n{context}\n根据已知信息回答问题:\n{query}\n网络检索内容:\n{web_content}"

        def decorate(generator):
            print("generator", generator)
            for item in generator:
                yield ServerSentEvent(json.dumps(item, ensure_ascii=False), event='delta')

        try:
            text = query
            # query = f"内容为:{context}\n根据已知信息回答问题:{query}"  # superseded: the prompt already embeds both
            # LangChain already supplies the PDF context, so drop the history
            history = []
            # Prepending the context to the history instead (disabled):
            # new_message = [f"这是我提供的文章内容{context}", "收到"]
            # history.append(new_message)
            # history = json.dumps(history, ensure_ascii=False)
            ori_history = history
            logger.info("Query - {}".format(text))
            if len(ori_history) > 0:
                logger.info("History - {}".format(ori_history))
                history = ori_history[-MAX_HISTORY:]
                history = [tuple(h) for h in history]
            # return EventSourceResponse(decorate(bot.stream(text, history)))

            # The prompt above already embeds the question and context, so send it as the query
            return EventSourceResponse(decorate(bot.stream(prompt, history, page=page, prompt="")))
        except Exception as e:
            logger.error(f"error: {e}")
            return EventSourceResponse(decorate(bot.stream(None, None, None, None)))

    @app.get("/free_gc", tags=["GPU"], summary="Free the GPU cache")
    def free_gpu_cache():
        try:
            bot.clear()
            return {"success": True}
        except Exception as e:
            logger.error(f"error: {e}")
            return {"success": False}

    host = env.get("HOST", "0.0.0.0")
    port = int(env.get("PORT", 9999))
    # uvicorn.run(app=app, host=host, port=port, reload=True)
    uvicorn.run(app=app, host=host, port=port)


if __name__ == '__main__':
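    # Host and port can be overridden through the HOST / PORT environment
    # variables read in start_server(), e.g.: HOST=127.0.0.1 PORT=9999 python api.py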
    start_server()
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------