├── .gitignore ├── FTEmbed.py ├── LICENSE ├── README.en.md ├── README.md ├── custom ├── glmfz.py ├── prompt.py ├── query.py └── retriever.py ├── data └── testdata.txt ├── ft_data ├── qa_finetune_train_dataset.json ├── qa_finetune_val_dataset.json ├── train_dataset.json └── val_dataset.json ├── image ├── img.png └── img_1.png ├── main.py ├── requirements.txt └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | 141 | BAAI 142 | llamaindex-practices.ipynb 143 | milvus-practices.ipynb 144 | qdrant-practices.ipynb 145 | rag.ipynb 146 | .idea 147 | .gitee 148 | stores 149 | hybrid_Store 150 | checkpoints 151 | 152 | -------------------------------------------------------------------------------- /FTEmbed.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from llama_index.core import SimpleDirectoryReader 5 | from llama_index.core.node_parser import SentenceSplitter 6 | from llama_index.core.schema import MetadataMode 7 | from llama_index.finetuning import generate_qa_embedding_pairs 8 | from llama_index.core.evaluation import EmbeddingQAFinetuneDataset 9 | from llama_index.llms.openai import OpenAI 10 | from dotenv import load_dotenv, find_dotenv 11 | from dotenv import dotenv_values 12 | from custom.glmfz import ChatGLM 13 | from llama_index.core import Settings 14 | from llama_index.finetuning import SentenceTransformersFinetuneEngine 15 | from llama_index.core import ServiceContext, VectorStoreIndex 16 | from llama_index.core.schema import TextNode 17 | from tqdm.notebook import tqdm 18 | from sentence_transformers.evaluation import InformationRetrievalEvaluator 19 | from sentence_transformers import SentenceTransformer 20 | from pathlib import Path 21 | import pandas as pd 22 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding 23 | 24 | 25 | def load_corpus(files, verbose=False): 26 | if verbose: 27 | print(f"Loading files {files}") 28 | 29 | reader = SimpleDirectoryReader(input_files=files) 30 | docs = reader.load_data() 31 | if verbose: 32 | print(f"Loaded {len(docs)} docs") 33 | 34 | parser = SentenceSplitter() 35 | nodes = parser.get_nodes_from_documents(docs, show_progress=verbose) 36 | 37 | if verbose: 38 | print(f"Parsed {len(nodes)} nodes") 39 | 40 | return nodes 41 | 42 | 43 | def evaluate( 44 | dataset, 45 | embed_model, 46 | top_k=5, 47 | verbose=False, 48 | ): 49 | corpus = dataset.corpus 50 | queries = dataset.queries 51 | relevant_docs = dataset.relevant_docs 52 | 53 | nodes = [TextNode(id_=id_, text=text) for id_, text in corpus.items()] 54 | index = VectorStoreIndex( 55 | nodes, embed_model=embed_model, show_progress=True 56 | ) 57 | retriever = index.as_retriever(similarity_top_k=top_k) 58 | 59 | eval_results = [] 60 | for query_id, query in tqdm(queries.items()): 61 | retrieved_nodes = retriever.retrieve(query) 62 | retrieved_ids = [node.node.node_id for node in retrieved_nodes] 63 | expected_id = relevant_docs[query_id][0] 64 | is_hit = expected_id in retrieved_ids # assume 1 relevant doc 65 | 66 | eval_result = { 67 | "is_hit": is_hit, 68 | "retrieved": retrieved_ids, 69 | "expected": expected_id, 70 | "query": query_id, 71 | } 72 | eval_results.append(eval_result) 73 | return 
eval_results 74 | 75 | 76 | def evaluate_st( 77 | dataset, 78 | model_id, 79 | name, 80 | ): 81 | corpus = dataset.corpus 82 | queries = dataset.queries 83 | relevant_docs = dataset.relevant_docs 84 | 85 | evaluator = InformationRetrievalEvaluator( 86 | queries, corpus, relevant_docs, name=name 87 | ) 88 | model = SentenceTransformer(model_id) 89 | output_path = "results/" 90 | Path(output_path).mkdir(exist_ok=True, parents=True) 91 | return evaluator(model, output_path=output_path) 92 | 93 | 94 | _ = load_dotenv(find_dotenv()) # 导入环境 95 | config = dotenv_values(".env") 96 | Settings.llm = ChatGLM( 97 | api_key=config["GLM_KEY"], 98 | model="glm-4", 99 | api_base="https://open.bigmodel.cn/api/paas/v4/", 100 | is_chat_model=True, 101 | ) 102 | Settings.embed_model = HuggingFaceEmbedding( 103 | model_name="BAAI/bge-large-en-v1.5", 104 | cache_folder="./BAAI/", 105 | embed_batch_size=128, 106 | local_files_only=True, # 仅加载本地模型,不尝试下载 107 | device="cuda", 108 | ) 109 | 110 | 111 | def finetuning_data_preparation(all_data: list, llm, verbose: bool = False, 112 | train_dataset_dir: str = "ft_data/train_dataset.json", 113 | val_dataset_dir: str = "ft_data/val_dataset.json", 114 | qa_finetune_train_dataset_dir: str = "ft_data/qa_finetune_train_dataset.json", 115 | qa_finetune_val_dataset_dir: str = "ft_data/qa_finetune_val_dataset.json"): 116 | docs_nodes = load_corpus(all_data, verbose=verbose) 117 | train_nodes = docs_nodes[:int(2 * len(docs_nodes) / 3)] 118 | val_nodes = docs_nodes[int(2 * len(docs_nodes) / 3 + 1):] 119 | 120 | # 自带保存 121 | train_dataset = generate_qa_embedding_pairs(llm=llm, nodes=train_nodes, verbose=verbose, 122 | output_path="ft_data/qa_finetune_train_dataset.json") 123 | val_dataset = generate_qa_embedding_pairs(llm=llm, nodes=val_nodes, verbose=verbose, 124 | output_path="ft_data/qa_finetune_val_dataset.json") 125 | train_dataset.save_json(train_dataset_dir) 126 | val_dataset.save_json(val_dataset_dir) 127 | 128 | 129 | def finetuning_embedding(train_dataset_dir: str = "ft_data/train_dataset.json", 130 | val_dataset_dir: str = "ft_data/val_dataset.json", 131 | model_name: str = "BAAI/bge-large-en-v1.5", 132 | model_output_path: str = "BAAI/ft-bge-large-en-v1.5"): 133 | # [Optional] Load 134 | train_dataset = EmbeddingQAFinetuneDataset.from_json(train_dataset_dir) 135 | val_dataset = EmbeddingQAFinetuneDataset.from_json(val_dataset_dir) 136 | finetune_engine = SentenceTransformersFinetuneEngine( 137 | train_dataset, 138 | model_id=model_name, 139 | model_output_path=model_output_path, 140 | val_dataset=val_dataset, 141 | ) 142 | if not os.path.exists(model_output_path): # 如果已经存在微调好的模型就不用重新微调了 143 | finetune_engine.finetune() 144 | embed_model = finetune_engine.get_finetuned_model() 145 | print(embed_model) 146 | return embed_model 147 | 148 | 149 | def eval_finetuning_embedding(embed_model, val_dataset_dir: str = "ft_data/val_dataset.json", 150 | model_name: str = "bge-large-en-v1.5"): 151 | val_dataset = EmbeddingQAFinetuneDataset.from_json(val_dataset_dir) 152 | bge_val_results = evaluate(val_dataset, embed_model) 153 | df_bge = pd.DataFrame(bge_val_results) 154 | hit_rate_bge = df_bge['is_hit'].mean() 155 | print(f"{model_name}模型的准确率为:{hit_rate_bge}") 156 | # bge-large-en-v1.5模型的准确率为:0.6155913978494624 157 | # 微调后的bge-large-en-v1.5模型的准确率为:0.7093023255813954 158 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 
| Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 
75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. 
Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /README.en.md: -------------------------------------------------------------------------------- 1 | # RAG Best Practices 2 | 3 | #### Description 4 | Best practices for retrieval-augmented generation (RAG) with large language models. 5 | 6 | #### Software Architecture 7 | Software architecture description 8 | 9 | #### Installation 10 | 11 | 1. xxxx 12 | 2. xxxx 13 | 3. xxxx 14 | 15 | #### Instructions 16 | 17 | 1. xxxx 18 | 2. xxxx 19 | 3. xxxx 20 | 21 | #### Contribution 22 | 23 | 1. Fork the repository 24 | 2. Create Feat_xxx branch 25 | 3. Commit your code 26 | 4. Create Pull Request 27 | 28 | 29 | #### Gitee Feature 30 | 31 | 1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md 32 | 2. Gitee blog [blog.gitee.com](https://blog.gitee.com) 33 | 3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore) 34 | 4. The most valuable open source project [GVP](https://gitee.com/gvp) 35 | 5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help) 36 | 6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) 37 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RAG Best Practices 2 | 3 | #### Introduction 4 | Best practices for retrieval-augmented generation (RAG) with large language models. This project implements the approaches proposed in the paper [Searching for Best Practices in Retrieval-Augmented 5 | Generation](https://arxiv.org/abs/2407.01219), which divides the RAG pipeline into the stages below and uses experiments to identify the best-practice choice for each module. 6 | ![image/img.png](image/img.png) 7 | - Query Classification: not every query needs retrieval augmentation. 8 | - Chunking: chunk size significantly affects performance. Larger chunks provide more context and aid understanding but increase processing time; smaller chunks improve retrieval recall and save time but may lack sufficient context. A sliding-window approach works better: the text is split into sentences and each chunk holds a window-sized group of sentences. 9 | - Embedding: LLM-Embedder is chosen as the embedding model; it performs on par with BAAI/bge-large-en while being roughly one third of its size. 10 | - Vector Database: Milvus supports multiple index types, billion-scale vectors, hybrid search, and cloud-native deployment. 11 | - Retrieval: HyDE (pseudo document + query) + hybrid search (score = 0.3 * BM25 + original embedding). 12 | - Reranking: monoT5 has a small parameter count with relatively high accuracy; RankLLaMA gives the highest absolute accuracy. 13 | - Repacking: the reverse ordering works best. 14 | - Summarization: Recomp | Fangyuan Xu, Weijia Shi, and Eunsol Choi. Recomp: Improving retrieval-augmented lms with compression and selective augmentation. arXiv preprint arXiv:2310.04408, 2023.
15 | - Generator Fine-tuning: mixing relevant and randomly sampled contexts makes the generator more robust to irrelevant information while still exploiting the relevant information effectively; training uses one relevant document plus one randomly selected document. 16 | 17 | I will implement the modules above one by one. The develop branch is updated continuously; the master branch is the more stable version to use. 18 | 19 | #### Software Architecture 20 | The project is built on the LlamaIndex RAG framework, with Qdrant as the vector database. 21 | The LLM is qwen2-1.5b called locally through Ollama, and the embedding model is BAAI/bge-large-zh-v1.5. 22 | Reasons for these choices: 23 | 1. LlamaIndex wraps today's commonly used techniques into modules. Compared with LangChain, I find its level of abstraction higher, which leaves more time for high-level design instead of getting bogged down in programming details. 24 | 2. Qdrant is easier to deploy than Milvus, and its documentation is more detailed and intuitive. 25 | ![img_1.png](image%2Fimg_1.png) 26 | 27 | #### Installation 28 | 29 | 1. Install the dependencies through a mirror: 30 | `pip install -i https://pypi.doubanio.com/simple/ -r requirements.txt` 31 | 2. Use Ollama to download qwen2-1.5b locally (if your hardware allows it, a larger model is recommended; calling a commercial API directly also works, and the code includes an example using the glm-4 model); 32 | 3. On the first run, change the HuggingFaceEmbedding argument in main.py to local_files_only=False; 33 | 4. Run main.py. 34 | 35 | #### Contributing 36 | 37 | 1. Fork this repository 38 | 2. Create a Feat_xxx branch 39 | 3. Commit your code 40 | 4. Create a Pull Request 41 | 42 | #### Citation 43 | This project is based on the following paper: 44 | ``` 45 | @inproceedings{Wang2024SearchingFB, 46 | title={Searching for Best Practices in Retrieval-Augmented Generation}, 47 | author={Xiaohua Wang and Zhenghua Wang and Xuan Gao and Feiran Zhang and Yixin Wu and Zhibo Xu and Tianyuan Shi and Zhengyuan Wang and Shizheng Li and Qi Qian and Ruicheng Yin and Changze Lv and Xiaoqing Zheng and Xuanjing Huang}, 48 | year={2024}, 49 | url={https://api.semanticscholar.org/CorpusID:270870251} 50 | } 51 | ``` -------------------------------------------------------------------------------- /custom/glmfz.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, List, Mapping, Any, Sequence, Dict 2 | from llama_index.core.bridge.pydantic import Field, PrivateAttr 3 | from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS 4 | from llama_index.core.llms import ( 5 | CustomLLM, 6 | CompletionResponse, 7 | CompletionResponseGen, 8 | LLMMetadata, 9 | ChatMessage, 10 | ChatResponse, 11 | ) 12 | from llama_index.embeddings.openai import OpenAIEmbedding 13 | from llama_index.core import Settings 14 | from llama_index.core.llms import ChatMessage, MessageRole 15 | from llama_index.core.llms.callbacks import llm_completion_callback, llm_chat_callback 16 | from llama_index.core import SimpleDirectoryReader, SummaryIndex 17 | from zhipuai import ZhipuAI 18 | 19 | DEFAULT_MODEL = 'glm-4' 20 | 21 | 22 | def to_message_dicts(messages: Sequence[ChatMessage]) -> List: 23 | return [ 24 | {"role": message.role.value, "content": message.content, } 25 | for message in messages if all([value is not None for value in message.values()]) 26 | ] 27 | 28 | 29 | def get_additional_kwargs(response) -> Dict: 30 | return { 31 | "token_counts": response.usage.total_tokens, 32 | "prompt_tokens": response.usage.prompt_tokens, 33 | "completion_tokens": response.usage.completion_tokens, 34 | } 35 | 36 | 37 | class ChatGLM(CustomLLM): 38 | num_output: int = DEFAULT_NUM_OUTPUTS 39 | context_window: int = Field(default=DEFAULT_CONTEXT_WINDOW, 40 | description="The maximum number of context tokens for the model.", gt=0, ) 41 | model: str = Field(default=DEFAULT_MODEL, description="The ChatGLM model to use. glm-4 or glm-3-turbo") 42 | api_key: str = Field(default=None, description="The ChatGLM API key.") 43 | reuse_client: bool = Field(default=True, description=( 44 | "Reuse the client between requests. 
When doing anything with large " 45 | "volumes of async API calls, setting this to false can improve stability."), ) 46 | _client: Optional[Any] = PrivateAttr() 47 | 48 | def __init__( 49 | self, 50 | model: str = DEFAULT_MODEL, 51 | reuse_client: bool = True, 52 | api_key: Optional[str] = None, 53 | **kwargs: Any, 54 | ) -> None: 55 | super().__init__( 56 | model=model, 57 | api_key=api_key, 58 | reuse_client=reuse_client, 59 | **kwargs, 60 | ) 61 | self._client = None 62 | 63 | def _get_client(self) -> ZhipuAI: 64 | if not self.reuse_client: 65 | return ZhipuAI(api_key=self.api_key) 66 | 67 | if self._client is None: 68 | self._client = ZhipuAI(api_key=self.api_key) 69 | return self._client 70 | 71 | @classmethod 72 | def class_name(cls) -> str: 73 | return "chatglm_llm" 74 | 75 | @property 76 | def metadata(self) -> LLMMetadata: 77 | """Get LLM metadata.""" 78 | return LLMMetadata( 79 | context_window=self.context_window, 80 | num_output=self.num_output, 81 | model_name=self.model, 82 | ) 83 | 84 | def _chat(self, messages: List, stream=False) -> Any: 85 | response = self._get_client().chat.completions.create( 86 | model=self.model, # name of the model to call 87 | messages=messages, stream=stream, # pass the flag through so stream_complete actually streams 88 | ) 89 | # print(f"_chat, response: {response}") 90 | return response 91 | 92 | @llm_completion_callback() 93 | def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: 94 | messages = [{"role": "user", "content": prompt}] 95 | rsp = CompletionResponse(text="") # fallback value in case the API call fails 96 | # print(f"complete: messages {messages} ") 97 | try: 98 | response = self._chat(messages, stream=False) 99 | 100 | rsp = CompletionResponse(text=str(response.choices[0].message.content), 101 | raw=response, 102 | additional_kwargs=get_additional_kwargs(response), ) 103 | # print(f"complete: {rsp} ") 104 | except Exception as e: 105 | print(f"complete: exception {e}") 106 | 107 | return rsp 108 | 109 | @llm_completion_callback() 110 | def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: 111 | response_txt = "" 112 | messages = [{"role": "user", "content": prompt}] 113 | response = self._chat(messages, stream=True) 114 | # print(f"stream_complete: {response} ") 115 | for chunk in response: 116 | # chunk.choices[0].delta # content='```' role='assistant' tool_calls=None 117 | token = chunk.choices[0].delta.content or "" 118 | response_txt += token 119 | yield CompletionResponse(text=response_txt, delta=token) 120 | 121 | 122 | # Settings.llm = ChatGLM(api_key="your-zhipuai-api-key") 123 | # 124 | # # define embed model 125 | # Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-large") 126 | # 127 | # # Load your data 128 | # documents = SimpleDirectoryReader("./test").load_data() 129 | # index = SummaryIndex.from_documents(documents) 130 | # 131 | # # Query and print response 132 | # query_engine = index.as_query_engine() 133 | # response = query_engine.query("How old is Xiao Ming?") 134 | # print(response) -------------------------------------------------------------------------------- /custom/prompt.py: -------------------------------------------------------------------------------- 1 | qa_prompt_tmpl_str = """ 2 | Context information is below. 3 | --------------------- 4 | {context_str} 5 | --------------------- 6 | Given the context information and not prior knowledge, answer the query. 7 | Query: {query_str} 8 | Answer: 9 | """ 10 | 11 | # The descriptions below let a router decide whether retrieval-augmented generation is needed 12 | simple_qa_prompt_tmpl_str = """ 13 | Please answer the query. 
14 | Query: {query_str} 15 | Answer: 16 | """ 17 | rag_description = "Useful for answering questions that require specific contextual knowledge to be answered accurately." 18 | norag_rag_description = ("Used to answer questions that do not require specific contextual knowledge to be answered " 19 | "accurately.") -------------------------------------------------------------------------------- /custom/query.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | from llama_index.core.schema import BaseNode 3 | from llama_index.core.indices.vector_store.base import VectorStoreIndex 4 | from llama_index.retrievers.bm25 import BM25Retriever 5 | from llama_index.core.retrievers import VectorIndexRetriever 6 | import Stemmer 7 | from llama_index.core.prompts import BasePromptTemplate 8 | from llama_index.core.query_engine import RetrieverQueryEngine 9 | from custom.retriever import CustomRetriever 10 | from llama_index.core.postprocessor import MetadataReplacementPostProcessor 11 | from llama_index.core import get_response_synthesizer 12 | from llama_index.core.response_synthesizers.type import ResponseMode 13 | from llama_index.core import Settings 14 | from llama_index.core.postprocessor import LLMRerank 15 | from llama_index.postprocessor.flag_embedding_reranker import FlagEmbeddingReranker 16 | 17 | 18 | def build_query_engine(index: VectorStoreIndex, 19 | response_mode: ResponseMode = ResponseMode.TREE_SUMMARIZE, 20 | qa_prompt_tmpl: Optional[BasePromptTemplate] = None, 21 | with_hybrid_search: bool = False, 22 | top_k: int = 5, 23 | top_k_rerank: int = 2, 24 | with_rerank: bool = True, 25 | nodes: Optional[List[BaseNode]] = None): 26 | reranker = FlagEmbeddingReranker( 27 | top_n=top_k_rerank, 28 | model="BAAI/bge-reranker-large", 29 | use_fp16=True, 30 | ) 31 | if with_hybrid_search: 32 | if with_rerank: 33 | rag_query_engine = index.as_query_engine(similarity_top_k=top_k, 34 | text_qa_template=qa_prompt_tmpl, 35 | node_postprocessors=[reranker, MetadataReplacementPostProcessor( 36 | target_metadata_key="window")], 37 | sparse_top_k=12, 38 | vector_store_query_mode="hybrid", 39 | response_synthesizer=get_response_synthesizer( 40 | response_mode=response_mode, 41 | # refine_template=PromptTemplate(refine_tmpl_str) 42 | ), 43 | ) 44 | else: 45 | rag_query_engine = index.as_query_engine(similarity_top_k=top_k, 46 | text_qa_template=qa_prompt_tmpl, 47 | node_postprocessors=[MetadataReplacementPostProcessor( 48 | target_metadata_key="window")], 49 | sparse_top_k=12, 50 | vector_store_query_mode="hybrid", 51 | response_synthesizer=get_response_synthesizer( 52 | response_mode=response_mode, 53 | # refine_template=PromptTemplate(refine_tmpl_str) 54 | ), 55 | ) 56 | 57 | else: 58 | # Build a tree index over the set of candidate nodes, with a summary prompt seeded with the query. 
(Optionally apply a reranker.) 59 | if with_rerank: 60 | rag_query_engine = index.as_query_engine(similarity_top_k=top_k, 61 | text_qa_template=qa_prompt_tmpl, 62 | node_postprocessors=[ 63 | reranker, 64 | MetadataReplacementPostProcessor(target_metadata_key="window"), 65 | ], 66 | response_synthesizer=get_response_synthesizer( 67 | response_mode=response_mode), 68 | ) 69 | else: 70 | rag_query_engine = index.as_query_engine(similarity_top_k=top_k, 71 | text_qa_template=qa_prompt_tmpl, 72 | node_postprocessors=[ 73 | MetadataReplacementPostProcessor(target_metadata_key="window"), 74 | ], 75 | response_synthesizer=get_response_synthesizer( 76 | response_mode=response_mode), 77 | ) 78 | return rag_query_engine 79 | -------------------------------------------------------------------------------- /custom/retriever.py: -------------------------------------------------------------------------------- 1 | from llama_index.core.retrievers import ( 2 | BaseRetriever, 3 | VectorIndexRetriever, 4 | ) 5 | from llama_index.core import QueryBundle 6 | from llama_index.retrievers.bm25 import BM25Retriever 7 | from llama_index.core.schema import NodeWithScore 8 | from typing import List 9 | 10 | 11 | class CustomRetriever(BaseRetriever): 12 | """Custom retriever that fuses semantic (vector) search with BM25 keyword search for hybrid retrieval.""" 13 | 14 | def __init__( 15 | self, 16 | vector_retriever: VectorIndexRetriever, 17 | bm25_retriever: BM25Retriever, 18 | mode: str = "AND", 19 | alpha: float = 0.5, 20 | ) -> None: 21 | """Init params.""" 22 | 23 | self._vector_retriever = vector_retriever 24 | self._keyword_retriever = bm25_retriever 25 | if mode not in ("AND", "OR"): 26 | raise ValueError("Invalid mode.") 27 | self._mode = mode 28 | self._alpha = alpha 29 | super().__init__() 30 | 31 | def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: 32 | """Retrieve nodes given query.""" 33 | 34 | vector_nodes = self._vector_retriever.retrieve(query_bundle) 35 | keyword_nodes = self._keyword_retriever.retrieve(query_bundle) 36 | # for node in vector_nodes: 37 | # print(node) 38 | # print("----------") 39 | # for node in keyword_nodes: 40 | # print(node) 41 | # print("----------") 42 | vector_ids = {n.node.node_id for n in vector_nodes} 43 | keyword_ids = {n.node.node_id for n in keyword_nodes} 44 | 45 | # Min-max normalisation of both score lists 46 | vector_score = [n.score for n in vector_nodes] 47 | bm25_score = [n.score for n in keyword_nodes] 48 | minV = min(vector_score) 49 | maxV = max(vector_score) 50 | minB = min(bm25_score) 51 | maxB = max(bm25_score) 52 | for n in vector_nodes: 53 | n.score = (n.score - minV) / ((maxV - minV) or 1.0) # guard against identical scores 54 | for n in keyword_nodes: 55 | n.score = (n.score - minB) / ((maxB - minB) or 1.0) 56 | 57 | # for node in vector_nodes: 58 | # print(node) 59 | # print("----------") 60 | # for node in keyword_nodes: 61 | # print(node) 62 | # print("----------") 63 | 64 | # Weighted score fusion 65 | combined_dict = {n.node.node_id: n for n in vector_nodes} 66 | for n in keyword_nodes: 67 | if n.node.node_id in combined_dict.keys(): 68 | combined_dict[n.node.node_id].score += self._alpha * float(n.score) 69 | else: # keep keyword-only hits so that OR mode cannot raise a KeyError below 70 | n.score = self._alpha * float(n.score) 71 | combined_dict[n.node.node_id] = n 72 | 73 | if self._mode == "AND": 74 | retrieve_ids = vector_ids.intersection(keyword_ids) 75 | else: 76 | retrieve_ids = vector_ids.union(keyword_ids) 77 | 78 | retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids] 79 | 80 | for node in retrieve_nodes: 81 | print(node) 82 | print("----------") 83 | return retrieve_nodes 84 | -------------------------------------------------------------------------------- /image/img.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoql/rag-best-practices/cbe12e21df644ecac67b1ec9ff03014161f1f78e/image/img.png -------------------------------------------------------------------------------- /image/img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoql/rag-best-practices/cbe12e21df644ecac67b1ec9ff03014161f1f78e/image/img_1.png -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding 2 | from llama_index.llms.ollama import Ollama 3 | from llama_index.core.node_parser import SentenceWindowNodeParser 4 | from llama_index.core import PromptTemplate, get_response_synthesizer, StorageContext, VectorStoreIndex, \ 5 | SimpleDirectoryReader, Settings 6 | from FTEmbed import finetuning_data_preparation, finetuning_embedding, eval_finetuning_embedding 7 | from llama_index.core.indices.query.query_transform import HyDEQueryTransform 8 | from llama_index.core.query_engine import TransformQueryEngine 9 | from llama_index.core.response_synthesizers.type import ResponseMode 10 | from custom.glmfz import ChatGLM 11 | from llama_index.core.tools import QueryEngineTool 12 | from llama_index.core.query_engine import RouterQueryEngine 13 | from llama_index.core.selectors import LLMSingleSelector 14 | from dotenv import load_dotenv, find_dotenv 15 | from dotenv import dotenv_values 16 | from custom.query import build_query_engine 17 | from custom.prompt import qa_prompt_tmpl_str, simple_qa_prompt_tmpl_str, rag_description, norag_rag_description 18 | from utils import load_hybrid_data, load_txt_data 19 | import warnings 20 | 21 | warnings.filterwarnings('ignore') 22 | # Load environment variables 23 | _ = load_dotenv(find_dotenv()) 24 | config = dotenv_values(".env") 25 | 26 | # Configuration 27 | with_hyde = False # whether to use HyDE (hypothetical document embeddings) 28 | persist_dir = "store" # where the vector store is persisted 29 | with_hybrid_search = True # whether to use hybrid retrieval 30 | # retrieve 5 candidates, keep 2 after reranking 31 | top_k = 5 32 | top_k_rerank = 2 33 | with_sliding_window = True # whether to use sliding-window chunking 34 | response_mode = ResponseMode.SIMPLE_SUMMARIZE # repacking/synthesis mode; the paper's best practice is TREE_SUMMARIZE 35 | with_query_classification = False # whether to classify the incoming query first 36 | with_rerank = True # whether to rerank retrieved nodes 37 | with_local_llm = False # whether to use a local LLM served by Ollama 38 | with_Finetuning_embedding = False # whether to fine-tune the embedding model 39 | with_Finetuning_embedding_eval = False # whether to evaluate the hit rate of the fine-tuned embedding model 40 | # Query 41 | # query_str = "Did Fang Hung-chien kiss Miss Bao?" 42 | query_str = "In the text, which lady did Fang Hongjian kiss on the ship, and under what circumstances did it happen?"
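# The GLM branch below reads config["GLM_KEY"], so a .env file at the project root is assumed to
# provide the ZhipuAI key, e.g. (placeholder value only):
#   GLM_KEY=your_zhipuai_api_key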
43 | 44 | # 加载嵌入模型 45 | if with_Finetuning_embedding: 46 | # 微调需要开启VPN 47 | finetuning_data_preparation(all_data=["data/testdata.txt"], llm=Settings.llm, verbose=False, 48 | train_dataset_dir="ft_data/train_dataset.json", 49 | val_dataset_dir="ft_data/val_dataset.json", 50 | qa_finetune_train_dataset_dir="ft_data/qa_finetune_train_dataset.json", 51 | qa_finetune_val_dataset_dir="ft_data/qa_finetune_val_dataset.json") 52 | Settings.embed_model = finetuning_embedding(train_dataset_dir="ft_data/train_dataset.json", 53 | val_dataset_dir="ft_data/val_dataset.json", 54 | model_name="BAAI/bge-large-en-v1.5", 55 | model_output_path="BAAI/ft-bge-large-en-v1.5/") 56 | # 测评微调后的嵌入模型命中率 57 | if with_Finetuning_embedding_eval: 58 | eval_finetuning_embedding(embed_model=Settings.embed_model, val_dataset_dir="ft_data/val_dataset.json", 59 | model_name="ft-bge-large-en-v1.5") 60 | else: 61 | Settings.embed_model = HuggingFaceEmbedding( 62 | model_name="BAAI/bge-large-en-v1.5", 63 | cache_folder="./BAAI/", 64 | embed_batch_size=128, 65 | local_files_only=True, # 仅加载本地模型,不尝试下载 66 | device="cuda", 67 | ) 68 | 69 | # 加载大模型 70 | if with_local_llm: 71 | Settings.llm = Ollama(model="qwen2:1.5b", request_timeout=30.0, temperature=0) 72 | else: 73 | Settings.llm = ChatGLM( 74 | api_key=config["GLM_KEY"], 75 | model="glm-4", 76 | api_base="https://open.bigmodel.cn/api/paas/v4/", 77 | is_chat_model=True, 78 | ) 79 | 80 | # load data(是否采用混合检索) 81 | if with_hybrid_search: 82 | index, nodes = load_hybrid_data(input_file=["data/testdata.txt"], persist_dir="hybrid_Store") 83 | else: 84 | index, nodes = load_txt_data(input_file=["data/testdata.txt"], persist_dir="hybrid_Store", 85 | with_sliding_window=with_sliding_window, chunk_size=512, chunk_overlap=128) 86 | 87 | # prompt 88 | qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str) 89 | simple_qa_prompt_tmpl = PromptTemplate(simple_qa_prompt_tmpl_str) # norag 90 | 91 | # Build query_engine 92 | rag_query_engine = build_query_engine(index, response_mode, qa_prompt_tmpl, with_hybrid_search, top_k, 93 | top_k_rerank, with_rerank, nodes) 94 | simple_query_engine = index.as_query_engine(similarity_top_k=top_k, 95 | text_qa_template=simple_qa_prompt_tmpl, 96 | response_synthesizer=get_response_synthesizer( 97 | response_mode=ResponseMode.GENERATION), 98 | ) 99 | 100 | # HyDE(当问题较为简单时,不需要该模块参与) 101 | if with_hyde: 102 | hyde = HyDEQueryTransform(include_original=True) 103 | rag_query_engine = TransformQueryEngine(rag_query_engine, hyde) 104 | 105 | # response 106 | # Router Query Engine(Query Classification) 107 | if with_query_classification: 108 | rag_tool = QueryEngineTool.from_defaults( 109 | query_engine=rag_query_engine, 110 | description=rag_description, 111 | ) 112 | simple_tool = QueryEngineTool.from_defaults( 113 | query_engine=simple_query_engine, 114 | description=norag_rag_description, 115 | ) 116 | query_engine = RouterQueryEngine( 117 | selector=LLMSingleSelector.from_defaults(), 118 | query_engine_tools=[ 119 | rag_tool, 120 | simple_tool, 121 | ], 122 | ) 123 | response = query_engine.query(query_str) 124 | else: 125 | response = rag_query_engine.query(query_str) 126 | 127 | print(f"Question: {str(query_str)}") 128 | print("------------------") 129 | print(f"Response: {str(response)}") 130 | print("------------------") 131 | if not with_query_classification or response.metadata['selector_result'].ind == 0: 132 | window = response.source_nodes[0].node.metadata["window"] # 长度为3的窗口,包含了文本两侧的上下文。 133 | sentence = 
response.source_nodes[0].node.metadata["original_sentence"] # 检索到的文本 134 | print(f"Window: {window}") 135 | print("------------------") 136 | print(f"Original Sentence: {sentence}") 137 | print("------------------") 138 | 139 | """ 140 | 示例1: 141 | Question: Did Fang Hung-chien kiss Miss Bao? 142 | ------------------ 143 | Response: Yes, Fang Hung-chien kissed Miss Bao. 144 | ------------------ 145 | Window: A big wave shook the hull badly, and Miss Bao could not stand steadily. Fang hung-chien hooked her waist and stayed by the railing, kissing her greedily. Miss Bao's lips suggested that the body followed, and this hasty and rude kiss gradually stabilized and grew properly and densely. Miss Bao deftly pushed off Fang Hung-chien's arm, took a deep breath in her mouth and said, "I'm suffocated by you! I have a cold and I can't breathe in my nose-it's too cheap for you, and you haven't asked me to love you! " 146 | "I beg you now, ok?" It seems that all men who have never been in love, Fang Hung-chien regards the word "love" too noble and serious and refuses to apply it to women casually; He only felt that he wanted Miss Bao and didn't love her, so he was so evasive. 147 | ------------------ 148 | Original Sentence: Miss Bao deftly pushed off Fang Hung-chien's arm, took a deep breath in her mouth and said, "I'm suffocated by you! 149 | ------------------ 150 | 151 | 示例2: 152 | Question: In the text, which lady did Fang Hongjian kiss on the ship, and under what circumstances did it happen? 153 | ------------------ 154 | Response: In the text, Fang Hongjian kissed Miss Bao on the ship. It happened in the dark shadows of the deck, after a big wave shook the hull, causing Miss Bao to lose her balance. Fang Hongjian hooked her waist to steady her and then kissed her. 155 | ------------------ 156 | Window: After ten o'clock, there were only three or five pairs of men and women on the deck, all hiding in the dark shadows where the lights could not shine. Fang Hung-chien and Miss Bao walked side by side without talking. A big wave shook the hull badly, and Miss Bao could not stand steadily. Fang hung-chien hooked her waist and stayed by the railing, kissing her greedily. Miss Bao's lips suggested that the body followed, and this hasty and rude kiss gradually stabilized and grew properly and densely. Miss Bao deftly pushed off Fang Hung-chien's arm, took a deep breath in her mouth and said, "I'm suffocated by you! I have a cold and I can't breathe in my nose-it's too cheap for you, and you haven't asked me to love you! " 157 | ------------------ 158 | Original Sentence: Fang hung-chien hooked her waist and stayed by the railing, kissing her greedily. 
159 | ------------------ 160 | """ 161 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chaoql/rag-best-practices/cbe12e21df644ecac67b1ec9ff03014161f1f78e/requirements.txt -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding 2 | from llama_index.llms.ollama import Ollama 3 | from llama_index.core.node_parser import SentenceWindowNodeParser 4 | from llama_index.core import StorageContext, VectorStoreIndex, SimpleDirectoryReader 5 | from llama_index.core import load_index_from_storage 6 | from qdrant_client import QdrantClient 7 | from llama_index.vector_stores.qdrant import QdrantVectorStore 8 | from llama_index.core.node_parser import SentenceSplitter 9 | 10 | 11 | def load_hybrid_data(input_file, persist_dir): 12 | documents = SimpleDirectoryReader(input_files=input_file).load_data() 13 | # Sliding windows chunking & Extract nodes from documents 14 | node_parser = SentenceWindowNodeParser.from_defaults( 15 | # how many sentences on either side to capture 16 | window_size=3, 17 | # the metadata key that holds the window of surrounding sentences 18 | window_metadata_key="window", 19 | # the metadata key that holds the original sentence 20 | original_text_metadata_key="original_sentence", 21 | ) 22 | nodes = node_parser.get_nodes_from_documents(documents, show_progress=True) 23 | 24 | # 创建一个持久化的索引到磁盘 25 | client = QdrantClient(path=persist_dir) 26 | # 创建启用混合索引的向量存储 27 | vector_store = QdrantVectorStore( 28 | "test", client=client, enable_hybrid=True, batch_size=20 29 | ) 30 | try: 31 | storage_context = StorageContext.from_defaults(vector_store=vector_store, persist_dir=persist_dir) 32 | index = load_index_from_storage(storage_context, show_progress=True) 33 | except: 34 | storage_context = StorageContext.from_defaults(vector_store=vector_store) 35 | index = VectorStoreIndex( 36 | nodes=nodes, 37 | storage_context=storage_context, 38 | show_progress=True, 39 | ) 40 | index.storage_context.persist(persist_dir=persist_dir) 41 | return index, nodes 42 | 43 | 44 | def load_txt_data(input_file, persist_dir, with_sliding_window: bool, chunk_size=512, chunk_overlap=128): 45 | documents = SimpleDirectoryReader(input_files=input_file).load_data() 46 | if with_sliding_window: 47 | # Sliding windows chunking & Extract nodes from documents 48 | node_parser = SentenceWindowNodeParser.from_defaults( 49 | # how many sentences on either side to capture 50 | window_size=3, 51 | # the metadata key that holds the window of surrounding sentences 52 | window_metadata_key="window", 53 | # the metadata key that holds the original sentence 54 | original_text_metadata_key="original_sentence", 55 | ) 56 | else: 57 | node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=128) 58 | 59 | nodes = node_parser.get_nodes_from_documents(documents, show_progress=True) 60 | 61 | # indexing & storing 62 | try: 63 | storage_context = StorageContext.from_defaults(persist_dir=persist_dir) 64 | index = load_index_from_storage(storage_context, show_progress=True) 65 | except: 66 | index = VectorStoreIndex(nodes=nodes, show_progress=True) 67 | index.storage_context.persist(persist_dir=persist_dir) 68 | return index, nodes 69 | 
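# Usage sketch (illustrative only): assumes Settings.llm and Settings.embed_model are already
# configured as in main.py and that data/testdata.txt exists.
# index, nodes = load_txt_data(["data/testdata.txt"], persist_dir="store", with_sliding_window=True)
# retriever = index.as_retriever(similarity_top_k=5)
# print(retriever.retrieve("Did Fang Hung-chien kiss Miss Bao?")[0])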
--------------------------------------------------------------------------------
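A note on the hybrid-search weighting: the fusion step implemented in custom/retriever.py (and summarised in the README as 0.3 * BM25 + original embedding) can be illustrated in isolation. The snippet below is a standalone sketch with invented document ids and scores; it assumes nothing beyond the Python standard library and only demonstrates the min-max normalisation plus weighted-sum arithmetic.

```python
# Standalone sketch of the hybrid score fusion used by CustomRetriever:
# min-max normalise each score list, then fused = dense + alpha * bm25 (alpha = 0.3).
# Document ids and scores below are invented for illustration.

def min_max(scores):
    lo, hi = min(scores), max(scores)
    span = (hi - lo) or 1.0  # avoid division by zero when all scores are equal
    return [(s - lo) / span for s in scores]


def fuse(dense, bm25, alpha=0.3):
    dense_norm = dict(zip(dense.keys(), min_max(list(dense.values()))))
    bm25_norm = dict(zip(bm25.keys(), min_max(list(bm25.values()))))
    fused = {doc_id: dense_norm.get(doc_id, 0.0) + alpha * bm25_norm.get(doc_id, 0.0)
             for doc_id in set(dense_norm) | set(bm25_norm)}
    return sorted(fused.items(), key=lambda kv: kv[1], reverse=True)


if __name__ == "__main__":
    dense = {"doc1": 0.82, "doc2": 0.74, "doc3": 0.40}  # cosine similarities (invented)
    bm25 = {"doc2": 11.2, "doc4": 9.8, "doc1": 3.1}     # BM25 scores (invented)
    for doc_id, score in fuse(dense, bm25):
        print(doc_id, round(score, 3))
```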