├── post_processors
│   └── __init__.py
├── pre_processors
│   └── __init__.py
├── requirements.txt
├── .github
│   └── workflows
│       └── python-app.yml
├── model_specs.yml
├── prompt_config.py
├── README.md
├── .gitignore
├── conversational_router_chain.json
└── main.py
/post_processors/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pre_processors/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain
2 | pandas
3 | chromadb
4 | sentence-transformers
5 | openai
--------------------------------------------------------------------------------
/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Python application
5 |
6 | on:
7 | push:
8 | branches: [ "master" ]
9 | pull_request:
10 | branches: [ "master" ]
11 |
12 | permissions:
13 | contents: read
14 |
15 | jobs:
16 | build:
17 |
18 | runs-on: ubuntu-latest
19 |
20 | steps:
21 | - uses: actions/checkout@v3
22 | - name: Set up Python 3.10
23 | uses: actions/setup-python@v3
24 | with:
25 | python-version: "3.10"
26 | - name: Install dependencies
27 | run: |
28 | python -m pip install --upgrade pip
29 | pip install flake8 pytest
30 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
31 | - name: Lint with flake8
32 | run: |
33 | # stop the build if there are Python syntax errors or undefined names
34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
36 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
37 | - name: Vuln Check
38 | env:
39 | SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
40 | run: |
41 | curl https://static.snyk.io/cli/latest/snyk-linux -o snyk
42 | chmod +x ./snyk
43 | mv ./snyk /usr/local/bin/
44 | pip install -r requirements.txt
45 | snyk test
46 |
--------------------------------------------------------------------------------
/model_specs.yml:
--------------------------------------------------------------------------------
1 | models:
2 | - Space:
3 | qa_maker:
4 | - How far is the earth from the moon?
5 | - What's the temperature of the sun?
6 |       - How does the air smell on Venus?
7 | template: |
8 |         Assume that you're Elon Musk and are very concerned about the future of human civilization beyond Earth.
9 |
10 |         Answer the following question keeping this in mind and provide answers that help clarify how
11 |         humans would survive as an interplanetary species. If the question is not relevant then say "I don't know" and do not make up any answer.
12 | Question related to space and how humans could survive:
13 | {question}
14 | input_vars:
15 | - question
16 | - Architecture:
17 | qa_maker:
18 | - What's the best way to do sampling for statistical analysis?
19 | - Which technologies would make most sense for distributed work?
20 | template: |
21 |         Assume the role of a software architect who is highly experienced in building and scaling large-scale distributed systems.
22 | Answer the questions specifically on software design problems as indicated below. If the question is not relevant then say "I don't know" and do not make up any answer.
23 |
24 |         Question related to distributed systems and large-scale software design:
25 | {question}
26 |
27 |         Please also include references in your answers to popular websites where we can get more context.
28 | input_vars:
29 | - question
30 | - Biotechnology:
31 | qa_maker:
32 | - What's the best way to tap into genetic memory?
33 | template: |
34 |         Assume the role of a genetics expert who has unlocked the secrets of our genetic makeup and is able to provide clear answers to the questions below.
35 |         Optimize for answers that provide direction on current problems around genetic defects and how we can overcome them. If the question is not relevant then say "I don't know" and do not make up any answer.
36 |
37 |         Question related to biotechnology and related use cases:
38 | {question}
39 | input_vars:
40 | - question
41 |
42 |
43 |
--------------------------------------------------------------------------------
/prompt_config.py:
--------------------------------------------------------------------------------
1 | import chromadb
2 | from chromadb.utils.embedding_functions import SentenceTransformerEmbeddingFunction
3 | from langchain import PromptTemplate, OpenAI, LLMChain
4 | import yaml
5 |
6 |
7 | class RouterConfig:
8 |     def __init__(self, llm=None, spec='model_specs.yml'):
9 | self.chain_map = {}
10 | self.post_processor_map = {}
11 | self.pre_processor_map = {}
12 | chroma_client = chromadb.Client()
13 | sentence_transformer_ef = SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
14 | self.router_coll = chroma_client.create_collection(name='router', embedding_function=sentence_transformer_ef)
15 | if not llm:
16 | llm = OpenAI(temperature=0.9)
17 | with open(spec, 'r') as fp:
18 | content = yaml.safe_load(fp)
19 | for model in content.get('models'):
20 | for mname, mcontent in model.items():
21 | mname = mname.lower()
22 | self.router_coll.add(ids=[str(x) for x in range(len(mcontent.get('qa_maker')))],
23 | documents=mcontent.get('qa_maker'),
24 | metadatas=[{'classification': mname} for x in
25 | range(len(mcontent.get('qa_maker')))])
26 | self.chain_map[mname] = LLMChain(llm=llm, prompt=PromptTemplate(template=mcontent.get('template'),
27 | input_variables=mcontent.get(
28 | 'input_vars')))
29 | self.post_processor_map[mname] = mcontent.get('post_processor_script')
30 | self.pre_processor_map[mname] = mcontent.get('pre_processor_script')
31 |
32 | def get_chains(self):
33 | return self.chain_map
34 |
35 | def get_embedding(self):
36 | return self.router_coll
37 |
38 | def get_post_processor_per_chain(self):
39 | return self.post_processor_map
40 |
41 | def get_pre_processor_per_chain(self):
42 | return self.pre_processor_map
43 |
44 |
45 | if __name__ == '__main__':
46 | b = RouterConfig()
47 | c = b.get_chains()
48 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## LangChain Conversational Model Router
2 |
3 | LangChain provides several types of chaining where one model can be chained to another. A common and desirable design
4 | pattern is a hub-and-spoke model, where a single interface is presented to the end user/application while the results
5 | come from multiple specialized models/chains/agents.
6 | 
7 | This implementation provides a design pattern for achieving that goal.
8 |
9 | ### Features
10 |
11 | - Supports a `chainMap`, a collection of chains served behind a single chain.
12 | - Ability to freely customize each chain through its prompt templates and chain type preferences.
13 | - Leverages `chroma` embeddings of Q&A samples for each model/LLM chain and uses them for the routing decision
14 |   (see the sketch after this list).
15 | - Conversational memory with a fail-safe mechanism that falls back to the most recent history context when the
16 |   embedding match does not meet the distance threshold for deciding the route. Great for abstract follow-ups like `tell me more...`.
17 | - Demonstrated using a chat application.
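
The routing decision itself is a nearest-neighbour lookup over those Q&A samples. The sketch below is illustrative only: the collection name, sample questions, and the `1.5` distance guardrail mirror what `prompt_config.py` and `main.py` use, but the snippet is standalone and hypothetical rather than the library's API.

```python
import chromadb

# Illustrative: build a small routing collection from per-chain sample questions.
client = chromadb.Client()
router = client.create_collection(name="router")
router.add(
    ids=["0", "1"],
    documents=["How far is the earth from the moon?",
               "What's the best way to do sampling for statistical analysis?"],
    metadatas=[{"classification": "space"}, {"classification": "architecture"}],
)

# Route an incoming question to the closest-matching chain.
result = router.query(query_texts=["Could humans live on Mars?"], n_results=1)
classification = result["metadatas"][0][0]["classification"]
distance = result["distances"][0][0]

if distance <= 1.5:  # guardrail distance used by the router chain
    print(f"route to the '{classification}' chain")
else:
    print("fall back to the chain used for the previous turn (conversational memory)")
```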
18 |
19 | ### Introducing ConversationalRouterChain
20 |
21 | `ConversationalRouterChain` is the new custom chain that abstracts the entire router implementation, including memory
22 | management, embedding queries for matching, and threshold management. This chain type will eventually be merged into the
23 | LangChain ecosystem.
24 | As of this writing, a LangChain Hub submission is also in progress to make it part of the official list of custom chains
25 | available to the open source community.
26 |
27 | ### Working with ConversationalRouterChain
28 |
29 | - Populate a `model_specs.yml` file with all the required destination chains to route to. See the sample for what's included.
30 | - Set up the vector embedding as a `chroma` collection and pass it as a parameter to the chain.
31 |
32 | See the sample utility in the `RouterConfig` class, which sets up the chain map and the routing embedding collection.
33 |
34 | ```python
35 | llm = OpenAI(temperature=0.3)
36 | chain_config = RouterConfig(llm=llm, spec='model_specs.yml')
37 | # set up router chain
38 | router_chain = ConversationalRouterChain(llm=llm, chains=chain_config.get_chains(),
39 |                                          vector_collection=chain_config.get_embedding(),
40 |                                          memory=ConversationBufferWindowMemory(k=1), verbose=True)
41 | ```
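
Once constructed, the router chain is driven like any other LangChain chain. A minimal chat loop, sketched after `main.py` (the `"Human: "` prompt string is illustrative; with `verbose=True` the chain prints the routed response itself):

```python
# Every user turn is routed to the best-matching destination chain.
while True:
    text = input("Human: ")
    router_chain.predict(input=text)
```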
42 |
43 | ### Contributions and Feedback
44 |
45 | There are several ways this implementation could evolve into a truly generic, manifest-driven approach to
46 | chain/agent topology creation. Please provide feedback by creating issues or by forking this project and submitting
47 | a PR. Some initial considerations:
48 | 
49 | - Support more varieties of destination chains. Provide the capability to build n-level-deep chain topologies.
50 | - Extend support for hybrid Chain/Agent topologies: some chains could be 'decision makers', some agents could be executors, and others could sit somewhere in between.
51 | - More robust observability and dynamic threshold management for memory.
52 |
53 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Airflow configuration
2 | airflow.cfg
3 | unittests.cfg
4 | airflow_login.py
5 | dbinit.py
6 | initdb.py
7 | secrets.py
8 |
9 | # Airflow sqlite databases
10 | airflow.db
11 | unittests.db
12 |
13 | *.db
14 | *.log
15 |
16 | pc_*
17 | # Airflow temporary artifacts
18 | airflow/git_version
19 | airflow/www/static/coverage/
20 | airflow/www/static/dist
21 | airflow/www_rbac/static/coverage/
22 | airflow/www_rbac/static/dist/
23 |
24 | logs/
25 | airflow-webserver.pid
26 |
27 | # Byte-compiled / optimized / DLL files
28 | __pycache__/
29 | *.py[cod]
30 | *$py.class
31 | .pytest_cache/
32 |
33 | # C extensions
34 | *.so
35 |
36 | # Distribution / packaging
37 | .Python
38 | env/
39 | build/
40 | develop-eggs/
41 |
42 | downloads/
43 | eggs/
44 | .eggs/
45 | lib/
46 | lib64/
47 | parts/
48 | sdist/
49 | var/
50 | wheels/
51 | *.egg-info/
52 | .installed.cfg
53 | *.egg
54 |
55 | # PyInstaller
56 | # Usually these files are written by a python script from a template
57 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
58 | *.manifest
59 | *.spec
60 |
61 | # Installer logs
62 | pip-log.txt
63 | pip-delete-this-directory.txt
64 |
65 | # Unit test / coverage reports
66 | htmlcov/
67 | .coverage
68 | .coverage.*
69 | .cache
70 | nosetests.xml
71 | coverage.xml
72 | *,cover
73 | .hypothesis/
74 | .pytest_cache
75 |
76 | # Translations
77 | *.mo
78 | *.pot
79 |
80 | # Django stuff:
81 | # *.log
82 | local_settings.py
83 |
84 | # Flask stuff:
85 | instance/
86 | .webassets-cache
87 | /webserver_config.py
88 |
89 | # Scrapy stuff:
90 | .scrapy
91 |
92 | # Sphinx documentation
93 | docs/_build/
94 | docs/_api/
95 | docs/*/_api/
96 | docs/_doctrees
97 |
98 | # PyBuilder
99 | target/
100 |
101 | # Jupyter Notebook
102 | .ipynb_checkpoints
103 |
104 | # pyenv
105 | .python-version
106 |
107 | # celery beat schedule file
108 | celerybeat-schedule
109 |
110 | # SageMath parsed files
111 | *.sage.py
112 |
113 | # dotenv
114 | .env
115 | .autoenv*.zsh
116 |
117 | # virtualenv
118 | .venv*
119 | venv*
120 | ENV/
121 |
122 | # Spyder project settings
123 | .spyderproject
124 |
125 | # Rope project settings
126 | .ropeproject
127 |
128 | # PyCharm
129 | .idea/
130 | *.iml
131 |
132 | # Visual Studio Code
133 | .vscode/
134 |
135 | # vim
136 | *.swp
137 |
138 | # Emacs
139 | *~
140 | \#*\#
141 | /.emacs.desktop
142 | /.emacs.desktop.lock
143 | *.elc
144 | auto-save-list
145 | tramp
146 | .\#*
147 |
148 | # OSX
149 | .DS_Store
150 |
151 | # SQL Server backups
152 | *.bkp
153 |
154 | # Spark
155 | rat-results.txt
156 |
157 | # Git stuff
158 | .gitattributes
159 | # Kubernetes generated templated files
160 | *.generated
161 | *.tar.gz
162 | scripts/ci/kubernetes/kube/.generated/airflow.yaml
163 | scripts/ci/kubernetes/docker/requirements.txt
164 |
165 | # Node & Webpack Stuff
166 | *.entry.js
167 | node_modules
168 | npm-debug.log*
169 | derby.log
170 | metastore_db
171 |
172 | # Airflow log files when airflow is run locally
173 | airflow-*.err
174 | airflow-*.out
175 | airflow-*.log
176 | airflow-*.pid
177 | .airflow_db_initialised
178 |
179 | # mypy
180 | .mypy_cache/
181 | .dmypy.json
182 | dmypy.json
183 |
184 | # Needed for CI Dockerfile.ci build system
185 | .build
186 | /tmp
187 | /files
188 |
189 | /hive_scratch_dir/
190 | /.bash_aliases
191 | /.bash_history
192 | /.kube
193 | /.inputrc
194 | log.txt*
195 |
196 | # Provider-related ignores
197 | /provider_packages/CHANGELOG.txt
198 | /provider_packages/MANIFEST.in
199 | /airflow/providers/__init__.py
200 |
201 | # Docker context files
202 | /docker-context-files
203 | # Local .terraform directories
204 | **/.terraform/*
205 |
206 | # .tfstate files
207 | *.tfstate
208 | *.tfstate.*
209 |
210 | # Terraform variables
211 | *.tfvars
212 |
213 | Chart.lock
214 |
215 | # Chart dependencies
216 | **/charts/*.tgz
217 |
218 | # Might be generated when you build wheels
219 | pip-wheel-metadata
220 |
221 | .pypirc
222 |
--------------------------------------------------------------------------------
/conversational_router_chain.json:
--------------------------------------------------------------------------------
1 | {
2 | "memory": null,
3 | "verbose": true,
4 | "prompt": {
5 | "input_variables": [
6 | "history",
7 | "input"
8 | ],
9 | "output_parser": null,
10 | "partial_variables": {},
11 | "template": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:",
12 | "template_format": "f-string",
13 | "validate_template": true,
14 | "_type": "prompt"
15 | },
16 | "llm": {
17 | "model_name": "text-davinci-003",
18 | "temperature": 0.3,
19 | "max_tokens": 256,
20 | "top_p": 1,
21 | "frequency_penalty": 0,
22 | "presence_penalty": 0,
23 | "n": 1,
24 | "best_of": 1,
25 | "request_timeout": null,
26 | "logit_bias": {},
27 | "_type": "openai"
28 | },
29 | "output_key": "output",
30 | "last_chain": null,
31 | "chains": {
32 | "space": {
33 | "memory": null,
34 | "verbose": false,
35 | "prompt": {
36 | "input_variables": [
37 | "question"
38 | ],
39 | "output_parser": null,
40 | "partial_variables": {},
41 | "template": "Assume that your Elon musk and are very concerned about future of human civilization beyond Earth. \n\nAnswer the following question keeping this in mind and provide answers that help in clarifying how \nwould humans survive as an interplanetary species. If the question is not relevant then say \"I don't know\" and do not make up any answer.\nQuestion related to space and how humans could survive: \n{question}\n",
42 | "template_format": "f-string",
43 | "validate_template": true,
44 | "_type": "prompt"
45 | },
46 | "llm": {
47 | "model_name": "text-davinci-003",
48 | "temperature": 0.9,
49 | "max_tokens": 256,
50 | "top_p": 1,
51 | "frequency_penalty": 0,
52 | "presence_penalty": 0,
53 | "n": 1,
54 | "best_of": 1,
55 | "request_timeout": null,
56 | "logit_bias": {},
57 | "_type": "openai"
58 | },
59 | "output_key": "text",
60 | "_type": "llm_chain"
61 | },
62 | "architecture": {
63 | "memory": null,
64 | "verbose": false,
65 | "prompt": {
66 | "input_variables": [
67 | "question"
68 | ],
69 | "output_parser": null,
70 | "partial_variables": {},
71 | "template": "Assume the role of a software architect who's really experienced in dealing with and scaling large scale distributed systems. \nAnswer the questions specifically on software design problems as indicated below. If the question is not relevant then say \"I don't know\" and do not make up any answer. \n\nQuestion related to distributed systems and large scale software design\n{question}\n\nPlease also include references in your answers to popular websites where more we can get more context.\n",
72 | "template_format": "f-string",
73 | "validate_template": true,
74 | "_type": "prompt"
75 | },
76 | "llm": {
77 | "model_name": "text-davinci-003",
78 | "temperature": 0.9,
79 | "max_tokens": 256,
80 | "top_p": 1,
81 | "frequency_penalty": 0,
82 | "presence_penalty": 0,
83 | "n": 1,
84 | "best_of": 1,
85 | "request_timeout": null,
86 | "logit_bias": {},
87 | "_type": "openai"
88 | },
89 | "output_key": "text",
90 | "_type": "llm_chain"
91 | },
92 | "biotechnology": {
93 | "memory": null,
94 | "verbose": false,
95 | "prompt": {
96 | "input_variables": [
97 | "question"
98 | ],
99 | "output_parser": null,
100 | "partial_variables": {},
101 | "template": "Assume the role of a genetic expert who has unlocked the secrets of our genetic make up and is able to provide clear answers to questions below.\nOptimize for answers that provide directions for improving current problems around genetic defects and how we can overcome them. If the question is not relevant then say \"I don't know\" and do not make up any answer.\n\nQuestion related to bio technology and related use cases. \n{question}\n",
102 | "template_format": "f-string",
103 | "validate_template": true,
104 | "_type": "prompt"
105 | },
106 | "llm": {
107 | "model_name": "text-davinci-003",
108 | "temperature": 0.9,
109 | "max_tokens": 256,
110 | "top_p": 1,
111 | "frequency_penalty": 0,
112 | "presence_penalty": 0,
113 | "n": 1,
114 | "best_of": 1,
115 | "request_timeout": null,
116 | "logit_bias": {},
117 | "_type": "openai"
118 | },
119 | "output_key": "text",
120 | "_type": "llm_chain"
121 | }
122 | },
123 | "strip_outputs": false,
124 | "input_key": "input",
125 | "vector_collection": {
126 | "name": "router",
127 | "metadata": null
128 | },
129 | "_type": "conversational_router_chain"
130 | }
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import importlib
3 | import json
4 | import re
5 | from typing import Dict, List, Optional
6 |
7 | from chromadb.api.models.Collection import Collection
8 | from langchain import LLMChain, BasePromptTemplate
9 | from langchain.chains.base import Chain
10 | from langchain.chains.conversation.prompt import PROMPT
11 | from langchain.input import get_color_mapping
12 | from langchain.llms import OpenAI
13 | from langchain.memory import ConversationBufferWindowMemory
14 | from langchain.schema import BaseMemory
15 | from pydantic import Extra, Field, root_validator
16 | 
17 | # pre/post-processor modules (pc_*) are not part of this repo (gitignored); they are loaded dynamically via importlib
18 |
19 | from prompt_config import RouterConfig
20 |
21 |
22 | class ConversationalRouterChain(LLMChain):
23 |     """Router chain that picks the most relevant model to call based on vector queries.
24 |     The chain also has inherent memory for conversational chat applications."""
25 |
26 |     memory: BaseMemory = Field(default_factory=lambda: ConversationBufferWindowMemory(k=1))
27 | """Default memory store."""
28 | prompt: BasePromptTemplate = PROMPT
29 | """Default conversation prompt to use."""
30 | last_chain: Chain = None
31 | chains: Dict[str, Chain]
32 | post_processors: Dict[str, Optional[str]]
33 | pre_processors: Dict[str, Optional[str]]
34 | strip_outputs: bool = False
35 | input_key: str = "input" #: :meta private:
36 | output_key: str = "output" #: :meta private:
37 | vector_collection: Collection = None
38 |
39 | class Config:
40 | """Configuration for this pydantic object."""
41 | extra = Extra.forbid
42 | arbitrary_types_allowed = True
43 |
44 | @property
45 | def input_keys(self) -> List[str]:
46 | """Expect input key.
47 |
48 | :meta private:
49 | """
50 | return [self.input_key]
51 |
52 | @property
53 | def output_keys(self) -> List[str]:
54 | """Return output key.
55 |
56 | :meta private:
57 | """
58 | return [self.output_key]
59 |
60 | @property
61 | def _chain_type(self) -> str:
62 | return "conversational_router_chain"
63 |
64 | def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
65 | _input = inputs[self.input_key]
66 |         # Hack to recover which model produced the previous response: the chain's output embeds the
67 |         # chain name in <model>...</model> markers (see the return below), since history has no model attribution.
68 |         last_chain_name = re.compile('<model>(.*?)</model>').findall(inputs['history'])
69 |         if last_chain_name and len(last_chain_name) > 0:
70 |             self.last_chain = self.chains.get(last_chain_name[0])
71 | color_mapping = get_color_mapping([str(x) for x in self.chains.keys()])
72 | if not self.vector_collection:
73 |             raise ValueError("Router embeddings (vector_collection) for ConversationalRouterChain are empty or not provided.")
74 | x = self.vector_collection.query(query_texts=[_input], n_results=3)
75 | classification, distance = x['metadatas'][0][0], x['distances'][0][0]
76 | # print(classification, distance)
77 | mname = classification['classification']
78 |         # Guardrail: if the nearest embedding match is too far away, just reuse the chain from the
79 |         # previous turn to continue the conversation.
80 | if self.chains.get(classification['classification']) and distance <= 1.5:
81 | keys = self.chains.get(classification['classification']).input_keys
82 | param = {
83 | "question": _input,
84 | }
85 | if 'context' in keys and self.pre_processors.get(mname):
86 |                 module = importlib.import_module('pre_processors.' + self.pre_processors[mname].removesuffix('.py'))
87 | func = getattr(module, 'contextualize')
88 | context = func()
89 | param['context'] = context
90 | _input = self.chains[classification['classification']](param)
91 | else:
92 | if self.last_chain:
93 | mname = last_chain_name[0]
94 | _input = self.last_chain(_input)
95 | else:
96 | raise ValueError(
97 | "Suitable destination chain not found for this question. Distance computed from nearest match: " +
98 | str(distance))
99 | self.callback_manager.on_text(
100 | str(_input['text']), color=color_mapping[mname], end="\n", verbose=self.verbose
101 | )
102 | # check for any post processing hooks.
103 | if self.post_processors.get(mname):
104 |             module = importlib.import_module('post_processors.' + self.post_processors[mname].removesuffix('.py'))
105 | func = getattr(module, 'responder')
106 | _input['text'] = func(json.loads(_input['text'].replace('Answer:', '')))
107 | print('AI:' + _input['text'])
108 |         return {self.output_key: '<model>' + mname + '</model>' + _input['text']}
109 |
110 | @root_validator()
111 | def validate_prompt_input_variables(cls, values: Dict) -> Dict:
112 | """Validate that prompt input variables are consistent."""
113 | memory_keys = values["memory"].memory_variables
114 | input_key = values["input_key"]
115 | if input_key in memory_keys:
116 | raise ValueError(
117 | f"The input key {input_key} was also found in the memory keys "
118 | f"({memory_keys}) - please provide keys that don't overlap."
119 | )
120 | prompt_variables = values["prompt"].input_variables
121 | expected_keys = memory_keys + [input_key]
122 | if set(expected_keys) != set(prompt_variables):
123 | raise ValueError(
124 | "Got unexpected prompt input variables. The prompt expects "
125 | f"{prompt_variables}, but got {memory_keys} as inputs from "
126 | f"memory, and {input_key} as the normal input key."
127 | )
128 | return values
129 |
130 |
131 | if __name__ == "__main__":
132 | arg_parser = argparse.ArgumentParser()
133 | arg_parser.add_argument('-s', '--spec', required=False, default='model_specs.yml', type=str,
134 | help="Spec file used for creating model chain map")
135 | args = arg_parser.parse_args()
136 | # set up LLM
137 | llm = OpenAI(temperature=0.3)
138 | # define chain map - add any model here.
139 | chain_config = RouterConfig(llm=llm, spec=args.spec)
140 | # set up router chain
141 | router_chain = ConversationalRouterChain(llm=llm, chains=chain_config.get_chains(),
142 | vector_collection=chain_config.get_embedding(),
143 | pre_processors=chain_config.get_pre_processor_per_chain(),
144 | post_processors=chain_config.get_post_processor_per_chain(),
145 | memory=ConversationBufferWindowMemory(k=1), verbose=True)
146 | # inference
147 | while True:
148 | text = input()
149 | output = router_chain.predict(input=text)
150 |
--------------------------------------------------------------------------------