├── .gitattributes
├── .gitignore
├── README.md
├── app.py
├── assets
│   └── chatserver-min.png
├── pyproject.toml
├── setup.cfg
├── setup.py
└── src
    ├── chatserver
    │   ├── __init__.py
    │   ├── chatbot_chain.py
    │   ├── components
    │   │   ├── __init__.py
    │   │   └── llm_serve.py
    │   ├── lightning_client.py
    │   └── ui
    │       ├── __init__.py
    │       ├── main.py
    │       └── templates.py
    └── requirements.txt

/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ChatServer

A chatbot system built with LangChain and Lightning AI.

## How to run

```bash
git clone https://github.com/aniketmaurya/chatbot-server.git
cd chatbot-server

pip install -e .
lightning run app app.py
```

Once the app is running, open the UI and start a conversation with the chatbot.

![ChatServer UI](./assets/chatserver-min.png)
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import lightning as L
import lightning.app.frontend as frontend

from chatserver.components import LLMServe
from chatserver.ui import ui_render_fn


class ChatBotApp(L.LightningFlow):
    def __init__(self):
        super().__init__()
        # Serve the LLM as a separate work on a GPU machine.
        self.llm_serve = LLMServe(
            model_id="google/flan-ul2", cloud_compute=L.CloudCompute("gpu")
        )
        self.llm_url = ""

    def run(self):
        self.llm_serve.run()
        # Expose the server URL to the frontend once the work is up.
        if self.llm_serve.url:
            self.llm_url = self.llm_serve.url

    def configure_layout(self):
        return frontend.StreamlitFrontend(render_fn=ui_render_fn)


app = L.LightningApp(ChatBotApp())
--------------------------------------------------------------------------------
/assets/chatserver-min.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aniketmaurya/chatbot-server/034ae0eb17cbcddff53f99d42cbfe22a7bc2187b/assets/chatserver-min.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[metadata]
name = chatbot
version = 0.0.1

[options]
package_dir =
    = src
packages = find:

[options.packages.find]
where = src
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

with open("src/requirements.txt") as f:
    required = f.read().splitlines()

setup(
    install_requires=required,
)
--------------------------------------------------------------------------------
/src/chatserver/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aniketmaurya/chatbot-server/034ae0eb17cbcddff53f99d42cbfe22a7bc2187b/src/chatserver/__init__.py
--------------------------------------------------------------------------------
/src/chatserver/chatbot_chain.py:
--------------------------------------------------------------------------------
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory

from chatserver.lightning_client import LightningChain


def lit_chain(url: str):
    return load_chain(LightningChain(url=url))


def load_chain(llm):
    """Build a ConversationChain with summary-buffer memory around the given LLM."""

    input_key = "input"
    output_key = "response"
    memory = ConversationSummaryBufferMemory(
        llm=llm, output_key=output_key, input_key=input_key
    )
    chain = ConversationChain(
        llm=llm, verbose=True, memory=memory, output_key=output_key, input_key=input_key
    )
    return chain
--------------------------------------------------------------------------------
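A minimal usage sketch of `lit_chain` — illustrative only; the URL below is a placeholder for whatever address the running `LLMServe` work reports (see `app.py`):

```python
# Hypothetical usage; "http://127.0.0.1:7777" stands in for the real server URL.
from chatserver.chatbot_chain import lit_chain

chain = lit_chain("http://127.0.0.1:7777")
# The ConversationChain keeps a running summary of the dialogue in memory,
# so successive predict() calls share context.
print(chain.predict(input="Hello, how are you?"))
```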
/src/chatserver/components/__init__.py:
--------------------------------------------------------------------------------
from .llm_serve import LLMServe
--------------------------------------------------------------------------------
/src/chatserver/components/llm_serve.py:
--------------------------------------------------------------------------------
import logging
from typing import Any, Optional

import lightning as L
from lightning.app.components import PythonServer, Text
from pydantic import BaseModel

logger = logging.getLogger(__name__)

_DEFAULT_MODEL_ID = "google/flan-t5-base"


def load_hf_llm(model_id: str):
    """Load a Hugging Face seq2seq model and wrap it as a LangChain LLM."""
    from langchain.llms import HuggingFacePipeline
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

    return HuggingFacePipeline(pipeline=pipe)


class PromptSchema(BaseModel):
    # prompt: str = Field(title="Your msg to chatbot", max_length=300, min_length=1)
    prompt: str


class LLMServe(PythonServer):
    def __init__(self, model_id: Optional[str] = None, **kwargs):
        super().__init__(input_type=PromptSchema, output_type=Text, **kwargs)
        self.model_id = model_id or _DEFAULT_MODEL_ID

    def setup(self, *args, **kwargs) -> None:
        # Called once before serving; loads the model into memory.
        self._model = load_hf_llm(self.model_id)

    def predict(self, request: PromptSchema) -> Any:
        return {"text": self._model(request.prompt)}


if __name__ == "__main__":
    app = L.LightningApp(LLMServe())
--------------------------------------------------------------------------------
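The `/predict` route can also be exercised directly over HTTP — this mirrors exactly what `LightningChain._call` (below) sends. A sketch using `requests`; the URL is a placeholder for the address printed by the running app:

```python
# The host/port below are placeholders for the running LLMServe address.
import requests

response = requests.post(
    "http://127.0.0.1:7777/predict",
    headers={"accept": "application/json", "Content-Type": "application/json"},
    json={"prompt": "Hello, how are you?"},
)
response.raise_for_status()
print(response.json()["text"])  # the generated completion
```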
/src/chatserver/lightning_client.py:
--------------------------------------------------------------------------------
"""Wrapper around Lightning App."""
import logging
from typing import List, Optional

import requests
from langchain.llms.base import LLM
from pydantic import BaseModel

logger = logging.getLogger(__name__)


class LightningChain(LLM, BaseModel):
    url: str = ""

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Send the prompt to the Lightning server and return the generated text."""
        if self.url == "":
            raise ValueError("Server URL not set!")

        headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
        }
        assert isinstance(prompt, str)
        json_data = {"prompt": prompt}
        response = requests.post(
            url=self.url + "/predict", headers=headers, json=json_data
        )
        # Fail loudly on non-2xx responses instead of parsing an error body.
        response.raise_for_status()
        return response.json()["text"]

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "Lightning"
--------------------------------------------------------------------------------
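Because `LightningChain` subclasses LangChain's `LLM` base class, it can also be called on its own, outside the conversation chain. A sketch, with the URL again a placeholder:

```python
# Hypothetical standalone use; the URL stands in for the real server address.
from chatserver.lightning_client import LightningChain

llm = LightningChain(url="http://127.0.0.1:7777")
print(llm("What is the capital of France?"))  # LLM.__call__ delegates to _call
```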
/src/chatserver/ui/__init__.py:
--------------------------------------------------------------------------------
from .main import run as ui_render_fn
--------------------------------------------------------------------------------
/src/chatserver/ui/main.py:
--------------------------------------------------------------------------------
"""Python file to serve as the frontend."""
import rich
import streamlit as st
from streamlit_chat import message

from chatserver.chatbot_chain import lit_chain


def run(lightning_app_state):
    # set_page_config must be the first Streamlit command in the script.
    st.set_page_config(page_title="LangChain Demo", page_icon=":robot:")

    if not lightning_app_state.llm_url:
        st.info("Waiting for the server to get ready... :clock:")
        return

    print("lightning_app_state", lightning_app_state)

    # Cache the chain in the session so memory survives Streamlit reruns.
    if "model" not in st.session_state:
        chain = lit_chain(lightning_app_state.llm_url)
        st.session_state["model"] = chain
    else:
        chain = st.session_state["model"]

    # From here down is all the Streamlit UI.
    st.header("ChatBot Demo")

    if "generated" not in st.session_state:
        st.session_state["generated"] = []

    if "past" not in st.session_state:
        st.session_state["past"] = []

    def get_text():
        input_text = st.text_input("You: ", "Hello, how are you?", key="input")
        return input_text

    user_input = get_text()

    if user_input:
        rich.print("user input:", user_input)
        output = chain.predict(input=user_input)
        rich.print("buffer:", chain.memory.buffer)

        st.session_state.past.append(user_input)
        st.session_state.generated.append(output)

    # Render the conversation, newest exchange first.
    if st.session_state["generated"]:
        for i in range(len(st.session_state["generated"]) - 1, -1, -1):
            message(st.session_state["generated"][i], key=str(i))
            message(st.session_state["past"][i], is_user=True, key=str(i) + "_user")
--------------------------------------------------------------------------------
/src/chatserver/ui/templates.py:
--------------------------------------------------------------------------------
chatgpt_template = """Assistant is a large language model trained by OpenAI.

Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.

Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.

Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.

{history}
Human: {human_input}
Assistant:"""


question_template = """Question: {question}

Answer:"""
--------------------------------------------------------------------------------
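Neither template is actually wired into the chain above; a sketch of how `chatgpt_template` could be plugged into a LangChain prompt (illustrative, not part of this repo):

```python
# Illustrative only: the repo defines these templates but does not use them yet.
from langchain.prompts import PromptTemplate

from chatserver.ui.templates import chatgpt_template

prompt = PromptTemplate(
    input_variables=["history", "human_input"],
    template=chatgpt_template,
)
print(prompt.format(history="", human_input="Hello, how are you?"))
```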
/src/requirements.txt:
--------------------------------------------------------------------------------
lightning
langchain>=0.0.94
openai>=0.26.5
streamlit>=1.19.0
streamlit-chat>=0.0.2.1
transformers>=4.26.1
fastapi>=0.88.0
--------------------------------------------------------------------------------