├── .env.example ├── README.md ├── twitter.png ├── app.py ├── chainlit.md ├── elements.py ├── autogpt.py ├── requirements.txt ├── pdfqa.py ├── githubqa.py └── .gitignore /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= 2 | SERPAPI_API_KEY= -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Example of Chainlit based Apps 2 | -------------------------------------------------------------------------------- /twitter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sugarforever/chainlit-example/HEAD/twitter.png -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from langchain import PromptTemplate, LLMChain 2 | from langchain.chat_models import ChatOpenAI 3 | import chainlit as cl 4 | 5 | template = """Question: {question} 6 | 7 | Answer: Let's think step by step.""" 8 | 9 | @cl.langchain_factory(use_async=True) 10 | def factory(): 11 | prompt = PromptTemplate(template=template, input_variables=["question"]) 12 | llm_chain = LLMChain(prompt=prompt, llm=ChatOpenAI(temperature=0, streaming=True), verbose=True) 13 | 14 | return llm_chain -------------------------------------------------------------------------------- /chainlit.md: -------------------------------------------------------------------------------- 1 | # Welcome to Chainlit! 🚀🤖 2 | 3 | Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs. 4 | 5 | ## Useful Links 🔗 6 | 7 | - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚 8 | - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/ZThrUxbAYw) to ask questions, share your projects, and connect with other developers! 💬 9 | 10 | We can't wait to see what you create with Chainlit! Happy coding! 💻😊 11 | 12 | ## Welcome screen 13 | 14 | To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty. 15 | -------------------------------------------------------------------------------- /elements.py: -------------------------------------------------------------------------------- 1 | import chainlit as cl 2 | 3 | async def start_globally(): 4 | # Send the elements globally 5 | await cl.Image(path="./twitter.png", name="image1", display="inline").send() 6 | await cl.Text(content="Here is a side text document", name="text1", display="side").send() 7 | await cl.Text(content="Here is a page text document", name="text2", display="page").send() 8 | 9 | # Send the same message twice 10 | content = "Here is image1, a nice image of a cat! As well as text1 and text2!" 11 | 12 | await cl.Message( 13 | content=content, 14 | ).send() 15 | 16 | await cl.Message( 17 | content=content, 18 | ).send() 19 | 20 | await cl.Message( 21 | content="Here is image1, a nice image of a cat! As well as text1 and text3!", 22 | ).send() 23 | 24 | async def start_scoped(): 25 | # Send the first message without the elements 26 | content = "Here is image1, a nice image of a cat! As well as text1 and text2!" 27 | 28 | await cl.Message( 29 | content=content, 30 | ).send() 31 | 32 | elements = [ 33 | cl.Image(path="./twitter.png", name="image1", display="inline"), 34 | cl.Text(content="Here is a side text document", name="text1", display="side"), 35 | cl.Text(content="Here is a page text document", name="text2", display="page"), 36 | ] 37 | 38 | # Send the second message with the elements 39 | await cl.Message( 40 | content=content, 41 | elements=elements, 42 | ).send() 43 | 44 | # Send the second message with the elements 45 | await cl.Message( 46 | content=content, 47 | elements=[ 48 | cl.Image(path="./twitter.png", name="image1", display="inline"), 49 | cl.Text(content="Here is a side text document", name="text1", display="side"), 50 | ], 51 | ).send() 52 | 53 | 54 | @cl.on_chat_start 55 | async def start(): 56 | await start_scoped() 57 | -------------------------------------------------------------------------------- /autogpt.py: -------------------------------------------------------------------------------- 1 | from langchain.utilities import SerpAPIWrapper 2 | from langchain.agents import Tool 3 | from langchain.tools.file_management.write import WriteFileTool 4 | from langchain.tools.file_management.read import ReadFileTool 5 | 6 | from langchain.vectorstores import FAISS 7 | from langchain.docstore import InMemoryDocstore 8 | from langchain.embeddings import OpenAIEmbeddings 9 | import faiss 10 | 11 | from langchain.experimental import AutoGPT 12 | from langchain.chat_models import ChatOpenAI 13 | import chainlit as cl 14 | 15 | # The search tool has no async implementation, we fall back to sync 16 | @cl.langchain_factory(use_async=False) 17 | def agent(): 18 | search = SerpAPIWrapper() 19 | tools = [ 20 | Tool( 21 | name="search", 22 | func=search.run, 23 | description="useful for when you need to answer questions about current events. You should ask targeted questions", 24 | ), 25 | WriteFileTool(), 26 | ReadFileTool(), 27 | ] 28 | embeddings_model = OpenAIEmbeddings() 29 | embedding_size = 1536 30 | index = faiss.IndexFlatL2(embedding_size) 31 | 32 | vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}) 33 | 34 | callbacks = [cl.LangchainCallbackHandler()] 35 | agent = AutoGPT.from_llm_and_tools( 36 | ai_name="Xiaoming", 37 | ai_role="Assistant", 38 | tools=tools, 39 | llm=ChatOpenAI( 40 | temperature=0, 41 | streaming=True, 42 | callbacks=callbacks 43 | ), 44 | memory=vectorstore.as_retriever(), 45 | ) 46 | # Set verbose to be true 47 | agent.chain.verbose = True 48 | agent.chain.callbacks = callbacks 49 | return agent 50 | 51 | 52 | @cl.langchain_run 53 | async def run(agent, input): 54 | # Since the agent is sync, we need to make it async 55 | res = await cl.make_async(agent.run)([input]) 56 | await cl.Message(content=res).send() 57 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles==23.1.0 2 | aiohttp==3.8.4 3 | aiosignal==1.3.1 4 | anyio==3.7.1 5 | async-timeout==4.0.2 6 | asyncer==0.0.2 7 | attrs==23.1.0 8 | auth0-python==4.3.0 9 | backoff==2.2.1 10 | bidict==0.22.1 11 | certifi==2023.5.7 12 | cffi==1.15.1 13 | chainlit==0.5.2 14 | charset-normalizer==3.2.0 15 | chroma-hnswlib==0.7.1 16 | chromadb==0.4.2 17 | click==8.1.6 18 | coloredlogs==15.0.1 19 | cryptography==41.0.2 20 | dataclasses-json==0.5.12 21 | Deprecated==1.2.14 22 | faiss-cpu==1.7.4 23 | fastapi==0.97.0 24 | fastapi-socketio==0.0.10 25 | filetype==1.2.0 26 | flatbuffers==23.5.26 27 | frozenlist==1.4.0 28 | google-search-results==2.4.2 29 | googleapis-common-protos==1.59.1 30 | greenlet==2.0.2 31 | grpcio==1.56.0 32 | h11==0.14.0 33 | httpcore==0.17.3 34 | httptools==0.6.0 35 | httpx==0.24.1 36 | humanfriendly==10.0 37 | idna==3.4 38 | importlib-metadata==6.0.1 39 | importlib-resources==6.0.0 40 | Jinja2==3.1.2 41 | langchain==0.0.235 42 | langsmith==0.0.7 43 | MarkupSafe==2.1.3 44 | marshmallow==3.19.0 45 | monotonic==1.6 46 | mpmath==1.3.0 47 | multidict==6.0.4 48 | mypy-extensions==1.0.0 49 | nest-asyncio==1.5.6 50 | nodeenv==1.8.0 51 | numexpr==2.8.4 52 | numpy==1.25.1 53 | onnxruntime==1.15.1 54 | openai==0.27.8 55 | openapi-schema-pydantic==1.2.4 56 | opentelemetry-api==1.18.0 57 | opentelemetry-exporter-otlp==1.18.0 58 | opentelemetry-exporter-otlp-proto-common==1.18.0 59 | opentelemetry-exporter-otlp-proto-grpc==1.18.0 60 | opentelemetry-exporter-otlp-proto-http==1.18.0 61 | opentelemetry-instrumentation==0.39b0 62 | opentelemetry-proto==1.18.0 63 | opentelemetry-sdk==1.18.0 64 | opentelemetry-semantic-conventions==0.39b0 65 | overrides==7.3.1 66 | packaging==23.1 67 | pandas==2.0.3 68 | posthog==3.0.1 69 | prisma==0.9.1 70 | protobuf==4.23.4 71 | pulsar-client==3.2.0 72 | pycparser==2.21 73 | pydantic==1.10.11 74 | PyJWT==2.8.0 75 | PyMuPDF==1.22.5 76 | PyPika==0.48.9 77 | python-dateutil==2.8.2 78 | python-dotenv==1.0.0 79 | python-engineio==4.5.1 80 | python-graphql-client==0.4.3 81 | python-socketio==5.8.0 82 | pytz==2023.3 83 | PyYAML==6.0.1 84 | regex==2023.6.3 85 | requests==2.31.0 86 | six==1.16.0 87 | sniffio==1.3.0 88 | SQLAlchemy==2.0.19 89 | starlette==0.27.0 90 | sympy==1.12 91 | syncer==2.0.3 92 | tenacity==8.2.2 93 | tiktoken==0.4.0 94 | tokenizers==0.13.3 95 | tomli==2.0.1 96 | tomlkit==0.11.8 97 | tqdm==4.65.0 98 | typing-inspect==0.9.0 99 | typing_extensions==4.7.1 100 | tzdata==2023.3 101 | uptrace==1.18.0 102 | urllib3==2.0.3 103 | uvicorn==0.22.0 104 | uvloop==0.17.0 105 | watchfiles==0.19.0 106 | websockets==11.0.3 107 | wrapt==1.15.0 108 | yarl==1.9.2 109 | zipp==3.16.2 110 | -------------------------------------------------------------------------------- /pdfqa.py: -------------------------------------------------------------------------------- 1 | from langchain.document_loaders import PyMuPDFLoader 2 | from langchain.embeddings.openai import OpenAIEmbeddings 3 | from langchain.text_splitter import RecursiveCharacterTextSplitter 4 | from langchain.vectorstores import Chroma 5 | from langchain.chains import RetrievalQAWithSourcesChain 6 | from langchain.chat_models import ChatOpenAI 7 | from langchain.prompts.chat import ( 8 | ChatPromptTemplate, 9 | SystemMessagePromptTemplate, 10 | HumanMessagePromptTemplate, 11 | ) 12 | from chainlit.types import ( 13 | AskFileResponse 14 | ) 15 | import chainlit as cl 16 | 17 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 18 | 19 | system_template = """Use the following pieces of context to answer the users question. 20 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 21 | ALWAYS return a "SOURCES" part in your answer. 22 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 23 | 24 | Example of your response should be: 25 | 26 | ``` 27 | The answer is foo 28 | SOURCES: xyz 29 | ``` 30 | 31 | Begin! 32 | ---------------- 33 | {summaries}""" 34 | messages = [ 35 | SystemMessagePromptTemplate.from_template(system_template), 36 | HumanMessagePromptTemplate.from_template("{question}"), 37 | ] 38 | prompt = ChatPromptTemplate.from_messages(messages) 39 | chain_type_kwargs = {"prompt": prompt} 40 | 41 | def store_uploaded_file(uploaded_file: AskFileResponse): 42 | file_path = f"./tmp/{uploaded_file.name}" 43 | open(file_path, "wb").write(uploaded_file.content) 44 | return file_path 45 | 46 | @cl.langchain_factory(use_async=True) 47 | async def init(): 48 | files = None 49 | 50 | # Wait for the user to upload a file 51 | while files == None: 52 | files = await cl.AskFileMessage( 53 | content="Please upload a PDF file to begin!", accept=["application/pdf"] 54 | ).send() 55 | 56 | file = files[0] 57 | 58 | msg = cl.Message(content=f"Processing `{file.name}`...") 59 | await msg.send() 60 | 61 | file_path = store_uploaded_file(file) 62 | 63 | # Load PDF file into documents 64 | docs = PyMuPDFLoader(file_path).load() 65 | 66 | msg = cl.Message(content=f'You have {len(docs)} document(s) in the PDF file.') 67 | await msg.send() 68 | 69 | # Split the documents into chunks 70 | split_docs = text_splitter.split_documents(docs) 71 | 72 | # Create a Chroma vector store 73 | embeddings = OpenAIEmbeddings() 74 | docsearch = await cl.make_async(Chroma.from_documents)( 75 | split_docs, embeddings, collection_name=file.name 76 | ) 77 | # Create a chain that uses the Chroma vector store 78 | chain = RetrievalQAWithSourcesChain.from_chain_type( 79 | ChatOpenAI(temperature=0), 80 | chain_type="stuff", 81 | retriever=docsearch.as_retriever(), 82 | ) 83 | 84 | # Let the user know that the system is ready 85 | await msg.update(content=f"`{file.name}` processed. You can now ask questions!") 86 | 87 | return chain 88 | 89 | 90 | @cl.langchain_postprocess 91 | async def process_response(res): 92 | answer = res["answer"] 93 | await cl.Message(content=answer).send() 94 | -------------------------------------------------------------------------------- /githubqa.py: -------------------------------------------------------------------------------- 1 | from langchain.document_loaders import TextLoader 2 | from langchain.embeddings.openai import OpenAIEmbeddings 3 | from langchain.text_splitter import RecursiveCharacterTextSplitter 4 | from langchain.vectorstores import Chroma 5 | from langchain.chains import RetrievalQAWithSourcesChain 6 | from langchain.chat_models import ChatOpenAI 7 | from langchain.prompts.chat import ( 8 | ChatPromptTemplate, 9 | SystemMessagePromptTemplate, 10 | HumanMessagePromptTemplate, 11 | ) 12 | from chainlit.types import ( 13 | AskFileResponse 14 | ) 15 | import chainlit as cl 16 | import os 17 | 18 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100) 19 | 20 | system_template = """Use the following pieces of context to answer the users question. 21 | If you don't know the answer, just say that you don't know, don't try to make up an answer. 22 | ALWAYS return a "SOURCES" part in your answer. 23 | The "SOURCES" part should be a reference to the source of the document from which you got your answer. 24 | 25 | Example of your response should be: 26 | 27 | ``` 28 | The answer is foo 29 | SOURCES: xyz 30 | ``` 31 | 32 | Begin! 33 | ---------------- 34 | {summaries}""" 35 | messages = [ 36 | SystemMessagePromptTemplate.from_template(system_template), 37 | HumanMessagePromptTemplate.from_template("{question}"), 38 | ] 39 | prompt = ChatPromptTemplate.from_messages(messages) 40 | chain_type_kwargs = {"prompt": prompt} 41 | 42 | @cl.langchain_factory(use_async=True) 43 | async def init(): 44 | msg = cl.Message(content=f"Starting up Chainlit github chatbot...") 45 | await msg.send() 46 | 47 | docs = [] 48 | for dirpath, dirnames, filenames in os.walk("./tmp/chainlit"): 49 | for file in filenames: 50 | try: 51 | if file.endswith(".md"): 52 | msg = cl.Message(content=f"Processing...{file}") 53 | await msg.send() 54 | loader = TextLoader(os.path.join(dirpath, file)) 55 | docs.extend(loader.load()) 56 | except Exception as e: 57 | pass 58 | 59 | split_docs = text_splitter.split_documents(docs) 60 | 61 | # Create a Chroma vector store 62 | embeddings = OpenAIEmbeddings() 63 | docsearch = await cl.make_async(Chroma.from_documents)( 64 | split_docs, embeddings, collection_name="github" 65 | ) 66 | # Create a chain that uses the Chroma vector store 67 | chain = RetrievalQAWithSourcesChain.from_chain_type( 68 | ChatOpenAI(temperature=0), 69 | chain_type="stuff", 70 | retriever=docsearch.as_retriever(), 71 | ) 72 | 73 | # Let the user know that the system is ready 74 | await msg.update(content=f"Chainlit github chatbot is ready. You can now ask questions!") 75 | 76 | return chain 77 | 78 | 79 | @cl.langchain_postprocess 80 | async def process_response(res): 81 | answer = res["answer"] 82 | 83 | actions = [ 84 | cl.Action(name="like_button", label="Like!", value="like", description="Like!"), 85 | cl.Action(name="dislike_button", label="Dislike!", value="dislike", description="Dislike!") 86 | ] 87 | 88 | await cl.Message(content=answer, actions=actions).send() 89 | 90 | @cl.action_callback("like_button") 91 | async def on_action(action): 92 | await cl.Message(content=f"Executed {action.name}").send() 93 | # Optionally remove the action button from the chatbot user interface 94 | await action.remove() 95 | 96 | @cl.action_callback("dislike_button") 97 | async def on_action(action): 98 | await cl.Message(content=f"Executed {action.name}").send() 99 | # Optionally remove the action button from the chatbot user interface 100 | await action.remove() -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | .chainlit 163 | 164 | tmp --------------------------------------------------------------------------------