├── example.env ├── requirements.txt ├── .github └── FUNDING.yml ├── langchain_falcon.py ├── LICENSE ├── README.md ├── langchain_falcon.ipynb └── .gitignore /example.env: -------------------------------------------------------------------------------- 1 | HUGGINGFACEHUB_API_TOKEN=your-huggingfacehub-api-token -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | langchain 2 | huggingface_hub 3 | chainlit 4 | python-dotenv 5 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: datasciencebasics 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /langchain_falcon.py: -------------------------------------------------------------------------------- 1 | from langchain import HuggingFaceHub 2 | from langchain import PromptTemplate, LLMChain 3 | import os 4 | 5 | from dotenv import load_dotenv 6 | import chainlit as cl 7 | 8 | # Load environment variables from .env file 9 | load_dotenv() 10 | 11 | # Read the Hugging Face Hub API token set in .env (see example.env) 12 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 13 | 14 | repo_id = "tiiuae/falcon-7b-instruct" 15 | llm = HuggingFaceHub(huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN, 16 | repo_id=repo_id, 17 | model_kwargs={"temperature":0.7, "max_new_tokens":500}) 18 | 19 | 20 | template = """ 21 | You are a helpful AI assistant and provide the answer for the question asked politely. 22 | 23 | {question} 24 | """ 25 | # Chainlit calls this factory to build the chain that powers the chat UI 26 | @cl.langchain_factory 27 | def factory(): 28 | prompt = PromptTemplate(template=template, input_variables=["question"]) 29 | llm_chain = LLMChain(prompt=prompt, llm=llm) 30 | 31 | return llm_chain 32 | 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Sudarshan Koirala 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software.
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # langchain-falcon-chainlit 2 | Simple chat UI using the Falcon model, LangChain and Chainlit 3 | 4 | ### [YouTube Video Covering this GitHub Repo](https://youtu.be/gnyUUY8X-G4) 5 | 6 | ### Open Source in Action 🚀 7 | - [Falcon](https://falconllm.tii.ae/) as the Large Language Model 8 | - [LangChain](https://python.langchain.com/en/latest/modules/models/llms/integrations/huggingface_hub.html) as the LLM framework 9 | - [Falcon model](https://huggingface.co/tiiuae/falcon-7b-instruct) from the Hugging Face website 10 | - [Chainlit](https://docs.chainlit.io/langchain) for deploying the chat UI 11 | 12 | ## System Requirements 13 | 14 | You must have Python 3.10 or later installed. Earlier versions of Python may not work. 15 | 16 | ## Steps to Replicate 17 | 18 | 1. Fork this repository and create a codespace in GitHub as shown in the YouTube video, OR clone it locally. 19 | ``` 20 | git clone https://github.com/sudarshan-koirala/langchain-falcon-chainlit.git 21 | cd langchain-falcon-chainlit 22 | ``` 23 | 24 | 2. Copy example.env to .env with `cp example.env .env` and add your Hugging Face Hub API token as follows. Get the token from this [URL](https://huggingface.co/settings/tokens); you need to create a Hugging Face account if you haven't already. 25 | ``` 26 | HUGGINGFACEHUB_API_TOKEN=your_huggingface_token 27 | ``` 28 | 29 | 3. Run the following command in the terminal to install the necessary Python packages: 30 | ``` 31 | pip install -r requirements.txt 32 | ``` 33 | 34 | 4. Run the following command in your terminal to start the chat UI: 35 | ``` 36 | chainlit run langchain_falcon.py -w 37 | ``` 38 | 39 | ## Disclaimer 40 | This is a test project, presented in my YouTube video to learn new things with the available open-source projects and models. It is not meant to be used in production as it is not production ready.
You can modify the code and use for your usecases ✌️ 41 | -------------------------------------------------------------------------------- /langchain_falcon.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "\n", 9 | " \"Open\n", 10 | "" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "%%capture\n", 20 | "%pip install langchain huggingface_hub watermark" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "\n", 30 | "%load_ext watermark\n", 31 | "%watermark -a \"Sudarshan Koirala\" -vmp langchain,huggingface_hub" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "# get your Huggingface access token from https://huggingface.co/settings/tokens 🔑\n", 41 | "from getpass import getpass\n", 42 | "import os\n", 43 | "\n", 44 | "HUGGINGFACE_API_TOKEN = getpass()\n", 45 | "os.environ[\"HUGGINGFACE_API_TOKEN\"] = HUGGINGFACE_API_TOKEN " 46 | ] 47 | }, 48 | { 49 | "attachments": {}, 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "#### Let's use falcon-7b-instruct model from [Huggingface website](https://huggingface.co/tiiuae/falcon-7b-instruct)" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "from langchain import HuggingFaceHub\n", 63 | "\n", 64 | "repo_id = \"tiiuae/falcon-7b-instruct\"\n", 65 | "llm = HuggingFaceHub(huggingfacehub_api_token=HUGGINGFACE_API_TOKEN, \n", 66 | " repo_id=repo_id, \n", 67 | " model_kwargs={\"temperature\":0.7, \"max_new_tokens\":700})" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "from langchain import PromptTemplate, LLMChain\n", 77 | "\n", 78 | "template = \"\"\"\n", 79 | "You are a helpful AI assistant and provide the answer for the question asked politely.\n", 80 | "\n", 81 | "{question}\n", 82 | "Answer: Let's think step by step.\n", 83 | "\"\"\"\n", 84 | "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n", 85 | "llm_chain = LLMChain(prompt=prompt, llm=llm)\n", 86 | "\n", 87 | "question = \"How to cook Pizza ?\"\n", 88 | "\n", 89 | "print(llm_chain.run(question))" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [] 98 | } 99 | ], 100 | "metadata": { 101 | "kernelspec": { 102 | "display_name": "Python 3 (ipykernel)", 103 | "language": "python", 104 | "name": "python3" 105 | }, 106 | "language_info": { 107 | "codemirror_mode": { 108 | "name": "ipython", 109 | "version": 3 110 | }, 111 | "file_extension": ".py", 112 | "mimetype": "text/x-python", 113 | "name": "python", 114 | "nbconvert_exporter": "python", 115 | "pygments_lexer": "ipython3", 116 | "version": "3.10.4" 117 | }, 118 | "orig_nbformat": 4 119 | }, 120 | "nbformat": 4, 121 | "nbformat_minor": 2 122 | } 123 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 
*$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # GitHub 163 | .github/ 164 | --------------------------------------------------------------------------------
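
For quick reference (this is a sketch, not a file in the repository): the same Falcon chain can be exercised directly in Python, outside Chainlit, mirroring langchain_falcon.py and the notebook above. It assumes HUGGINGFACEHUB_API_TOKEN is set in .env as described in the README; the question string is only illustrative.

```
# Minimal sketch (not part of the repository): run the Falcon chain directly,
# assuming HUGGINGFACEHUB_API_TOKEN is defined in .env as described in the README.
import os

from dotenv import load_dotenv
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

load_dotenv()  # picks up HUGGINGFACEHUB_API_TOKEN from .env

llm = HuggingFaceHub(
    huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    repo_id="tiiuae/falcon-7b-instruct",
    model_kwargs={"temperature": 0.7, "max_new_tokens": 500},
)

template = """
You are a helpful AI assistant and provide the answer for the question asked politely.

{question}
"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=llm)

print(llm_chain.run("How to cook Pizza ?"))  # illustrative question, as in the notebook
```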