├── requirements
│   ├── requirements-dev.txt
│   └── requirements.txt
├── .github
│   └── workflows
│       └── ci.yaml
├── Dockerfile
├── Makefile
├── README.md
├── app.py
└── .gitignore

/requirements/requirements-dev.txt:
--------------------------------------------------------------------------------
ruff==0.7.2
--------------------------------------------------------------------------------
/requirements/requirements.txt:
--------------------------------------------------------------------------------
langchain-ollama==0.2.0
streamlit==1.40.0
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
name: Check lint rules
on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - '*'
jobs:
  Linter:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install ruff
      - name: Run Linter
        run: make check
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM public.ecr.aws/docker/library/python:3.11.10-slim-bookworm

ENV STREAMLIT_SERVER_PORT=8501
ENV STREAMLIT_SERVER_ADDRESS=0.0.0.0
ENV OLLAMA_HOST=host.docker.internal

WORKDIR /app

COPY requirements/ requirements

COPY Makefile .

# NOTE: the remaining instructions were truncated in the source dump; the
# completion below is an assumption: install the dependencies, copy the app,
# and start Streamlit.
RUN pip install --no-cache-dir -r requirements/requirements.txt

COPY app.py .

CMD ["streamlit", "run", "app.py"]
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# NOTE: the top of this Makefile was truncated in the source dump. The SHELL
# setting, the variable definitions, and the check/fix/run targets below are
# assumed reconstructions, inferred from how they are referenced by the CI
# workflow, the README, and the surviving targets further down.
SHELL := /bin/bash
IMAGE_NAME ?= llm-function-calling-demo
VENV_DIR ?= .venv
LOCAL_PORT ?= 8501

check: # Check for linting rule violations
	@ruff check .

fix: # Auto-fix linting violations
	@ruff check --fix .

run: # Run the application locally
	@streamlit run app.py

build-image: # Build the Docker image if it does not exist
	@if docker image inspect $(IMAGE_NAME) > /dev/null 2>&1; then \
		echo "✅ Image $(IMAGE_NAME) exists"; \
	else \
		echo "❌ Image $(IMAGE_NAME) does not exist"; \
		echo "🔨 Building image..."; \
		docker build -t $(IMAGE_NAME) .; \
	fi

run-docker: build-image # Run the application in a Docker container
	@docker run --name $(IMAGE_NAME) -p $(LOCAL_PORT):8501 -d $(IMAGE_NAME)
	@echo "🎉 Go to http://localhost:$(LOCAL_PORT) to get started!"

setup: # Initial project setup
	@echo "Creating virtual env at: $(VENV_DIR)"
	@python3 -m venv $(VENV_DIR)
	@echo "Installing dependencies..."
	@source $(VENV_DIR)/bin/activate && pip install -r requirements/requirements-dev.txt && pip install -r requirements/requirements.txt
	@echo -e "\n✅ Done.\n🎉 Run the following commands to get started:\n\n  ➡️ source $(VENV_DIR)/bin/activate\n  ➡️ make run\n"

help: # Show this help
	@egrep -h '\s#\s' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?# "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 𝑓 Function Calling Demo Application

A demo function-calling app built for the accompanying YouTube video.

Watch the video 👇

## 🔨 Setting up locally

Create a virtual environment and install the dependencies.

This step is not required if you are running the app in Docker.

```sh
make setup
```

## ⚡️ Running the application

Make sure you have [Ollama](https://ollama.com/download) installed and running on your machine.
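
If you are not sure whether the server is up, a quick check (and a pull of the default model) looks like this; any locally pulled model that supports tool calling also works:

```sh
ollama list               # fails if the Ollama server is not running
ollama pull mistral-nemo  # the model app.py uses by default
```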

By default, the app uses the [mistral-nemo](https://ollama.com/library/mistral-nemo) model, but you can also use [Llama 3.1](https://ollama.com/library/llama3.1) or [Llama 3.2](https://ollama.com/library/llama3.2).

Download the model you want to use before running the application, and update [app.py](https://github.com/yankeexe/llm-function-calling-demo/blob/55b73c6947f05d460f284d92136285b4e1d233bd/app.py#L66) to change the model if necessary (see the example below).

### Running locally

```sh
make run
```

### Running in a container

```sh
make run-docker
```
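
Whichever way you run it, the model is selected on a single line of [app.py](https://github.com/yankeexe/llm-function-calling-demo/blob/55b73c6947f05d460f284d92136285b4e1d233bd/app.py#L66). For example, switching to Llama 3.1 (assuming you have already pulled it with `ollama pull llama3.1`) is a one-line change:

```python
# app.py: any locally available Ollama model that supports tool calling works here
llm = ChatOllama(model="llama3.1")
```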

⚠️ Running in a container does not work out of the box on Linux 🐧

The application running inside the container uses the special DNS name `host.docker.internal` to reach Ollama running on the host machine.

However, this DNS name is not resolvable on Linux by default.
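
A common workaround on Linux (not wired into the `run-docker` target, so treat this as an untested sketch) is to map the name to the host's gateway yourself and let Ollama accept connections from the Docker bridge network:

```sh
# On the host: make Ollama listen on all interfaces, not just localhost
OLLAMA_HOST=0.0.0.0 ollama serve

# Run the image built by `make build-image`, substituting the actual image name
docker run --add-host=host.docker.internal:host-gateway -p 8501:8501 -d <image-name>
```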

## ✨ Linters and Formatters

Check for linting rule violations:

```sh
make check
```

Auto-fix linting violations:

```sh
make fix
```

## 🤸‍♀️ Getting Help

```sh
make

# OR

make help
```
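
For reference, `make check` and `make fix` appear to wrap [ruff](https://docs.astral.sh/ruff/), the only linter pinned in `requirements/requirements-dev.txt`; if you prefer to skip make, the rough direct equivalents are:

```sh
ruff check .        # roughly what `make check` runs
ruff check --fix .  # roughly what `make fix` runs
```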

--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
import shutil
import sys
from datetime import datetime
from zoneinfo import ZoneInfo

import streamlit as st
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_ollama import ChatOllama

messages = []

prompt = st.text_input("Enter your prompt")


@tool(parse_docstring=True)
def get_disk_usage():
    """Retrieves disk usage. Call this whenever you need to know the disk usage, for example when a customer asks "What is the disk usage?"

    Note: View JSON Schema: get_disk_usage.args_schema.schema()

    Returns:
        dict: A dictionary containing disk usage statistics with the following keys:
            - total (str): Total disk space in GB
            - used (str): Used disk space in GB
            - free (str): Free disk space in GB
    """
    path = "/"
    total, used, free = shutil.disk_usage(path)
    gb = 1024 * 1024 * 1024

    return {
        "total": f"{total / gb:.2f} GB",
        "used": f"{used / gb:.2f} GB",
        "free": f"{free / gb:.2f} GB",
    }


@tool(parse_docstring=True)
def get_time_in_timezone(timezone_name: str) -> str:
    """Returns the current time for a given timezone. Call this whenever you need to know the current time of any timezone, for example when a customer asks "What is the time in Kathmandu?"

    Args:
        timezone_name: IANA timezone name (e.g., 'America/New_York')

    Note: View JSON Schema: get_time_in_timezone.args_schema.schema()

    Returns:
        str: Current time in the specified timezone
    """
    try:
        current_time = datetime.now(ZoneInfo(timezone_name))
        return current_time.strftime("%Y-%m-%d %H:%M:%S %Z")
    except Exception as e:
        return f"Error: Invalid timezone: {str(e)}"


# Map tool names (as emitted by the model) to their implementations.
tools_list = {
    "get_time_in_timezone": get_time_in_timezone,
    "get_disk_usage": get_disk_usage,
}

if prompt:
    llm = ChatOllama(model="mistral-nemo:latest")
    llm_with_tools = llm.bind_tools(list(tools_list.values()))

    # First pass: let the model answer directly or request tool calls.
    messages.append(HumanMessage(prompt))
    ai_response = llm_with_tools.invoke(messages)
    messages.append(ai_response)

    if not ai_response.tool_calls:
        with st.container(height=500, border=True):
            st.write(ai_response.content)
        sys.exit()

    # Execute every requested tool call and feed the results back to the model.
    for tool_call in ai_response.tool_calls:
        selected_tool = tools_list.get(tool_call["name"].lower())
        tool_response = selected_tool.invoke(tool_call["args"])
        messages.append(ToolMessage(tool_response, tool_call_id=tool_call["id"]))

    # Second pass: stream the final answer grounded in the tool output.
    final_response = llm_with_tools.stream(messages)
    with st.container(height=500, border=True):
        st.write_stream(final_response)
--------------------------------------------------------------------------------
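
The docstrings in app.py above are what LangChain turns into the tool specs the model sees (the `Note:` lines in them hint at this). As an optional sanity check, you can inspect a generated schema and invoke a tool directly from a plain Python session, with no model involved; importing `app` outside Streamlit runs it in "bare" mode and only prints warnings:

```python
from app import get_disk_usage, get_time_in_timezone

# JSON schema derived from each function's signature and parsed docstring
print(get_disk_usage.args_schema.schema())

# Call the tools directly, bypassing the LLM entirely
print(get_disk_usage.invoke({}))
print(get_time_in_timezone.invoke({"timezone_name": "Asia/Kathmandu"}))
```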

/.gitignore:
--------------------------------------------------------------------------------
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
--------------------------------------------------------------------------------