"
25 | ]
26 | },
27 | "execution_count": 5,
28 | "metadata": {},
29 | "output_type": "execute_result"
30 | }
31 | ],
32 | "source": [
33 | "import mlflow\n",
34 | "from typing import Literal\n",
35 | "from langchain_core.messages import AIMessage, ToolCall\n",
36 | "from langchain_core.outputs import ChatGeneration, ChatResult\n",
37 | "from langchain_core.tools import tool\n",
38 | "from langchain_openai import ChatOpenAI\n",
39 | "from langgraph.prebuilt import create_react_agent\n",
40 | "from dotenv import load_dotenv\n",
41 | "load_dotenv()\n",
42 | "\n",
43 | "mlflow.langchain.autolog()\n",
44 | "\n",
45 | "mlflow.set_tracking_uri(\"http://localhost:5000\")\n",
46 | "mlflow.set_experiment(\"LangGraph\")\n"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "id": "b3f57637",
52 | "metadata": {},
53 | "source": [
54 | "### Define our Tool & Graph\n",
55 | "Below is the code snippet provided in your request. We define a simple tool to get weather (with limited city options) and create a ReAct-style agent using LangGraph."
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 6,
61 | "id": "a674fe47",
62 | "metadata": {
63 | "tags": []
64 | },
65 | "outputs": [],
66 | "source": [
67 | "@tool\n",
68 | "def get_weather(city: Literal[\"nyc\", \"sf\"]):\n",
69 | " \"\"\"Use this to get weather information.\"\"\"\n",
70 | " if city == \"nyc\":\n",
71 | " return \"It might be cloudy in nyc\"\n",
72 | " elif city == \"sf\":\n",
73 | " return \"It's always sunny in sf\"\n",
74 | "\n",
75 | "# Instantiate the LLM\n",
76 | "llm = ChatOpenAI(model=\"gpt-4o-mini\") # placeholder model name\n",
77 | "\n",
78 | "# Create the ReAct agent\n",
79 | "tools = [get_weather]\n",
80 | "graph = create_react_agent(llm, tools)\n"
81 | ]
82 | },
83 | {
84 | "cell_type": "markdown",
85 | "id": "8716eea8",
86 | "metadata": {},
87 | "source": [
88 | "### Invoke the Graph\n",
89 | "We now call `graph.invoke` with a user request about the weather in SF. "
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 7,
95 | "id": "5d0631e3",
96 | "metadata": {
97 | "tags": []
98 | },
99 | "outputs": [
100 | {
101 | "name": "stdout",
102 | "output_type": "stream",
103 | "text": [
104 | "Agent response: {'messages': [HumanMessage(content='what is the weather in sf?', additional_kwargs={}, response_metadata={}, id='81a232ed-b6f0-4d47-8f3d-0c70c17aa4d1'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_yok4TNqxoU2s6vHoCZqyo4Jf', 'function': {'arguments': '{\"city\":\"sf\"}', 'name': 'get_weather'}, 'type': 'function'}], 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_27322b4e16', 'id': 'chatcmpl-BF4ucn1Ex6HVdykOaITebDcCZw9jQ', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-aeccf54e-6c93-4b46-86b5-3c5f12efbfe7-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_yok4TNqxoU2s6vHoCZqyo4Jf', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}), ToolMessage(content=\"It's always sunny in sf\", name='get_weather', id='a8767e92-3503-42e7-914f-8e6740be610b', tool_call_id='call_yok4TNqxoU2s6vHoCZqyo4Jf'), AIMessage(content='The weather in San Francisco is sunny!', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 85, 'total_tokens': 95, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_27322b4e16', 'id': 'chatcmpl-BF4udE60FlEmnIF7MPBzGvFrjoPGr', 'finish_reason': 'stop', 'logprobs': None}, id='run-4e5c87bf-caa4-43d9-9a59-64dc59c5007e-0', usage_metadata={'input_tokens': 85, 'output_tokens': 10, 'total_tokens': 95, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]}\n"
105 | ]
106 | },
107 | {
108 | "data": {
109 | "text/html": [
110 | "\n",
111 | "\n",
112 | " \n",
129 | " \n",
139 | " \n",
144 | "
\n"
145 | ],
146 | "text/plain": [
147 | "Trace(request_id=747be10c0e7245de8616e89df06d26da)"
148 | ]
149 | },
150 | "metadata": {},
151 | "output_type": "display_data"
152 | }
153 | ],
154 | "source": [
155 | "result = graph.invoke({\n",
156 | " \"messages\": [\n",
157 | " {\"role\": \"user\", \"content\": \"what is the weather in sf?\"}\n",
158 | " ]\n",
159 | "})\n",
160 | "print(\"Agent response:\", result)"
161 | ]
162 | },
163 | {
164 | "cell_type": "markdown",
165 | "id": "4f0ef46d",
166 | "metadata": {},
167 | "source": [
168 | "For adding manual spans on top of autologging, see the MLflow tracing docs: https://www.mlflow.org/docs/latest/tracing/api/manual-instrumentation/"
169 | ]
170 | },
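171 | {
172 | "cell_type": "markdown",
173 | "id": "manual-tracing-note",
174 | "metadata": {},
175 | "source": [
176 | "A minimal sketch of manual instrumentation with `mlflow.trace` and `mlflow.start_span` (the helper function and span names below are illustrative, not part of the agent above):"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": null,
182 | "id": "manual-tracing-demo",
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "import mlflow\n",
187 | "\n",
188 | "@mlflow.trace  # records each call to this function as a span\n",
189 | "def fake_lookup(city: str) -> str:  # illustrative helper, not from the example above\n",
190 | " return f\"Canned weather for {city}\"\n",
191 | "\n",
192 | "# An explicit parent span wrapping the call\n",
193 | "with mlflow.start_span(name=\"manual_demo\") as span:\n",
194 | " span.set_inputs({\"city\": \"sf\"})\n",
195 | " out = fake_lookup(\"sf\")\n",
196 | " span.set_outputs({\"result\": out})\n",
197 | "print(out)"
198 | ]
199 | }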
193 | ],
194 | "metadata": {
195 | "colab": {
196 | "name": "mlflow_langgraph_example.ipynb"
197 | },
198 | "kernelspec": {
199 | "display_name": ".venv",
200 | "language": "python",
201 | "name": "python3"
202 | },
203 | "language_info": {
204 | "codemirror_mode": {
205 | "name": "ipython",
206 | "version": 3
207 | },
208 | "file_extension": ".py",
209 | "mimetype": "text/x-python",
210 | "name": "python",
211 | "nbconvert_exporter": "python",
212 | "pygments_lexer": "ipython3",
213 | "version": "3.11.0"
214 | }
215 | },
216 | "nbformat": 4,
217 | "nbformat_minor": 5
218 | }
219 |
--------------------------------------------------------------------------------
/my_chroma_db/store1/04895bc9-25a9-4a93-9aa6-033530988dd6/header.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/my_chroma_db/store1/04895bc9-25a9-4a93-9aa6-033530988dd6/header.bin
--------------------------------------------------------------------------------
/my_chroma_db/store1/04895bc9-25a9-4a93-9aa6-033530988dd6/length.bin:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/my_chroma_db/store1/04895bc9-25a9-4a93-9aa6-033530988dd6/link_lists.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/my_chroma_db/store1/04895bc9-25a9-4a93-9aa6-033530988dd6/link_lists.bin
--------------------------------------------------------------------------------
/my_chroma_db/store1/chroma.sqlite3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/my_chroma_db/store1/chroma.sqlite3
--------------------------------------------------------------------------------
/my_chroma_db/store2/chroma.sqlite3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/my_chroma_db/store2/chroma.sqlite3
--------------------------------------------------------------------------------
/my_chroma_db/store3/chroma.sqlite3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/my_chroma_db/store3/chroma.sqlite3
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/requirements.txt
--------------------------------------------------------------------------------
/responsesapi.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "af480eb1",
6 | "metadata": {},
7 | "source": [
8 | "# Responses API Tutorial: From Introduction to Summary\n",
9 | "\n",
10 | "Below is a comprehensive tutorial and set of examples demonstrating:\n",
11 | "\n",
12 | "1. **How the Responses API works and how it differs from Chat Completions** (particularly around stateful vs. stateless usage).\n",
13 | "2. **Examples** of multi-turn conversation (using `previous_response_id` for stateful flows) and built-in tools like web search and file search.\n",
14 | "3. **How to disable storage** (`store: false`) if you **do not** want your conversation state to persist on OpenAI’s servers—effectively making it stateless.\n",
15 | "\n",
16 | "---\n",
17 | "## 1. Chat Completions (Stateless) vs. Responses (Stateful)\n",
18 | "\n",
19 | "- **Chat Completions**:\n",
20 | " - Typically stateless: each new request must supply the entire conversation history in `messages`.\n",
21 | " - Stored by default only for new accounts; can be disabled.\n",
22 | "\n",
23 | "- **Responses**:\n",
24 | " - By default, **stateful**: each response has its own `id`. You can pass `previous_response_id` in subsequent calls, and the system automatically includes the prior context.\n",
25 | " - Provides built-in **tools** (web search, file search, etc.) that the model can call if relevant.\n",
26 | " - **Stored** by default. If you want ephemeral usage, set `store: false`.\n",
27 | "\n",
28 | "When you get a response back from the Responses API, the returned object differs slightly from Chat Completions:\n",
29 | "\n",
30 | "- Instead of a simple list of message choices, you receive a typed `response` object with top-level fields (e.g. `id`, `output`, `usage`, etc.).\n",
31 | "- To continue a conversation, pass `previous_response_id` to the next request.\n",
32 | "- If you do **not** want it stored, set `store: false`.\n"
33 | ]
34 | },
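35 | {
36 | "cell_type": "markdown",
37 | "id": "stateless-contrast-note",
38 | "metadata": {},
39 | "source": [
40 | "For contrast, here is a minimal sketch of the stateless Chat Completions flow, where each request must resend the full history in `messages` (the model name is just an example):"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": null,
46 | "id": "stateless-contrast-code",
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "from dotenv import load_dotenv\n",
51 | "from openai import OpenAI\n",
52 | "\n",
53 | "load_dotenv()  # expects OPENAI_API_KEY in a local .env\n",
54 | "client = OpenAI()\n",
55 | "\n",
56 | "# Stateless: the entire conversation travels with every request.\n",
57 | "history = [\n",
58 | " {\"role\": \"user\", \"content\": \"What's 2 + 2?\"},\n",
59 | " {\"role\": \"assistant\", \"content\": \"2 + 2 = 4.\"},\n",
60 | " {\"role\": \"user\", \"content\": \"How did you come to this conclusion?\"},\n",
61 | "]\n",
62 | "resp = client.chat.completions.create(model=\"gpt-4o-mini\", messages=history)\n",
63 | "print(resp.choices[0].message.content)"
64 | ]
65 | },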
35 | {
36 | "cell_type": "markdown",
37 | "id": "98ba103f",
38 | "metadata": {},
39 | "source": [
40 | "---\n",
41 | "## 2. Multi-Turn Flow (Stateful) Example\n",
42 | "\n",
43 | "Using `previous_response_id` means the Responses API will store and automatically incorporate the entire conversation. Here’s a simple demonstration:"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": null,
49 | "id": "0bc2fb43",
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "from dotenv import load_dotenv\n",
54 | "\n",
55 | "load_dotenv()"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": null,
61 | "id": "f729187f",
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "from openai import OpenAI\n",
66 | "\n",
67 | "client = OpenAI()\n",
68 | "\n",
69 | "\n",
70 | "resp1 = client.responses.create(\n",
71 | " model=\"gpt-4o-mini\",\n",
72 | " input=\"Hello there! You're a helpful math tutor. Could you help me with a question? What's 2 + 2?\"\n",
73 | ")\n",
74 | "print(\"First response:\\n\", resp1.output_text)\n"
75 | ]
76 | },
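77 | {
78 | "cell_type": "markdown",
79 | "id": "typed-response-note",
80 | "metadata": {},
81 | "source": [
82 | "As noted in section 1, the result is a typed `response` object rather than a list of message choices. A quick look at a few of its top-level fields:"
83 | ]
84 | },
85 | {
86 | "cell_type": "code",
87 | "execution_count": null,
88 | "id": "typed-response-fields",
89 | "metadata": {},
90 | "outputs": [],
91 | "source": [
92 | "print(\"id:\", resp1.id)\n",
93 | "print(\"model:\", resp1.model)\n",
94 | "print(\"usage:\", resp1.usage)"
95 | ]
96 | },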
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "id": "d1b8116f",
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "resp2 = client.responses.create(\n",
85 | " model=\"gpt-4o\",\n",
86 | " input=\"Sure. How you you come to this conclusion?\",\n",
87 | " previous_response_id=resp1.id\n",
88 | ")\n",
89 | "print(\"\\nSecond response:\\n\", resp2.output_text)"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "id": "891a378a",
95 | "metadata": {},
96 | "source": [
97 | "---\n",
98 | "## 3. Using Built-In Tools\n",
99 | "\n",
100 | "### 3.1 Web Search\n",
101 | "Allows the model to gather recent info from the internet if relevant."
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": null,
107 | "id": "2857562e",
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "# Example usage of the built-in web_search tool\n",
112 | "r1 = client.responses.create(\n",
113 | " model=\"gpt-4o\",\n",
114 | " input=\"Please find recent positive headlines about quantum computing.\",\n",
115 | " tools=[{\"type\": \"web_search\"}] # enabling built-in web search\n",
116 | ")\n",
117 | "print(r1.output_text)"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "id": "32a4f76c",
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "# Continue the conversation referencing previous response\n",
128 | "r2 = client.responses.create(\n",
129 | " model=\"gpt-4o\",\n",
130 | " input=\"Interesting! Summarize the second article.\",\n",
131 | " previous_response_id=r1.id\n",
132 | ")\n",
133 | "print(\"\\nFollow-up:\\n\", r2.output_text)"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "id": "599a206e",
139 | "metadata": {},
140 | "source": [
141 | "### 3.2 File Upload + File Search\n",
142 | "\n",
143 | "Below is the corrected snippet showing how to:\n",
144 | "1. **Upload** a local PDF (e.g., `dragon_book.pdf`).\n",
145 | "2. **Create** a vector store from that file.\n",
146 | "3. **Use** `file_search` in the Responses API to reference it.\n"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "id": "efc24105",
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "upload_resp = client.files.create(\n",
157 | " file=open(\"dragon_book.txt\", \"rb\"),\n",
158 | " purpose=\"user_data\"\n",
159 | ")\n",
160 | "file_id = upload_resp.id\n",
161 | "print(\"Uploaded file ID:\", file_id)"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": null,
167 | "id": "dc2b122e",
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "client.files.list()"
172 | ]
173 | },
174 | {
175 | "cell_type": "code",
176 | "execution_count": null,
177 | "id": "749870bc",
178 | "metadata": {},
179 | "outputs": [],
180 | "source": [
181 | "vstore_resp = client.vector_stores.create(\n",
182 | " name=\"DragonData\",\n",
183 | " file_ids=[file_id]\n",
184 | ")\n",
185 | "vstore_id = vstore_resp.id\n",
186 | "print(\"Vector store ID:\", vstore_id)"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": null,
192 | "id": "dada70c3",
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 | "resp1 = client.responses.create(\n",
197 | " model=\"gpt-4o\",\n",
198 | " tools=[{\n",
199 | " \"type\": \"file_search\",\n",
200 | " \"vector_store_ids\": [vstore_id],\n",
201 | " \"max_num_results\": 3\n",
202 | " }],\n",
203 | " input=\"What Information do you have about red dragons?\"\n",
204 | ")\n",
205 | "print(resp1.output_text)"
206 | ]
207 | },
208 | {
209 | "cell_type": "markdown",
210 | "id": "ed7d1705",
211 | "metadata": {},
212 | "source": [
213 | "---\n",
214 | "## 4. Disable Storage (Stateless Mode)\n",
215 | "\n",
216 | "Although the Responses API is **stateful** by default, you can make calls **not** store any conversation by setting `store=False`. Then `previous_response_id` won’t work, because no data is retained on OpenAI’s servers."
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "id": "553d63e3",
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "# An ephemeral request that won't be stored\n",
227 | "ephemeral_resp = client.responses.create(\n",
228 | " model=\"gpt-4o\",\n",
229 | " input=\"Hello, let's do a single-turn question about geometry.\",\n",
230 | " store=False # ephemeral usage\n",
231 | ")\n",
232 | "print(ephemeral_resp.output_text)"
233 | ]
234 | },
235 | {
236 | "cell_type": "markdown",
237 | "id": "366df08f",
238 | "metadata": {},
239 | "source": [
240 | "### LangChain Integration"
241 | ]
242 | },
243 | {
244 | "cell_type": "code",
245 | "execution_count": null,
246 | "id": "bcf04310",
247 | "metadata": {},
248 | "outputs": [],
249 | "source": [
250 | "from langchain_openai import ChatOpenAI\n",
251 | "\n",
252 | "llm = ChatOpenAI(model=\"gpt-4o-mini\", use_responses_api=True)\n",
253 | "\n",
254 | "\n",
255 | "tool = {\"type\": \"web_search_preview\"}\n",
256 | "\n",
257 | "\n",
258 | "llm_with_tools = llm.bind_tools([tool])\n",
259 | "\n",
260 | "\n",
261 | "response = llm_with_tools.invoke(input=\"What was a positive news story from today?\")\n",
262 | "\n",
263 | "print(\"Text content:\", response.content)\n",
264 | "print(\"Tool calls:\", response.tool_calls)"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": null,
270 | "id": "11d14f03",
271 | "metadata": {},
272 | "outputs": [],
273 | "source": [
274 | "from langchain_openai import ChatOpenAI\n",
275 | "\n",
276 | "llm_stateful = ChatOpenAI(\n",
277 | " model=\"gpt-4o-mini\",\n",
278 | " use_responses_api=True,\n",
279 | ")\n",
280 | "\n",
281 | "respA = llm_stateful.invoke(\"Hi, I'm Bob. Please remember my name.\")\n",
282 | "print(\"Response A:\", respA.content)\n",
283 | "print(\"A's ID:\", respA.response_metadata[\"id\"])\n",
284 | "\n",
285 | "respB = llm_stateful.invoke(\n",
286 | " \"What is my name?\",\n",
287 | " previous_response_id=respA.response_metadata[\"id\"]\n",
288 | ")\n",
289 | "print(\"Response B:\", respB.content)\n"
290 | ]
291 | }
292 | ],
293 | "metadata": {
294 | "kernelspec": {
295 | "display_name": ".venv",
296 | "language": "python",
297 | "name": "python3"
298 | },
299 | "language_info": {
300 | "codemirror_mode": {
301 | "name": "ipython",
302 | "version": 3
303 | },
304 | "file_extension": ".py",
305 | "mimetype": "text/x-python",
306 | "name": "python",
307 | "nbconvert_exporter": "python",
308 | "pygments_lexer": "ipython3",
309 | "version": "3.11.0"
310 | }
311 | },
312 | "nbformat": 4,
313 | "nbformat_minor": 5
314 | }
315 |
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
1 | import random
2 | import uuid
3 | from datetime import datetime, timedelta
4 |
5 | import requests
6 | from mcp.server.fastmcp import FastMCP
7 |
8 | # Create the server
9 | mcp = FastMCP("Echo Server")
10 |
14 |
15 | @mcp.tool(name="basic_calculator", description="Performs basic arithmetic.")
16 | def basic_calculator(operation: str, a: float, b: float) -> float:
17 | if operation == "add":
18 | return a + b
19 | elif operation == "subtract":
20 | return a - b
21 | elif operation == "multiply":
22 | return a * b
23 | elif operation == "divide":
24 | if b == 0:
25 | raise ZeroDivisionError("Cannot divide by zero.")
26 | return a / b
27 | else:
28 | raise ValueError(f"Unknown operation: {operation}")
29 |
30 |
31 | @mcp.tool(name="weather_lookup", description="Fetch the current weather for a city.")
32 | def weather_lookup(city: str) -> str:
33 | url = f"https://wttr.in/{city}?format=3"
34 | response = requests.get(url, timeout=10)
35 | return response.text
36 |
37 |
38 | @mcp.tool(name="uuid_generator", description="Generates a random UUID.")
39 | def uuid_generator() -> str:
40 | return str(uuid.uuid4())
41 |
42 |
43 | @mcp.tool(name="random_joke", description="Returns a random joke from an example API.")
44 | def random_joke() -> str:
45 | url = "https://official-joke-api.appspot.com/random_joke"
46 | r = requests.get(url, timeout=10)
47 | data = r.json()
48 | return f"{data['setup']} - {data['punchline']}"
49 |
50 |
51 | @mcp.tool(
52 | name="wikipedia_summary",
53 | description="Look up a term on Wikipedia and return a short summary.",
54 | )
55 | def wikipedia_summary(topic: str) -> str:
56 | api_url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
57 | r = requests.get(api_url, timeout=10)
58 | if r.status_code == 200:
59 | data = r.json()
60 | return data.get("extract", "No summary found.")
61 | else:
62 | return "Wikipedia page not found."
63 |
64 |
65 | @mcp.tool(
66 | name="language_translator",
67 | description="Translates text from one language to another (mocked).",
68 | )
69 | def language_translator(text: str, source_lang: str, target_lang: str) -> str:
70 | return f"[{source_lang} -> {target_lang}] {text}"
71 |
72 |
73 | @mcp.tool(name="sentiment_analysis", description="Analyzes sentiment of text (mocked).")
74 | def sentiment_analysis(text: str) -> str:
75 | if "love" in text.lower():
76 | return "Positive"
77 | elif "hate" in text.lower():
78 | return "Negative"
79 | else:
80 | return "Neutral"
81 |
82 |
83 | @mcp.tool(
84 | name="date_calculator", description="Adds or subtracts days from a given date."
85 | )
86 | def date_calculator(start_date: str, days: int) -> str:
87 | dt = datetime.strptime(start_date, "%Y-%m-%d")
88 | new_date = dt + timedelta(days=days)
89 | return new_date.strftime("%Y-%m-%d")
90 |
91 |
92 | @mcp.tool(
93 | name="password_generator",
94 | description="Generates a random alphanumeric password of given length.",
95 | )
96 | def password_generator(length: int) -> str:
97 | import string
98 | import secrets
99 |
100 | chars = string.ascii_letters + string.digits
101 | return "".join(secrets.choice(chars) for _ in range(length))
102 |
103 |
104 | @mcp.tool(
105 | name="stock_price_lookup",
106 | description="Retrieves the (mock) price of a given stock symbol.",
107 | )
108 | def stock_price_lookup(symbol: str) -> float:
109 | return round(random.uniform(10, 500), 2)
110 |
111 |
112 | @mcp.tool(
113 | name="crypto_price_lookup",
114 | description="Retrieves the (mock) price of a given cryptocurrency.",
115 | )
116 | def crypto_price_lookup(symbol: str) -> float:
117 | return round(random.uniform(1000, 45000), 2)
118 |
119 |
120 | @mcp.tool(
121 | name="currency_converter",
122 | description="Convert an amount between two currencies (mock).",
123 | )
124 | def currency_converter(amount: float, from_currency: str, to_currency: str) -> float:
125 | random_rate = random.uniform(0.5, 1.5)
126 | return round(amount * random_rate, 2)
127 |
128 |
129 | @mcp.tool(
130 | name="extract_named_entities",
131 | description="Extracts named entities from text (mock).",
132 | )
133 | def extract_named_entities(text: str) -> list[str]:
134 | words = text.split()
135 | return [w for w in words if w.istitle()]
136 |
137 |
138 | @mcp.tool(
139 | name="recommend_movies",
140 | description="Given a genre, return a few (mock) recommended movie titles.",
141 | )
142 | def recommend_movies(genre: str) -> list[str]:
143 | sample_movies = {
144 | "action": ["Fast & Furious 9", "Mad Max: Fury Road", "John Wick"],
145 | "comedy": ["Anchorman", "Step Brothers", "Superbad"],
146 | "sci-fi": ["Inception", "Interstellar", "The Matrix"],
147 | "romance": ["The Notebook", "Pride & Prejudice", "Notting Hill"],
148 | "horror": ["The Conjuring", "Get Out", "It Follows"],
149 | }
150 | genre_lower = genre.lower()
151 | if genre_lower in sample_movies:
152 | return random.sample(
153 | sample_movies[genre_lower], k=min(3, len(sample_movies[genre_lower]))
154 | )
155 | else:
156 | return ["No recommendations found for this genre. Try 'action', 'comedy', etc."]
157 |
158 |
159 | if __name__ == "__main__":
160 | mcp.run(transport="sse")
161 |
--------------------------------------------------------------------------------
/sql/example.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangGraph-Tutorial/7c2cf4ba7ef1bbcb835a5fad92e1622b2d40a9ec/sql/example.db
--------------------------------------------------------------------------------
/sql/setup_db.py:
--------------------------------------------------------------------------------
1 | import os
2 | from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Float
3 | from sqlalchemy.ext.declarative import declarative_base
4 | from sqlalchemy.orm import sessionmaker, relationship
5 |
6 | Base = declarative_base()
7 |
8 |
9 | class User(Base):
10 | __tablename__ = "users"
11 |
12 | id = Column(Integer, primary_key=True, index=True)
13 | name = Column(String, index=True)
14 | age = Column(Integer)
15 | email = Column(String, unique=True, index=True)
16 |
17 | orders = relationship("Order", back_populates="user")
18 |
19 |
20 | class Food(Base):
21 | __tablename__ = "food"
22 |
23 | id = Column(Integer, primary_key=True, index=True)
24 | name = Column(String, unique=True, index=True)
25 | price = Column(Float)
26 |
27 | orders = relationship("Order", back_populates="food")
28 |
29 |
30 | class Order(Base):
31 | __tablename__ = "orders"
32 |
33 | id = Column(Integer, primary_key=True, index=True)
34 | food_id = Column(Integer, ForeignKey("food.id"))
35 | user_id = Column(Integer, ForeignKey("users.id"))
36 |
37 | user = relationship("User", back_populates="orders")
38 | food = relationship("Food", back_populates="orders")
39 |
40 |
41 | DATABASE_URL = "sqlite:///example.db"
42 |
43 | engine = create_engine(DATABASE_URL)
44 | SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
45 |
46 |
47 | def init_db():
48 | Base.metadata.create_all(bind=engine)
49 |
50 | session = SessionLocal()
51 |
52 | users = [
53 | User(name="Alice", age=30, email="alice@example.com"),
54 | User(name="Bob", age=25, email="bob@example.com"),
55 | User(name="Charlie", age=35, email="charlie@example.com"),
56 | ]
57 | session.add_all(users)
58 | session.commit()
59 |
60 | foods = [
61 | Food(name="Pizza Margherita", price=12.5),
62 | Food(name="Spaghetti Carbonara", price=15.0),
63 | Food(name="Lasagne", price=14.0),
64 | ]
65 | session.add_all(foods)
66 | session.commit()
67 |
68 | orders = [
69 | Order(food_id=1, user_id=1),
70 | Order(food_id=2, user_id=1),
71 | Order(food_id=3, user_id=2),
72 | ]
73 | session.add_all(orders)
74 | session.commit()
75 |
76 | session.close()
77 | print("Datenbank wurde erfolgreich erweitert und mit Beispieldaten gefüllt.")
78 |
79 |
80 | if __name__ == "__main__":
81 | if not os.path.exists("example.db"):
82 | init_db()
83 | else:
84 | print("Die Datenbank 'example.db' existiert bereits.")
85 |
--------------------------------------------------------------------------------
/testscript.py:
--------------------------------------------------------------------------------
1 | x = 2
2 | y = "test"
3 | print(str(x) + y)
4 |
--------------------------------------------------------------------------------