├── .gitignore
├── README.md
├── index.html
├── main.py
├── test.py
└── test_stream.py
/.gitignore:
--------------------------------------------------------------------------------
.env
__pycache__/
.idea
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Coding-Crashkurse/LangChain-FastAPI-Streaming/a0979ff93f496ae8298e97c4ad9bb4b82ec0ad00/README.md
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[Content garbled during extraction: the original ~92-line HTML file lost its markup, styles, and script; only the page heading "Chat with AI" survives.]
--------------------------------------------------------------------------------
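Since the HTML above is unrecoverable, here is a minimal sketch of what index.html plausibly contained, inferred from the /stream_chat/ endpoint in main.py and the port used in test_stream.py. Everything except the "Chat with AI" heading (element ids, styling, the fetch/ReadableStream logic) is an assumption, not the original markup.

<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <title>Chat with AI</title>
</head>
<body>
  <h1>Chat with AI</h1>
  <input id="message-input" type="text" placeholder="Type a message..." />
  <button id="send-button">Send</button>
  <div id="response"></div>
  <script>
    // Hypothetical client: POST the message, then append streamed tokens
    // to the page as chunks arrive. Port 6677 is taken from test_stream.py.
    document.getElementById("send-button").addEventListener("click", async () => {
      const content = document.getElementById("message-input").value;
      const res = await fetch("http://localhost:6677/stream_chat/", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ content }),
      });
      const reader = res.body.getReader();
      const decoder = new TextDecoder();
      let out = "";
      while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        out += decoder.decode(value, { stream: true });
        document.getElementById("response").textContent = out;
      }
    });
  </script>
</body>
</html>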
/main.py:
--------------------------------------------------------------------------------
import asyncio
from typing import AsyncIterable

from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from pydantic import BaseModel

load_dotenv()

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class Message(BaseModel):
    content: str


async def send_message(content: str) -> AsyncIterable[str]:
    callback = AsyncIteratorCallbackHandler()
    model = ChatOpenAI(
        streaming=True,
        verbose=True,
        callbacks=[callback],
    )

    # Run the generation in the background; the callback handler pushes
    # each token into an async queue as the model produces it.
    task = asyncio.create_task(
        model.agenerate(messages=[[HumanMessage(content=content)]])
    )

    try:
        # Forward tokens to the client as soon as they arrive.
        async for token in callback.aiter():
            yield token
    except Exception as e:
        print(f"Caught exception: {e}")
    finally:
        # Unblock aiter() so the generator can terminate even on error.
        callback.done.set()

    # Surface any exception raised inside the background task.
    await task


@app.post("/stream_chat/")
async def stream_chat(message: Message):
    generator = send_message(message.content)
    return StreamingResponse(generator, media_type="text/event-stream")
--------------------------------------------------------------------------------
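A note on running the server: test_stream.py targets port 6677, so presumably the app was served there. A minimal launcher sketch follows; the uvicorn invocation itself is an assumption (running `uvicorn main:app --port 6677` from the command line is equivalent).

# run.py (hypothetical) - serve main.py's app on the port test_stream.py expects.
import uvicorn

if __name__ == "__main__":
    # Port 6677 is inferred from test_stream.py, not stated in main.py.
    uvicorn.run("main:app", host="127.0.0.1", port=6677)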
/test.py:
--------------------------------------------------------------------------------
from dotenv import load_dotenv
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

load_dotenv()

# Sanity check outside of FastAPI: stream tokens straight to stdout.
chat = ChatOpenAI(
    streaming=True, callbacks=[StreamingStdOutCallbackHandler()], temperature=0
)
print(chat([HumanMessage(content="Write me a song about sparkling water.")]))
--------------------------------------------------------------------------------
/test_stream.py:
--------------------------------------------------------------------------------
import json

import requests

# The endpoint is declared as /stream_chat/ in main.py; the trailing slash
# avoids a 307 redirect before the stream starts.
url = "http://localhost:6677/stream_chat/"
message = "Hello, how are you?"
data = {"content": message}

headers = {"Content-Type": "application/json"}

with requests.post(url, data=json.dumps(data), headers=headers, stream=True) as r:
    # Print each chunk as it arrives to confirm the response is streamed.
    for chunk in r.iter_content(1024):
        print(chunk)
--------------------------------------------------------------------------------
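For completeness, a hedged async counterpart to test_stream.py using httpx; httpx is not a dependency of this repository, so this is an illustration under that assumption, not the author's code.

# async_test_stream.py (hypothetical) - asyncio client for the same endpoint.
import asyncio

import httpx


async def main() -> None:
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST",
            "http://localhost:6677/stream_chat/",
            json={"content": "Hello, how are you?"},
        ) as response:
            # aiter_text() yields decoded chunks as the server streams them.
            async for chunk in response.aiter_text():
                print(chunk, end="", flush=True)


if __name__ == "__main__":
    asyncio.run(main())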