├── data
│   └── bispap138.pdf
├── requirements.txt
├── screen-capture.gif
├── templates
│   └── index.html
├── static
│   ├── css
│   │   └── main.css
│   └── js
│       └── app.js
├── README.md
├── .gitignore
└── app.py
/data/bispap138.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mickymultani/Streaming-LLM-Chat/HEAD/data/bispap138.pdf
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mickymultani/Streaming-LLM-Chat/HEAD/requirements.txt
--------------------------------------------------------------------------------
/screen-capture.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mickymultani/Streaming-LLM-Chat/HEAD/screen-capture.gif
--------------------------------------------------------------------------------
/templates/index.html:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Local Chat Interface</title>
    <link href="https://fonts.googleapis.com/css2?family=Rubik&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}">
</head>
<body>
    <div class="chat-container">
        <div id="chatArea" class="chat-area">
            <!-- Messages are appended here by static/js/app.js -->
        </div>
        <div class="input-container">
            <input type="text" id="userInput" placeholder="Type your message...">
            <button onclick="sendChat()">Send</button>
        </div>
    </div>
    <script src="{{ url_for('static', filename='js/app.js') }}"></script>
</body>
</html>
--------------------------------------------------------------------------------
/static/css/main.css:
--------------------------------------------------------------------------------
body, html {
    height: 60%;
    margin: 0;
    font-family: 'Rubik', sans-serif;
}
.chat-container {
    width: 100%;
    max-width: 600px;
    margin: 40px auto; /* Adds margin to the top and bottom */
    display: flex;
    flex-direction: column;
    height: calc(80vh - 50px); /* Adjusted to account for the added margin */
    border: 1px solid #ccc;
    box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    background-color: #fff;
    overflow: hidden;
}

.chat-area {
    padding: 10px;
    overflow-y: auto;
    flex-grow: 1;
}
.input-container {
    display: flex;
    padding: 10px;
}
.input-container input {
    flex-grow: 1;
    padding: 10px;
    margin-right: 10px;
    border: 1px solid #ccc;
    border-radius: 4px;
}
.input-container button {
    padding: 10px 20px;
    border: none;
    border-radius: 4px;
    background-color: #007bff;
    color: white;
    cursor: pointer;
}
.input-container button:hover {
    background-color: #0056b3;
}
.message {
    margin: 5px 0;
    padding: 10px;
    border-radius: 10px;
    max-width: 100%; /* Messages may use the full container width */
}
.user-message {
    background-color: #dcf8c6;
    align-self: flex-end;
}
.bot-message {
    background-color: #e5e5ea;
    align-self: flex-start;
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Streaming LLM Chat

Streaming LLM Chat is a web application built with Flask, LlamaIndex, and OpenAI's GPT-4 model. It provides an interactive chat experience over the documents in the `data` directory, streaming each response from the large language model (LLM) to the browser as it is generated.

![Chat Interface Demo](screen-capture.gif)

## Features

- Interactive chat interface with real-time response streaming.
- Responses from OpenAI's GPT-4, grounded in the bundled document (`data/bispap138.pdf`) through a LlamaIndex vector index.
- Simple to set up, with minimal configuration required.
- Opens the chat interface in your default browser automatically when the application starts.
- Requires Python 3.10.x.

## Installation

To run Streaming LLM Chat on your local machine, follow these steps:

1. Clone the repository:
```
git clone https://github.com/mickymultani/Streaming-LLM-Chat.git
cd Streaming-LLM-Chat
```

2. Create and activate a virtual environment:
```
# For Windows
python -m venv venv
.\venv\Scripts\activate

# For Unix or macOS
python3 -m venv venv
source venv/bin/activate
```

3. Install the required packages:
```
pip install -r requirements.txt
```

4. Set your OpenAI API key in a `.env` file in the root directory; it is loaded as an environment variable when the app starts:
```
OPENAI_API_KEY=your_api_key_here
```
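
You can optionally confirm that the key is visible before launching. This is a minimal sketch (a hypothetical helper, not part of the repository) that loads `.env` the same way `app.py` does, via python-dotenv:
```
# check_env.py - hypothetical helper, not part of this repository
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory, as app.py does
print("OPENAI_API_KEY set:", bool(os.getenv("OPENAI_API_KEY")))
```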

## Usage

Run the application:
```
python app.py
```

The application will start a local server and automatically open the chat interface in your default web browser.
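
If you want to exercise the streaming endpoint directly (for example, to confirm that tokens arrive incrementally), a small client along these lines should work. The `/chat` path and JSON payload shape come from `app.py`; the `requests` package and the file name are assumptions of this sketch:
```
# stream_client.py - illustrative sketch, not part of this repository
import requests  # assumed to be installed separately (pip install requests)

with requests.post(
    "http://127.0.0.1:5000/chat",
    json={"message": "Summarize the main points of the paper."},
    stream=True,  # keep the connection open and read the body as it arrives
) as resp:
    resp.raise_for_status()
    resp.encoding = "utf-8"  # the server streams plain text
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)
```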

## Contributing

Contributions are welcome! If you have suggestions or improvements, please open a pull request following the standard GitHub process.

## License

This project is licensed under the MIT License.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# vscode settings
.vscode/

# JetBrains IDEs
.idea/
*.iml

# Others
.DS_Store
Thumbs.db
--------------------------------------------------------------------------------
/static/js/app.js:
--------------------------------------------------------------------------------
async function sendChat() {
    let userInput = document.getElementById("userInput").value.trim();
    if (!userInput) return; // Don't send empty messages

    // Add user's message to the chat area
    appendMessage(userInput, 'user-message');

    // Clear input after sending
    document.getElementById("userInput").value = '';

    try {
        // Start the POST request to send the message
        const response = await fetch('/chat', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ message: userInput })
        });

        let reader = response.body.getReader();
        let decoder = new TextDecoder();

        // Create a container for the bot's message
        let botMessageContainer = createMessageContainer('bot-message');

        while (true) {
            const { value, done } = await reader.read();
            if (done) break;
            let chunk = decoder.decode(value, { stream: true });
            // Replace newline characters with HTML <br> tags
            chunk = chunk.replace(/\n/g, '<br>');
            botMessageContainer.innerHTML += chunk;
        }
        // Scroll the bot's message container into view
        botMessageContainer.scrollIntoView({ behavior: 'smooth' });
    } catch (error) {
        console.error('Error:', error);
    }
}

function appendMessage(text, className) {
    let messageContainer = createMessageContainer(className);
    messageContainer.innerHTML = text.replace(/\n/g, '<br>'); // Replace newlines with <br>
    document.getElementById('chatArea').appendChild(messageContainer);
    messageContainer.scrollIntoView({ behavior: 'smooth' });
}

function createMessageContainer(className) {
    let div = document.createElement('div');
    div.classList.add('message', className);
    document.getElementById('chatArea').appendChild(div);
    return div;
}
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
from flask import Flask, render_template, request, Response, stream_with_context
from flask_cors import CORS  # Import CORS
from dotenv import load_dotenv

import webbrowser
from threading import Timer
import os
from llama_index import (
    ServiceContext,
    VectorStoreIndex,
    SimpleDirectoryReader,
    set_global_service_context,
)
from llama_index.llms import OpenAI
from llama_index.memory import ChatMemoryBuffer

app = Flask(__name__)
CORS(app)  # Enable CORS for all routes

# Load environment variables
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise ValueError("The OPENAI_API_KEY environment variable is not set.")


# Set up the service context for llama-index with the desired OpenAI model
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-4", temperature=0)
)
set_global_service_context(service_context)

# Load the data from the "data" directory
data = SimpleDirectoryReader("data").load_data()

# Create the index
index = VectorStoreIndex.from_documents(data)

# Configure the chat engine with a memory buffer
memory = ChatMemoryBuffer.from_defaults(token_limit=20000)
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "Act as an experienced risk and financial policy analyst. "
        "You are now able to intelligently answer questions about the information you have been provided."
    ),
)

@app.route('/')
def home():  # 'index' already names the vector index above
    return render_template('index.html')

@app.route('/chat', methods=['POST'])
def chat():
    user_message = request.json.get('message')
    response = chat_engine.stream_chat(user_message)
    buffer = []
    buffer_size = 3

    def generate():
        # Group tokens into small chunks so the client is not flooded
        # with one network write per token.
        for token in response.response_gen:
            buffer.append(token)
            if len(buffer) >= buffer_size:
                yield ''.join(buffer)
                buffer.clear()
        if buffer:
            yield ''.join(buffer)

    return Response(stream_with_context(generate()), content_type='text/plain')

def open_browser():
    webbrowser.open_new('http://127.0.0.1:5000/')

if __name__ == '__main__':
    # The debug reloader runs this module twice; only open the browser once.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        Timer(1, open_browser).start()
    app.run(debug=True)
--------------------------------------------------------------------------------