├── src
│   ├── __init__.py
│   ├── logo.py
│   ├── config.json
│   ├── endpoints.json
│   ├── header.py
│   ├── styles.py
│   ├── session.py
│   ├── sidebar.py
│   ├── request.py
│   └── context.py
├── requirements.txt
├── static
│   ├── logo.png
│   └── favicon.png
├── .streamlit
│   └── config.toml
├── .vscode
│   ├── settings.json
│   └── extensions.json
├── streamlit_app.py
├── LICENSE
├── README.md
└── .gitignore

--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
requests
streamlit
urllib3

--------------------------------------------------------------------------------
/static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/3x3cut0r/llama-cpp-python-streamlit/HEAD/static/logo.png

--------------------------------------------------------------------------------
/static/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/3x3cut0r/llama-cpp-python-streamlit/HEAD/static/favicon.png

--------------------------------------------------------------------------------
/.streamlit/config.toml:
--------------------------------------------------------------------------------
[server]
enableStaticServing = true

[browser]
gatherUsageStats = false

--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
    "editor.defaultFormatter": "esbenp.prettier-vscode",
    "editor.formatOnSave": true,
    "editor.tabSize": 4,
    "prettier.tabWidth": 4,
    "files.autoSave": "onFocusChange",
    "files.encoding": "utf8",
    "files.insertFinalNewline": true
}

--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
{
    "recommendations": [
        "esbenp.prettier-vscode",
        "ms-python.python",
        "ms-python.vscode-pylance",
        "aaron-bond.better-comments",
        "gruntfuggly.todo-tree",
        "formulahendry.auto-rename-tag",
        "donjayamanne.git-extension-pack"
    ]
}

--------------------------------------------------------------------------------
/src/logo.py:
--------------------------------------------------------------------------------
import streamlit as st


# render logo
def render():
    st.markdown(
        f"""
        """,
        unsafe_allow_html=True,
    )
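
# note: .streamlit/config.toml sets enableStaticServing = true, so files in
# ./static are also served under the "app/static" url path; markup rendered
# here could reference e.g. "app/static/logo.png" (that path is an assumption)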

--------------------------------------------------------------------------------
/src/config.json:
--------------------------------------------------------------------------------
{
    "api_url": "http://localhost:8000",
    "page_title": "Llama-2-7b-Chat",
    "n_ctx": 2048,
    "enable_context": "True",
    "stream": "True",
    "max_tokens": "256",
    "temperature": "0.2",
    "top_p": "0.95",
    "top_k": "40",
    "repeat_penalty": "1.1",
    "stop": "###",
    "system_content": "User asks Questions to the AI. AI is helpful, kind, obedient, honest, and knows its own limits.",
    "prompt": "### Instructions:\n{prompt}\n\n### Response:\n"
}

--------------------------------------------------------------------------------
/src/endpoints.json:
--------------------------------------------------------------------------------
{
    "/v1/chat/completions": {
        "messages": [
            {
                "content": "You are a helpful assistant.",
                "role": "system"
            },
            {
                "content": "What is the capital of France?",
                "role": "user"
            }
        ]
    },
    "/v1/completions": {
        "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
        "stop": ["\n", "###"]
    },
    "/completions": {
        "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
        "stop": ["\n", "###"]
    }
}

--------------------------------------------------------------------------------
/streamlit_app.py:
--------------------------------------------------------------------------------
import streamlit as st
import src.header as header
import src.request as request
import src.context as context

# render header
header.render()

# render content_container
content_container = st.empty()

# render context
if 'context' in st.session_state:
    context.render(content_container)

# render message-text_area + generate-submit_button
with st.form("Prompt Form", clear_on_submit=True):
    col1, col2 = st.columns([2, 1])

    with col1:
        user_content = st.text_area(label="Enter your message", value="", height=55, label_visibility="collapsed", placeholder="Enter your message")

    with col2:
        generate_button = st.form_submit_button('Generate')

    if generate_button:
        context.append_question(user_content)
        context.render(content_container)

        with st.spinner('Generating response...'):
            request.send(content_container)
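
# note: streamlit reruns this script top-to-bottom on every interaction;
# the conversation itself survives reruns in st.session_state (see
# src/session.py and src/context.py)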

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 3x3cut0r

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/src/header.py:
--------------------------------------------------------------------------------
import json
import streamlit as st
from PIL import Image
import src.logo as logo
import src.styles as styles
import src.session as session
import src.sidebar as sidebar

favicon = Image.open('static/favicon.png')

# load title from config-file
with open("src/config.json", "r", encoding="utf-8") as file:
    config = json.load(file)


# call render method to set the header on every page
def render(page_title=None):

    # load page_title from config
    if page_title is None:
        if 'page_title' in config:
            page_title = config['page_title']
        else:
            page_title = "Model: Llama-2-7b-Chat"

    # page setup
    st.set_page_config(
        page_icon=favicon,
        page_title=page_title,
        layout='wide',
        initial_sidebar_state='expanded',
    )

    # render logo
    logo.render()

    # apply styles
    styles.render()

    # load config
    session.load()

    # render sidebar
    sidebar.render()

    # title
    st.title(page_title)

--------------------------------------------------------------------------------
/src/styles.py:
--------------------------------------------------------------------------------
import streamlit as st


# render styles
def render():
    st.markdown(
        f"""
        """,
        unsafe_allow_html=True,
    )

--------------------------------------------------------------------------------
/src/session.py:
--------------------------------------------------------------------------------
import json
import streamlit as st


# load config-file
def load():
    # initialize context in session state if not present
    if 'context' not in st.session_state:
        st.session_state['context'] = []

    # load config in session state
    with open("src/config.json", "r", encoding="utf-8") as file:
        config = json.load(file)
        st.session_state['api_url'] = config['api_url'] if 'api_url' in config else "http://localhost:8000"
        st.session_state['page_title'] = config['page_title'] if 'page_title' in config else "Llama-2-7b-Chat"
        st.session_state['n_ctx'] = int(config['n_ctx']) if 'n_ctx' in config else 2048
        # booleans are stored as strings in config.json, so coerce them here
        st.session_state['enable_context'] = str(config['enable_context']).lower() == "true" if 'enable_context' in config else True
        st.session_state['stream'] = str(config['stream']).lower() == "true" if 'stream' in config else True
        st.session_state['max_tokens'] = int(config['max_tokens']) if 'max_tokens' in config else 256
        st.session_state['temperature'] = float(config['temperature']) if 'temperature' in config else 0.2
        st.session_state['top_p'] = float(config['top_p']) if 'top_p' in config else 0.95
        st.session_state['top_k'] = int(config['top_k']) if 'top_k' in config else 40
        st.session_state['repeat_penalty'] = float(config['repeat_penalty']) if 'repeat_penalty' in config else 1.1
        st.session_state['stop'] = config['stop'] if 'stop' in config else "###"
        st.session_state['system_content'] = config['system_content'] if 'system_content' in config else "User asks Questions to the AI. AI is helpful, kind, obedient, honest, and knows its own limits."
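        # note: the template below must keep the literal "{prompt}" placeholder,
        # because context.get_history() substitutes the chat history into it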
        st.session_state['prompt'] = config['prompt'] if 'prompt' in config else "### Instructions:\n{prompt}\n\n### Response:\n"

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# llama-cpp-python-streamlit

**A Streamlit app for using a llama-cpp-python high-level API**

![llama-cpp-python-streamlit](https://github.com/3x3cut0r/llama-cpp-python-streamlit/assets/1408580/b22fa516-2f32-4d16-a9f2-429eee2a6f50)

## Index

1. [Installation](#1-installation)
2. [Configuration](#2-configuration)
3. [Usage](#3-usage)
   3.1 [deploy streamlit app](#31-deploy-streamlit-app)
   3.2 [use](#32-use)
4. [Find Me](#4-find-me)
5. [License](#5-license)

## 1 Installation

- install python3 from [python.org](https://www.python.org/downloads/) or from your distribution's repository:

```shell
apt install python3
```

- install the requirements:

```shell
pip install -r requirements.txt
```

## 2 Configuration

- change the api url in src/config.json so it points to your llama-cpp-python high-level API
- set page_title to whatever you want
- set n_ctx to the context size your API was started with
- set default values for the model settings

**src/config.json**

```json
{
    "api_url": "https://llama-cpp-python.mydomain.com",
    "page_title": "Llama-2-7b-Chat",
    "n_ctx": 2048,
    "enable_context": "True",
    "stream": "True",
    "max_tokens": "256",
    "temperature": "0.2",
    "top_p": "0.95",
    "top_k": "40",
    "repeat_penalty": "1.1",
    "stop": "###",
    "system_content": "User asks Questions to the AI. AI is helpful, kind, obedient, honest, and knows its own limits.",
    "prompt": "### Instructions:\n{prompt}\n\n### Response:\n"
}
```

- to change the logo or favicon, just replace the files inside the `./static` folder
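- note: `api_url` must point to a running llama-cpp-python server; if you do not have one yet, a minimal local setup could look like this (the model path is only an example):

```shell
pip install 'llama-cpp-python[server]'
python3 -m llama_cpp.server --model ./models/llama-2-7b-chat.Q4_K_M.gguf --n_ctx 2048
```

- by default the server listens on port 8000, matching the default `api_url` of `http://localhost:8000`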

## 3 Usage

### 3.1 deploy streamlit app

- run the streamlit app:

```shell
streamlit run streamlit_app.py
```

### 3.2 use

- browse [http://localhost:8501/](http://localhost:8501/)
- choose a supported endpoint
- optional: adjust the model settings/parameters
- enter your message

## 4 Find Me

![E-Mail](https://img.shields.io/badge/E--Mail-julianreith%40gmx.de-red)

- [GitHub](https://github.com/3x3cut0r)
- [DockerHub](https://hub.docker.com/u/3x3cut0r)

## 5 License

[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) - This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

--------------------------------------------------------------------------------
/src/sidebar.py:
--------------------------------------------------------------------------------
import json
import streamlit as st


def render():
    with st.sidebar:

        # load endpoints from endpoints.json
        with open("src/endpoints.json", "r", encoding="utf-8") as file:
            data = json.load(file)
            endpoints = list(data.keys())

        # endpoint
        endpoint = st.selectbox("endpoint", endpoints)
        st.session_state['endpoint'] = endpoint

        # sidebar_title
        st.title("Model Settings")
        st.session_state['sidebar_title'] = "Model Settings"

        # user_content (reset on every rerun)
        user_content = ""
        st.session_state['user_content'] = user_content

        # enable_context
        enable_context = st.toggle("enable context?", value=st.session_state['enable_context'])
        st.session_state['enable_context'] = enable_context

        # stream
        stream = st.toggle("stream results?", value=st.session_state['stream'])
        st.session_state['stream'] = stream

        # max_tokens
        max_tokens = st.number_input("max_tokens", value=st.session_state['max_tokens'], min_value=16, max_value=st.session_state['n_ctx'], step=1)
        st.session_state['max_tokens'] = max_tokens

        # temperature
        temperature = st.number_input("temperature", value=st.session_state['temperature'], min_value=0.01, max_value=1.99, step=0.05)
        st.session_state['temperature'] = temperature

        # top_p
        top_p = st.number_input("top_p", value=st.session_state['top_p'], min_value=0.0, max_value=1.0, step=0.05)
        st.session_state['top_p'] = top_p

        # top_k
        top_k = st.number_input("top_k", value=st.session_state['top_k'], min_value=1, max_value=200, step=1)
        st.session_state['top_k'] = top_k

        # repeat_penalty
        repeat_penalty = st.number_input("repeat_penalty", value=st.session_state['repeat_penalty'], min_value=1.0, max_value=1.5, step=0.05)
        st.session_state['repeat_penalty'] = repeat_penalty

        # stop
        stop = st.text_input("stop", value=st.session_state['stop'])
        stop = stop.encode().decode('unicode_escape')
        stop = stop.replace(" ", "").split(",")
        st.session_state['stop'] = stop
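        # e.g. entering "###, \n" yields ["###", "\n"]: escape sequences are
        # decoded, all spaces are stripped, and the string is split on commas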

        # system_content
        if endpoint == "/v1/chat/completions":
            system_content = st.text_area("system_content", value=st.session_state['system_content'], height=200)
            st.session_state['system_content'] = system_content

        # prompt
        else:
            prompt = st.text_area("prompt", value=st.session_state['prompt'], height=200)
            prompt = prompt.encode().decode('unicode_escape')
            st.markdown("hint: the expression `{prompt}` must exist!", unsafe_allow_html=True)
            st.session_state['prompt'] = prompt

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

--------------------------------------------------------------------------------
/src/request.py:
--------------------------------------------------------------------------------
import requests
import json
import streamlit as st
import src.context as context
import urllib3

urllib3.disable_warnings()


# send request to API
def send(content_container):

    # create static json_data for all requests
    json_data = {
        "max_tokens": st.session_state['max_tokens'],
        "temperature": st.session_state['temperature'],
        "top_p": st.session_state['top_p'],
        "top_k": st.session_state['top_k'],
        "repeat_penalty": st.session_state['repeat_penalty'],
        "stop": st.session_state['stop'],
        "stream": st.session_state['stream']
    }

    # add endpoint specific json_data
    # endpoint = /v1/chat/completions
    if st.session_state['endpoint'] == "/v1/chat/completions":

        # add previous context to messages
        json_data['messages'] = context.get_history()

    # other endpoints
    else:
        json_data['prompt'] = context.get_history()

    # send json_data to endpoint
    try:
        s = requests.Session()
        headers = None
        with s.post(st.session_state["api_url"] + st.session_state['endpoint'],
                    json=json_data,
                    headers=headers,
                    stream=st.session_state['stream'],
                    timeout=240,
                    verify=False
                    ) as response:

            # if stream is True
            if st.session_state['stream']:
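                # chunks arrive as server-sent events, roughly of this shape
                # (illustrative values, not captured output):
                #   data: {"id": "...", "created": 1700000000, "choices": [{"delta": {"content": "Hel"}}]}
                #   data: [DONE]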
                # store chunks into context
                for chunk in response.iter_lines(chunk_size=None, decode_unicode=True):

                    # fix error: Connection broken: InvalidChunkLength(got length b'', 0 bytes read)
                    # (decode_unicode=True yields str, so compare against "" instead of b'')
                    if chunk == "":
                        continue

                    # process chunk
                    elif chunk:
                        # skip [DONE] message
                        if chunk.startswith("data: [DONE]"):
                            continue
                        # remove "data: "-prefix, if present
                        elif chunk.startswith("data: "):
                            chunk = chunk[6:]
                        # skip ping messages
                        elif chunk.startswith(": ping"):
                            continue

                        # append chunks content to the context
                        try:
                            chunk_dict = json.loads(chunk)
                            context.append(chunk_dict)
                            context.render(content_container)
                        except json.JSONDecodeError:
                            st.error(f'invalid JSON-String: {chunk}')

            # if stream is False
            else:
                # append complete content to the context
                try:
                    if response.ok:
                        context.append(response.json())
                        context.render(content_container)
                    else:
                        raise Exception(f'Error: {response.text}')
                except json.JSONDecodeError:
                    st.error(f'invalid JSON-String: {response.text}')

    except Exception as e:
        st.error(str(e))


# stop request
def stop(endpoint, stop):
    # send stop request to endpoint
    try:
        requests.post(
            st.session_state["api_url"] + endpoint,
            json={"messages": stop[0]},
            verify=False
        )
    except Exception as e:
        st.error(str(e))

--------------------------------------------------------------------------------
/src/context.py:
--------------------------------------------------------------------------------
from datetime import datetime
import streamlit as st


# render context in app
def render(container):  # container = st.container()
    container.empty()

    with container.container():
        if st.session_state['context'] != []:
            for element in st.session_state['context']:

                # if question
                # todo: still vulnerable to code (html) injection
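                # a possible mitigation (sketch, not part of the original code):
                # escape user input before rendering, e.g.
                #   import html
                #   q = html.escape(element['question'])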
                if 'question' in element:
                    q = element['question']
                    st.markdown(
                        f"""
                        {q}
""", 18 | unsafe_allow_html=True, 19 | ) 20 | 21 | # if response 22 | elif 'choices' in element and element['choices']: 23 | 24 | # if /v1/chat/completions endpoint 25 | if 'message' in element['choices'][0]: 26 | if 'content' in element['choices'][0]['message']: 27 | c = element['choices'][0]['message']['content'] 28 | st.markdown(c) 29 | 30 | # if /v1/completions entpoint 31 | elif 'text' in element['choices'][0]: 32 | c = element['choices'][0]['text'] 33 | st.markdown(c) 34 | 35 | # append user_content to context 36 | def append_question(user_content): # user_content = question = string 37 | if st.session_state['context'] == [] or 'question' not in st.session_state['context'][-1] or st.session_state['context'][-1]['question'] != user_content: 38 | now = int(datetime.now().timestamp()) 39 | st.session_state['context'].append({ 40 | "id": 0, # todo: add question id here 41 | "question": user_content, 42 | "created": now 43 | }) 44 | 45 | # append context to context 46 | def append(ctx): # ctx = python dict 47 | 48 | # rename ctx['choices'][0]['delta'] -> ctx['choices'][0]['message'] if ctx = chunk 49 | if 'choices' in ctx and ctx['choices']: 50 | if 'delta' in ctx['choices'][0]: 51 | ctx['choices'][0]['message'] = ctx['choices'][0].pop('delta') 52 | 53 | # check if 'id', 'created' and 'choices' exist 54 | if all(key in ctx for key in ('id', 'created', 'choices')): 55 | 56 | # check if ctx is already last element in context 57 | if st.session_state['context'] != [] and st.session_state['context'][-1]['id'] == ctx['id'] and st.session_state['context'][-1]['created'] == ctx['created']: 58 | 59 | # append chunk 'content' to existing (last) chunk 'content' 60 | if 'choices' in ctx and ctx['choices']: 61 | 62 | # for /v1/chat/completions endpoint 63 | if 'message' in ctx['choices'][0]: 64 | if 'content' in ctx['choices'][0]['message']: 65 | st.session_state['context'][-1]['choices'][0]['message']['content'] += ctx['choices'][0]['message'].get('content', '') 66 | 67 | # for /v1/completions entpoint 68 | elif 'text' in ctx['choices'][0]: 69 | st.session_state['context'][-1]['choices'][0]['text'] += ctx['choices'][0].get('text', '') 70 | else: 71 | 72 | # append ctx to context 73 | if 'choices' in ctx and ctx['choices']: 74 | 75 | # for /v1/chat/completions endpoint 76 | if 'message' in ctx['choices'][0]: 77 | if 'content' in ctx['choices'][0]['message']: 78 | st.session_state['context'].append(ctx) 79 | 80 | # for /v1/completions entpoint 81 | elif 'text' in ctx['choices'][0]: 82 | st.session_state['context'].append(ctx) 83 | 84 | # raise error if no context was found 85 | else: 86 | raise Exception(f'Error: no context to append or wrong api endpoint\n\nmessage: {ctx}') 87 | 88 | # return message from context 89 | def get_message(ctx_element): 90 | 91 | # if question 92 | if 'question' in ctx_element: 93 | return "User: " + ctx_element['question'] + "\n" 94 | 95 | # if response 96 | elif 'choices' in ctx_element and ctx_element['choices']: 97 | 98 | # if /v1/chat/completions endpoint 99 | if 'message' in ctx_element['choices'][0]: 100 | if 'content' in ctx_element['choices'][0]['message']: 101 | return "System: " + ctx_element['choices'][0]['message']['content'] + "\n" 102 | 103 | # if /v1/completions entpoint 104 | elif 'text' in ctx_element['choices'][0]: 105 | return "System: " + ctx_element['choices'][0]['text'] + "\n" 106 | 107 | # return context history 108 | def get_history(): 109 | history = "" 110 | 111 | messages = [{ 112 | "role": "system", 113 | "content": 
# return context history
def get_history():
    history = ""

    messages = [{
        "role": "system",
        "content": st.session_state['system_content']
    }]

    if st.session_state['context'] != []:

        # if context is enabled return all elements
        if st.session_state['enable_context']:
            for ctx_element in st.session_state['context']:
                history += get_message(ctx_element)

            # cut history to n_ctx length of llama.cpp server
            # todo: cut complete user and/or system messages instead of cutting somewhere in the middle
            n_ctx = st.session_state['n_ctx']
            history = history[-n_ctx:] if len(history) >= n_ctx else history

        # if context is disabled return last element
        else:
            history += get_message(st.session_state['context'][-1])

    # message dict for /v1/chat/completions endpoint
    messages.append({
        "role": "user",
        "content": history
    })

    if st.session_state['endpoint'] == "/v1/chat/completions":
        return messages
    else:
        return st.session_state['prompt'].replace('{prompt}', history)

--------------------------------------------------------------------------------