├── user.png
├── assistant.ico
├── assistant.png
├── mistralai.png
├── st-MistralApp-run.gif
├── st-MistralApp-runLG.gif
├── README.md
└── st-Mistral-API.py
/user.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/user.png
--------------------------------------------------------------------------------
/assistant.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/assistant.ico
--------------------------------------------------------------------------------
/assistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/assistant.png
--------------------------------------------------------------------------------
/mistralai.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/mistralai.png
--------------------------------------------------------------------------------
/st-MistralApp-run.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/st-MistralApp-run.gif
--------------------------------------------------------------------------------
/st-MistralApp-runLG.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/MistralAPI-streamlit/main/st-MistralApp-runLG.gif
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MistralAPI-streamlit
2 | A streamlit ChatBot running Mistral-Small-24B with free API calls
3 |
4 |
5 |
6 |
7 | ## Instructions
8 |
9 | - Clone the repo
10 | - Create the virtual environment
11 | ```bash
12 | python -m venv venv
13 | ```
14 | - Activate the virtual environment
15 | ```bash
16 | .\venv\Scripts\activate
17 | ```
18 | - Install the dependencies
19 | ```bash
20 | pip install streamlit mistralai
21 | ```
22 | - From the terminal, with the venv activated, run
23 | ```bash
24 | streamlit run .\st-Mistral-API.py
25 | ```
26 |
27 | ---
28 |
29 |
30 | ### The running app
31 | - you need to add your own API key (alert message if missing)
32 | - you can choose among 3 models
33 | - you can clear the chat (alert message)
34 | - in the terminal you can see what model is called
35 |
36 |
--------------------------------------------------------------------------------
/st-Mistral-API.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from mistralai import Mistral
3 | import warnings
4 | warnings.filterwarnings(action='ignore')
5 | import datetime
6 | import random
7 | import string
8 | from PIL import Image
9 | import os
10 | import sys
11 |
12 | # Function for handling local path for images
def resource_path(relative_path):
    """Resolve *relative_path* against the app's base directory.

    When frozen into a one-file bundle (e.g. PyInstaller), bundled
    resources live in the temp dir exposed as ``sys._MEIPASS``;
    otherwise fall back to the current working directory.
    """
    try:
        root = sys._MEIPASS
    except Exception:
        root = os.path.abspath(".")
    return os.path.join(root, relative_path)
20 |
# Remote model metadata shown in the UI
nCTX = '32k'
modelname = "Mistral AI"
model = 'mistral-small-latest'

# Set the webpage title
st.set_page_config(
    page_title=f"Your LocalGPT ✨ with {modelname}",
    page_icon="🌟",
    layout="wide")

# Seed every session-state key exactly once per browser session.
_SESSION_DEFAULTS = {
    "mistral_model": "",   # model chosen in the sidebar selectbox
    "messages": [],        # chat history sent to the LLM
    "chatMessages": [],    # chat history for visualization only
    "temperature": 0.1,
    "maxlength": 500,
    "numOfTurns": 0,
    "maxTurns": 11,        # must be odd number, greater than equal to 5
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
52 |
def writehistory(filename, text):
    """Append *text* plus a trailing newline to the log file *filename*.

    The file is opened in append mode with UTF-8 encoding. The ``with``
    statement closes the handle on exit; the original explicit
    ``f.close()`` inside the ``with`` body was redundant and has been
    removed.
    """
    with open(filename, 'a', encoding='utf-8') as f:
        f.write(text)
        f.write('\n')
58 |
def genRANstring(n):
    """Return a random identifier of *n* uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=n))
67 |
# Cache the API client so it is built once per API key instead of on
# every Streamlit rerun.
@st.cache_resource
def create_chat(apikey):
    """Return a cached Mistral API client authenticated with *apikey*.

    The original body re-imported ``Mistral`` locally and rebound a local
    ``modelname`` shadowing the module-level constant of the same value;
    both were redundant and have been removed (the top-of-file import is
    sufficient).
    """
    client = Mistral(api_key=apikey)
    print(f'Loaded remote model {modelname}...')
    return client
76 |
77 |
# Create the session states
if "logfilename" not in st.session_state:
    # Per-session log file, seeded with the system prompt and the
    # assistant greeting.
    st.session_state.logfilename = f'{genRANstring(5)}_log.txt'
    writehistory(st.session_state.logfilename,
                 f'{str(datetime.datetime.now())}\n\nYour own LocalGPT with 🌀 {modelname}\n---\n🧠🫡: You are a helpful assistant.')
    writehistory(st.session_state.logfilename,
                 f'🌀: How may I help you today?')

# Chat avatars
av_us = Image.open(resource_path('user.png'))
av_ass = Image.open(resource_path('assistant.png'))

### START STREAMLIT UI
# Header: logo plus a one-line summary of the model and chat limits.
st.image(Image.open(resource_path('mistralai.png')), width=700)
st.markdown(
    f'> *🌟 {modelname} with {nCTX} tokens Context window* - Turn based Chat available with max capacity of :orange[**{st.session_state.maxTurns} messages**].',
    unsafe_allow_html=True)
97 |
# CREATE THE SIDEBAR
with st.sidebar:
    # BUG FIX: the label used to read "OpenRouter API Key", but this app
    # talks only to the Mistral.ai endpoint (see the link below and the
    # key check at the chat input).
    mistral_api_key = st.text_input("Mistral API Key", key="or_api_key", type="password")
    "[Get a Mistral.ai API key](https://console.mistral.ai/)"
    # Model picker; the choice is read from session state for the API call.
    st.session_state.mistral_model = st.selectbox("Mistral Model", ['mistral-small-latest','open-mistral-nemo','open-codestral-mamba'], index=0,
            placeholder="Choose an option", disabled=False, label_visibility="visible")
    st.session_state.temperature = st.slider('Temperature:', min_value=0.0, max_value=1.0, value=0.65, step=0.01)
    st.session_state.maxlength = st.slider('Length reply:', min_value=150, max_value=2000,
                                           value=550, step=50)
    st.session_state.turns = st.toggle('Turn based', value=True, help='Activate Conversational Turn Chat with History',
                                       disabled=False, label_visibility="visible")
    st.markdown(f"*Number of Max Turns*: {st.session_state.maxTurns}")
    # Live placeholders updated during generation ("Lenght" typo fixed).
    actualTurns = st.markdown(f"*Chat History Length*: :green[Good]")
    statstime = st.markdown(f'⏳ gen.time: 0 sec')
    btnClear = st.button("Clear History",type="primary", use_container_width=True)
    st.markdown(f"**Logfile**: {st.session_state.logfilename}")
114 |
def clearChat():
    """Reset the conversation (LLM history, display history, turn count).

    BUG FIX: the original cleared only ``messages`` (the history sent to
    the LLM), so the old conversation remained visible on screen because
    ``chatMessages`` (the display-only history) was never emptied and the
    turn counter kept its stale value.
    """
    st.session_state.messages = []
    st.session_state.chatMessages = []
    st.session_state.numOfTurns = 0
    st.info("Chat history cleared. Old messages in TXT log file.")
118 |
# Display chat messages from history on app rerun (display list only)
for message in st.session_state.chatMessages:
    if message["role"] == "user":
        with st.chat_message(message["role"], avatar=av_us):
            st.markdown(message["content"])
    else:
        with st.chat_message(message["role"], avatar=av_ass):
            st.markdown(message["content"])

# Sidebar "Clear History" button
if btnClear:
    clearChat()

# Accept user input
if myprompt := st.chat_input("What is an AI model?"):
    if not mistral_api_key:
        st.info("Please add your valid Mistral.ai API key to continue.")
        st.stop()
    llm = create_chat(mistral_api_key)
    # Record the prompt in both histories (LLM + display) and in the log.
    st.session_state.messages.append({"role": "user", "content": myprompt})
    st.session_state.chatMessages.append({"role": "user", "content": myprompt})
    st.session_state.numOfTurns = len(st.session_state.messages)
    # Display user message in chat message container
    with st.chat_message("user", avatar=av_us):
        st.markdown(myprompt)
        usertext = f"user: {myprompt}"
        writehistory(st.session_state.logfilename, usertext)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar=av_ass):
        message_placeholder = st.empty()
        with st.spinner("Thinking..."):
            start = datetime.datetime.now()
            # Build the message window sent to the API.
            conv_messages = []
            if st.session_state.turns:
                # Turn-based chat: cap the history at maxTurns messages.
                if st.session_state.numOfTurns > st.session_state.maxTurns:
                    conv_messages = st.session_state.messages[-st.session_state.maxTurns:]
                    # BUG FIX: displayed string used to read "Lenght"
                    actualTurns.markdown(f"*Chat History Length*: :red[Trimmed]")
                else:
                    conv_messages = st.session_state.messages
            else:
                # Single-turn mode: send only the latest user message.
                conv_messages.append(st.session_state.messages[-1])
            print(st.session_state.mistral_model)  # print what model is called in the terminal
            response = llm.chat.complete(
                model=st.session_state.mistral_model,
                messages=conv_messages,
                temperature=st.session_state.temperature,
                max_tokens=st.session_state.maxlength)
            full_response = response.choices[0].message.content

            delta = datetime.datetime.now() - start
            totalseconds = delta.total_seconds()
            statstime.markdown(f'⏳ gen.time: {int(totalseconds)} sec')
            # Log the reply together with the generation time.
            toregister = full_response + f"""
```
⏳ generation time: {delta}
```"""
            message_placeholder.markdown(full_response)
            asstext = f"assistant: {toregister}"
            writehistory(st.session_state.logfilename, asstext)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
    st.session_state.chatMessages.append({"role": "assistant", "content": full_response})
    st.session_state.numOfTurns = len(st.session_state.messages)
--------------------------------------------------------------------------------