├── .env ├── .gitattributes ├── ChatApp.jpg ├── README.md ├── huggingChat.py └── requirements.txt /.env: -------------------------------------------------------------------------------- 1 | HUGGINGFACEHUB_API_TOKEN= -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /ChatApp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PromtEngineer/Chat-App-OpenAssistant-API/44b5d3f96cbecfab1f3551e0b3f9eba24e816a64/ChatApp.jpg -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Chat-App-OpenAssistant-API 2 | A quick tutorial on how to interact with the Open Assistant API and use it in your own apps. The GUI is based on Streamlit. 3 | 4 | ![ChatApp](https://github.com/PromtEngineer/Chat-App-OpenAssistant-API/assets/134474669/dbcd36be-55f5-44e6-b3e7-1c1eb110d854) 5 | 6 | 7 | ## Clone the Repo: 8 | Clone the repository. 9 | ```shell 10 | git clone https://github.com/PromtEngineer/Chat-App-OpenAssistant-API.git 11 | ``` 12 | 13 | ## Environment Setup 14 | In order to set your environment up to run the code here, first install all requirements: 15 | 16 | ```shell 17 | pip install -r requirements.txt 18 | ``` 19 | 20 | ## HuggingFace API Token 21 | 22 | You will need a HuggingFace API token to run this; get your HuggingFace token from [here](https://huggingface.co/settings/tokens) 23 | In the `.env` file, set your API token. 
24 | 25 | ```shell 26 | HUGGINGFACEHUB_API_TOKEN= 27 | ``` 28 | 29 | ## Run the WebApp: 30 | 31 | ```shell 32 | streamlit run huggingChat.py 33 | ``` 34 | -------------------------------------------------------------------------------- /huggingChat.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from streamlit_chat import message 3 | from streamlit_extras.colored_header import colored_header 4 | from streamlit_extras.add_vertical_space import add_vertical_space 5 | from langchain import PromptTemplate, HuggingFaceHub, LLMChain 6 | from dotenv import load_dotenv 7 | 8 | # load the Environment Variables. 9 | load_dotenv() 10 | st.set_page_config(page_title="OpenAssistant Powered Chat App") 11 | 12 | # Sidebar contents 13 | with st.sidebar: 14 | st.title('🤗💬 HuggingChat App') 15 | st.markdown(''' 16 | ## About 17 | This app is an LLM-powered chatbot built using: 18 | - [Streamlit](https://streamlit.io/) 19 | - [LangChain](https://python.langchain.com/) 20 | - [OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5](https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5) LLM model 21 | 22 | ''') 23 | add_vertical_space(3) 24 | st.write('Made with ❤️ by [Prompt Engineer](https://youtube.com/@engineerprompt)') 25 | 26 | st.header("Your Personal Assistant 💬") 27 | 28 | def main(): 29 | 30 | # Generate empty lists for generated and user. 
31 | ## Assistant Response 32 | if 'generated' not in st.session_state: 33 | st.session_state['generated'] = ["I'm Assistant, How may I help you?"] 34 | 35 | ## user question 36 | if 'user' not in st.session_state: 37 | st.session_state['user'] = ['Hi!'] 38 | 39 | # Layout of input/response containers 40 | response_container = st.container() 41 | colored_header(label='', description='', color_name='blue-30') 42 | input_container = st.container() 43 | 44 | # get user input 45 | def get_text(): 46 | input_text = st.text_input("You: ", "", key="input") 47 | return input_text 48 | 49 | ## Applying the user input box 50 | with input_container: 51 | user_input = get_text() 52 | 53 | def chain_setup(): 54 | 55 | 56 | template = """<|prompter|>{question}<|endoftext|> 57 | <|assistant|>""" 58 | 59 | prompt = PromptTemplate(template=template, input_variables=["question"]) 60 | 61 | llm=HuggingFaceHub(repo_id="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", model_kwargs={"max_new_tokens":1200}) 62 | 63 | llm_chain=LLMChain( 64 | llm=llm, 65 | prompt=prompt 66 | ) 67 | return llm_chain 68 | 69 | 70 | # generate response 71 | def generate_response(question, llm_chain): 72 | response = llm_chain.run(question) 73 | return response 74 | 75 | ## load LLM 76 | llm_chain = chain_setup() 77 | 78 | # main loop 79 | with response_container: 80 | if user_input: 81 | response = generate_response(user_input, llm_chain) 82 | st.session_state.user.append(user_input) 83 | st.session_state.generated.append(response) 84 | 85 | if st.session_state['generated']: 86 | for i in range(len(st.session_state['generated'])): 87 | message(st.session_state['user'][i], is_user=True, key=str(i) + '_user') 88 | message(st.session_state["generated"][i], key=str(i)) 89 | 90 | if __name__ == '__main__': 91 | main() 92 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | 
streamlit 2 | hugchat 3 | streamlit-chat 4 | streamlit-extras 5 | langchain 6 | python-dotenv 7 | huggingface_hub --------------------------------------------------------------------------------