├── man.png
├── lamini.png
├── LogoArticle.png
├── Huggingfacehub.png
├── model
│   └── 248M_weights.md
├── model77M
│   └── weightshere.md
├── README.md
├── test.py
└── stapp.py
/man.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LaminiChat/main/man.png
--------------------------------------------------------------------------------
/lamini.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LaminiChat/main/lamini.png
--------------------------------------------------------------------------------
/LogoArticle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LaminiChat/main/LogoArticle.png
--------------------------------------------------------------------------------
/Huggingfacehub.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabiomatricardi/LaminiChat/main/Huggingfacehub.png
--------------------------------------------------------------------------------
/model/248M_weights.md:
--------------------------------------------------------------------------------
Download the LaMini-Flan-T5-248M model weights into this folder
from https://huggingface.co/MBZUAI/LaMini-Flan-T5-248M/tree/main
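
You can also fetch all the files programmatically with the `huggingface_hub` library. A minimal sketch, assuming the `huggingface_hub` package is installed (the repo itself only documents the manual download):

```python
from huggingface_hub import snapshot_download

# Download every file of the model repo into the local ./model/ folder
snapshot_download(repo_id="MBZUAI/LaMini-Flan-T5-248M", local_dir="./model/")
```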
--------------------------------------------------------------------------------
/model77M/weightshere.md:
--------------------------------------------------------------------------------
Download the LaMini-Flan-T5-77M model weights (77M parameters) into this folder
from https://huggingface.co/MBZUAI/LaMini-Flan-T5-77M/tree/main
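
The same `snapshot_download` sketch shown in `model/248M_weights.md` works here too, pointed at this folder:

```python
from huggingface_hub import snapshot_download

# Download the smaller 77M checkpoint into ./model77M/
snapshot_download(repo_id="MBZUAI/LaMini-Flan-T5-77M", local_dir="./model77M/")
```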
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LaminiChat
Code for the Medium article "Say Goodbye to OpenAI: Create a Chatbot on Your Local PC, part 2".

Read the [full article here](https://artificialcorner.com/say-goodbye-to-openai-create-a-chatbot-on-your-local-pc-part-2-104c8887d802).
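
## Getting started

A minimal path to run the chatbot locally (the dependency list is an assumption, since the repo does not ship a requirements file): download the model weights into `./model/` as described in `model/248M_weights.md`, install the dependencies with `pip install torch transformers langchain streamlit accelerate`, then start the app with `streamlit run stapp.py`. To sanity-check the model without the UI, run `python test.py`.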
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
# Quick sanity check of the local LaMini model, without the Streamlit UI
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain

### INITIALIZING LAMINI MODEL
checkpoint = "./model/"  # local folder holding the LaMini-Flan-T5-248M weights
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
base_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint,
                                                   device_map='auto',
                                                   torch_dtype=torch.float32)

### INITIALIZING PIPELINE CHAIN WITH LANGCHAIN
# Build the pipeline from the model and tokenizer loaded above, instead of
# letting HuggingFacePipeline.from_model_id() load the checkpoint a second time
pipe = pipeline('text2text-generation',
                model=base_model,
                tokenizer=tokenizer,
                do_sample=True,  # enable sampling so temperature takes effect
                temperature=0.45,
                min_length=30,
                max_length=350,
                repetition_penalty=5.0)
llm = HuggingFacePipeline(pipeline=pipe)

template = """{text}"""
prompt = PromptTemplate(template=template, input_variables=["text"])
chat = LLMChain(prompt=prompt, llm=llm)

yourprompt = "Describe the impact of AI on healthcare."

reply = chat.run(yourprompt)
print(reply)
--------------------------------------------------------------------------------
/stapp.py:
--------------------------------------------------------------------------------
# For the graphical interface
import streamlit as st
# Internal usage
from time import sleep
#### IMPORTS FOR AI PIPELINES ###############
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain.llms import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain

# AVATARS: a local image path or a single emoji, e.g. "🦖" or "🤖" (shortcodes are not supported)
av_us = './man.png'
av_ass = './lamini.png'

# FUNCTION TO LOG ALL CHAT MESSAGES INTO chathistory.txt
def writehistory(text):
    with open('chathistory.txt', 'a') as f:
        f.write(text)
        f.write('\n')
    # the with-block closes the file automatically

### INITIALIZING LAMINI MODEL
# Cached so Streamlit does not reload the model on every rerun of the script
@st.cache_resource
def load_chain():
    checkpoint = "./model/"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    base_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint,
                                                       device_map='auto',
                                                       torch_dtype=torch.float32)
    ### INITIALIZING PIPELINE CHAIN WITH LANGCHAIN
    # Reuse the model and tokenizer loaded above instead of loading them twice
    pipe = pipeline('text2text-generation',
                    model=base_model,
                    tokenizer=tokenizer,
                    do_sample=True,  # enable sampling so temperature takes effect
                    temperature=0.45,
                    min_length=30,
                    max_length=350,
                    repetition_penalty=5.0)
    llm = HuggingFacePipeline(pipeline=pipe)
    prompt = PromptTemplate(template="""{text}""", input_variables=["text"])
    return LLMChain(prompt=prompt, llm=llm)

chat = load_chain()

st.title("LaMiniGPT ChatBot")
st.subheader("All the power of your local 248M parameter AI model")

# Set a default model name (the weights themselves are loaded from ./model/)
if "hf_model" not in st.session_state:
    st.session_state["hf_model"] = "MBZUAI/LaMini-Flan-T5-248M"

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    if message["role"] == "user":
        with st.chat_message(message["role"], avatar=av_us):
            st.markdown(message["content"])
    else:
        with st.chat_message(message["role"], avatar=av_ass):
            st.markdown(message["content"])

# Accept user input
if myprompt := st.chat_input("What can you do for me?"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": myprompt})
    # Display user message in chat message container
    with st.chat_message("user", avatar=av_us):
        st.markdown(myprompt)
        usertext = f"user: {myprompt}"
        writehistory(usertext)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar=av_ass):
        message_placeholder = st.empty()
        full_response = ""
        res = chat.run(myprompt)
        # Simulate streaming by revealing the reply one word at a time
        response = res.split(" ")
        for r in response:
            full_response = full_response + r + " "
            message_placeholder.markdown(full_response + "▌")
            sleep(0.1)
        message_placeholder.markdown(full_response)
        asstext = f"assistant: {full_response}"
        writehistory(asstext)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
--------------------------------------------------------------------------------