├── .gitignore
├── GPT4All Lib Eg.ipynb
├── README.md
├── app-chain.py
├── app-comparison.py
├── app.py
└── requirements.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | app.py
2 | nonopenai
3 | .ipynb_checkpoints
4 |
--------------------------------------------------------------------------------
/GPT4All Lib Eg.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 8,
6 | "id": "0a783a54-acb9-4713-a330-838a8568a7e4",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "import gpt4all"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 9,
16 | "id": "05a0994e-2b57-4960-b1b5-5d5a6c1ffbb5",
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "base = 'C:/Users/User/AppData'"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 10,
26 | "id": "8dd77159-97cb-47bc-90a1-2d5bfcfa3a91",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "MODELPATH = \"C:/Users/User/AppData/Local/nomic.ai/GPT4All/ggml-mpt-7b-instruct.bin\""
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 11,
36 | "id": "3bccab41-cc1d-475e-98b8-fb4ef4343cde",
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "name": "stdout",
41 | "output_type": "stream",
42 | "text": [
43 | "Found model file.\n"
44 | ]
45 | }
46 | ],
47 | "source": [
48 | "mpt = gpt4all.GPT4All(MODELPATH, model_type='mpt')"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 12,
54 | "id": "b264ceb9-0bd5-49b9-a9f6-63c75233c4dc",
55 | "metadata": {
56 | "tags": []
57 | },
58 | "outputs": [],
59 | "source": [
60 | "message = [{\"role\": \"user\", \"content\": \"Name 3 colors\"}]"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": null,
66 | "id": "74415811-24ec-4eaf-84ac-195973396942",
67 | "metadata": {
68 | "jupyter": {
69 | "source_hidden": true
70 | },
71 | "tags": []
72 | },
73 | "outputs": [],
74 | "source": [
75 | "task = 'name three colors'"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": null,
81 | "id": "65d573ce-84ad-431c-b77d-a4e6ff1ffe12",
82 | "metadata": {
83 | "jupyter": {
84 | "source_hidden": true
85 | },
86 | "tags": []
87 | },
88 | "outputs": [],
89 | "source": [
90 | "prompt = f\"\"\"\n",
91 | "### Instruction: \n",
92 | "The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.\n",
93 | "### Prompt: \n",
94 | "{task}\n",
95 | "### Response:\n",
96 | "\"\"\""
97 | ]
98 | },
99 | {
100 | "cell_type": "code",
101 | "execution_count": null,
102 | "id": "ab1a237d-7ffe-409c-a11c-463e8ae54041",
103 | "metadata": {},
104 | "outputs": [
105 | {
106 | "name": "stdout",
107 | "output_type": "stream",
108 | "text": [
109 | "### Instruction: \n",
110 | " The prompt below is a question to answer, a task to complete, or a conversation \n",
111 | " to respond to; decide which and write an appropriate response.\n",
112 | " \n",
113 | "### Prompt: \n",
114 | "Name 3 colors\n",
115 | "### Response:\n"
116 | ]
117 | }
118 | ],
119 | "source": [
120 | "response = mpt.chat_completion(message); response "
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "id": "f2da0052-14bb-495a-8e48-fda851870332",
127 | "metadata": {},
128 | "outputs": [],
129 | "source": []
130 | }
131 | ],
132 | "metadata": {
133 | "kernelspec": {
134 | "display_name": "nonopenai",
135 | "language": "python",
136 | "name": "nonopenai"
137 | },
138 | "language_info": {
139 | "codemirror_mode": {
140 | "name": "ipython",
141 | "version": 3
142 | },
143 | "file_extension": ".py",
144 | "mimetype": "text/x-python",
145 | "name": "python",
146 | "nbconvert_exporter": "python",
147 | "pygments_lexer": "ipython3",
148 | "version": "3.9.12"
149 | }
150 | },
151 | "nbformat": 4,
152 | "nbformat_minor": 5
153 | }
154 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Building LLM Apps...without OpenAI
2 | Want to build LLM apps...but without OpenAI dependencies? Well, have I got the code for you, my friend. In this project I walk through how to build a LangChain x Streamlit app using GPT4All. We start off with a simple app and then build up to a LangChain PythonREPL agent.
3 |
4 | ## See it live and in action 📺
5 | [Watch the tutorial on YouTube](https://youtu.be/5JpPo-NOq9s 'Tutorial')
6 |
7 | # Startup 🚀
8 | 1. Create a virtual environment `python -m venv nonopenai`
9 | 2. Activate it:
10 | - Windows:`.\nonopenai\Scripts\activate`
11 | - Mac: `source nonopenai/bin/activate`
12 | 3. Install GPT4All using the GUI-based installer for your OS:
13 | - Windows: https://gpt4all.io/installers/gpt4all-installer-win64.exe
14 | - Mac: https://gpt4all.io/installers/gpt4all-installer-darwin.dmg
15 | - Ubuntu: https://gpt4all.io/installers/gpt4all-installer-linux.run
16 | 4. Download the required LLM models and take note of the PATH they're installed to
17 | 5. Clone this repo `git clone https://github.com/nicknochnack/Nopenai`
18 | 6. Go into the directory `cd Nopenai`
19 | 7. Install the required dependencies `pip install -r requirements.txt`
20 | 8. Update the model paths on line 9 of `app.py` and line 5 of `app-chain.py`
21 | 9. Start the python agent app by running `streamlit run app.py` or the chain app by running `streamlit run app-chain.py`
22 | 10. Go back to my YouTube channel and like and subscribe 😉...no seriously...please! lol
23 | 11. The comparison app can be started by running `streamlit run app-comparison.py`. Before you do that though, update the base ggml download path on line 16, e.g. `BASE_PATH = 'C:/Users/User/AppData/Local/nomic.ai/GPT4All/'`, and your OpenAI API key on line 18. The full command sequence is sketched below.
24 |
25 |
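26 | On Windows, the whole setup from the steps above boils down to something like this (Mac users activate with `source nonopenai/bin/activate`; the GUI model download from steps 3-4 and the path edits from step 8 still happen in between):
27 | ```sh
28 | python -m venv nonopenai
29 | .\nonopenai\Scripts\activate
30 | git clone https://github.com/nicknochnack/Nopenai
31 | cd Nopenai
32 | pip install -r requirements.txt
33 | streamlit run app.py
34 | ```
35 |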
36 | # Other References 🔗
37 |
38 | - GPT4All Reference: mainly used to determine how to install the GPT4All library and its references. The doco was changing frequently; at the time of coding, this was the most up-to-date example of getting it running.
39 |
40 | # Who, When, Why?
41 | 👨🏾‍💻 Author: Nick Renotte
42 | 📅 Version: 1.x
43 | 📜 License: This project is licensed under the MIT License
44 |
45 |
--------------------------------------------------------------------------------
/app-chain.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | from langchain import PromptTemplate, LLMChain
3 | from langchain.llms import GPT4All
4 |
5 | PATH = 'C:/Users/User/AppData/Local/nomic.ai/GPT4All/ggml-mpt-7b-instruct.bin'  # update to wherever your model lives (README step 8)
6 |
7 | llm = GPT4All(model=PATH, verbose=True)  # local MPT model served through LangChain's GPT4All wrapper
8 |
9 | template = PromptTemplate(input_variables=['question'], template="""
10 | Question: {question}
11 |
12 | Answer: Let's think step by step.
13 | """)
14 |
15 | llm_chain = LLMChain(prompt=template, llm=llm)  # wire the prompt template into the local LLM
16 |
17 | st.title('🦜🔗 GPT4ALL Y\'All')
18 | st.info('This is using the MPT model!')
19 | prompt = st.text_input('Enter your prompt here!')
20 |
21 | if prompt:
22 | response = llm_chain.run(prompt)
23 | print(response)
24 | st.write(response)
25 |
26 |
--------------------------------------------------------------------------------
/app-comparison.py:
--------------------------------------------------------------------------------
1 | # App dev framework
2 | import streamlit as st
3 | import os
4 |
5 | # Import dependencies
6 | from langchain.llms import GPT4All, OpenAI
7 | from langchain import PromptTemplate, LLMChain
8 | # from ctransformers.langchain import CTransformers  # unused here, and ctransformers isn't pinned in requirements.txt
9 |
10 | # Python toolchain imports
11 | from langchain.agents.agent_toolkits import create_python_agent
12 | from langchain.tools.python.tool import PythonREPLTool
13 |
14 |
15 | # Path to weights
16 | BASE_PATH = 'C:/Users/User/AppData/Local/nomic.ai/GPT4All/'  # update to your GPT4All model directory
17 |
18 | os.environ['OPENAI_API_KEY'] = 'YOUR OPENAI API KEY HERE'  # only needed for the OpenAI model option
19 |
20 | # Title
21 | st.title('🦜🔗 GPT For Y\'all')
22 |
23 | with st.sidebar:
24 | st.info('This application allows you to use LLMs for a range of tasks. The selections displayed below leverage prompt formatting to streamline your ability to do stuff!')
25 | option = st.radio('Choose your task', ['Base Gen', 'Creative', 'Summarization', 'Few Shot', 'Python'])
26 | models = [*list(os.listdir(BASE_PATH)), 'OpenAI']
27 | model = st.radio('Choose your model', models)
28 | st.write(model)
29 |
30 | if model != 'OpenAI':
31 | PATH = f'{BASE_PATH}{model}'
32 | # Instance of llm
33 |     llm = GPT4All(model=PATH, verbose=True, temp=0.1, n_predict=4096, top_p=.95, top_k=40, n_batch=9, repeat_penalty=1.1, repeat_last_n=64)  # repeat_last_n is a token count (int), not a penalty
34 |
35 | else:
36 | llm = OpenAI(temperature=0.5)
37 |
38 | if option=='Base Gen':
39 | st.info('Use this application to perform standard chat generation tasks.')
40 |
41 | # Prompt box
42 | prompt = st.text_input('Plug in your prompt here!')
43 |     # Instruction-style template matching the format the instruct-tuned
44 |     # models expect; the user's text from the prompt box is slotted in
45 |     # as {action} below.
46 | template = PromptTemplate(input_variables=['action'], template="""
47 | ### Instruction:
48 | The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
49 | ### Prompt:
50 | {action}
51 | ### Response:""")
52 | chain = LLMChain(llm=llm, prompt=template, verbose=True)
53 |
54 |     # When the user submits a prompt
55 | if prompt:
56 | # Pass the prompt to the LLM Chain
57 | response = chain.run(prompt)
58 |         # Render the response in the app
59 | st.write(response)
60 |
61 |
62 | if option=='Creative':
63 | st.info('Use this application to perform creative tasks like writing stories and poems.')
64 |
65 | # Prompt box
66 | prompt = st.text_input('Plug in your prompt here!')
67 |     # The instruction-style template below covers creative asks too;
68 |     # the creative request (a story or poem prompt) is passed in as
69 |     # {action}.
70 | template = PromptTemplate(input_variables=['action'], template="""
71 | ### Instruction:
72 | The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
73 | ### Prompt:
74 | {action}
75 | ### Response:""")
76 | chain = LLMChain(llm=llm, prompt=template, verbose=True)
77 |
78 |     # When the user submits a prompt
79 | if prompt:
80 | # Pass the prompt to the LLM Chain
81 | response = chain.run(prompt)
82 |         # Render the response in the app
83 | st.write(response)
84 |
85 | if option=='Summarization':
86 | st.info('Use this application to perform summarization on blocks of text.')
87 |
88 | # Prompt box
89 | prompt = st.text_area('Plug in your prompt here!')
90 | template = PromptTemplate(input_variables=['action'], template="""
91 | ### Instruction:
92 | The prompt below is a passage to summarize. Using the prompt, provide a summarized response.
93 | ### Prompt:
94 | {action}
95 | ### Summary:""")
96 | chain = LLMChain(llm=llm, prompt=template, verbose=True)
97 |
98 |     # When the user submits a passage
99 | if prompt:
100 | # Pass the prompt to the LLM Chain
101 | response = chain.run(prompt)
102 |         # Render the summary in the app
103 | st.write(response)
104 |
105 | if option=='Few Shot':
106 |
107 | st.info('Pass through some examples of task-output to perform few-shot prompting.')
108 | # Examples for few shots
109 | examples = st.text_area('Plug in your examples!')
110 | prompt = st.text_area('Plug in your prompt here!')
111 |
112 | template = PromptTemplate(input_variables=['action','examples'], template="""
113 | ### Instruction:
114 | The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
115 | ### Examples:
116 | {examples}
117 | ### Prompt:
118 | {action}
119 | ### Response:""")
120 | chain = LLMChain(llm=llm, prompt=template, verbose=True)
121 |
122 |     # When the user submits a prompt
123 | if prompt:
124 | # Pass the prompt to the LLM Chain
125 | response = chain.run(examples=examples, action=prompt)
126 | print(response)
127 |         # Render the response in the app
128 | st.write(response)
129 |
130 | if option=='Python':
131 | st.info('Leverage a Python agent by using the PythonREPLTool inside of Langchain.')
132 | # Python agent
133 | python_agent = create_python_agent(llm=llm, tool=PythonREPLTool(), verbose=True)
134 | # Prompt text box
135 | prompt = st.text_input('Plug in your prompt here!')
136 |     # When the user submits a prompt
137 | if prompt:
138 |         # Pass the prompt to the Python agent
139 | response = python_agent.run(prompt)
140 |
141 |         # Render the agent's answer in the app
142 | st.write(response)
143 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 | from langchain import PromptTemplate, LLMChain
4 | from langchain.llms import GPT4All
5 |
6 | from langchain.agents.agent_toolkits import create_python_agent
7 | from langchain.tools.python.tool import PythonREPLTool
8 |
9 | PATH = 'C:/Users/User/AppData/Local/nomic.ai/GPT4All/ggml-gpt4all-l13b-snoozy.bin'  # update to wherever your model lives (README step 8)
10 | llm = GPT4All(model=PATH, verbose=True)  # local Snoozy model through LangChain's GPT4All wrapper
11 |
12 | agent_executor = create_python_agent(
13 | llm=llm,
14 |     tool=PythonREPLTool(),  # gives the agent a REPL to execute the Python it writes
15 | verbose=True
16 | )
17 |
18 | st.title('🦜🔗 GPT For Y\'all')
19 |
20 | prompt = st.text_input('Enter your prompt here!')
21 |
22 | if prompt:
23 | response = agent_executor.run(prompt)
24 | st.write(response)
25 |
26 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp==3.8.4
2 | aiosignal==1.3.1
3 | altair==4.2.2
4 | asttokens==2.2.1
5 | async-timeout==4.0.2
6 | attrs==23.1.0
7 | backcall==0.2.0
8 | blinker==1.6.2
9 | cachetools==5.3.0
10 | certifi==2023.5.7
11 | charset-normalizer==3.1.0
12 | click==8.1.3
13 | colorama==0.4.6
14 | comm==0.1.3
15 | dataclasses-json==0.5.7
16 | debugpy==1.6.7
17 | decorator==5.1.1
18 | entrypoints==0.4
19 | executing==1.2.0
20 | filelock==3.12.0
21 | frozenlist==1.3.3
22 | fsspec==2023.5.0
23 | gitdb==4.0.10
24 | GitPython==3.1.31
25 | gpt4all==0.2.1
26 | greenlet==2.0.2
27 | huggingface-hub==0.14.1
28 | idna==3.4
29 | importlib-metadata==6.6.0
30 | ipykernel==6.23.1
31 | ipython==8.13.2
32 | jedi==0.18.2
33 | Jinja2==3.1.2
34 | jsonschema==4.17.3
35 | jupyter_client==8.2.0
36 | jupyter_core==5.3.0
37 | langchain==0.0.173
38 | markdown-it-py==2.2.0
39 | MarkupSafe==2.1.2
40 | marshmallow==3.19.0
41 | marshmallow-enum==1.5.1
42 | matplotlib-inline==0.1.6
43 | mdurl==0.1.2
44 | multidict==6.0.4
45 | mypy-extensions==1.0.0
46 | nest-asyncio==1.5.6
47 | numexpr==2.8.4
48 | numpy==1.24.3
49 | openapi-schema-pydantic==1.2.4
50 | packaging==23.1
51 | pandas==2.0.1
52 | parso==0.8.3
53 | pickleshare==0.7.5
54 | Pillow==9.5.0
55 | platformdirs==3.5.1
56 | prompt-toolkit==3.0.38
57 | protobuf==3.20.3
58 | psutil==5.9.5
59 | pure-eval==0.2.2
60 | pyarrow==12.0.0
61 | pydantic==1.10.7
62 | pydeck==0.8.1b0
63 | Pygments==2.15.1
64 | pygpt4all==1.1.0
65 | pygptj==2.0.3
66 | pyllamacpp==2.1.3
67 | Pympler==1.0.1
68 | pyrsistent==0.19.3
69 | python-dateutil==2.8.2
70 | pytz==2023.3
71 | pywin32==306
72 | PyYAML==6.0
73 | pyzmq==25.0.2
74 | regex==2023.5.5
75 | requests==2.30.0
76 | rich==13.3.5
77 | sentencepiece==0.1.99
78 | six==1.16.0
79 | smmap==5.0.0
80 | SQLAlchemy==2.0.13
81 | stack-data==0.6.2
82 | streamlit==1.22.0
83 | tenacity==8.2.2
84 | tokenizers==0.13.3
85 | toml==0.10.2
86 | toolz==0.12.0
87 | tornado==6.3.2
88 | tqdm==4.65.0
89 | traitlets==5.9.0
90 | transformers==4.29.2
91 | typing-inspect==0.8.0
92 | typing_extensions==4.5.0
93 | tzdata==2023.3
94 | tzlocal==5.0.1
95 | urllib3==2.0.2
96 | validators==0.20.0
97 | watchdog==3.0.0
98 | wcwidth==0.2.6
99 | yarl==1.9.2
100 | zipp==3.15.0
101 |
--------------------------------------------------------------------------------