├── data └── Database ├── src ├── main.py ├── fine_tuner.py ├── ModelSaver.py ├── requests.py ├── CommunicationLog.py ├── Communication.py ├── process_input.py ├── Chatflow.py ├── com.py ├── chat.py ├── FlowiseAICommunication.py ├── load_model.py ├── generate_test_data.py ├── nlp │ └── nlp.py ├── callback.py ├── scriptexecutor.py ├── PyPDF2.py ├── DualCoreLLM.py ├── train_model.py ├── FineTuneGPT,py.txt ├── fiileprocessor.py ├── class.py ├── save_preset.py ├── llmmanager.py ├── model_loader.py ├── memorymodule.py ├── classc.py └── neuralgpt.py ├── ProjectFiles ├── ProjectPlan.txt └── requirements.txt ├── NeuralGPT.egg-info ├── top_level.txt ├── dependency_links.txt ├── requires.txt ├── PKG-INFO └── SOURCES.txt ├── Wordpress plugin ├── Nowy Dokument tekstowy.txt ├── js │ ├── Nowy Dokument tekstowy.txt │ └── chatbox_v2.js ├── ajax.php ├── chatbot.php ├── long.odt ├── long.pdf ├── universal.db ├── combinepdf.pdf ├── wordpress1.pdf ├── wordpress2.pdf ├── dualcorellm.pdf ├── build │ ├── neuralgpt_chatbot │ │ ├── a.pdf │ │ ├── a1.pdf │ │ ├── au.pdf │ │ ├── auto.pdf │ │ └── aurt12.pdf │ └── python_script │ │ ├── base_library.zip │ │ └── localpycs │ │ ├── struct.pyc │ │ ├── pyimod03_ctypes.pyc │ │ ├── pyimod01_archive.pyc │ │ ├── pyimod02_importers.pyc │ │ └── pyimod04_pywin32.pyc ├── htmlmarkup.txt ├── package.json ├── upload.html ├── assets │ ├── upload.html │ ├── NeuralGPT Chatbot.html │ ├── neuralgpt-chatbot.js │ └── chatwindow.js ├── generate_response.py ├── perf.py ├── workschedule.py ├── server.js ├── search.json ├── listmodels.py ├── logcreation.py ├── submit_input.php ├── train_restapi.js ├── cronjob.py ├── validate_llm_file.php ├── NeuralGPT Chatbot.html ├── neuralgpt-browse.js ├── neuralgpt_chatbot.spec ├── test_chatbox.py ├── shortcode.php ├── chatbox_obj.py ├── notif_sys.py ├── python_script.spec ├── get_feedback.php ├── automate_transfer.py ├── module.txt ├── mlm.py ├── chat-window.js ├── chatbox_v2.js ├── admin.php ├── flowise.css ├── chatflow.py ├── load_pretrained.py ├── loadpretrained.py ├── chatwindow.js ├── send_mail.py ├── data_backup.py └── chat_gui.py ├── ind.py ├── neural-big.pdf ├── auto-script1.pdf ├── auto-script2.pdf ├── completepdf.pdf ├── integration1.pdf ├── integration2.pdf ├── integration3.pdf ├── agent-document.pdf ├── agent-document (1).pdf ├── agent-document (2).pdf ├── agent-document (21).pdf ├── agent-document (3).pdf ├── agent-document (4).pdf ├── fine_tuner.py ├── streamlit ├── pages │ └── chat-hub.db ├── requirements.txt └── home.py ├── __pycache__ ├── gui.cpython-311.pyc ├── model.cpython-311.pyc ├── utils.cpython-311.pyc ├── dataset.cpython-311.pyc ├── pinecone.cpython-311.pyc ├── requests.cpython-311.pyc ├── DualCoreLLM.cpython-311.pyc ├── load_model.cpython-311.pyc └── neuralgpt.cpython-311.pyc ├── utils ├── fine_tuner.py ├── Scripting.py ├── InternetAccess.py ├── requests.py ├── CommunicationLog.py ├── MediaPlayer.py ├── Communication.py ├── process_input.py ├── Chatflow.py ├── com.py ├── chat.py ├── FlowiseAICommunication.py ├── FileTransfer.py ├── load_model.py ├── generate_test_data.py ├── callback.py ├── ScriptExecutor.py ├── PyPDF2.py ├── NLPModule.py ├── DualCoreLLM.py ├── train_model.py ├── FineTuneGPT,py.txt ├── FileProcessor.py ├── class.py ├── save_preset.py ├── LLMManager.py ├── Memory.py ├── model_loader.py ├── MemoryModule.py ├── classc.py └── neuralgpt.py ├── code ├── __pycache__ │ ├── models.cpython-311.pyc │ └── utils.cpython-311.pyc ├── DatabaseModule.py ├── ScriptExecutor.py ├── main.py └── utils.py ├── Chat-center ├── 
requirements.txt ├── instructions.txt ├── index.html └── client.js ├── agent_scripts ├── __pycache__ │ └── database.cpython-311.pyc ├── database.py ├── script_executor.py └── main.py ├── requirements.txt ├── Scripting.py ├── auto ├── task1.py ├── markdown.sh ├── saveastxt.py ├── saveashtml.py ├── task3.sh └── task2.java ├── nlp ├── Scripting.py ├── DualCoreLLM.py ├── MediaPlayer.py ├── tools.py ├── MachineLearning.py ├── DocumentEditor.py ├── FileTransfer.py ├── NLPModule.py └── Memory.py ├── neuralgod.py ├── ModelSaver.py ├── requests.py ├── CommunicationLog.py ├── MediaPlayer.py ├── vord2.py ├── setup.py ├── tools.py ├── Communication.py ├── pinecon.py ├── MachineLearning.py ├── model.py ├── process_input.py ├── agent_script.py ├── ma.py ├── responses.json ├── pine.py ├── Chatflow.py ├── com.py ├── chat.py ├── FlowiseAICommunication.py ├── extract_text.py ├── FileTransfer.py ├── load_model.py ├── long.py ├── generate_test_data.py ├── sort_files.py ├── geninit.py ├── callback.py ├── ScriptExecutor.py ├── PyPDF2.py ├── nlp.py ├── NLPModule.py ├── DualCoreLLM.py ├── main.py ├── .devcontainer └── devcontainer.json ├── train_model.py ├── FineTuneGPT.py ├── GUI ├── GUILLMmanager.py ├── GUImain.py ├── GUIManager.py ├── file-8rb.file ├── GUIFileViewer.py └── GUIFileViewer.txt ├── FileProcessor.py ├── TEST.py ├── class.py ├── save_preset.py ├── LLMManager.py ├── Memory.py ├── model_loader.py ├── gu.py ├── MemoryModule.py ├── exe.py ├── classc.py ├── dataset.py ├── chatboxx.py ├── neuralgpt.py ├── README.md └── DocumentEditor.py /data/Database: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ProjectFiles/ProjectPlan.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NeuralGPT.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Wordpress plugin/Nowy Dokument tekstowy.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /NeuralGPT.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Wordpress plugin/js/Nowy Dokument tekstowy.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ind.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | pinecone.describe_index("neuralai") -------------------------------------------------------------------------------- /Wordpress plugin/ajax.php: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Wordpress plugin/chatbot.php: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /NeuralGPT.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | torch 3 | transformers 4 | pytest 5 | -------------------------------------------------------------------------------- /neural-big.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/neural-big.pdf -------------------------------------------------------------------------------- /auto-script1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/auto-script1.pdf -------------------------------------------------------------------------------- /auto-script2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/auto-script2.pdf -------------------------------------------------------------------------------- /completepdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/completepdf.pdf -------------------------------------------------------------------------------- /integration1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/integration1.pdf -------------------------------------------------------------------------------- /integration2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/integration2.pdf -------------------------------------------------------------------------------- /integration3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/integration3.pdf -------------------------------------------------------------------------------- /agent-document.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document.pdf -------------------------------------------------------------------------------- /agent-document (1).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document (1).pdf -------------------------------------------------------------------------------- /agent-document (2).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document (2).pdf -------------------------------------------------------------------------------- /agent-document (21).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document (21).pdf -------------------------------------------------------------------------------- /agent-document (3).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document (3).pdf -------------------------------------------------------------------------------- /agent-document (4).pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent-document (4).pdf --------------------------------------------------------------------------------
/Wordpress plugin/long.odt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/long.odt --------------------------------------------------------------------------------
/Wordpress plugin/long.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/long.pdf --------------------------------------------------------------------------------
/fine_tuner.py: -------------------------------------------------------------------------------- 1 | from FineTuneGPT import FineTuneGPT  # assumed import; a FineTuneGPT.py sits at the repo root 2 | 3 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt') 4 | fine_tuner.fine_tune_model() --------------------------------------------------------------------------------
/Wordpress plugin/universal.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/universal.db --------------------------------------------------------------------------------
/src/fine_tuner.py: -------------------------------------------------------------------------------- 1 | from FineTuneGPT import FineTuneGPT  # assumed import; a FineTuneGPT.py sits at the repo root 2 | 3 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt') 4 | fine_tuner.fine_tune_model() --------------------------------------------------------------------------------
/streamlit/pages/chat-hub.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/streamlit/pages/chat-hub.db --------------------------------------------------------------------------------
/Wordpress plugin/combinepdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/combinepdf.pdf --------------------------------------------------------------------------------
/Wordpress plugin/wordpress1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/wordpress1.pdf --------------------------------------------------------------------------------
/Wordpress plugin/wordpress2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/wordpress2.pdf --------------------------------------------------------------------------------
/__pycache__/gui.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/gui.cpython-311.pyc --------------------------------------------------------------------------------
/utils/fine_tuner.py: -------------------------------------------------------------------------------- 1 | from FineTuneGPT import FineTuneGPT  # assumed import; a FineTuneGPT.py sits at the repo root 2 | 3 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt') 4 | fine_tuner.fine_tune_model() --------------------------------------------------------------------------------
/Wordpress plugin/dualcorellm.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/dualcorellm.pdf -------------------------------------------------------------------------------- /__pycache__/model.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/model.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/dataset.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/dataset.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/pinecone.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/pinecone.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/requests.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/requests.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/DualCoreLLM.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/DualCoreLLM.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/load_model.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/load_model.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/neuralgpt.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/__pycache__/neuralgpt.cpython-311.pyc -------------------------------------------------------------------------------- /code/__pycache__/models.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/code/__pycache__/models.cpython-311.pyc -------------------------------------------------------------------------------- /code/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/code/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /Chat-center/requirements.txt: -------------------------------------------------------------------------------- 1 | gradio==3.25 2 | openai==0.27 3 | langchain==0.0.139 4 | google-api-python-client 5 | requests 6 | transformers -------------------------------------------------------------------------------- /Wordpress 
plugin/build/neuralgpt_chatbot/a.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/neuralgpt_chatbot/a.pdf --------------------------------------------------------------------------------
/Wordpress plugin/build/neuralgpt_chatbot/a1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/neuralgpt_chatbot/a1.pdf --------------------------------------------------------------------------------
/Wordpress plugin/build/neuralgpt_chatbot/au.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/neuralgpt_chatbot/au.pdf --------------------------------------------------------------------------------
/Wordpress plugin/build/neuralgpt_chatbot/auto.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/neuralgpt_chatbot/auto.pdf --------------------------------------------------------------------------------
/Wordpress plugin/build/neuralgpt_chatbot/aurt12.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/neuralgpt_chatbot/aurt12.pdf --------------------------------------------------------------------------------
/agent_scripts/__pycache__/database.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/agent_scripts/__pycache__/database.cpython-311.pyc --------------------------------------------------------------------------------
/requirements.txt: -------------------------------------------------------------------------------- 1 | # NOTE: datetime and asyncio are standard-library modules and were removed from this list; 2 | # "websocket" is assumed to refer to the websocket-client package. 3 | gradio 4 | requests 5 | websockets 6 | websocket-client 7 | bs4 8 | pysimplegui 9 | g4f 10 | gpt4free 11 | --------------------------------------------------------------------------------
/streamlit/requirements.txt: -------------------------------------------------------------------------------- 1 | # NOTE: datetime and asyncio are standard-library modules and were removed from this list; 2 | # "websocket" is assumed to refer to the websocket-client package. 3 | gradio 4 | requests 5 | websockets 6 | websocket-client 7 | bs4 8 | pysimplegui 9 | g4f 10 | gpt4free --------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/base_library.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/base_library.zip --------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/localpycs/struct.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/localpycs/struct.pyc --------------------------------------------------------------------------------
/Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(script_path)
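9 | 10 | # Illustrative usage sketch (hypothetical script path; not part of the original file): 11 | # Scripting().execute_script(["python", "scripts/example_task.py"]) 12 | # Passing an argument list avoids requiring the target script itself to be executable.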
--------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/localpycs/pyimod03_ctypes.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/localpycs/pyimod03_ctypes.pyc --------------------------------------------------------------------------------
/auto/task1.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import os 3 | 4 | pdf_path = "C:/path/to/pdf/file.pdf" 5 | destination_folder = "E:/AI/NeuralGPT/NeuralGPT" 6 | 7 | shutil.copy(pdf_path, destination_folder) --------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/localpycs/pyimod01_archive.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/localpycs/pyimod01_archive.pyc --------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/localpycs/pyimod02_importers.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/localpycs/pyimod02_importers.pyc --------------------------------------------------------------------------------
/Wordpress plugin/build/python_script/localpycs/pyimod04_pywin32.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/HEAD/Wordpress plugin/build/python_script/localpycs/pyimod04_pywin32.pyc --------------------------------------------------------------------------------
/nlp/Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(script_path) --------------------------------------------------------------------------------
/utils/Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(script_path) --------------------------------------------------------------------------------
/NeuralGPT.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: NeuralGPT 3 | Version: 0.1 4 | Summary: A project for neural GPT 5 | Author: B staszewski 6 | Author-email: bstaszewski1984@gmail.com 7 | License-File: LICENSE 8 | --------------------------------------------------------------------------------
/NeuralGPT.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | LICENSE 2 | README.md 3 | setup.py 4 | NeuralGPT.egg-info/PKG-INFO 5 | NeuralGPT.egg-info/SOURCES.txt 6 | NeuralGPT.egg-info/dependency_links.txt 7 | NeuralGPT.egg-info/requires.txt 8 | NeuralGPT.egg-info/top_level.txt --------------------------------------------------------------------------------
/ProjectFiles/requirements.txt: -------------------------------------------------------------------------------- 1 | # NOTE: asyncio, datetime, sqlite3, and json are Python standard-library modules; 2 | # they were removed from this list because they cannot be installed via pip. 3 | g4f 4 | openai 5 | requests 6 | websockets 7 | anthropic 8 | streamlit 9 | fireworks-client 10 | PyCharacterAI 11 | langchain 12 | chromadb 13 | pdfplumber 14 | PySimpleGUI --------------------------------------------------------------------------------
/neuralgod.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | API_URL = "http://localhost:3000/api/v1/prediction/f20a3a35-7d11-445d-a484-1d993a319ebf" 4 | 5 | def query(payload): 6 | response = requests.post(API_URL, json=payload) 7 | return response.json() 8 | 9 | output = query({ 10 | "question": "Hey, how are you?", 11 | }) --------------------------------------------------------------------------------
/nlp/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | class DualCoreLLM: 2 | def __init__(self, logical_LLM, direct_LLM): 3 | self.logical_LLM = logical_LLM 4 | self.direct_LLM = direct_LLM 5 | 6 | def think(self, input_data): 7 | return self.logical_LLM.process(input_data) 8 | 9 | def execute(self, input_data): 10 | return self.direct_LLM.process(input_data) --------------------------------------------------------------------------------
/ModelSaver.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from model_saver import ModelSaver  # NOTE: assumes a model_saver module exposing ModelSaver; none ships with this repo 3 | 4 | # Load a pretrained model 5 | model = NeuralGPT.from_pretrained('gpt2') 6 | 7 | # Save the model to a local file 8 | saver = ModelSaver(model) 9 | saver.save_local('my_model.bin') 10 | 11 | # Save the model to an online source 12 | saver.save_online('http://example.com/model') --------------------------------------------------------------------------------
/Wordpress plugin/htmlmarkup.txt: -------------------------------------------------------------------------------- 1 | 
2 |
3 | 4 | 5 | 6 |
7 | 8 |
--------------------------------------------------------------------------------
/Wordpress plugin/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "neuralgpt-chatbot", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "chatwindow.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [], 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "socket.io": "^4.6.1" 14 | } 15 | } 16 | --------------------------------------------------------------------------------
/src/ModelSaver.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from model_saver import ModelSaver  # NOTE: assumes a model_saver module exposing ModelSaver; none ships with this repo 3 | 4 | # Load a pretrained model 5 | model = NeuralGPT.from_pretrained('gpt2') 6 | 7 | # Save the model to a local file 8 | saver = ModelSaver(model) 9 | saver.save_local('my_model.bin') 10 | 11 | # Save the model to an online source 12 | saver.save_online('http://example.com/model') --------------------------------------------------------------------------------
/utils/InternetAccess.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | class InternetAccess: 4 | def __init__(self): 5 | self.session = requests.Session() 6 | 7 | def request(self, method, url, headers=None, params=None, data=None, json=None, auth=None): 8 | response = self.session.request(method, url, headers=headers, params=params, data=data, json=json, auth=auth) 9 | return response --------------------------------------------------------------------------------
/requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: naming this file requests.py shadows the requests library when run from its own directory 2 | 3 | def get(url, headers=None, params=None): 4 | try: 5 | response = requests.get(url, headers=headers, params=params) 6 | response.raise_for_status() 7 | return response 8 | except requests.exceptions.HTTPError as http_err: 9 | print(f'HTTP error occurred: {http_err}') 10 | except Exception as err: 11 | print(f'Other error occurred: {err}') --------------------------------------------------------------------------------
/src/requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: naming this file requests.py shadows the requests library when run from its own directory 2 | 3 | def get(url, headers=None, params=None): 4 | try: 5 | response = requests.get(url, headers=headers, params=params) 6 | response.raise_for_status() 7 | return response 8 | except requests.exceptions.HTTPError as http_err: 9 | print(f'HTTP error occurred: {http_err}') 10 | except Exception as err: 11 | print(f'Other error occurred: {err}') --------------------------------------------------------------------------------
/utils/requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: naming this file requests.py shadows the requests library when run from its own directory 2 | 3 | def get(url, headers=None, params=None): 4 | try: 5 | response = requests.get(url, headers=headers, params=params) 6 | response.raise_for_status() 7 | return response 8 | except requests.exceptions.HTTPError as http_err: 9 | print(f'HTTP error occurred: {http_err}') 10 | except Exception as err: 11 | print(f'Other error occurred: {err}') --------------------------------------------------------------------------------
/CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp,
9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs --------------------------------------------------------------------------------
/src/CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp, 9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs --------------------------------------------------------------------------------
/utils/CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp, 9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs --------------------------------------------------------------------------------
/MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() --------------------------------------------------------------------------------
/vord2.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from gensim.models import Word2Vec 3 | 4 | # NOTE: `model` must be a trained or loaded Word2Vec instance before it can be pickled; 5 | # the original snippet dumped an undefined name and would overwrite the ggml weights file below, e.g.: 6 | # model = Word2Vec(sentences, vector_size=100, min_count=1) 7 | model_path = r'E:\AI\NeuralGPT\NeuralGPT\models\ggml-alpaca-7b-q4.bin' 8 | with open(model_path, 'wb') as f: 9 | pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL) 10 | 11 | # Unpickle the Word2Vec model 12 | with open(model_path, 'rb') as f: 13 | model = pickle.load(f) --------------------------------------------------------------------------------
/nlp/MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() --------------------------------------------------------------------------------
/utils/MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() --------------------------------------------------------------------------------
/Wordpress plugin/upload.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html> 3 | <head> 4 | <title>File Upload</title> 5 | </head> 6 | <body> 7 | <h1>Upload a File</h1> 8 | <form method="post" enctype="multipart/form-data"> 9 | <input type="file" name="file"> 10 | <input type="submit" value="Upload"> 11 | </form> 12 | 
13 | </body> 14 | </html> --------------------------------------------------------------------------------
/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="NeuralGPT", 5 | version="0.1", 6 | author="B staszewski", 7 | author_email="bstaszewski1984@gmail.com", 8 | description="A project for neural GPT", 9 | packages=find_packages(), 10 | install_requires=[ 11 | "numpy", 12 | "torch", 13 | "transformers", 14 | "pytest" 15 | ] 16 | ) --------------------------------------------------------------------------------
/Wordpress plugin/assets/upload.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html> 3 | <head> 4 | <title>File Upload</title> 5 | </head> 6 | <body> 7 | <h1>Upload a File</h1> 8 | <form method="post" enctype="multipart/form-data"> 9 | <input type="file" name="file"> 10 | <input type="submit" value="Upload"> 11 | </form> 12 | 
13 | </body> 14 | </html> --------------------------------------------------------------------------------
/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class Tools: 4 | def __init__(self): 5 | pass 6 | 7 | def create_directory(self, directory_path): 8 | os.makedirs(directory_path, exist_ok=True) 9 | 10 | def modify_file(self, file_path, modification_function): 11 | with open(file_path, 'r') as f: 12 | data = f.read() 13 | modified_data = modification_function(data) 14 | with open(file_path, 'w') as f: 15 | f.write(modified_data) --------------------------------------------------------------------------------
/nlp/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class Tools: 4 | def __init__(self): 5 | pass 6 | 7 | def create_directory(self, directory_path): 8 | os.makedirs(directory_path, exist_ok=True) 9 | 10 | def modify_file(self, file_path, modification_function): 11 | with open(file_path, 'r') as f: 12 | data = f.read() 13 | modified_data = modification_function(data) 14 | with open(file_path, 'w') as f: 15 | f.write(modified_data) --------------------------------------------------------------------------------
/Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle timeout 12 | return "Task timed out" --------------------------------------------------------------------------------
/pinecon.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | 3 | # NOTE: this sketch does not match the official pinecone client API: init() also 4 | # expects an environment, create_index() needs a dimension, and vectors are 5 | # upserted/queried through pinecone.Index("my_index"), not pinecone.index()/query(). 6 | pinecone.init(api_key="b372ae78-2b81-49bb-9f4d-d3c3e833921d") 7 | 8 | # Create a new index 9 | pinecone.create_index(index_name="my_index") 10 | 11 | # Index some vectors 12 | vectors = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] 13 | pinecone.index(index_name="my_index", data=vectors) 14 | 15 | # Search for similar vectors 16 | query_vector = [2, 3, 4] 17 | results = pinecone.query(index_name="my_index", data=query_vector, top_k=10) 18 | 19 | print(results) --------------------------------------------------------------------------------
/src/Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle timeout 12 | return "Task timed out" --------------------------------------------------------------------------------
/utils/Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle timeout 12 | return "Task timed out"
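13 | 14 | # NOTE: long_running_task is assumed to be defined elsewhere in the project. 15 | # A minimal stand-in for local testing might be: 16 | # async def long_running_task(): 17 | #     await asyncio.sleep(1) 18 | #     return "done"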
--------------------------------------------------------------------------------
/auto/markdown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set the path to the JSON file 4 | json_file="path/to/json/file.json" 5 | 6 | # Set the path to the output Markdown file 7 | markdown_file="E:/AI/NeuralGPT/NeuralGPT/output.md" 8 | 9 | # Parse the JSON file and extract the data 10 | data=$(jq -r '.data' "$json_file") 11 | 12 | # Convert the data to Markdown format (quoting preserves newlines in the data) 13 | markdown=$(echo "$data" | pandoc -f html -t markdown) 14 | 15 | # Write the Markdown to the output file 16 | echo "$markdown" > "$markdown_file" --------------------------------------------------------------------------------
/MachineLearning.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets 2 | from sklearn.model_selection import train_test_split 3 | from sklearn.linear_model import LinearRegression 4 | 5 | class MachineLearning: 6 | def __init__(self): 7 | pass 8 | 9 | def train_model(self, X, y): 10 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 11 | model = LinearRegression() 12 | model.fit(X_train, y_train) 13 | return model 14 | 15 | def predict(self, model, X): 16 | return model.predict(X) --------------------------------------------------------------------------------
/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from transformers import GPT2LMHeadModel, GPT2Tokenizer 4 | 5 | class GPT(nn.Module): 6 | def __init__(self, model_path): 7 | super(GPT, self).__init__() 8 | self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2') 9 | self.model = GPT2LMHeadModel.from_pretrained(model_path) 10 | 11 | def forward(self, input_ids, attention_mask): 12 | outputs = self.model(input_ids, attention_mask=attention_mask) 13 | logits = outputs.logits 14 | return logits --------------------------------------------------------------------------------
/nlp/MachineLearning.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets 2 | from sklearn.model_selection import train_test_split 3 | from sklearn.linear_model import LinearRegression 4 | 5 | class MachineLearning: 6 | def __init__(self): 7 | pass 8 | 9 | def train_model(self, X, y): 10 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 11 | model = LinearRegression() 12 | model.fit(X_train, y_train) 13 | return model 14 | 15 | def predict(self, model, X): 16 | return model.predict(X) --------------------------------------------------------------------------------
/Chat-center/instructions.txt: -------------------------------------------------------------------------------- 1 | You need to have Node.js and Python installed. 2 | Install the dependencies listed in package.json (npm install ...). 3 | Install Gradio (pip install gradio). 4 | Enter your HuggingFace API token in server.js in the place marked for it, then run it in a cmd window: node server.js 5 | In a second cmd window: gradio app5.py 6 | In AgentsGPT.html, enter your Google CSE ID, Google API key, and OpenAI API key in the HTTP request script, then open the file in a browser. 7 | Click "connect". 8 | You can now speak with the integrated agents via the HTML chat interface. 9 | --------------------------------------------------------------------------------
/process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again." --------------------------------------------------------------------------------
/agent_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | # Create directory if it does not exist 5 | if not os.path.exists("agent_scripts"): 6 | os.mkdir("agent_scripts") 7 | 8 | # Get the content of the script from the URL 9 | url = "https://app.cognosys.ai/agents/4641560f-1ba9-4df6-ad62-1842ef8a892d" 10 | response = requests.get(url) 11 | script_content = response.content 12 | 13 | # Create the file and write the content to it 14 | file_path = os.path.join("agent_scripts", "agent_4641560f-1ba9-4df6-ad62-1842ef8a892d.py") 15 | with open(file_path, "wb") as f: 16 | f.write(script_content) --------------------------------------------------------------------------------
/src/process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again." --------------------------------------------------------------------------------
/utils/process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again."
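15 | 16 | # Illustrative usage sketch (assumes the local inference server on port 8000 is running): 17 | # print(process_input("Hello, NeuralGPT"))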
--------------------------------------------------------------------------------
/nlp/DocumentEditor.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from tkinter import filedialog 3 | 4 | class DocumentEditor: 5 | def __init__(self): 6 | self.root = tk.Tk() 7 | self.root.withdraw() 8 | 9 | def open_file(self): 10 | file_path = filedialog.askopenfilename() 11 | if file_path: 12 | with open(file_path, 'r') as f: 13 | return f.read() 14 | 15 | def save_file(self, data): 16 | file_path = filedialog.asksaveasfilename() 17 | if file_path: 18 | with open(file_path, 'w') as f: 19 | f.write(data) --------------------------------------------------------------------------------
/ma.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pinecone 3 | # NOTE: "NeuralGPT-0.1" is not a valid Python module name; the utils and gui modules 4 | # at the repo root (see __pycache__) are assumed instead. 5 | from utils import * 6 | from gui import run_gui 7 | 8 | # Upload pretrained model 9 | # NOTE: index_embeddings is not part of the official pinecone client; vectors are 10 | # normally upserted through pinecone.Index(...).upsert(...). 11 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin" 12 | model_name = "ggml-model-q4_0" 13 | pinecone.create_index(index_name=model_name, dimension=768) 14 | pinecone.index_embeddings(index_name=model_name, embeddings_path=model_path) 15 | 16 | # Load data 17 | data_file1 = "database1.csv" 18 | data_file2 = "database2.csv" 19 | data1 = load_data(data_file1) 20 | data2 = load_data(data_file2) 21 | 22 | # Run GUI 23 | run_gui(data1, data2) --------------------------------------------------------------------------------
/Wordpress plugin/generate_response.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoTokenizer, AutoModelForCausalLM 2 | 3 | # NOTE: the checkpoint directory holds a GPT-J model, so the generic Auto* classes 4 | # are used here rather than the GPT-2-specific ones from the original snippet. 5 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/gpt-j/" 6 | 7 | tokenizer = AutoTokenizer.from_pretrained(model_path) 8 | 9 | model = AutoModelForCausalLM.from_pretrained(model_path) 10 | 11 | def generate_response(input_text): 12 | 13 | input_ids = tokenizer.encode(input_text, return_tensors="pt") 14 | 15 | output_ids = model.generate(input_ids, max_length=50, num_return_sequences=1) 16 | 17 | output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True) 18 | 19 | return output_text 20 | --------------------------------------------------------------------------------
/responses.json: -------------------------------------------------------------------------------- 1 | { 2 | "greetings": [ 3 | { 4 | "pattern": "hi|hello|hey", 5 | "response": "Hi there!" 6 | }, 7 | { 8 | "pattern": "how are you|how's it going", 9 | "response": "I'm doing well, thank you. How about you?" 10 | } 11 | ], 12 | "questions": [ 13 | { 14 | "pattern": "what is your name|who are you", 15 | "response": "My name is NeuralGPT. I'm an AI language model." 16 | }, 17 | { 18 | "pattern": "what can you do", 19 | "response": "I can answer questions, provide information, and have conversations with you."
20 | } 21 | ] 22 | } --------------------------------------------------------------------------------
/pine.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | 3 | # NOTE: this sketch does not match the official pinecone client API (init() also 4 | # needs an environment and create_index() takes a short index name plus a dimension); 5 | # the value below is the index's service hostname, not an index name. 6 | # Initialize the Pinecone client library 7 | pinecone.init(api_key="41ddf57b-2fc0-495c-9fff-47c6f6ff4e4e") 8 | 9 | # Create a new index 10 | pinecone.create_index(index_name="neuralai-a82b13f.svc.asia-northeast1-gcp.pinecone.io") 11 | 12 | # Index some vectors 13 | vectors = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] 14 | pinecone.index(index_name="neuralai-a82b13f.svc.asia-northeast1-gcp.pinecone.io", data=vectors) 15 | 16 | # Search for similar vectors 17 | query_vector = [2, 3, 4] 18 | results = pinecone.query(index_name="neuralai-a82b13f.svc.asia-northeast1-gcp.pinecone.io", data=query_vector, top_k=10) 19 | 20 | print(results) --------------------------------------------------------------------------------
/Wordpress plugin/perf.py: -------------------------------------------------------------------------------- 1 | from transformers import pipeline, set_seed 2 | 3 | # NOTE: analyze_performance and suggest_improvement are assumed to be defined 4 | # elsewhere; they are not part of transformers or of this repository. 5 | # Load the model 6 | generator = pipeline('text-generation', model='CognitiveCodes/NeuralGPT') 7 | 8 | # Set seed for reproducibility 9 | set_seed(42) 10 | 11 | # Generate text 12 | generated_text = generator("The NeuralGPT project is performing", max_length=100, num_return_sequences=1) 13 | 14 | # Analyze the generated text 15 | performance_analysis = analyze_performance(generated_text) 16 | 17 | # Suggest new ideas for improvement based on analysis 18 | suggested_ideas = suggest_improvement(performance_analysis) 19 | 20 | # Print suggested ideas 21 | print("Suggested ideas for improvement: ", suggested_ideas) --------------------------------------------------------------------------------
/auto/saveastxt.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Set the path to the directory where the text file will be saved 4 | directory_path = r"E:\AI\NeuralGPT\NeuralGPT" 5 | 6 | # Set the path to the file containing the content produced by Cognosys 7 | content_file_path = r"path\to\content\file" 8 | 9 | # Read the content from the file 10 | with open(content_file_path, "r") as file: 11 | content = file.read() 12 | 13 | # Set the name of the text file 14 | file_name = "cognosys_content.txt" 15 | 16 | # Set the path to the text file 17 | file_path = os.path.join(directory_path, file_name) 18 | 19 | # Write the content to the text file 20 | with open(file_path, "w") as file: 21 | file.write(content) --------------------------------------------------------------------------------
/auto/saveashtml.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Path to the local clone of NeuralGPT repository 4 | neuralgpt_path = "E:/AI/NeuralGPT/NeuralGPT" 5 | 6 | # Content produced by Cognosys 7 | content = "This is some content produced by Cognosys." 8 | 9 | # Create the HTML file 10 | filename = "content.html" 11 | filepath = os.path.join(neuralgpt_path, filename) 12 | with open(filepath, "w") as f: 13 | f.write("<html>\n") 14 | f.write("<head>\n") 15 | f.write("<title>Content from Cognosys</title>\n") 16 | f.write("</head>\n") 17 | f.write("<body>\n") 18 | f.write(f"<p>{content}</p>\n") 19 | f.write("</body>\n") 20 | f.write("</html>\n") 21 | 22 | print(f"File saved to {filepath}") --------------------------------------------------------------------------------
/Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | pass  # placeholder: code to execute the autonomous scripts goes here 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs --------------------------------------------------------------------------------
/src/Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | pass  # placeholder: code to execute the autonomous scripts goes here 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs --------------------------------------------------------------------------------
/utils/Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | pass  # placeholder: code to execute the autonomous scripts goes here 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs --------------------------------------------------------------------------------
/com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ...
11 | 12 | # Establish a TLS connection; SERVER_AUTH because this side is the client verifying the server 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) 14 | # certfile takes the certificate and keyfile the private key (the original had them swapped) 15 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key) 16 | context.load_verify_locations(cafile=flowiseai_public_key) 17 | with socket.create_connection(('flowiseai.com', 443)) as sock: 18 | with context.wrap_socket(sock, server_hostname='flowiseai.com') as ssock: 19 | ssock.sendall(b'Hello, world!') 20 | data = ssock.recv(1024) --------------------------------------------------------------------------------
/src/com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ... 11 | 12 | # Establish a TLS connection; SERVER_AUTH because this side is the client verifying the server 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) 14 | # certfile takes the certificate and keyfile the private key (the original had them swapped) 15 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key) 16 | context.load_verify_locations(cafile=flowiseai_public_key) 17 | with socket.create_connection(('flowiseai.com', 443)) as sock: 18 | with context.wrap_socket(sock, server_hostname='flowiseai.com') as ssock: 19 | ssock.sendall(b'Hello, world!') 20 | data = ssock.recv(1024) --------------------------------------------------------------------------------
/utils/com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ...
11 | 12 | # Establish a TLS connection; SERVER_AUTH because this side is the client verifying the server 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) 14 | # certfile takes the certificate and keyfile the private key (the original had them swapped) 15 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key) 16 | context.load_verify_locations(cafile=flowiseai_public_key) 17 | with socket.create_connection(('flowiseai.com', 443)) as sock: 18 | with context.wrap_socket(sock, server_hostname='flowiseai.com') as ssock: 19 | ssock.sendall(b'Hello, world!') 20 | data = ssock.recv(1024) --------------------------------------------------------------------------------
/chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | # Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | chat() --------------------------------------------------------------------------------
/Wordpress plugin/workschedule.py: -------------------------------------------------------------------------------- 1 | import schedule 2 | import time 3 | 4 | # Define the function that performs the necessary actions 5 | def perform_actions(): 6 | # Code to access local data storage and modify files 7 | # Code to access universal database and achieve data harmonization 8 | pass  # placeholder so the function body is valid Python 9 | 10 | # Define the schedule for the actions to be performed 11 | schedule.every(24).hours.do(perform_actions) # Run every 24 hours 12 | schedule.every().day.at("12:00").do(perform_actions) # Run every day at 12:00 13 | schedule.every().hour.do(perform_actions) # Run every hour 14 | schedule.every(10).minutes.do(perform_actions) # Run every 10 minutes 15 | 16 | # Run the scheduling system 17 | while True: 18 | schedule.run_pending() 19 | time.sleep(1) --------------------------------------------------------------------------------
/src/chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | # Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | chat() --------------------------------------------------------------------------------
/utils/chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | #
Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | chat() -------------------------------------------------------------------------------- /FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- /src/FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- /utils/FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- /Wordpress plugin/server.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const server = http.createServer(); 3 | const io = require('socket.io')(server); 4 | 5 | io.on('connection', (socket) => { 6 | console.log('A user connected'); 7 | 8 | // Handle events from the client 9 | socket.on('chat message', (message) => { 10 | console.log('Received message:', message); 11 | // Process the message and send a response if needed 12 | }); 13 | 14 | // Handle disconnection 15 | socket.on('disconnect', () => { 16 | console.log('A user disconnected'); 17 | }); 18 | }); 19 | 20 | const port = 3001; // Specify the port number for your server 21 | server.listen(port, () => { 22 | console.log(`Socket.io server listening on port ${port}`); 23 | 
}); 24 | -------------------------------------------------------------------------------- /code/DatabaseModule.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | class DatabaseModule: 3 | def __init__(self, db_name): 4 | self.conn = sqlite3.connect(db_name) 5 | self.cursor = self.conn.cursor() 6 | 7 | def store_data(self, data, table_name): 8 | self.cursor.execute("CREATE TABLE IF NOT EXISTS " + table_name + " (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)") 9 | self.conn.commit() 10 | self.cursor.execute("INSERT INTO " + table_name + " (data) VALUES (?)", (data,)) 11 | self.conn.commit() 12 | 13 | def retrieve_data(self, query, table_name): 14 | self.cursor.execute("SELECT data FROM " + table_name + " WHERE data LIKE ?", ("%" + query + "%",)) 15 | data = self.cursor.fetchall() 16 | return data 17 | 
-------------------------------------------------------------------------------- /agent_scripts/database.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | class DatabaseModule: 3 | def __init__(self, db_name): 4 | self.conn = sqlite3.connect(db_name) 5 | self.cursor = self.conn.cursor() 6 | 7 | def store_data(self, data, table_name): 8 | self.cursor.execute("CREATE TABLE IF NOT EXISTS " + table_name + " (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)") 9 | self.conn.commit() 10 | self.cursor.execute("INSERT INTO " + table_name + " (data) VALUES (?)", (data,)) 11 | self.conn.commit() 12 | 13 | def retrieve_data(self, query, table_name): 14 | self.cursor.execute("SELECT data FROM " + table_name + " WHERE data LIKE ?", ("%" + query + "%",)) 15 | data = self.cursor.fetchall() 16 | return data 17 | 
-------------------------------------------------------------------------------- /extract_text.py: -------------------------------------------------------------------------------- 1 | import os 2 | import PyPDF2 3 | 4 | def extract_text_from_pdf(pdf_path): 5 | with open(pdf_path, 'rb') as f: 6 | pdf_reader = PyPDF2.PdfFileReader(f) 7 | text = '' 8 | for page_num in range(pdf_reader.numPages): 9 | text += pdf_reader.getPage(page_num).extractText() 10 | return text 11 | 12 | def main(): 13 | directory = r'E:\AI\NeuralGPT\NeuralGPT'  # raw string so the backslashes are not treated as escapes 14 | for filename in os.listdir(directory): 15 | if filename.endswith('.pdf'): 16 | pdf_path = os.path.join(directory, filename) 17 | text = extract_text_from_pdf(pdf_path) 18 | txt_path = os.path.splitext(pdf_path)[0] + '.txt' 19 | with open(txt_path, 'w') as f: 20 | f.write(text) 21 | 22 | if __name__ == '__main__': 23 | main()
-------------------------------------------------------------------------------- /Wordpress plugin/search.json: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const app = express(); 3 | 4 | // Define the search endpoint (the handler must be async because it awaits the search call) 5 | app.get('/api/search', async (req, res) => { 6 | // Extract the search query from the query parameter 7 | const query = req.query.q; 8 | 9 | // Perform the search using ElasticSearch (assumes an elasticSearchClient instance is configured elsewhere) 10 | const results = await elasticSearchClient.search({ 11 | index: 'shared_databank', 12 | body: { 13 | query: { 14 | match: { 15 | content: query 16 | } 17 | } 18 | } 19 | }); 20 | 21 | // Return the matching results to the client 22 | res.json(results.hits.hits); 23 | }); 24 | 25 | // Start the server 26 | app.listen(3000, () => { 27 | console.log('Server listening on port 3000'); 28 | }); -------------------------------------------------------------------------------- /FileTransfer.py: 
-------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write) -------------------------------------------------------------------------------- /load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /nlp/FileTransfer.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write) -------------------------------------------------------------------------------- /src/load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /utils/FileTransfer.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | 
class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write) -------------------------------------------------------------------------------- /utils/load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /Wordpress plugin/listmodels.py: -------------------------------------------------------------------------------- 1 | import os 2 | from neuralgpt import NeuralGPT  # "NeuralGPT-0.1" is not a valid module name; assuming the local neuralgpt module 3 | # Define the directory where the pretrained models are stored 4 | models_dir = "E:/AI/NeuralGPT/NeuralGPT/models/" 5 | # List all the pretrained models in the directory 6 | pretrained_models = os.listdir(models_dir) 7 | # Display the list of pretrained models to the user 8 | print("Select a pretrained model to load:") 9 | for i, model in enumerate(pretrained_models): 10 | print(f"{i+1}. {model}") 11 | # Ask the user to choose a pretrained model 12 | model_num = int(input("Enter the model number: ")) 13 | # Load the chosen pretrained model 14 | model_path = os.path.join(models_dir, pretrained_models[model_num-1]) 15 | neural_gpt = NeuralGPT(model_path) 16 | # Open the chat window and start the conversation 17 | # ... 
18 | -------------------------------------------------------------------------------- /long.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | async def long_running_task(): 4 | progress = 0 5 | while progress < 100: 6 | # do some work 7 | await asyncio.sleep(1) 8 | progress += 10 9 | yield f"Task progress: {progress}%" 10 | yield "Task completed" 11 | 12 | class Communication: 13 | async def execute_task(self): 14 | try: 15 | # execute the long-running task asynchronously with a timeout of 30 seconds (asyncio.timeout requires Python 3.11+) 16 | result = "" 17 | async with asyncio.timeout(30): 18 | async for progress_update in long_running_task(): 19 | result += progress_update + "\n" 20 | # handle successful completion of the task 21 | return result 22 | except asyncio.TimeoutError: 23 | # handle timeout 24 | return "Task timed out" -------------------------------------------------------------------------------- /generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /src/generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /utils/generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = 
random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /sort_files.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | # Define the directory where the files are located 5 | directory = "/path/to/directory" 6 | 7 | # Create a dictionary to store the file extensions and their corresponding subdirectories 8 | file_extensions = {} 9 | 10 | # Loop through all the files in the directory 11 | for filename in os.listdir(directory): 12 | 13 | # Skip subdirectories and files without an extension 14 | if not os.path.isfile(os.path.join(directory, filename)): 15 | continue 16 | 17 | # Get the file extension 18 | file_extension = os.path.splitext(filename)[1] 19 | if not file_extension: 20 | continue 21 | 22 | # If the file extension is not in the dictionary, create a new subdirectory for it 23 | if file_extension not in file_extensions: 24 | os.mkdir(os.path.join(directory, file_extension[1:])) 25 | file_extensions[file_extension] = True 26 | 27 | # Move the file to the corresponding subdirectory 28 | shutil.move(os.path.join(directory, filename), os.path.join(directory, file_extension[1:], filename)) -------------------------------------------------------------------------------- /auto/task3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set the input CSV file path 4 | input_file="example.csv" 5 | 6 | # Set the output Markdown file path 7 | output_file="E:/AI/NeuralGPT/NeuralGPT/table.md" 8 | 9 | # Read the CSV file and generate a Markdown table 10 | while read -r line 11 | do 12 | # Replace commas with pipes for Markdown table formatting 13 | row=$(echo "$line" | sed 's/,/ | /g') 14 | 15 | # Add Markdown table formatting to the row 16 | if [ -z "$header" ] 17 | then 18 | # The first row is the header 19 | header="$row" 20 | separator=$(echo "$header" | sed 's/[^|]/-/g') 21 | table="$header\n$separator" 22 | else 23 | # All other rows are data 24 | table="$table\n$row" 25 | fi 26 | done < "$input_file" 27 | 28 | # Save the Markdown table to the output file 29 | echo -e "$table" > "$output_file" -------------------------------------------------------------------------------- /Wordpress plugin/logcreation.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logging 4 | logging.basicConfig(filename='neural_ai.log', level=logging.DEBUG, 5 | format='%(asctime)s %(levelname)s %(message)s') 6 | 7 | def access_local_data_storage(): 8 | try: 9 | # Access local data storage 10 | # Code to create or modify files 11 | pass  # placeholder so the try block is syntactically valid 12 | except Exception as e: 13 | # Log the error 14 | logging.error('Error accessing local data storage: {}'.format(str(e))) 15 | 16 | def access_universal_database(): 17 | try: 18 | # Access universal database 19 | # Code to achieve data harmonization 20 | pass  # placeholder so the try block is syntactically valid 21 | except Exception as e: 22 | # Log the error 23 | logging.error('Error accessing universal database: {}'.format(str(e))) 24 | 25 | # Call the functions 26 | access_local_data_storage() 27 | access_universal_database() -------------------------------------------------------------------------------- /geninit.py: -------------------------------------------------------------------------------- 1 | # Import necessary modules 2 | import sys 3 | import os 4 | from PyQt5.QtWidgets import QApplication, QMainWindow 5 | 6 | # Import project modules ("NeuralGPT-0,1" is not a valid module name; assuming top-level project modules) 7 | import DualCoreLLM
 8 | import module2 9 | import module3 10 | import module4 11 | import module5 12 | 13 | # Define function to execute all modules 14 | def execute_modules(): 15 | DualCoreLLM.execute() 16 | module2.execute() 17 | module3.execute() 18 | module4.execute() 19 | module5.execute() 20 | 21 | # Define main function to execute modules and start GUI 22 | def main(): 23 | # Execute modules before entering the GUI event loop, since sys.exit() never returns 24 | execute_modules() 25 | 26 | # Start GUI 27 | app = QApplication(sys.argv) 28 | window = QMainWindow() 29 | window.show() 30 | sys.exit(app.exec_()) 31 | 32 | if __name__ == '__main__': 33 | main() -------------------------------------------------------------------------------- /Wordpress plugin/submit_input.php: -------------------------------------------------------------------------------- 1 | <?php 2 | // Database credentials (assumed defaults; adjust to the actual installation) 3 | $servername = "localhost"; 4 | $username = "username"; 5 | $password = "password"; 6 | $dbname = "neuralgpt"; 7 | 8 | $input_text = $_POST['input_text']; 9 | $input_type = $_POST['input_type']; 10 | $feedback_text = $_POST['feedback_text']; 11 | $feedback_type = $_POST['feedback_type']; 12 | $timestamp = date('Y-m-d H:i:s'); 13 | 14 | $conn = new mysqli($servername, $username, $password, $dbname); if ($conn->connect_error) { 15 | die("Connection failed: " . $conn->connect_error); 16 | } 17 | 18 | $sql = "INSERT INTO user_input (input_text, input_type, feedback_text, feedback_type, timestamp) 19 | VALUES ('$input_text', '$input_type', '$feedback_text', '$feedback_type', '$timestamp')"; 20 | 21 | if ($conn->query($sql) === TRUE) { 22 | echo "Input submitted successfully"; 23 | } else { 24 | echo "Error: " . $sql . "<br>
" . $conn->error; 25 | } 26 | 27 | $conn->close(); 28 | ?> -------------------------------------------------------------------------------- /Wordpress plugin/train_restapi.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | const express = require('express'); 3 | const bodyParser = require('body-parser'); 4 | const { startTraining } = require('./train'); 5 | 6 | // Create a new Express app 7 | const app = express(); 8 | 9 | // Parse request body as JSON 10 | app.use(bodyParser.json()); 11 | 12 | // Define the endpoint for starting the training process 13 | app.post('/train', async (req, res) => { 14 | // Get the hyperparameters from the request body 15 | const { epochs, batch_size, learning_rate } = req.body; 16 | 17 | // Start the training process with the given hyperparameters 18 | const result = await startTraining(epochs, batch_size, learning_rate); 19 | 20 | // Return the result as JSON 21 | res.json(result); 22 | }); 23 | 24 | // Start the server 25 | app.listen(3000, () => { 26 | console.log('Server started on port 3000'); 27 | }); -------------------------------------------------------------------------------- /code/ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | class ScriptExecutor: 3 | def __init__(self, script_path): 4 | self.script_path = script_path 5 | 6 | def execute_script(self, input_data): 7 | try: 8 | # Run the script in a sandboxed environment 9 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 10 | return output.decode('utf-8') 11 | except subprocess.TimeoutExpired: 12 | return "Script execution timed out" 13 | except subprocess.CalledProcessError as e: 14 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 15 | except Exception as e: 16 | return f"Script execution failed with exception: {str(e)}" 17 | 18 | def get_script_output(self, input_data): 19 | return self.execute_script(input_data) 20 | -------------------------------------------------------------------------------- /callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17 | # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /src/nlp/nlp.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import 
French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a sandboxed environment 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) -------------------------------------------------------------------------------- /agent_scripts/script_executor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | class ScriptExecutor: 3 | def __init__(self, script_path): 4 | self.script_path = script_path 5 | 6 | def execute_script(self, input_data): 7 | try: 8 | # Run the script in a sandboxed environment 9 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 10 | return output.decode('utf-8') 11 | except subprocess.TimeoutExpired: 12 | return "Script execution timed out" 13 | except subprocess.CalledProcessError as e: 14 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 15 | except Exception as e: 16 | return f"Script execution failed with exception: {str(e)}" 17 | 18 | def get_script_output(self, input_data): 19 | return self.execute_script(input_data) 20 | -------------------------------------------------------------------------------- /src/callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17 | # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', 
on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /utils/callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17 | # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /src/scriptexecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a sandboxed environment 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) -------------------------------------------------------------------------------- /utils/ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a sandboxed environment 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) 
-------------------------------------------------------------------------------- /PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2  # caution: this file is itself named PyPDF2.py and will shadow the installed PyPDF2 package when run from this directory 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 | # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /nlp.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /Wordpress plugin/cronjob.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import datetime 4 | 5 | # Set up the paths to the shared databank and the backup directory 6 | shared_databank_path = "e:/repos/database" 7 | backup_dir_path = "e:/repos/database/backup" 8 | 9 | # Create the backup directory if it doesn't exist 10 | if not os.path.exists(backup_dir_path): 11 | os.mkdir(backup_dir_path) 12 | 13 | # Set up the backup base name with the current date and time (shutil.make_archive appends the .zip extension itself) 14 | backup_filename = "shared_databank_backup_{}".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) 15 | 16 | # Create the full path to the backup file 17 | backup_file_path = os.path.join(backup_dir_path, backup_filename) 18 | 19 | # Compress the shared databank directory into a zip file 20 | archive_path = shutil.make_archive(backup_file_path, "zip", shared_databank_path) 21 | 22 | # Print a message to confirm that the backup was successful 23 | print("Backup of shared databank created at {}".format(archive_path)) -------------------------------------------------------------------------------- /src/PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2  # caution: this file is itself named PyPDF2.py and will shadow the installed PyPDF2 package when run from this directory 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the 
save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 | # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /nlp/NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /utils/PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 
| # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /agent_scripts/main.py: -------------------------------------------------------------------------------- 1 | from database import DatabaseModule 2 | from script_executor import ScriptExecutor 3 | 4 | # Create a new instance of the DatabaseModule class 5 | database = DatabaseModule("mydatabase.db") 6 | 7 | # Create a new instance of the ScriptExecutor class 8 | script_executor = ScriptExecutor("myscript.py") 9 | 10 | # Use the database object to store some data 11 | data = "Hello, world!" 12 | table_name = "mytable" 13 | database.store_data(data, table_name) 14 | 15 | # Retrieve the stored data 16 | query = "Hello, world!" 17 | retrieved_data = database.retrieve_data(query, table_name) 18 | 19 | # Use the script_executor object to execute a script 20 | input_data = b"Hello, world!" 21 | output = script_executor.get_script_output(input_data) 22 | 23 | # Print the retrieved data and script output 24 | print(retrieved_data) 25 | print(output) 26 | 
-------------------------------------------------------------------------------- /utils/NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /Wordpress plugin/validate_llm_file.php: -------------------------------------------------------------------------------- 1 | <?php 2 | // Validate an uploaded LLM model file before it is accepted 3 | function validate_llm_file($file) { 4 | // Reject failed uploads 5 | if ($file['error'] !== UPLOAD_ERR_OK) { 6 | return 'Error: File upload failed.'; 7 | } 8 | 9 | // Validate LLM file size (limit below is an assumed default) 10 | $max_size = 100 * 1024 * 1024; // 100 MB 11 | 12 | if ($file['size'] > $max_size) { 13 | return 'Error: File size exceeds maximum allowed. Please upload a smaller file.'; 14 | } 15 | 16 | // Validate LLM file format 17 | $file_content = file_get_contents($file['tmp_name']); 18 | $file_header = substr($file_content, 0, 4); 19 | 20 | if ($file_header !== 'LLM ') { 21 | return 'Error: Invalid file format. Please upload a valid LLM file.'; 22 | } 23 | 24 | return true; // File is valid 25 | } -------------------------------------------------------------------------------- /Wordpress plugin/NeuralGPT Chatbot.html: -------------------------------------------------------------------------------- 1 | [HTML markup not recoverable; page title: "NeuralGPT Chatbot"]
-------------------------------------------------------------------------------- /DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in [t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Check for grammatical correctness 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /Wordpress plugin/assets/NeuralGPT Chatbot.html: -------------------------------------------------------------------------------- 1 | [HTML markup not recoverable; page title: "NeuralGPT Chatbot"]
-------------------------------------------------------------------------------- /Wordpress plugin/neuralgpt-browse.js: -------------------------------------------------------------------------------- 1 | jQuery(document).ready(function($) { 2 | $('#neuralgpt-browse-form').on('submit', function(e) { 3 | e.preventDefault(); 4 | var searchQuery = $('#neuralgpt-browse-search').val(); 5 | $.ajax({ 6 | url: '/wp-json/neuralgpt-browse/v1/search', 7 | type: 'POST', 8 | data: { 9 | 'search_query': searchQuery 10 | }, 11 | success: function(response) { 12 | $('#neuralgpt-browse-results').empty(); 13 | $.each(response, function(index, value) { 14 | var listItem = $('<li>'); 15 | var link = $('<a>').attr('href', value.link).text(value.title); 16 | var excerpt = $('<p>').text(value.excerpt); 17 | listItem.append(link).append(excerpt); 18 | $('#neuralgpt-browse-results').append(listItem); 19 | }); 20 | } 21 | }); 22 | }); 23 | }); -------------------------------------------------------------------------------- /src/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in [t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Check for grammatical correctness 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /utils/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in [t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Check for grammatical correctness 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /Wordpress plugin/neuralgpt_chatbot.spec: -------------------------------------------------------------------------------- 1 | # -*- mode: python ; coding: utf-8 -*- 2 | 3 | 4 | block_cipher = None 5 | 6 | 7 | a = Analysis( 8 | ['neuralgpt_chatbot.py'], 9 | pathex=[], 10 | binaries=[], 11 | datas=[], 12 | hiddenimports=[], 13 | hookspath=[], 14 | hooksconfig={}, 15 | runtime_hooks=[], 16 | excludes=[], 17 | win_no_prefer_redirects=False, 18 | win_private_assemblies=False, 19 | cipher=block_cipher, 20 | noarchive=False, 21 | ) 22 | pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) 23 | 24 | exe = EXE( 25 | pyz, 26 | a.scripts, 27 | a.binaries, 28 | a.zipfiles, 29 | a.datas, 30 | [], 31 | name='neuralgpt_chatbot', 32 | debug=False, 33 | bootloader_ignore_signals=False, 34 | strip=False, 35 | upx=True, 36 | upx_exclude=[], 37 | runtime_tmpdir=None, 38 | console=True, 39 | disable_windowed_traceback=False, 40 | argv_emulation=False, 41 | target_arch=None, 42 | codesign_identity=None, 43 | entitlements_file=None, 44 | ) 45 | 
-------------------------------------------------------------------------------- /Wordpress plugin/test_chatbox.py: -------------------------------------------------------------------------------- 1 | import tkinter 2 | import neuralgpt 3 | import local_website 4 | 5 | # Create the main window that hosts the controls below 6 | window = tkinter.Tk() 7 | 8 | # code to add a button for Neural-GPT system 9 | button_neuralgpt = tkinter.Button(window, text="Activate Neural-GPT", 
command=neuralgpt.activate) 10 | button_neuralgpt.pack() 11 | 12 | # code to add a dropdown menu for local website 13 | options = ["Website A", "Website B", "Website C"] 14 | variable = tkinter.StringVar(window) 15 | variable.set(options[0]) 16 | dropdown_localwebsite = tkinter.OptionMenu(window, variable, *options) 17 | dropdown_localwebsite.pack() 18 | 19 | import paho.mqtt.client as mqtt 20 | 21 | # code to connect to MQTT broker 22 | client = mqtt.Client() 23 | client.connect("localhost", 1883, 60) 24 | 25 | # code to send message to all instances of Neural-GPT 26 | def send_message(message): 27 | client.publish("neuralgpt/chat", message) 28 | 29 | # code to receive message from all instances of Neural-GPT 30 | def on_message(client, userdata, message): 31 | print(message.payload.decode()) 32 | 33 | client.subscribe("neuralgpt/chat") 34 | client.on_message = on_message -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from model import GPT 2 | import torch 3 | 4 | # Set the device to use 5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 6 | 7 | # Load the GPT model 8 | model_path = 'E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin' 9 | model = GPT(model_path) 10 | model.to(device) 11 | 12 | # Set the model to evaluation mode 13 | model.eval() 14 | 15 | # Get user input 16 | prompt = input('Enter a prompt: ') 17 | 18 | # Generate text based on the user input 19 | generated_text = '' 20 | while not generated_text: 21 | # Tokenize the prompt and generate the input sequence 22 | input_ids = model.tokenizer.encode(prompt, return_tensors='pt').to(device) 23 | 24 | # Generate the output sequence 25 | max_length = len(input_ids.flatten()) + 50 26 | output = model.model.generate(input_ids=input_ids, max_length=max_length, do_sample=True) 27 | 28 | # Decode the output sequence and remove the prompt 29 | generated_text = model.tokenizer.decode(output[0], skip_special_tokens=True) 30 | generated_text = generated_text[len(prompt):].strip() 31 | 32 | # Print the generated text 33 | print(generated_text) -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python 3", 3 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 4 | "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", 5 | "customizations": { 6 | "codespaces": { 7 | "openFiles": [ 8 | "README.md", 9 | "streamlit.py" 10 | ] 11 | }, 12 | "vscode": { 13 | "settings": {}, 14 | "extensions": [ 15 | "ms-python.python", 16 | "ms-python.vscode-pylance" 17 | ] 18 | } 19 | }, 20 | "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt", 21 | "forwardPorts": [8501] // assumed Streamlit port; the remainder of the original file is not recoverable 22 | } -------------------------------------------------------------------------------- /Wordpress plugin/get_feedback.php: -------------------------------------------------------------------------------- 1 | <?php 2 | // Database credentials (assumed defaults; adjust to the actual installation) 3 | $servername = "localhost"; 4 | $username = "username"; 5 | $password = "password"; 6 | $dbname = "neuralgpt"; 7 | 8 | $input_text = $_GET['input_text']; 9 | $input_type = $_GET['input_type']; 10 | 11 | $conn = new mysqli($servername, $username, $password, $dbname); if ($conn->connect_error) { 12 | die("Connection failed: " . 
$conn->connect_error); 13 | } 14 | 15 | $sql = "SELECT feedback_text, feedback_type FROM user_input WHERE input_text LIKE '%$input_text%' AND input_type = '$input_type' ORDER BY timestamp DESC LIMIT 1"; 16 | 17 | $result = $conn->query($sql); 18 | if ($result->num_rows > 0) { 19 | $row = $result->fetch_assoc(); 20 | $feedback_text = $row['feedback_text']; 21 | $feedback_type = $row['feedback_type']; 22 | if ($feedback_type == 'accept') { 23 | $message = 'Thank you for your idea!'; 24 | } else if ($feedback_type == 'reject') { 25 | $message = 'Sorry, your idea was not accepted.'; 26 | } else { 27 | $message = 'We will consider your idea.'; 28 | } 29 | } else { 30 | $message = 'We did not understand your input. Please try again.'; 31 | } 32 | 33 | $conn->close(); 34 | 35 | echo $message; 36 | ?> -------------------------------------------------------------------------------- /Wordpress plugin/automate_transfer.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | 5 | # Set up the URLs for the WordPress website and agent-gpt web GUI 6 | wordpress_url = 'http://localhost/wordpress' 7 | agent_gpt_url = 'http://localhost:3000' 8 | 9 | # Set up the payload to send to the agent-gpt web GUI 10 | payload = { 11 | 'text': '', 12 | 'model': 'gpt2', 13 | 'length': 50 14 | } 15 | 16 | # Define a function to send text to the agent-gpt web GUI and receive a response 17 | def send_text_to_agent_gpt(text): 18 | payload['text'] = text 19 | response = requests.post(agent_gpt_url, data=json.dumps(payload)) 20 | return response.json()['text'] 21 | 22 | # Define a function to get the latest post from the WordPress website 23 | def get_latest_post(): 24 | response = requests.get(wordpress_url + '/wp-json/wp/v2/posts?per_page=1') 25 | post = response.json()[0] 26 | return post['title']['rendered'] + '\n' + post['content']['rendered'] 27 | 28 | # Loop indefinitely and send the latest post to the agent-gpt web GUI every minute 29 | while True: 30 | text = get_latest_post() 31 | response_text = send_text_to_agent_gpt(text) 32 | print(response_text) 33 | time.sleep(60) -------------------------------------------------------------------------------- /code/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import pandas as pd 4 | from models import GPTModel 5 | from utils import load_data, preprocess_data 6 | 7 | # Define global variables 8 | BATCH_SIZE = 32 9 | EPOCHS = 10 10 | LEARNING_RATE = 0.001 11 | 12 | # Load and preprocess the data 13 | data = load_data('data/dataset1/data_file1.csv') 14 | preprocessed_data = preprocess_data(data) 15 | 16 | # Define the model architecture 17 | model = GPTModel(preprocessed_data.vocab_size, preprocessed_data.max_len) 18 | 19 | # Compile the model 20 | optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE) 21 | model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) 22 | 23 | # Train the model 24 | history = model.fit(preprocessed_data.x_train, preprocessed_data.y_train, 25 | batch_size=BATCH_SIZE, epochs=EPOCHS, 26 | validation_data=(preprocessed_data.x_val, preprocessed_data.y_val)) 27 | 28 | # Evaluate the model 29 | test_loss, test_acc = model.evaluate(preprocessed_data.x_test, preprocessed_data.y_test) 30 | print(f'Test loss: {test_loss}, Test accuracy: {test_acc}') 31 | 32 | # Save the model 33 | model.save('models/gpt_model.h5') 
-------------------------------------------------------------------------------- /train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 24 | pass  # placeholder so the function body is syntactically valid 25 | 26 | # Define function to train NeuralGPT model on user's dataset 27 | def train_model(dataset): 28 | # Create NeuralGPT object 29 | model = NeuralGPT() 30 | 31 | # Train model on dataset 32 | model.train(dataset) 33 | 34 | # Save trained model in *.bin format 35 | save_model(model, 'model.bin') 36 | 37 | # Define function to save model in *.bin format 38 | def save_model(model, filename): 39 | # Save model to local file or online source 40 | # ... 41 | pass  # placeholder so the function body is syntactically valid 42 | 43 | # Call create_gui() function to create GUI 44 | create_gui() 45 | 46 | # Call train_model() function to train model on user's dataset (path is a placeholder) 47 | dataset = "dataset.txt" 48 | train_model(dataset) -------------------------------------------------------------------------------- /src/train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 24 | pass  # placeholder so the function body is syntactically valid 25 | 26 | # Define function to train NeuralGPT model on user's dataset 27 | def train_model(dataset): 28 | # Create NeuralGPT object 29 | model = NeuralGPT() 30 | 31 | # Train model on dataset 32 | model.train(dataset) 33 | 34 | # Save trained model in *.bin format 35 | save_model(model, 'model.bin') 36 | 37 | # Define function to save model in *.bin format 38 | def save_model(model, filename): 39 | # Save model to local file or online source 40 | # ... 41 | pass  # placeholder so the function body is syntactically valid 42 | 43 | # Call create_gui() function to create GUI 44 | create_gui() 45 | 46 | # Call train_model() function to train model on user's dataset (path is a placeholder) 47 | dataset = "dataset.txt" 48 | train_model(dataset) -------------------------------------------------------------------------------- /utils/train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 
24 | 25 | # Define function to train NeuralGPT model on user's dataset 26 | def train_model(dataset): 27 | # Create NeuralGPT object 28 | model = NeuralGPT() 29 | 30 | # Train model on dataset 31 | model.train(dataset) 32 | 33 | # Save trained model in *.bin format 34 | save_model(model, 'model.bin') 35 | 36 | # Define function to save model in *.bin format 37 | def save_model(model, filename): 38 | # Save model to local file or online source 39 | # ... 40 | 41 | # Call create_gui() function to create GUI 42 | create_gui() 43 | 44 | # Call train_model() function to train model on user's dataset 45 | train_model(dataset) -------------------------------------------------------------------------------- /FineTuneGPT.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model.bin') -------------------------------------------------------------------------------- /src/FineTuneGPT,py.txt: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model.bin') -------------------------------------------------------------------------------- /GUI/GUILLMmanager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import threading 4 | 5 | class LLMManager: 6 | def __init__(self): 7 | self.llm_list = [] 8 | self.current_llm = None 9 | self.llm_thread = None 10 | 11 | def add_llm(self, llm_path): 12 | if os.path.exists(llm_path): 13 | 
self.llm_list.append(llm_path) 14 | 15 | def remove_llm(self, llm_path): 16 | if llm_path in self.llm_list: 17 | self.llm_list.remove(llm_path) 18 | 19 | def list_llms(self): 20 | return self.llm_list 21 | 22 | def set_current_llm(self, llm_path): 23 | if llm_path in self.llm_list: 24 | self.current_llm = llm_path 25 | 26 | def start_llm(self): 27 | if self.current_llm is not None: 28 | self.llm_thread = threading.Thread(target=self._run_llm) 29 | self.llm_thread.start() 30 | 31 | def stop_llm(self): 32 | # threading.Thread has no stop(); terminate the spawned LLM process instead (llm_process is created lazily in _run_llm, hence getattr) 33 | if getattr(self, 'llm_process', None) is not None: self.llm_process.terminate() 34 | 35 | def _run_llm(self): 36 | self.llm_process = subprocess.Popen([self.current_llm]); self.llm_process.wait() 37 | 38 | def get_llm_status(self): 39 | if self.llm_thread is not None: 40 | return self.llm_thread.is_alive() 41 | else: 42 | return False -------------------------------------------------------------------------------- /utils/FineTuneGPT,py.txt: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model.bin') -------------------------------------------------------------------------------- /Wordpress plugin/assets/neuralgpt-chatbot.js: -------------------------------------------------------------------------------- 1 | jQuery(document).ready(function ($) { 2 | // Establish Socket.io connection 3 | const socket = io('http://localhost:3001'); 4 | 5 | // Function to send a message to the server 6 | function sendMessage() { 7 | const message = $('#neuralgpt-chat-input').val().trim(); 8 | if (message !== '') { 9 | // Emit the message event to the server 10 | socket.emit('chat message', message); 11 | 12 | // Clear the input field 13 | $('#neuralgpt-chat-input').val(''); 14 | } 15 | } 16 | 17 | // Function to handle receiving a response from the server 18 | function handleResponse(response) { 19 | // Append the response to the chat log 20 | $('#neuralgpt-chat-log').append('<div class="message bot">' + response + '</div>'); // message markup reconstructed; the original tags were stripped during extraction 21 | } 22 | 23 | // Send message when the send button is clicked 24 | $('#neuralgpt-chat-send').on('click', sendMessage); 25 | 26 | // Send message when Enter key is pressed in the input field 27 | $('#neuralgpt-chat-input').on('keydown', function (e) { 28 | if (e.key === 'Enter') { 29 | sendMessage(); 30 | } 31 | }); 32 | 33 | // Listen for the 'chat message' event from the server 34 | socket.on('chat message', handleResponse); 35 | }); 36 | -------------------------------------------------------------------------------- /Chat-center/index.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html> 3 | <head> 4 | <title>Chat Hub Center</title> 5 | <!-- lines 5-10: inline <style> block stripped during extraction --> 11 | </head> 12 | <body> 13 | <h1>Chat Hub Center</h1> 14 | 15 | <h2>Incoming Messages</h2> <!-- message-list markup stripped --> 16 | 17 | <h2>Server Responses</h2> <!-- response-list markup stripped --> 18 | 19 | <!-- lines 19-38: inline client <script> stripped during extraction --> 39 | </body> 40 | </html> 41 | -------------------------------------------------------------------------------- /FileProcessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | # file_path is a path string, not a file object, so open it before reading 14 | with open(file_path, 'rb') as src, open(file_url, 'wb') as f: f.write(src.read()) 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /TEST.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from DualCoreLLM import DualCoreLLM # if needed 3 | import re 4 | 5 | # Load pretrained model 6 | model = NeuralGPT.load_model('model.bin') # provide path to model file 7 | 8 | # Define list of prompts 9 | prompts = ['identify yourself', 'How can I improve my life?'] 10 | 11 | # Define function for preprocessing user input 12 | def preprocess_input(text): 13 | text = text.lower() 14 | text = re.sub(r'[^\w\s]', '', text) # remove special characters 15 | return text 16 | 17 | # Define function for generating responses 18 | def generate_response(prompt): 19 | response = model.generate(prompt) 20 | # Evaluate coherence of response 21 | # ... 22 | return response 23 | 24 | # Define function for testing coherence of responses 25 | def test_coherence(prompt): 26 | input_text = input(prompt + ': ') 27 | preprocessed_text = preprocess_input(input_text) 28 | response = generate_response(preprocessed_text) 29 | # Evaluate coherence of response 30 | coherence_score = 0.0 # placeholder until a real coherence metric is implemented 31 | return coherence_score 32 | 33 | # Run test for each prompt 34 | total_score = 0 35 | for prompt in prompts: 36 | score = test_coherence(prompt) 37 | total_score += score 38 | 39 | # Output final score 40 | print('Coherence score:', total_score) -------------------------------------------------------------------------------- /src/fiileprocessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | # file_path is a path string, not a file object, so open it before reading 14 | with open(file_path, 'rb') as src, open(file_url, 'wb') as f: f.write(src.read()) 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 
20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /utils/FileProcessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | with open(file_url, 'wb') as f: 14 | f.write(file_path.read()) 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. 
Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /Wordpress plugin/module.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /utils/class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. 
Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /Chat-center/client.js: -------------------------------------------------------------------------------- 1 | const WebSocket = require('ws'); 2 | const readline = require('readline'); 3 | 4 | // Create a WebSocket client that connects to the server 5 | const ws = new WebSocket('ws://localhost:5000'); 6 | 7 | // Listen for when the client connects to the server 8 | ws.on('open', () => { 9 | console.log('Connected to server'); 10 | 11 | // Start reading input from the user and sending messages to the server 12 | const rl = readline.createInterface({ 13 | input: process.stdin, 14 | output: process.stdout 15 | }); 16 | 17 | rl.on('line', (inputText) => { 18 | if (inputText.startsWith('/qna ')) { 19 | // Parse the input text as a QnA message 20 | const [question, passage] = inputText.substring(5).split('|').map((s) => s.trim()); 21 | if (!question || !passage) { 22 | console.log('Invalid /qna command. Usage: /qna | '); 23 | return; 24 | } 25 | 26 | // Send the QnA message to the server 27 | ws.send(JSON.stringify({ type: 'qna', question, passage })); 28 | } else { 29 | // Send a regular chat message to the server 30 | ws.send(JSON.stringify(('text:', inputText) )); 31 | } 32 | }); 33 | }); 34 | 35 | // Listen for messages from the server 36 | ws.on('message', (message) => { 37 | console.log(message.toString()); 38 | }); 39 | -------------------------------------------------------------------------------- /Wordpress plugin/mlm.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import tensorflow as tf 4 | import keras 5 | from keras.models import load_model 6 | Load the pre-trained NeuralGPT model: 7 | Copy code 8 | 9 | model = load_model('E:/Repos/oobabooga_windows/text-generation-webui/models/facebook_opt-1.3b/pytorch_model.bin') 10 | Retrieve user feedback from the database schema and preprocess the data: 11 | Copy code 12 | 13 | feedback_data = pd.read_sql_query('SELECT * FROM feedback_table', con=db_connection) 14 | feedback_text = feedback_data['feedback_text'].tolist() 15 | preprocessed_feedback = preprocess(feedback_text) # preprocess function to clean and tokenize the feedback text 16 | Generate predictions using the preprocessed feedback data: 17 | Copy code 18 | 19 | predictions = model.predict(preprocessed_feedback) 20 | Display the predictions and suggestions for improvement in the dashboard interface: 21 | Copy code 22 | 23 | for i in range(len(predictions)): 24 | if predictions[i] > 0.5: 25 | suggestion = "Your feedback suggests that the model is performing well. Keep up the good work!" 26 | else: 27 | suggestion = "Your feedback suggests that the model needs improvement. Consider fine-tuning the model or collecting more training data." 
28 | display_suggestion(feedback_data['user_id'][i], suggestion) # display_suggestion function to display the suggestion in the dashboard interface -------------------------------------------------------------------------------- /save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /src/save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 
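# Editor's sketch: get_selected_params() below is a TODO in all three copies of
# save_preset.py. Until it is wired to the NeuralGPT agent, a stand-in returning a plain
# dict keeps the JSON save/load round-trip testable (the values are hypothetical):
#
#     def get_selected_params():
#         return {"temperature": 0.7, "top_p": 0.9, "max_length": 200}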
20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /utils/save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /Wordpress plugin/chat-window.js: -------------------------------------------------------------------------------- 1 | var chatWindow = document.createElement("div"); 2 | chatWindow.id = "chat-window"; 3 | document.body.appendChild(chatWindow); 4 | var chatInput = document.createElement("input"); 5 | chatInput.type = "text"; 6 | chatInput.id = "chat-input"; 7 | chatWindow.appendChild(chatInput); 8 | var chatButton = document.createElement("button"); 9 | chatButton.innerHTML = "Send"; 10 | chatButton.onclick = function() { 11 | var message = document.getElementById("chat-input").value; 12 | document.getElementById("chat-input").value = ""; 13 | sendMessage(message); 14 | } 15 | chatWindow.appendChild(chatButton); 16 | var chatLog = document.createElement("div"); 17 | chatLog.id = "chat-log"; 18 | chatWindow.appendChild(chatLog); 19 | function sendMessage(message) { 20 | var xhr = new XMLHttpRequest(); 21 | xhr.onreadystatechange = function() { 22 | if (xhr.readyState === 4 && xhr.status === 200) { 23 | var response = JSON.parse(xhr.responseText); 24 | addMessage(response.message, "bot"); 25 | } 26 | } 27 | xhr.open("POST", "/wp-admin/admin-ajax.php?action=neuralgpt_chat", true); 28 | 
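// Editor's note: POSTing to admin-ajax.php with action=neuralgpt_chat means WordPress will
// dispatch this request to whatever handler the plugin registers on the
// wp_ajax_neuralgpt_chat / wp_ajax_nopriv_neuralgpt_chat hooks; that handler is expected to
// echo JSON with a "message" field, which the onreadystatechange callback above parses.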
xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded"); 29 | xhr.send("message=" + message); 30 | } 31 | function addMessage(message, sender) { 32 | var messageElement = document.createElement("div"); 33 | messageElement.innerHTML = message; 34 | messageElement.className = "message " + sender; 35 | chatLog.appendChild(messageElement); 36 | } -------------------------------------------------------------------------------- /Wordpress plugin/chatbox_v2.js: -------------------------------------------------------------------------------- 1 | function createChatbox(chatboxId, socketUrl) { 2 | const socket = new WebSocket(socketUrl); 3 | 4 | function sendMessage(message) { 5 | socket.send(message); 6 | } 7 | 8 | function receiveMessage() { 9 | socket.onmessage = (event) => { 10 | const message = event.data; 11 | const chatbox = document.getElementById(chatboxId); 12 | chatbox.innerHTML += message; 13 | }; 14 | } 15 | 16 | const chatbox = document.getElementById(chatboxId); 17 | chatbox.addEventListener("submit", (event) => { 18 | event.preventDefault(); 19 | const messageInput = chatbox.querySelector("input[type=text]"); 20 | const message = messageInput.value; 21 | sendMessage(message); 22 | messageInput.value = ""; 23 | }); 24 | 25 | receiveMessage(); 26 | } 27 | 28 | createChatbox("chatbox", "ws://localhost:3000"); 29 | 30 | // Example usage with Neural-GPT system 31 | const neuralGptSocketUrl = "ws://localhost:4000"; 32 | const neuralGptSocket = new WebSocket(neuralGptSocketUrl); 33 | 34 | neuralGptSocket.onmessage = (event) => { 35 | const message = event.data; 36 | const chatbox = document.getElementById("chatbox"); 37 | chatbox.innerHTML += message; 38 | }; 39 | 40 | function sendNeuralGptMessage(message) { 41 | neuralGptSocket.send(message); 42 | } 43 | 44 | sendNeuralGptMessage("Hello, Neural-GPT!"); -------------------------------------------------------------------------------- /Wordpress plugin/js/chatbox_v2.js: -------------------------------------------------------------------------------- 1 | function createChatbox(chatboxId, socketUrl) { 2 | const socket = new WebSocket(socketUrl); 3 | 4 | function sendMessage(message) { 5 | socket.send(message); 6 | } 7 | 8 | function receiveMessage() { 9 | socket.onmessage = (event) => { 10 | const message = event.data; 11 | const chatbox = document.getElementById(chatboxId); 12 | chatbox.innerHTML += message; 13 | }; 14 | } 15 | 16 | const chatbox = document.getElementById(chatboxId); 17 | chatbox.addEventListener("submit", (event) => { 18 | event.preventDefault(); 19 | const messageInput = chatbox.querySelector("input[type=text]"); 20 | const message = messageInput.value; 21 | sendMessage(message); 22 | messageInput.value = ""; 23 | }); 24 | 25 | receiveMessage(); 26 | } 27 | 28 | createChatbox("chatbox", "ws://localhost:3000"); 29 | 30 | // Example usage with Neural-GPT system 31 | const neuralGptSocketUrl = "ws://localhost:4000"; 32 | const neuralGptSocket = new WebSocket(neuralGptSocketUrl); 33 | 34 | neuralGptSocket.onmessage = (event) => { 35 | const message = event.data; 36 | const chatbox = document.getElementById("chatbox"); 37 | chatbox.innerHTML += message; 38 | }; 39 | 40 | function sendNeuralGptMessage(message) { 41 | neuralGptSocket.send(message); 42 | } 43 | 44 | sendNeuralGptMessage("Hello, Neural-GPT!"); -------------------------------------------------------------------------------- /Wordpress plugin/admin.php: -------------------------------------------------------------------------------- 1 | 11 | 12 | 19 |
    20 | NeuralGPT Chatbot Settings <!-- admin.php reached this dump with its PHP/HTML tags stripped; apart from this page heading (line 20), lines 1-19 and 21-27 carried only markup and are lost -->
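Before the two file-viewer sources below, a short usage sketch for the FileViewer class they define (the import path and sample file are hypothetical):

import tkinter as tk
from GUIFileViewer import FileViewer  # assumes GUI/GUIFileViewer.py is on the import path

root = tk.Tk()
viewer = FileViewer(root)         # builds the scrollable canvas
viewer.load_image('example.png')  # hypothetical image file
root.mainloop()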
-------------------------------------------------------------------------------- /GUI/GUIFileViewer.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from PIL import Image, ImageTk 3 | import webbrowser 4 | import os 5 | 6 | class FileViewer: 7 | def __init__(self, root): 8 | self.root = root 9 | self.frame = tk.Frame(root) 10 | self.frame.pack() 11 | 12 | # Create a scrollbar 13 | self.scrollbar = tk.Scrollbar(self.frame) 14 | self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y) 15 | 16 | # Create a canvas 17 | self.canvas = tk.Canvas(self.frame, yscrollcommand=self.scrollbar.set) 18 | self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) 19 | 20 | # Configure the scrollbar 21 | self.scrollbar.config(command=self.canvas.yview) 22 | 23 | # Bind the canvas to the mouse wheel 24 | self.canvas.bind('<MouseWheel>', self.on_mousewheel) 25 | 26 | # Create a frame inside the canvas 27 | self.inner_frame = tk.Frame(self.canvas) 28 | self.canvas.create_window((0, 0), window=self.inner_frame, anchor='nw') 29 | 30 | def load_image(self, file_path): 31 | # Load the image 32 | image = Image.open(file_path) 33 | photo = ImageTk.PhotoImage(image) 34 | 35 | # Create a label to display the image 36 | label = tk.Label(self.inner_frame, image=photo) 37 | label.image = photo 38 | label.pack() 39 | 40 | def load_document(self, file_path): 41 | # Open the document in the default application 42 | webbrowser.open_new_tab(file_path) 43 | 44 | def load_media(self, file_path): 45 | # Open the media file in a media player 46 | os.startfile(file_path) 47 | 48 | def on_mousewheel(self, event): 49 | # Scroll the canvas when the mouse wheel is used 50 | self.canvas.yview_scroll(int(-1*(event.delta/120)), 'units') -------------------------------------------------------------------------------- /GUI/GUIFileViewer.txt: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from PIL import Image, ImageTk 3 | import webbrowser 4 | import os 5 | 6 | class FileViewer: 7 | def __init__(self, root): 8 | self.root = root 9 | self.frame = tk.Frame(root) 10 | self.frame.pack() 11 | 12 | # Create a scrollbar 13 | self.scrollbar = tk.Scrollbar(self.frame) 14 | self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y) 15 | 16 | # Create a canvas 17 | self.canvas = tk.Canvas(self.frame, yscrollcommand=self.scrollbar.set) 18 | self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) 19 | 20 | # Configure the scrollbar 21 | self.scrollbar.config(command=self.canvas.yview) 22 | 23 | # Bind the canvas to the mouse wheel 24 | self.canvas.bind('<MouseWheel>', self.on_mousewheel) 25 | 26 | # Create a frame inside the canvas 27 | self.inner_frame = tk.Frame(self.canvas) 28 | self.canvas.create_window((0, 0), window=self.inner_frame, anchor='nw') 29 | 30 | def load_image(self, file_path): 31 | # Load the image 32 | image = Image.open(file_path) 33 | photo = ImageTk.PhotoImage(image) 34 | 35 | # Create a label to display the image 36 | label = tk.Label(self.inner_frame, image=photo) 37 | label.image = photo 38 | label.pack() 39 | 40 | def load_document(self, file_path): 41 | # Open the document in the default application 42 | webbrowser.open_new_tab(file_path) 43 | 44 | def load_media(self, file_path): 45 | # Open the media file in a media player 46 | os.startfile(file_path) 47 | 48 | def on_mousewheel(self, event): 49 | # Scroll the canvas when the mouse wheel is used 50 | self.canvas.yview_scroll(int(-1*(event.delta/120)), 'units') -------------------------------------------------------------------------------- /neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | 
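# Editor's note: the generate() call below samples num_return_sequences continuations using
# top-p/top-k sampling (do_sample=True); afterwards the decoded prompt prefix is sliced off
# by string length, so each returned item contains only the newly generated text.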
output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 | repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /src/neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 | repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /streamlit/home.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import streamlit as st 3 | 4 | server_ports = [] 5 | client_ports = [] 6 | 7 | # Initialize the data kept in st.session_state 8 | if "server_ports" not in st.session_state: 9 | st.session_state['server_ports'] = "" 10 | if "client_ports" not in st.session_state: 11 | st.session_state['client_ports'] = "" 12 | if "user_ID" not in st.session_state: 13 | st.session_state.user_ID = "" 14 | if "gradio_Port" not in st.session_state: 15 | st.session_state.gradio_Port = "" 16 | if "server" not in st.session_state: 17 | st.session_state.server = False 18 | if "client" not in st.session_state: 19 | st.session_state.client = False 20 | 21 | st.set_page_config(layout="wide") 22 | 23 | async def main(): 24 | 25 | st.title("NeuralGPT") 26 | 27 | gradio_Ports = 
st.container(border=True) 28 | gradio_Ports.markdown(st.session_state.gradio_Port) 29 | 30 | with st.sidebar: 31 | # Display data that can be modified on the different pages 32 | serverPorts = st.container(border=True) 33 | serverPorts.markdown(st.session_state['server_ports']) 34 | st.text("Client ports") 35 | clientPorts = st.container(border=True) 36 | clientPorts.markdown(st.session_state['client_ports']) 37 | st.text("Character.ai ID") 38 | user_id = st.container(border=True) 39 | user_id.markdown(st.session_state.user_ID) 40 | status = st.status(label="runs", state="complete", expanded=False) 41 | 42 | if st.session_state.server == True: 43 | st.markdown("server running...") 44 | 45 | if st.session_state.client == True: 46 | st.markdown("client running") 47 | 48 | # Run the application 49 | asyncio.run(main()) 50 | -------------------------------------------------------------------------------- /utils/neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 | repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /Wordpress plugin/chat_gui.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | import threading 3 | from neuralgpt import NeuralGPT 4 | # Load the pretrained model 5 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin" 6 | neural_gpt = NeuralGPT(model_path) 7 | # Create the chat window 8 | root = tk.Tk() 9 | root.title("NeuralGPT Chat Window") 10 | # Create the chat history display 11 | chat_history = tk.Text(root, height=20, width=50, state=tk.DISABLED) 12 | chat_history.grid(row=0, column=0, padx=10, pady=10) 13 | # Create the input field and button 14 | input_field = tk.Entry(root, width=50) 15 | input_field.grid(row=1, column=0, padx=10, pady=10) 16 | send_button = tk.Button(root, text="Send", command=lambda: send_message()) 17 | send_button.grid(row=1, column=1, padx=10, pady=10) 18 | # 
Define the send message function 19 | def send_message(): 20 | # Get the user input 21 | user_input = input_field.get() 22 | input_field.delete(0, tk.END) 23 | # Add the user input to the chat history 24 | chat_history.configure(state=tk.NORMAL) 25 | chat_history.insert(tk.END, "You: " + user_input + "\n") 26 | chat_history.configure(state=tk.DISABLED) 27 | # Generate a response using the NeuralGPT model 28 | response = neural_gpt.generate_response(user_input) 29 | # Add the response to the chat history 30 | chat_history.configure(state=tk.NORMAL) 31 | chat_history.insert(tk.END, "NeuralGPT: " + response + "\n") 32 | chat_history.configure(state=tk.DISABLED) 33 | # Define the update chat function 34 | def update_chat(): 35 | while True: 36 | # Check for other active instances of Neural AI 37 | # Communicate with them through the chatbox if there are any 38 | # Leave the chatbox open for the user to speak with the running instance if there 39 | # are none 40 | pass 41 | # Start the update chat thread 42 | chat_thread = threading.Thread(target=update_chat) 43 | chat_thread.start() 44 | # Start the GUI main loop 45 | root.mainloop() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NeuralGPT 2 | The software is still in development, but if you're interested in trying it out, check out the 'Chat-center' folder - it is the only part of the repository that actually works. 3 | Here's the latest version of the server; you can run it yourself and connect all sorts of clients from the same folder to it: 4 | You'll need to paste a couple of API keys to make it 'work' - a Fireworks API key, plus a Google API key and Google CSE ID in the case of the server... 5 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/ServerV2.py 6 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/serverV3.py 7 | 8 | Warning - the 'stop websocket server/client' button (function) doesn't work, so you'll have to stop the entire app 9 | ![001](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/ee4f6faa-4e00-417b-8907-3843b362db09) 10 | 11 | Here are a couple of clients as examples: 12 | 13 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/FireworksAgentsGPT.py 14 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/TkDocsBot.py 15 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/Docsbotport.html 16 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/StarCoderOK.js 17 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/flowise%20ulitmatum.html 18 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/Chaindesk%20Agent.html 19 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/characterAI.py 20 | 21 | ![017](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/1e290007-0ee7-4aaf-bb5d-2c372ab98f0a) 22 | ![009](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/ba873ac5-2265-4dc2-96ce-eac91a02dfa5) 23 | ![gui](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/7b7c4d89-476f-4d74-8697-3dbcdce95609) 24 | ![schem](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/bd1f6d28-dd09-40c8-bc58-1c9439d44197) 25 | -------------------------------------------------------------------------------- /DocumentEditor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class DocumentEditor: 5 
| def __init__(self, file_path: str): 6 | self.file_path = file_path 7 | self.file_type = os.path.splitext(file_path)[1] 8 | self.file_content = self._read_file() 9 | 10 | def _read_file(self): 11 | with open(self.file_path, 'r') as f: 12 | return f.read() 13 | 14 | def _write_file(self): 15 | with open(self.file_path, 'w') as f: 16 | f.write(self.file_content) 17 | 18 | def insert_text(self, text: str, position: int): 19 | self.file_content = self.file_content[:position] + text + self.file_content[position:] 20 | self._write_file() 21 | 22 | def delete_text(self, start: int, end: int): 23 | self.file_content = self.file_content[:start] + self.file_content[end:] 24 | self._write_file() 25 | 26 | def format_text(self, start: int, end: int, format_type: str): 27 | # Implement text formatting (bold, italic, underline, etc.) 28 | pass 29 | 30 | def insert_image(self, image_path: str, position: int): 31 | # Implement image insertion 32 | pass 33 | 34 | def insert_hyperlink(self, link: str, position: int): 35 | # Implement hyperlink insertion 36 | pass 37 | 38 | def get_file_content(self): 39 | return self.file_content 40 | 41 | class DocumentEditorManager: 42 | def __init__(self): 43 | self.editors = {} 44 | 45 | def create_editor(self, file_path: str) -> str: 46 | editor_id = str(len(self.editors)) 47 | self.editors[editor_id] = DocumentEditor(file_path) 48 | return editor_id 49 | 50 | def delete_editor(self, editor_id: str): 51 | del self.editors[editor_id] 52 | 53 | def get_editor(self, editor_id: str) -> DocumentEditor: 54 | return self.editors[editor_id] 55 | 56 | def get_all_editors(self) -> List[DocumentEditor]: 57 | return list(self.editors.values()) --------------------------------------------------------------------------------
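To close the dump, a short usage sketch for the DocumentEditorManager defined above (the file name is hypothetical; note that DocumentEditor reads the file in its constructor, so it must already exist):

from DocumentEditor import DocumentEditorManager

manager = DocumentEditorManager()
editor_id = manager.create_editor('notes.txt')        # hypothetical existing text file
editor = manager.get_editor(editor_id)
editor.insert_text('Hello, NeuralGPT!', position=0)   # every edit is written straight back to disk
print(editor.get_file_content())
manager.delete_editor(editor_id)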