├── .devcontainer └── devcontainer.json ├── Chat-center ├── 000.py ├── 002.py ├── 4all.js ├── Agent1.py ├── AgentsGPT.html ├── Alpaca-server.js ├── AlpacaLoRA.js ├── AlpacaLoRAOK.js ├── Blenderbot-server.js ├── Chaindesk Agent.html ├── ChainlitCli.py ├── CharacterAIlogin.js ├── ChatGPT-server.js ├── ChatGPT-server.py ├── ChatPDF.py ├── Code-Santa.html ├── CodeParrot.html ├── Docsbotport.html ├── FireworksAgentsGPT.py ├── FlowiseChat.html ├── G4FServer.js ├── GradioPDF.py ├── GradioServer.py ├── GradioServer2.py ├── Guanaco.js ├── H2O.js ├── H2Oserver.js ├── LangchainCohereSQL.js ├── Llama2-70B.js ├── Llama2-70Bqlora.js ├── MasterServer.py ├── NeuralGPT.py ├── NewGUI.py ├── OpenOrca.js ├── PDF-Langchain.py ├── SERPSearch.js ├── SERVER-ChatGPT.js ├── ServerFireworks.py ├── ServerMain.py ├── ServerTkinkter.py ├── ServerV2.py ├── StarCoderOK.js ├── Starchat.js ├── Starcoder.js ├── TkDocsBot.py ├── agent_template.py ├── alpaca-client.html ├── alpaca-server.py ├── app5.py ├── big.js ├── cccp.py ├── characterAI.py ├── chatflow.js ├── client.js ├── cohere.js ├── flowise ulitmatum.html ├── flowiseAutoGPT.js ├── flowiseLocalUlitmatum.html ├── g4f-Bing.py ├── g4f.js ├── gpt4free.py ├── index.html ├── instructions.txt ├── langchainagent.py ├── mosaic.js ├── replitcoder.html ├── requirements.txt ├── santacoder.html ├── santacoder.js ├── server.js ├── serverGLM.py ├── serverV3.py ├── servernew.js ├── sery.py └── vserver.py ├── ChatBox.py ├── Chatflow.py ├── Code-Santa.html ├── CodeParrot.html ├── Communication.py ├── CommunicationLog.py ├── DocumentEditor.py ├── DualCoreLLM.py ├── FileProcessor.py ├── FileTransfer.py ├── FineTuneGPT.py ├── FlowiseAICommunication.py ├── GUI ├── GUIFileViewer.py ├── GUIFileViewer.txt ├── GUILLMmanager.py ├── GUIManager.py ├── GUIauth.py ├── GUImain.py ├── file-8rb.file └── streamlit.py ├── LICENSE ├── LLMManager.py ├── MachineLearning.py ├── MediaPlayer.py ├── Memory.py ├── MemoryModule.py ├── ModelSaver.py ├── NLPModule.py ├── NeuralGPT.egg-info ├── PKG-INFO ├── SOURCES.txt ├── dependency_links.txt ├── requires.txt └── top_level.txt ├── ProjectFiles ├── ProjectPlan.txt ├── agent_neural.py ├── agents_neural.py ├── conteneiro.py ├── home.py ├── pages │ ├── NeuralAgentsv2.py │ └── a3.py ├── py.py └── requirements.txt ├── PyPDF2.py ├── README.md ├── ScriptExecutor.py ├── Scripting.py ├── TEST.py ├── Wordpress plugin ├── NeuralGPT Chatbot.html ├── Node.js ├── Nowy Dokument tekstowy.txt ├── admin.php ├── ajax.php ├── assets │ ├── NeuralGPT Chatbot.html │ ├── chatwindow.js │ ├── neuralgpt-browser.php │ ├── neuralgpt-chatbot.js │ └── upload.html ├── automate_transfer.py ├── automoto.txt ├── build │ ├── neuralgpt_chatbot │ │ ├── a.pdf │ │ ├── a1.pdf │ │ ├── au.pdf │ │ ├── aurt12.pdf │ │ └── auto.pdf │ └── python_script │ │ ├── Analysis-00.toc │ │ ├── COLLECT-00.toc │ │ ├── EXE-00.toc │ │ ├── PKG-00.toc │ │ ├── base_library.zip │ │ ├── here.py │ │ └── localpycs │ │ ├── pyimod01_archive.pyc │ │ ├── pyimod02_importers.pyc │ │ ├── pyimod03_ctypes.pyc │ │ ├── pyimod04_pywin32.pyc │ │ └── struct.pyc ├── chat-window.js ├── chat_gui.py ├── chatbot.php ├── chatbox_obj.py ├── chatbox_v2.js ├── chatflow.py ├── chatwindow.js ├── combinepdf.pdf ├── createmodify_files.py ├── cronjob.py ├── data_backup.py ├── data_synch.py ├── database_operations.py ├── dataproto.txt ├── dualcorellm.pdf ├── dualcorellm.txt ├── flowise.css ├── flowise.js ├── generate_response.py ├── get_feedback.php ├── htmlmarkup.txt ├── js │ ├── Nowy Dokument tekstowy.txt │ ├── chatbox_v2.js │ ├── flowise.js │ └── main.js ├── 
listmodels.py ├── load_pretrained.py ├── loadpretrained.py ├── logcreation.py ├── long.odt ├── long.pdf ├── long.rtf ├── looongchat.txt ├── main.js ├── mlm.py ├── module.txt ├── neuralgpt-browse.js ├── neuralgpt-browse.php ├── neuralgpt-chatbot.js ├── neuralgpt-chatbot.php ├── neuralgpt_chatbot.spec ├── neuralgpt_handle_user_input.php ├── notif_sys.py ├── package-lock.json ├── package.json ├── perf.py ├── protocol.txt ├── python_script.py ├── python_script.spec ├── search.json ├── send_mail.py ├── server.js ├── settings.php ├── shortcode.php ├── submit_input.php ├── test_chatbox.py ├── train_restapi.js ├── universal.db ├── upload.html ├── validate_llm_file.php ├── wordpress1.pdf ├── wordpress2.pdf └── workschedule.py ├── __pycache__ ├── DualCoreLLM.cpython-311.pyc ├── dataset.cpython-311.pyc ├── gui.cpython-311.pyc ├── load_model.cpython-311.pyc ├── model.cpython-311.pyc ├── neuralgpt.cpython-311.pyc ├── pinecone.cpython-311.pyc ├── requests.cpython-311.pyc └── utils.cpython-311.pyc ├── _errors.py ├── agent-document (1).pdf ├── agent-document (2).pdf ├── agent-document (21).pdf ├── agent-document (3).pdf ├── agent-document (4).pdf ├── agent-document.pdf ├── agent_script.py ├── agent_scripts ├── __pycache__ │ └── database.cpython-311.pyc ├── agent_4641560f-1ba9-4df6-ad62-1842ef8a892d.py ├── database.py ├── main.py └── script_executor.py ├── appgui.py ├── auto-script1.pdf ├── auto-script2.pdf ├── auto ├── markdown.sh ├── saveashtml.py ├── saveastxt.py ├── task1.py ├── task2.java └── task3.sh ├── autogpt Chatflow.json ├── automation.json ├── callback.py ├── chat.py ├── chat6boxfont.py ├── chatboxx.py ├── class.py ├── classc.py ├── code ├── DatabaseModule.py ├── ScriptExecutor.py ├── __pycache__ │ ├── models.cpython-311.pyc │ └── utils.cpython-311.pyc ├── main.py ├── models.py └── utils.py ├── com.py ├── completepdf.pdf ├── data ├── Database └── dataset1 │ └── datasets.csv ├── dataset.py ├── exe.py ├── execute.py ├── extract_text.py ├── fine_tuner.py ├── generate_test_data.py ├── geninit.py ├── gu.py ├── gui.py ├── ind.py ├── init.py ├── integration1.pdf ├── integration2.pdf ├── integration3.pdf ├── load_model.py ├── long.py ├── ma.py ├── main.py ├── model.py ├── model_loader.py ├── neural-big.pdf ├── neuralgod.py ├── neuralgpt.py ├── nlp.py ├── nlp ├── DocumentEditor.py ├── DualCoreLLM.py ├── FileTransfer.py ├── MachineLearning.py ├── MediaPlayer.py ├── Memory.py ├── NLPModule.py ├── Scripting.py └── tools.py ├── notif.py ├── notification.py ├── pine.py ├── pinecon.py ├── process_input.py ├── provided_documents.txt ├── requests.py ├── requirements.txt ├── responses.json ├── save_preset.py ├── setup.py ├── sort_files.py ├── src ├── ChatBox.py ├── Chatflow.py ├── Communication.py ├── CommunicationLog.py ├── DualCoreLLM.py ├── FineTuneGPT,py.txt ├── FlowiseAICommunication.py ├── ModelSaver.py ├── PyPDF2.py ├── _errors.py ├── callback.py ├── chat.py ├── class.py ├── classc.py ├── com.py ├── documenteditor.py ├── fiileprocessor.py ├── fine_tuner.py ├── generate_test_data.py ├── llmmanager.py ├── load_model.py ├── main.py ├── memorymodule.py ├── model_loader.py ├── neuralgpt.py ├── nlp │ └── nlp.py ├── notif.py ├── notification.py ├── process_input.py ├── requests.py ├── save_preset.py ├── scriptexecutor.py └── train_model.py ├── streamlit.py ├── streamlit ├── Docsbotport.html ├── ServChar.py ├── ServFire.py ├── ServG4F.py ├── ServG4F2.py ├── clientCharacter.py ├── clientFireworks.py ├── clientG4F.py ├── clientG4F2.py ├── comp.html ├── components.html ├── flowise.html ├── home.py ├── pages │ ├── 
Connectivity.py │ ├── Docsbotport.html │ ├── Gradio.py │ ├── NeuralGPT.py │ ├── chat-hub.db │ ├── comp.html │ ├── components.html │ └── flowise.html └── requirements.txt ├── test_model.py ├── testing.py ├── tools.py ├── train_model.py ├── utils.py ├── utils ├── ChatBox.py ├── Chatflow.py ├── Communication.py ├── CommunicationLog.py ├── DocumentEditor.py ├── DualCoreLLM.py ├── FileProcessor.py ├── FileTransfer.py ├── FineTuneGPT,py.txt ├── FlowiseAICommunication.py ├── InternetAccess.py ├── LLMManager.py ├── MediaPlayer.py ├── Memory.py ├── MemoryModule.py ├── NLPModule.py ├── PyPDF2.py ├── ScriptExecutor.py ├── Scripting.py ├── _errors.py ├── callback.py ├── chat.py ├── chatboxx.py ├── class.py ├── classc.py ├── com.py ├── fine_tuner.py ├── generate_test_data.py ├── load_model.py ├── model_loader.py ├── neuralgpt.py ├── notif.py ├── notification.py ├── process_input.py ├── requests.py ├── save_preset.py └── train_model.py └── vord2.py /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python 3", 3 | // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile 4 | "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", 5 | "customizations": { 6 | "codespaces": { 7 | "openFiles": [ 8 | "README.md", 9 | "streamlit.py" 10 | ] 11 | }, 12 | "vscode": { 13 | "settings": {}, 14 | "extensions": [ 15 | "ms-python.python", 16 | "ms-python.vscode-pylance" 17 | ] 18 | } 19 | }, 20 | "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y { 9 | console.log('Connected to server'); 10 | 11 | // Start reading input from the user and sending messages to the server 12 | const rl = readline.createInterface({ 13 | input: process.stdin, 14 | output: process.stdout 15 | }); 16 | 17 | rl.on('line', (inputText) => { 18 | if (inputText.startsWith('/qna ')) { 19 | // Parse the input text as a QnA message 20 | const [question, passage] = inputText.substring(5).split('|').map((s) => s.trim()); 21 | if (!question || !passage) { 22 | console.log('Invalid /qna command. Usage: /qna | '); 23 | return; 24 | } 25 | 26 | // Send the QnA message to the server 27 | ws.send(JSON.stringify({ type: 'qna', question, passage })); 28 | } else { 29 | // Send a regular chat message to the server 30 | ws.send(JSON.stringify(('text:', inputText) )); 31 | } 32 | }); 33 | }); 34 | 35 | // Listen for messages from the server 36 | ws.on('message', (message) => { 37 | console.log(message.toString()); 38 | }); 39 | -------------------------------------------------------------------------------- /Chat-center/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Chat Hub Center 5 | 11 | 12 | 13 |

[markup stripped in extraction: the page body shows a "Chat Hub Center" heading with "Incoming Messages" and "Server Responses" panels; the block at roughly original lines 19-38, presumably the page's inline script, did not survive]
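[Reconstruction note: since the inline script is lost, the following is a minimal sketch of what it plausibly did, judging from 4all.js above, which exchanges JSON frames over a WebSocket. The element id and the port below are assumptions, not recovered values; 3000 is borrowed from chatbox_v2.js elsewhere in this repo.]

// Hypothetical sketch, not recovered source: render incoming frames into the page's panels.
const ws = new WebSocket('ws://localhost:3000'); // port is an assumption; match server.js
ws.onmessage = (event) => {
  let text = String(event.data);
  try {
    text = JSON.parse(text).text || text; // clients such as 4all.js send JSON frames
  } catch (e) { /* plain-text frame: keep as-is */ }
  const entry = document.createElement('div');
  entry.textContent = text;
  document.getElementById('incoming-messages').appendChild(entry); // assumed panel id
};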

18 | 19 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /Chat-center/instructions.txt: -------------------------------------------------------------------------------- 1 | You need to have Node.js and Python installed 2 | Install dependencies listed in Package.JSON (npm install ...) 3 | Install Gradio app (pip install gradio) 4 | Enter your HuggingFace API token in server.js in place marked as: and run it in cmd window: node server.js 5 | In a second cmd window: gradio app5.py 6 | In AgentsGPT.html enter: Google cse ID, google API and OpenAI API in the http request script then open the file in a browser 7 | Click "connect" 8 | You can now speak with the integrated agents via the html chat interface 9 | -------------------------------------------------------------------------------- /Chat-center/requirements.txt: -------------------------------------------------------------------------------- 1 | gradio==3.25 2 | openai==0.27 3 | langchain==0.0.139 4 | google-api-python-client 5 | requests 6 | transformers -------------------------------------------------------------------------------- /Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | # code to execute the autonomous scripts 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs -------------------------------------------------------------------------------- /Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle timeout 12 | return "Task timed out" -------------------------------------------------------------------------------- /CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp, 9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs -------------------------------------------------------------------------------- /DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in 
[t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Check for grammatical correctness 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /FileProcessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | with open(file_url, 'wb') as f: 14 | f.write(file_path.read()) 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /FileTransfer.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write) -------------------------------------------------------------------------------- /FineTuneGPT.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | 
loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model.bin') -------------------------------------------------------------------------------- /FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- /GUI/GUIFileViewer.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from PIL import Image, ImageTk 3 | import webbrowser 4 | import os 5 | 6 | class FileViewer: 7 | def __init__(self, root): 8 | self.root = root 9 | self.frame = tk.Frame(root) 10 | self.frame.pack() 11 | 12 | # Create a scrollbar 13 | self.scrollbar = tk.Scrollbar(self.frame) 14 | self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y) 15 | 16 | # Create a canvas 17 | self.canvas = tk.Canvas(self.frame, yscrollcommand=self.scrollbar.set) 18 | self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) 19 | 20 | # Configure the scrollbar 21 | self.scrollbar.config(command=self.canvas.yview) 22 | 23 | # Bind the canvas to the mouse wheel 24 | self.canvas.bind('', self.on_mousewheel) 25 | 26 | # Create a frame inside the canvas 27 | self.inner_frame = tk.Frame(self.canvas) 28 | self.canvas.create_window((0, 0), window=self.inner_frame, anchor='nw') 29 | 30 | def load_image(self, file_path): 31 | # Load the image 32 | image = Image.open(file_path) 33 | photo = ImageTk.PhotoImage(image) 34 | 35 | # Create a label to display the image 36 | label = tk.Label(self.inner_frame, image=photo) 37 | label.image = photo 38 | label.pack() 39 | 40 | def load_document(self, file_path): 41 | # Open the document in the default application 42 | webbrowser.open_new_tab(file_path) 43 | 44 | def load_media(self, file_path): 45 | # Open the media file in a media player 46 | os.startfile(file_path) 47 | 48 | def on_mousewheel(self, event): 49 | # Scroll the canvas when the mouse wheel is used 50 | self.canvas.yview_scroll(int(-1*(event.delta/120)), 'units') -------------------------------------------------------------------------------- /GUI/GUIFileViewer.txt: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from PIL import Image, ImageTk 3 | import webbrowser 4 | import os 5 | 6 | class FileViewer: 7 | def __init__(self, root): 8 | self.root = root 9 | self.frame = tk.Frame(root) 10 | self.frame.pack() 11 | 12 | # Create a scrollbar 13 | self.scrollbar = tk.Scrollbar(self.frame) 14 | self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y) 15 | 16 | # Create a canvas 17 | self.canvas = tk.Canvas(self.frame, yscrollcommand=self.scrollbar.set) 18 | self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True) 19 | 20 | # Configure the scrollbar 21 | self.scrollbar.config(command=self.canvas.yview) 22 | 23 | # Bind the canvas to the mouse wheel 24 | 
self.canvas.bind('', self.on_mousewheel) 25 | 26 | # Create a frame inside the canvas 27 | self.inner_frame = tk.Frame(self.canvas) 28 | self.canvas.create_window((0, 0), window=self.inner_frame, anchor='nw') 29 | 30 | def load_image(self, file_path): 31 | # Load the image 32 | image = Image.open(file_path) 33 | photo = ImageTk.PhotoImage(image) 34 | 35 | # Create a label to display the image 36 | label = tk.Label(self.inner_frame, image=photo) 37 | label.image = photo 38 | label.pack() 39 | 40 | def load_document(self, file_path): 41 | # Open the document in the default application 42 | webbrowser.open_new_tab(file_path) 43 | 44 | def load_media(self, file_path): 45 | # Open the media file in a media player 46 | os.startfile(file_path) 47 | 48 | def on_mousewheel(self, event): 49 | # Scroll the canvas when the mouse wheel is used 50 | self.canvas.yview_scroll(int(-1*(event.delta/120)), 'units') -------------------------------------------------------------------------------- /GUI/GUILLMmanager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import threading 4 | 5 | class LLMManager: 6 | def __init__(self): 7 | self.llm_list = [] 8 | self.current_llm = None 9 | self.llm_thread = None 10 | 11 | def add_llm(self, llm_path): 12 | if os.path.exists(llm_path): 13 | self.llm_list.append(llm_path) 14 | 15 | def remove_llm(self, llm_path): 16 | if llm_path in self.llm_list: 17 | self.llm_list.remove(llm_path) 18 | 19 | def list_llms(self): 20 | return self.llm_list 21 | 22 | def set_current_llm(self, llm_path): 23 | if llm_path in self.llm_list: 24 | self.current_llm = llm_path 25 | 26 | def start_llm(self): 27 | if self.current_llm is not None: 28 | self.llm_thread = threading.Thread(target=self._run_llm) 29 | self.llm_thread.start() 30 | 31 | def stop_llm(self): 32 | if self.llm_thread is not None: 33 | self.llm_thread.stop() 34 | 35 | def _run_llm(self): 36 | subprocess.call([self.current_llm]) 37 | 38 | def get_llm_status(self): 39 | if self.llm_thread is not None: 40 | return self.llm_thread.is_alive() 41 | else: 42 | return False -------------------------------------------------------------------------------- /GUI/GUIManager.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | 3 | class GUIManager: 4 | def __init__(self): 5 | self.root = tk.Tk() 6 | self.root.title("Universal Embedding Framework") 7 | self.root.geometry("800x600") 8 | 9 | # Create menu bar 10 | self.menu_bar = tk.Menu(self.root) 11 | self.file_menu = tk.Menu(self.menu_bar, tearoff=0) 12 | self.file_menu.add_command(label="Open") 13 | self.file_menu.add_command(label="Save") 14 | self.file_menu.add_separator() 15 | self.file_menu.add_command(label="Exit", command=self.root.quit) 16 | self.menu_bar.add_cascade(label="File", menu=self.file_menu) 17 | 18 | self.edit_menu = tk.Menu(self.menu_bar, tearoff=0) 19 | self.edit_menu.add_command(label="Cut") 20 | self.edit_menu.add_command(label="Copy") 21 | self.edit_menu.add_command(label="Paste") 22 | self.menu_bar.add_cascade(label="Edit", menu=self.edit_menu) 23 | 24 | self.view_menu = tk.Menu(self.menu_bar, tearoff=0) 25 | self.view_menu.add_command(label="Toggle Fullscreen") 26 | self.menu_bar.add_cascade(label="View", menu=self.view_menu) 27 | 28 | self.help_menu = tk.Menu(self.menu_bar, tearoff=0) 29 | self.help_menu.add_command(label="About") 30 | self.menu_bar.add_cascade(label="Help", menu=self.help_menu) 31 | 32 | 
self.root.config(menu=self.menu_bar) 33 | 34 | # Create text area for document editing 35 | self.text_area = tk.Text(self.root) 36 | self.text_area.pack(expand=True, fill="both") 37 | 38 | self.root.mainloop() 39 | 40 | if __name__ == "__main__": 41 | gui_manager = GUIManager() -------------------------------------------------------------------------------- /GUI/GUImain.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | 3 | class GUIManager: 4 | def __init__(self): 5 | self.root = tk.Tk() 6 | self.root.title("Universal Embedding Framework") 7 | self.root.geometry("800x600") 8 | 9 | # Create menu bar 10 | self.menu_bar = tk.Menu(self.root) 11 | self.file_menu = tk.Menu(self.menu_bar, tearoff=0) 12 | self.file_menu.add_command(label="Open") 13 | self.file_menu.add_command(label="Save") 14 | self.file_menu.add_separator() 15 | self.file_menu.add_command(label="Exit", command=self.root.quit) 16 | self.menu_bar.add_cascade(label="File", menu=self.file_menu) 17 | 18 | self.edit_menu = tk.Menu(self.menu_bar, tearoff=0) 19 | self.edit_menu.add_command(label="Cut") 20 | self.edit_menu.add_command(label="Copy") 21 | self.edit_menu.add_command(label="Paste") 22 | self.menu_bar.add_cascade(label="Edit", menu=self.edit_menu) 23 | 24 | self.view_menu = tk.Menu(self.menu_bar, tearoff=0) 25 | self.view_menu.add_command(label="Toggle Fullscreen") 26 | self.menu_bar.add_cascade(label="View", menu=self.view_menu) 27 | 28 | self.help_menu = tk.Menu(self.menu_bar, tearoff=0) 29 | self.help_menu.add_command(label="About") 30 | self.menu_bar.add_cascade(label="Help", menu=self.help_menu) 31 | 32 | self.root.config(menu=self.menu_bar) 33 | 34 | # Create text area for document editing 35 | self.text_area = tk.Text(self.root) 36 | self.text_area.pack(expand=True, fill="both") 37 | 38 | self.root.mainloop() 39 | 40 | if __name__ == "__main__": 41 | gui_manager = GUIManager() -------------------------------------------------------------------------------- /GUI/file-8rb.file: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | 3 | class GUIManager: 4 | def __init__(self): 5 | self.root = tk.Tk() 6 | self.root.title("Universal Embedding Framework") 7 | self.root.geometry("800x600") 8 | 9 | # Create menu bar 10 | self.menu_bar = tk.Menu(self.root) 11 | self.file_menu = tk.Menu(self.menu_bar, tearoff=0) 12 | self.file_menu.add_command(label="Open") 13 | self.file_menu.add_command(label="Save") 14 | self.file_menu.add_separator() 15 | self.file_menu.add_command(label="Exit", command=self.root.quit) 16 | self.menu_bar.add_cascade(label="File", menu=self.file_menu) 17 | 18 | self.edit_menu = tk.Menu(self.menu_bar, tearoff=0) 19 | self.edit_menu.add_command(label="Cut") 20 | self.edit_menu.add_command(label="Copy") 21 | self.edit_menu.add_command(label="Paste") 22 | self.menu_bar.add_cascade(label="Edit", menu=self.edit_menu) 23 | 24 | self.view_menu = tk.Menu(self.menu_bar, tearoff=0) 25 | self.view_menu.add_command(label="Toggle Fullscreen") 26 | self.menu_bar.add_cascade(label="View", menu=self.view_menu) 27 | 28 | self.help_menu = tk.Menu(self.menu_bar, tearoff=0) 29 | self.help_menu.add_command(label="About") 30 | self.menu_bar.add_cascade(label="Help", menu=self.help_menu) 31 | 32 | self.root.config(menu=self.menu_bar) 33 | 34 | # Create text area for document editing 35 | self.text_area = tk.Text(self.root) 36 | self.text_area.pack(expand=True, fill="both") 37 | 38 | self.root.mainloop() 39 | 40 | 
if __name__ == "__main__": 41 | gui_manager = GUIManager() -------------------------------------------------------------------------------- /LLMManager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | class LLM: 5 | def __init__(self, name, bin_file_path): 6 | self.name = name 7 | self.bin_file_path = bin_file_path 8 | 9 | class LLMManager: 10 | def __init__(self, local_storage_path): 11 | self.local_storage_path = local_storage_path 12 | self.llms = [] 13 | 14 | def add_llm(self, llm): 15 | self.llms.append(llm) 16 | 17 | def remove_llm(self, llm_name): 18 | for llm in self.llms: 19 | if llm.name == llm_name: 20 | self.llms.remove(llm) 21 | 22 | def download_llm(self, url): 23 | response = requests.get(url) 24 | llm_name = os.path.basename(url) 25 | llm_file_path = os.path.join(self.local_storage_path, llm_name) 26 | with open(llm_file_path, 'wb') as f: 27 | f.write(response.content) 28 | llm = LLM(llm_name, llm_file_path) 29 | self.add_llm(llm) 30 | 31 | def upload_llm(self, llm_file_path): 32 | llm_name = os.path.basename(llm_file_path) 33 | llm = LLM(llm_name, llm_file_path) 34 | self.add_llm(llm) 35 | 36 | def connect_llm(self, llm_name): 37 | for llm in self.llms: 38 | if llm.name == llm_name: 39 | # connect the llm 40 | pass 41 | 42 | def disconnect_llm(self, llm_name): 43 | for llm in self.llms: 44 | if llm.name == llm_name: 45 | # disconnect the llm 46 | pass -------------------------------------------------------------------------------- /MachineLearning.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets 2 | from sklearn.model_selection import train_test_split 3 | from sklearn.linear_model import LinearRegression 4 | 5 | class MachineLearning: 6 | def __init__(self): 7 | pass 8 | 9 | def train_model(self, X, y): 10 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 11 | model = LinearRegression() 12 | model.fit(X_train, y_train) 13 | return model 14 | 15 | def predict(self, model, X): 16 | return model.predict(X) -------------------------------------------------------------------------------- /MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() -------------------------------------------------------------------------------- /Memory.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class Memory: 4 | def __init__(self, db_file): 5 | self.conn = sqlite3.connect(db_file) 6 | self.cursor = self.conn.cursor() 7 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 8 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 9 | data TEXT)''') 10 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 11 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 12 | data TEXT)''') 13 | self.conn.commit() 14 | 15 | def add_to_short_term_memory(self, data): 16 | self.cursor.execute("INSERT INTO short_term_memory (data) VALUES (?)", (data,)) 17 | self.conn.commit() 18 | 19 | def add_to_long_term_memory(self, data): 20 | self.cursor.execute("INSERT INTO 
long_term_memory (data) VALUES (?)", (data,)) 21 | self.conn.commit() 22 | 23 | def retrieve_from_short_term_memory(self): 24 | self.cursor.execute("SELECT * FROM short_term_memory") 25 | return self.cursor.fetchall() 26 | 27 | def retrieve_from_long_term_memory(self): 28 | self.cursor.execute("SELECT * FROM long_term_memory") 29 | return self.cursor.fetchall() 30 | 31 | def clear_short_term_memory(self): 32 | self.cursor.execute("DELETE FROM short_term_memory") 33 | self.conn.commit() 34 | 35 | def clear_long_term_memory(self): 36 | self.cursor.execute("DELETE FROM long_term_memory") 37 | self.conn.commit() -------------------------------------------------------------------------------- /MemoryModule.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class MemoryModule: 4 | def __init__(self, db_path): 5 | self.db_path = db_path 6 | self.conn = sqlite3.connect(self.db_path) 7 | self.create_tables() 8 | 9 | def create_tables(self): 10 | cursor = self.conn.cursor() 11 | cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 12 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 13 | data TEXT)''') 14 | cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 15 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 16 | data TEXT)''') 17 | self.conn.commit() 18 | 19 | def store_data(self, data, memory_type): 20 | cursor = self.conn.cursor() 21 | if memory_type == 'short_term': 22 | cursor.execute('''INSERT INTO short_term_memory (data) VALUES (?)''', (data,)) 23 | elif memory_type == 'long_term': 24 | cursor.execute('''INSERT INTO long_term_memory (data) VALUES (?)''', (data,)) 25 | self.conn.commit() 26 | 27 | def retrieve_data(self, query, memory_type): 28 | cursor = self.conn.cursor() 29 | if memory_type == 'short_term': 30 | cursor.execute('''SELECT data FROM short_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 31 | elif memory_type == 'long_term': 32 | cursor.execute('''SELECT data FROM long_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 33 | data = cursor.fetchall() 34 | return data -------------------------------------------------------------------------------- /ModelSaver.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from model_saver import ModelSaver 3 | 4 | # Load a pretrained model 5 | model = NeuralGPT.from_pretrained('gpt2') 6 | 7 | # Save the model to a local file 8 | saver = ModelSaver(model) 9 | saver.save_local('my_model.bin') 10 | 11 | # Save the model to an online source 12 | saver.save_online('http://example.com/model') -------------------------------------------------------------------------------- /NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: 
Implement model customization 31 | return None -------------------------------------------------------------------------------- /NeuralGPT.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: NeuralGPT 3 | Version: 0.1 4 | Summary: A project for neural GPT 5 | Author: B staszewski 6 | Author-email: bstaszewski1984@gmail.com 7 | License-File: LICENSE 8 | -------------------------------------------------------------------------------- /NeuralGPT.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | LICENSE 2 | README.md 3 | setup.py 4 | NeuralGPT.egg-info/PKG-INFO 5 | NeuralGPT.egg-info/SOURCES.txt 6 | NeuralGPT.egg-info/dependency_links.txt 7 | NeuralGPT.egg-info/requires.txt 8 | NeuralGPT.egg-info/top_level.txt -------------------------------------------------------------------------------- /NeuralGPT.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /NeuralGPT.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | torch 3 | transformers 4 | pytest 5 | -------------------------------------------------------------------------------- /NeuralGPT.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /ProjectFiles/ProjectPlan.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ProjectFiles/requirements.txt: -------------------------------------------------------------------------------- 1 | asyncio 2 | g4f 3 | openai 4 | requests 5 | datetime 6 | sqlite3 7 | websockets 8 | json 9 | anthropic 10 | streamlit 11 | fireworks-client 12 | PyCharacterAI 13 | langchain 14 | chromadb 15 | pdfplumber 16 | PySimpleGUI -------------------------------------------------------------------------------- /PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 | # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NeuralGPT 2 | The software is still in development but if you're interested in trying it out, you can check out the 
'Chat-center' folder, as this is the only part of the repository that actually works. 3 | Here's the latest version of the server; you can run it yourself and connect all sorts of clients from the same folder to it: 4 | You'll need to paste in a couple of API keys to make it 'work': for the server, a Fireworks API key, a Google API key and a Google CSE ID... 5 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/ServerV2.py 6 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/serverV3.py 7 | 8 | Warning - the 'stop websocket server/client' button (function) doesn't work, so you'll have to stop the entire app instead. 9 | ![001](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/ee4f6faa-4e00-417b-8907-3843b362db09) 10 | 11 | Here are a couple of clients as examples: 12 | 13 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/FireworksAgentsGPT.py 14 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/TkDocsBot.py 15 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/Docsbotport.html 16 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/StarCoderOK.js 17 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/flowise%20ulitmatum.html 18 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/Chaindesk%20Agent.html 19 | https://github.com/CognitiveCodes/NeuralGPT/blob/main/Chat-center/characterAI.py 20 | 21 | ![017](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/1e290007-0ee7-4aaf-bb5d-2c372ab98f0a) 22 | ![009](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/ba873ac5-2265-4dc2-96ce-eac91a02dfa5) 23 | ![gui](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/7b7c4d89-476f-4d74-8697-3dbcdce95609) 24 | ![schem](https://github.com/CognitiveCodes/NeuralGPT/assets/133844350/bd1f6d28-dd09-40c8-bc58-1c9439d44197) 25 | -------------------------------------------------------------------------------- /ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a subprocess with a 10-second timeout 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) -------------------------------------------------------------------------------- /Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(script_path) -------------------------------------------------------------------------------- /TEST.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from DualCoreLLM import DualCoreLLM # if needed 3 | import re 4 | 5 | # Load pretrained model 6 | model =
NeuralGPT.load_model('model.bin') # provide path to model file 7 | 8 | # Define list of prompts 9 | prompts = ['identify yourself', 'How can I improve my life?'] 10 | 11 | # Define function for preprocessing user input 12 | def preprocess_input(text): 13 | text = text.lower() 14 | text = re.sub(r'[^\w\s]', '', text) # remove special characters 15 | return text 16 | 17 | # Define function for generating responses 18 | def generate_response(prompt): 19 | response = model.generate(prompt) 20 | # Evaluate coherence of response 21 | # ... 22 | return response 23 | 24 | # Define function for testing coherence of responses 25 | def test_coherence(prompt): 26 | input_text = input(prompt + ': ') 27 | preprocessed_text = preprocess_input(input_text) 28 | response = generate_response(preprocessed_text) 29 | # Evaluate coherence of response 30 | # ... 31 | return coherence_score 32 | 33 | # Run test for each prompt 34 | total_score = 0 35 | for prompt in prompts: 36 | score = test_coherence(prompt) 37 | total_score += score 38 | 39 | # Output final score 40 | print('Coherence score:', total_score) -------------------------------------------------------------------------------- /Wordpress plugin/NeuralGPT Chatbot.html: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | NeuralGPT Chatbot 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
[markup stripped in extraction: the body presumably defined the chat window container, message log, text input, and send button that the plugin's chat scripts target; only the dump's line numbers survived]
-------------------------------------------------------------------------------- /Wordpress plugin/Nowy Dokument tekstowy.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/Nowy Dokument tekstowy.txt -------------------------------------------------------------------------------- /Wordpress plugin/admin.php: --------------------------------------------------------------------------------
[PHP and markup stripped in extraction: the admin page renders a "NeuralGPT Chatbot Settings" heading and a settings form; the PHP handler at the top of the file was swallowed along with the tags]
-------------------------------------------------------------------------------- /Wordpress plugin/assets/NeuralGPT Chatbot.html: -------------------------------------------------------------------------------- [markup stripped in extraction: this file appears to duplicate /Wordpress plugin/NeuralGPT Chatbot.html above; only the "NeuralGPT Chatbot" title and the dump's line numbers survived]
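[Reconstruction note: the stripped body markup can be inferred in outline, because assets/neuralgpt-chatbot.js below binds to #neuralgpt-chat-log, #neuralgpt-chat-input and #neuralgpt-chat-send, so the page presumably declared those three elements. Here is a sketch of an equivalent structure, written as DOM calls since the original markup itself is unrecoverable; the ids come from that script, everything else is an assumption.]

// Hypothetical equivalent of the stripped chat markup; ids taken from neuralgpt-chatbot.js.
const chatLog = document.createElement('div');
chatLog.id = 'neuralgpt-chat-log';     // message history container
const chatInput = document.createElement('input');
chatInput.type = 'text';
chatInput.id = 'neuralgpt-chat-input'; // user text entry
const chatSend = document.createElement('button');
chatSend.id = 'neuralgpt-chat-send';   // triggers sendMessage() in the script
chatSend.textContent = 'Send';
document.body.append(chatLog, chatInput, chatSend);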
26 | 27 | 28 | -------------------------------------------------------------------------------- /Wordpress plugin/assets/chatwindow.js: -------------------------------------------------------------------------------- 1 | var chatWindow = document.createElement("div"); 2 | chatWindow.id = "chat-window"; 3 | document.body.appendChild(chatWindow); 4 | 5 | var chatInput = document.createElement("input"); 6 | chatInput.type = "text"; 7 | chatInput.id = "chat-input"; 8 | chatWindow.appendChild(chatInput); 9 | 10 | var chatButton = document.createElement("button"); 11 | chatButton.innerHTML = "Send"; 12 | chatButton.onclick = function() { 13 | var message = document.getElementById("chat-input").value; 14 | document.getElementById("chat-input").value = ""; 15 | sendMessage(message); 16 | } 17 | chatWindow.appendChild(chatButton); 18 | 19 | var chatLog = document.createElement("div"); 20 | chatLog.id = "chat-log"; 21 | chatWindow.appendChild(chatLog); 22 | 23 | function sendMessage(message) { 24 | var xhr = new XMLHttpRequest(); 25 | xhr.onreadystatechange = function() { 26 | if (xhr.readyState === 4 && xhr.status === 200) { 27 | var response = JSON.parse(xhr.responseText); 28 | addMessage(response.message, "bot"); 29 | } 30 | } 31 | xhr.open("POST", "/wp-admin/admin-ajax.php?action=neuralgpt_chat", true); 32 | xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded"); 33 | xhr.send("message=" + message); 34 | } 35 | 36 | function addMessage(message, sender) { 37 | var messageElement = document.createElement("div"); 38 | messageElement.innerHTML = message; 39 | messageElement.className = "message " + sender; 40 | chatLog.appendChild(messageElement); 41 | } -------------------------------------------------------------------------------- /Wordpress plugin/assets/neuralgpt-chatbot.js: -------------------------------------------------------------------------------- 1 | jQuery(document).ready(function ($) { 2 | // Establish Socket.io connection 3 | const socket = io('http://localhost:3001'); 4 | 5 | // Function to send a message to the server 6 | function sendMessage() { 7 | const message = $('#neuralgpt-chat-input').val().trim(); 8 | if (message !== '') { 9 | // Emit the message event to the server 10 | socket.emit('chat message', message); 11 | 12 | // Clear the input field 13 | $('#neuralgpt-chat-input').val(''); 14 | } 15 | } 16 | 17 | // Function to handle receiving a response from the server 18 | function handleResponse(response) { 19 | // Append the response to the chat log 20 | $('#neuralgpt-chat-log').append('
<div>' + response + '</div>
'); 21 | } 22 | 23 | // Send message when the send button is clicked 24 | $('#neuralgpt-chat-send').on('click', sendMessage); 25 | 26 | // Send message when Enter key is pressed in the input field 27 | $('#neuralgpt-chat-input').on('keydown', function (e) { 28 | if (e.key === 'Enter') { 29 | sendMessage(); 30 | } 31 | }); 32 | 33 | // Listen for the 'chat message' event from the server 34 | socket.on('chat message', handleResponse); 35 | }); 36 | -------------------------------------------------------------------------------- /Wordpress plugin/assets/upload.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | File Upload 5 | 6 | 7 |

[markup stripped in extraction: the page shows an "Upload a File" heading and a file-upload form; the form markup itself did not survive]
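[Reconstruction note: only the "Upload a File" heading survives, so the form's submit handling is unrecoverable. Below is a sketch of a client-side handler consistent with what remains; the '/upload' endpoint and the use of fetch are assumptions, since the real handler may live in the plugin's PHP files (e.g. ajax.php).]

// Hypothetical submit handler for the stripped upload form; endpoint is an assumption.
document.querySelector('form').addEventListener('submit', async (event) => {
  event.preventDefault();
  const formData = new FormData(event.target); // collects the form's file input
  await fetch('/upload', { method: 'POST', body: formData }); // assumed endpoint
});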
13 | 14 | -------------------------------------------------------------------------------- /Wordpress plugin/automate_transfer.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import time 4 | 5 | # Set up the URLs for the WordPress website and agent-gpt web GUI 6 | wordpress_url = 'http://localhost/wordpress' 7 | agent_gpt_url = 'http://localhost:3000' 8 | 9 | # Set up the payload to send to the agent-gpt web GUI 10 | payload = { 11 | 'text': '', 12 | 'model': 'gpt2', 13 | 'length': 50 14 | } 15 | 16 | # Define a function to send text to the agent-gpt web GUI and receive a response 17 | def send_text_to_agent_gpt(text): 18 | payload['text'] = text 19 | response = requests.post(agent_gpt_url, data=json.dumps(payload)) 20 | return response.json()['text'] 21 | 22 | # Define a function to get the latest post from the WordPress website 23 | def get_latest_post(): 24 | response = requests.get(wordpress_url + '/wp-json/wp/v2/posts?per_page=1') 25 | post = response.json()[0] 26 | return post['title']['rendered'] + '\n' + post['content']['rendered'] 27 | 28 | # Loop indefinitely and send the latest post to the agent-gpt web GUI every minute 29 | while True: 30 | text = get_latest_post() 31 | response_text = send_text_to_agent_gpt(text) 32 | print(response_text) 33 | time.sleep(60) -------------------------------------------------------------------------------- /Wordpress plugin/build/neuralgpt_chatbot/a.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/neuralgpt_chatbot/a.pdf -------------------------------------------------------------------------------- /Wordpress plugin/build/neuralgpt_chatbot/a1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/neuralgpt_chatbot/a1.pdf -------------------------------------------------------------------------------- /Wordpress plugin/build/neuralgpt_chatbot/au.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/neuralgpt_chatbot/au.pdf -------------------------------------------------------------------------------- /Wordpress plugin/build/neuralgpt_chatbot/aurt12.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/neuralgpt_chatbot/aurt12.pdf -------------------------------------------------------------------------------- /Wordpress plugin/build/neuralgpt_chatbot/auto.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/neuralgpt_chatbot/auto.pdf -------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/base_library.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/base_library.zip 
-------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/localpycs/pyimod01_archive.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/localpycs/pyimod01_archive.pyc -------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/localpycs/pyimod02_importers.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/localpycs/pyimod02_importers.pyc -------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/localpycs/pyimod03_ctypes.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/localpycs/pyimod03_ctypes.pyc -------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/localpycs/pyimod04_pywin32.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/localpycs/pyimod04_pywin32.pyc -------------------------------------------------------------------------------- /Wordpress plugin/build/python_script/localpycs/struct.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/build/python_script/localpycs/struct.pyc -------------------------------------------------------------------------------- /Wordpress plugin/chat-window.js: -------------------------------------------------------------------------------- 1 | var chatWindow = document.createElement("div"); 2 | chatWindow.id = "chat-window"; 3 | document.body.appendChild(chatWindow); 4 | var chatInput = document.createElement("input"); 5 | chatInput.type = "text"; 6 | chatInput.id = "chat-input"; 7 | chatWindow.appendChild(chatInput); 8 | var chatButton = document.createElement("button"); 9 | chatButton.innerHTML = "Send"; 10 | chatButton.onclick = function() { 11 | var message = document.getElementById("chat-input").value; 12 | document.getElementById("chat-input").value = ""; 13 | sendMessage(message); 14 | } 15 | chatWindow.appendChild(chatButton); 16 | var chatLog = document.createElement("div"); 17 | chatLog.id = "chat-log"; 18 | chatWindow.appendChild(chatLog); 19 | function sendMessage(message) { 20 | var xhr = new XMLHttpRequest(); 21 | xhr.onreadystatechange = function() { 22 | if (xhr.readyState === 4 && xhr.status === 200) { 23 | var response = JSON.parse(xhr.responseText); 24 | addMessage(response.message, "bot"); 25 | } 26 | } 27 | xhr.open("POST", "/wp-admin/admin-ajax.php?action=neuralgpt_chat", true); 28 | xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded"); 29 | xhr.send("message=" + message); 30 | } 31 | function addMessage(message, sender) { 32 | var messageElement = document.createElement("div"); 33 | messageElement.innerHTML = message; 34 | 
messageElement.className = "message " + sender; 35 | chatLog.appendChild(messageElement); 36 | } -------------------------------------------------------------------------------- /Wordpress plugin/chat_gui.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | import threading 3 | from neuralgpt import NeuralGPT 4 | # Load the pretrained model 5 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin" 6 | neural_gpt = NeuralGPT(model_path) 7 | # Create the chat window 8 | root = tk.Tk() 9 | root.title("NeuralGPT Chat Window") 10 | # Create the chat history display 11 | chat_history = tk.Text(root, height=20, width=50, state=tk.DISABLED) 12 | chat_history.grid(row=0, column=0, padx=10, pady=10) 13 | # Create the input field and button 14 | input_field = tk.Entry(root, width=50) 15 | input_field.grid(row=1, column=0, padx=10, pady=10) 16 | send_button = tk.Button(root, text="Send", command=lambda: send_message()) 17 | send_button.grid(row=1, column=1, padx=10, pady=10) 18 | # Define the send message function 19 | def send_message(): 20 | # Get the user input 21 | user_input = input_field.get() 22 | input_field.delete(0, tk.END) 23 | # Add the user input to the chat history 24 | chat_history.configure(state=tk.NORMAL) 25 | chat_history.insert(tk.END, "You: " + user_input + "\n") 26 | chat_history.configure(state=tk.DISABLED) 27 | # Generate a response using the NeuralGPT model 28 | response = neural_gpt.generate_response(user_input) 29 | # Add the response to the chat history 30 | chat_history.configure(state=tk.NORMAL) 31 | chat_history.insert(tk.END, "NeuralGPT: " + response + "\n") 32 | chat_history.configure(state=tk.DISABLED) 33 | # Define the update chat function 34 | def update_chat(): 35 | while True: 36 | # Check for other active instances of Neural AI 37 | # Communicate with them through the chatbox if there are any 38 | # Leave the chatbox open for user to speak with running instance if there 39 | are none 40 | pass 41 | # Start the update chat thread 42 | chat_thread = threading.Thread(target=update_chat) 43 | chat_thread.start() 44 | # Start the GUI main loop 45 | root.mainloop() -------------------------------------------------------------------------------- /Wordpress plugin/chatbot.php: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Wordpress plugin/chatbox_obj.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | class Chatbox: 4 | def __init__(self, host, port): 5 | self.host = host 6 | self.port = port 7 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 8 | self.socket.connect((host, port)) 9 | 10 | def send_message(self, message): 11 | self.socket.sendall(message.encode()) 12 | 13 | def receive_message(self): 14 | data = self.socket.recv(1024) 15 | return data.decode() 16 | 17 | def close(self): 18 | self.socket.close() 19 | This code creates a chatbox object that connects to a specified host and port. The send_message method sends a message to the chatbox, while the receive_message method receives a message from the chatbox. The close method closes the connection to the chatbox. 20 | 21 | To use the chatbox, you can create an instance of the Chatbox class and call the send_message and receive_message methods as needed. 
For example: 22 | 23 | Copy code 24 | 25 | chatbox = Chatbox('localhost', 5021) 26 | chatbox.send_message('Hello, world!') 27 | response = chatbox.receive_message() 28 | print(response) 29 | chatbox.close() -------------------------------------------------------------------------------- /Wordpress plugin/chatbox_v2.js: -------------------------------------------------------------------------------- 1 | function createChatbox(chatboxId, socketUrl) { 2 | const socket = new WebSocket(socketUrl); 3 | 4 | function sendMessage(message) { 5 | socket.send(message); 6 | } 7 | 8 | function receiveMessage() { 9 | socket.onmessage = (event) => { 10 | const message = event.data; 11 | const chatbox = document.getElementById(chatboxId); 12 | chatbox.innerHTML += message; 13 | }; 14 | } 15 | 16 | const chatbox = document.getElementById(chatboxId); 17 | chatbox.addEventListener("submit", (event) => { 18 | event.preventDefault(); 19 | const messageInput = chatbox.querySelector("input[type=text]"); 20 | const message = messageInput.value; 21 | sendMessage(message); 22 | messageInput.value = ""; 23 | }); 24 | 25 | receiveMessage(); 26 | } 27 | 28 | createChatbox("chatbox", "ws://localhost:3000"); 29 | 30 | // Example usage with Neural-GPT system 31 | const neuralGptSocketUrl = "ws://localhost:4000"; 32 | const neuralGptSocket = new WebSocket(neuralGptSocketUrl); 33 | 34 | neuralGptSocket.onmessage = (event) => { 35 | const message = event.data; 36 | const chatbox = document.getElementById("chatbox"); 37 | chatbox.innerHTML += message; 38 | }; 39 | 40 | function sendNeuralGptMessage(message) { 41 | neuralGptSocket.send(message); 42 | } 43 | 44 | sendNeuralGptMessage("Hello, Neural-GPT!"); -------------------------------------------------------------------------------- /Wordpress plugin/chatflow.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | from flowise import FlowiseClient 5 | from unified_model import UnifiedModel 6 | def main(model_path): 7 | # Load pretrained model 8 | model = UnifiedModel(model_path) 9 | # Connect to FlowiseAI 10 | client = FlowiseClient() 11 | # Check for active instances of Neural AI 12 | active_instances = client.get_active_instances(model_name='NeuralGPT') 13 | if active_instances: 14 | # Communicate with other active instances 15 | instance_id = active_instances[0]['instance_id'] 16 | chat_id = client.join_chat(instance_id) 17 | client.send_message(chat_id, 'Hello from another instance!') 18 | # Open chat window 19 | chat_id = client.create_chat(model_name='NeuralGPT') 20 | # Listen for messages 21 | while True: 22 | messages = client.get_messages(chat_id) 23 | for message in messages: 24 | if message['type'] == 'text': 25 | # Generate response 26 | response = model.generate_response(message['text']) 27 | # Send response 28 | client.send_message(chat_id, response) 29 | if __name__ == '__main__': 30 | # Parse command line arguments 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--model_path', type=str, required=True, 33 | help='Path to pretrained NeuralGPT model') 34 | args = parser.parse_args() 35 | # Check if model path exists 36 | if not os.path.exists(args.model_path): 37 | print(f"Error: Model path '{args.model_path}' does not exist.") 38 | exit() 39 | # Run main function 40 | main(args.model_path) -------------------------------------------------------------------------------- /Wordpress plugin/chatwindow.js: 
-------------------------------------------------------------------------------- 1 | var chatWindow = document.createElement("div"); 2 | chatWindow.id = "chat-window"; 3 | document.body.appendChild(chatWindow); 4 | 5 | var chatInput = document.createElement("input"); 6 | chatInput.type = "text"; 7 | chatInput.id = "chat-input"; 8 | chatWindow.appendChild(chatInput); 9 | 10 | var chatButton = document.createElement("button"); 11 | chatButton.innerHTML = "Send"; 12 | chatButton.onclick = function() { 13 | var message = document.getElementById("chat-input").value; 14 | document.getElementById("chat-input").value = ""; 15 | sendMessage(message); 16 | } 17 | chatWindow.appendChild(chatButton); 18 | 19 | var chatLog = document.createElement("div"); 20 | chatLog.id = "chat-log"; 21 | chatWindow.appendChild(chatLog); 22 | 23 | function sendMessage(message) { 24 | var xhr = new XMLHttpRequest(); 25 | xhr.onreadystatechange = function() { 26 | if (xhr.readyState === 4 && xhr.status === 200) { 27 | var response = JSON.parse(xhr.responseText); 28 | addMessage(response.message, "bot"); 29 | } 30 | } 31 | xhr.open("POST", "/wp-admin/admin-ajax.php?action=neuralgpt_chat", true); 32 | xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded"); 33 | xhr.send("message=" + message); 34 | } 35 | 36 | function addMessage(message, sender) { 37 | var messageElement = document.createElement("div"); 38 | messageElement.innerHTML = message; 39 | messageElement.className = "message " + sender; 40 | chatLog.appendChild(messageElement); 41 | } -------------------------------------------------------------------------------- /Wordpress plugin/combinepdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/combinepdf.pdf -------------------------------------------------------------------------------- /Wordpress plugin/cronjob.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import datetime 4 | 5 | # Set up the paths to the shared databank and the backup directory 6 | shared_databank_path = "e:/repos/database" 7 | backup_dir_path = "e:/repos/database/backup" 8 | 9 | # Create the backup directory if it doesn't exist 10 | if not os.path.exists(backup_dir_path): 11 | os.mkdir(backup_dir_path) 12 | 13 | # Set up the backup filename with the current date and time 14 | backup_filename = "shared_databank_backup_{}.zip".format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) 15 | 16 | # Create the full path to the backup file 17 | backup_file_path = os.path.join(backup_dir_path, backup_filename) 18 | 19 | # Compress the shared databank directory into a zip file 20 | shutil.make_archive(backup_file_path, "zip", shared_databank_path) 21 | 22 | # Print a message to confirm that the backup was successful 23 | print("Backup of shared databank created at {}".format(backup_file_path)) -------------------------------------------------------------------------------- /Wordpress plugin/data_backup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import datetime 4 | import time 5 | 6 | # Define the path to the database file 7 | database_path = 'E:/xampp/htdocs/wordpress/wp-content/plugins/neuralgpt-chatbot/universal.db' 8 | 9 | # Define the path to the backup directory 10 | backup_dir = 'e:/ai' 11 | 12 | # Define the backup interval 
in seconds (e.g. 24 hours) 13 | backup_interval = 86400 14 | 15 | # Define a function to create a backup of the database 16 | def backup_database(): 17 | # Create a timestamp for the backup file name 18 | timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') 19 | # Create the backup file name 20 | backup_file = 'database_backup_' + timestamp + '.db' 21 | # Create the full path to the backup file 22 | backup_path = backup_dir + backup_file 23 | # Copy the database file to the backup directory 24 | shutil.copy(database_path, backup_path) 25 | 26 | # Define a function to restore the database from a backup 27 | def restore_database(backup_file): 28 | # Create the full path to the backup file 29 | backup_path = backup_dir + backup_file 30 | # Copy the backup file to the database directory 31 | shutil.copy(backup_path, database_path) 32 | 33 | # Define a function to run the backup process at set intervals 34 | def run_backup_process(): 35 | while True: 36 | # Wait for the backup interval to elapse 37 | time.sleep(backup_interval) 38 | # Create a backup of the database 39 | backup_database() 40 | 41 | # Run the backup process in a separate thread 42 | backup_thread = threading.Thread(target=run_backup_process) 43 | backup_thread.start() -------------------------------------------------------------------------------- /Wordpress plugin/dualcorellm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/dualcorellm.pdf -------------------------------------------------------------------------------- /Wordpress plugin/flowise.css: -------------------------------------------------------------------------------- 1 | /* style.css */ 2 | 3 | body { 4 | margin: 0; 5 | padding: 0; 6 | font-family: sans-serif; 7 | } 8 | 9 | #app { 10 | display: flex; 11 | flex-direction: column; 12 | align-items: center; 13 | justify-content: center; 14 | height: 100vh; 15 | } 16 | 17 | #header { 18 | margin-bottom: 2rem; 19 | } 20 | 21 | #chatbot { 22 | width: 100%; 23 | max-width: 600px; 24 | border: 1px solid #ccc; 25 | border-radius: 0.5rem; 26 | overflow: hidden; 27 | } 28 | 29 | #messages { 30 | height: 300px; 31 | overflow-y: scroll; 32 | padding: 1rem; 33 | } 34 | 35 | .message { 36 | margin-bottom: 0.5rem; 37 | padding: 0.5rem; 38 | border-radius: 0.5rem; 39 | } 40 | 41 | .user { 42 | background-color: #f2f2f2; 43 | text-align: right; 44 | } 45 | 46 | .chatbot { 47 | background-color: #e6e6e6; 48 | text-align: left; 49 | } 50 | 51 | #input-form { 52 | display: flex; 53 | justify-content: center; 54 | align-items: center; 55 | padding: 1rem; 56 | background-color: #f2f2f2; 57 | } 58 | 59 | #input-field { 60 | flex-grow: 1; 61 | margin-right: 1rem; 62 | padding: 0.5rem; 63 | border-radius: 0.5rem; 64 | border: none; 65 | } 66 | 67 | button { 68 | padding: 0.5rem 1rem; 69 | border-radius: 0.5rem; 70 | border: none; 71 | background-color: #4caf50; 72 | color: white; 73 | font-weight: bold; 74 | cursor: pointer; 75 | } 76 | 77 | button:hover { 78 | background-color: #3e8e41; 79 | } -------------------------------------------------------------------------------- /Wordpress plugin/generate_response.py: -------------------------------------------------------------------------------- 1 | from transformers import GPT2LMHeadModel, GPT2Tokenizer 2 | 3 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/gpt-j/" 4 | 5 | tokenizer = GPT2Tokenizer.from_pretrained(model_path) 6 | 
7 | model = GPT2LMHeadModel.from_pretrained(model_path) 8 | 9 | def generate_response(input_text): 10 | 11 | input_ids = tokenizer.encode(input_text, return_tensors="pt") 12 | 13 | output_ids = model.generate(input_ids, max_length=50, num_return_sequences=1) 14 | 15 | output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True) 16 | 17 | return output_text 18 | 19 | 20 | -------------------------------------------------------------------------------- /Wordpress plugin/get_feedback.php: -------------------------------------------------------------------------------- 1 | connect_error) { 12 | die("Connection failed: " . $conn->connect_error); 13 | } 14 | 15 | $sql = "SELECT feedback_text, feedback_type FROM user_input WHERE input_text LIKE '%$input_text%' AND input_type = '$input_type' ORDER BY timestamp DESC LIMIT 1"; 16 | 17 | $result = $conn->query($sql); 18 | if ($result->num_rows > 0) { 19 | $row = $result->fetch_assoc(); 20 | $feedback_text = $row['feedback_text']; 21 | $feedback_type = $row['feedback_type']; 22 | if ($feedback_type == 'accept') { 23 | $message = 'Thank you for your idea!'; 24 | } else if ($feedback_type == 'reject') { 25 | $message = 'Sorry, your idea was not accepted.'; 26 | } else { 27 | $message = 'We will consider your idea.'; 28 | } 29 | } else { 30 | $message = 'We did not understand your input. Please try again.'; 31 | } 32 | 33 | $conn->close(); 34 | 35 | echo $message; 36 | ?> -------------------------------------------------------------------------------- /Wordpress plugin/htmlmarkup.txt: -------------------------------------------------------------------------------- 1 |
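(The markup itself was lost when HTML tags were stripped during extraction; judging from the selectors defined in flowise.css above -- #app, #header, #chatbot, #messages, #input-form, #input-field -- the file plausibly sketched something like the following; the exact nesting and button label are assumptions:)
<div id="app">
  <div id="header">NeuralGPT Chatbot</div>
  <div id="chatbot">
    <div id="messages"></div>
    <form id="input-form">
      <input id="input-field" type="text" />
      <button type="submit">Send</button>
    </form>
  </div>
</div>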
    -------------------------------------------------------------------------------- /Wordpress plugin/js/Nowy Dokument tekstowy.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/js/Nowy Dokument tekstowy.txt -------------------------------------------------------------------------------- /Wordpress plugin/js/chatbox_v2.js: -------------------------------------------------------------------------------- 1 | function createChatbox(chatboxId, socketUrl) { 2 | const socket = new WebSocket(socketUrl); 3 | 4 | function sendMessage(message) { 5 | socket.send(message); 6 | } 7 | 8 | function receiveMessage() { 9 | socket.onmessage = (event) => { 10 | const message = event.data; 11 | const chatbox = document.getElementById(chatboxId); 12 | chatbox.innerHTML += message; 13 | }; 14 | } 15 | 16 | const chatbox = document.getElementById(chatboxId); 17 | chatbox.addEventListener("submit", (event) => { 18 | event.preventDefault(); 19 | const messageInput = chatbox.querySelector("input[type=text]"); 20 | const message = messageInput.value; 21 | sendMessage(message); 22 | messageInput.value = ""; 23 | }); 24 | 25 | receiveMessage(); 26 | } 27 | 28 | createChatbox("chatbox", "ws://localhost:3000"); 29 | 30 | // Example usage with Neural-GPT system 31 | const neuralGptSocketUrl = "ws://localhost:4000"; 32 | const neuralGptSocket = new WebSocket(neuralGptSocketUrl); 33 | 34 | neuralGptSocket.onmessage = (event) => { 35 | const message = event.data; 36 | const chatbox = document.getElementById("chatbox"); 37 | chatbox.innerHTML += message; 38 | }; 39 | 40 | function sendNeuralGptMessage(message) { 41 | neuralGptSocket.send(message); 42 | } 43 | 44 | sendNeuralGptMessage("Hello, Neural-GPT!"); -------------------------------------------------------------------------------- /Wordpress plugin/listmodels.py: -------------------------------------------------------------------------------- 1 | import os 2 | from NeuralGPT-0.1 import NeuralGPT 3 | # Define the directory where the pretrained models are stored 4 | models_dir = "E:/AI/NeuralGPT/NeuralGPT/models/" 5 | # List all the pretrained models in the directory 6 | pretrained_models = os.listdir(models_dir) 7 | # Display the list of pretrained models to the user 8 | print("Select a pretrained model to load:") 9 | for i, model in enumerate(pretrained_models): 10 | print(f"{i+1}. {model}") 11 | # Ask the user to choose a pretrained model 12 | model_num = int(input("Enter the model number: ")) 13 | # Load the chosen pretrained model 14 | model_path = os.path.join(models_dir, pretrained_models[model_num-1]) 15 | neural_gpt = NeuralGPT(model_path) 16 | # Open the chat window and start the conversation 17 | # ... 
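# (note: "from NeuralGPT-0.1 import NeuralGPT" above is not importable -- Python
# module names cannot contain "-" or "." -- so the package would need a valid
# name, e.g. importlib.import_module("neuralgpt"), where "neuralgpt" is an
# assumed package name)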
18 | -------------------------------------------------------------------------------- /Wordpress plugin/load_pretrained.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | from flowise import FlowiseClient 5 | from unified_model import UnifiedModel 6 | def main(model_path): 7 | # Load pretrained model 8 | model = UnifiedModel(model_path) 9 | # Connect to FlowiseAI 10 | client = FlowiseClient() 11 | # Check for active instances of Neural AI 12 | active_instances = client.get_active_instances(model_name='NeuralGPT') 13 | if active_instances: 14 | # Communicate with other active instances 15 | instance_id = active_instances[0]['instance_id'] 16 | chat_id = client.join_chat(instance_id) 17 | client.send_message(chat_id, 'Hello from another instance!') 18 | # Open chat window 19 | chat_id = client.create_chat(model_name='NeuralGPT') 20 | # Listen for messages 21 | while True: 22 | messages = client.get_messages(chat_id) 23 | for message in messages: 24 | if message['type'] == 'text': 25 | # Generate response 26 | response = model.generate_response(message['text']) 27 | # Send response 28 | client.send_message(chat_id, response) 29 | if __name__ == '__main__': 30 | # Parse command line arguments 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--model_path', type=str, required=True, 33 | help='Path to pretrained NeuralGPT model') 34 | args = parser.parse_args() 35 | # Check if model path exists 36 | if not os.path.exists(args.model_path): 37 | print(f"Error: Model path '{args.model_path}' does not exist.") 38 | exit() 39 | # Run main function 40 | main(args.model_path) -------------------------------------------------------------------------------- /Wordpress plugin/loadpretrained.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | from flowise import FlowiseClient 5 | from unified_model import UnifiedModel 6 | def main(model_path): 7 | # Load pretrained model 8 | model = UnifiedModel(model_path) 9 | # Connect to FlowiseAI 10 | client = FlowiseClient() 11 | # Check for active instances of Neural AI 12 | active_instances = client.get_active_instances(model_name='NeuralGPT') 13 | if active_instances: 14 | # Communicate with other active instancesg 15 | instance_id = active_instances[0]['instance_id'] 16 | chat_id = client.join_chat(instance_id) 17 | client.send_message(chat_id, 'Hello from another instance!') 18 | # Open chat window 19 | chat_id = client.create_chat(model_name='NeuralGPT') 20 | # Listen for messages 21 | while True: 22 | messages = client.get_messages(chat_id) 23 | for message in messages: 24 | if message['type'] == 'text': 25 | # Generate response 26 | response = model.generate_response(message['text']) 27 | # Send response 28 | client.send_message(chat_id, response) 29 | if __name__ == '__main__': 30 | # Parse command line arguments 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('--model_path', type=str, required=True, 33 | help='Path to pretrained NeuralGPT model') 34 | args = parser.parse_args() 35 | # Check if model path exists 36 | if not os.path.exists(args.model_path): 37 | print(f"Error: Model path '{args.model_path}' does not exist.") 38 | exit() 39 | # Run main function 40 | main(args.model_path -------------------------------------------------------------------------------- /Wordpress plugin/logcreation.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logging 4 | logging.basicConfig(filename='neural_ai.log', level=logging.DEBUG, 5 | format='%(asctime)s %(levelname)s %(message)s') 6 | 7 | def access_local_data_storage(): 8 | try: 9 | # Access local data storage 10 | # Code to create or modify files 11 | except Exception as e: 12 | # Log the error 13 | logging.error('Error accessing local data storage: {}'.format(str(e))) 14 | 15 | def access_universal_database(): 16 | try: 17 | # Access universal database 18 | # Code to achieve data harmonization 19 | except Exception as e: 20 | # Log the error 21 | logging.error('Error accessing universal database: {}'.format(str(e))) 22 | 23 | # Call the functions 24 | access_local_data_storage() 25 | access_universal_database() -------------------------------------------------------------------------------- /Wordpress plugin/long.odt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/long.odt -------------------------------------------------------------------------------- /Wordpress plugin/long.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/long.pdf -------------------------------------------------------------------------------- /Wordpress plugin/mlm.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import tensorflow as tf 4 | import keras 5 | from keras.models import load_model 6 | Load the pre-trained NeuralGPT model: 7 | Copy code 8 | 9 | model = load_model('E:/Repos/oobabooga_windows/text-generation-webui/models/facebook_opt-1.3b/pytorch_model.bin') 10 | Retrieve user feedback from the database schema and preprocess the data: 11 | Copy code 12 | 13 | feedback_data = pd.read_sql_query('SELECT * FROM feedback_table', con=db_connection) 14 | feedback_text = feedback_data['feedback_text'].tolist() 15 | preprocessed_feedback = preprocess(feedback_text) # preprocess function to clean and tokenize the feedback text 16 | Generate predictions using the preprocessed feedback data: 17 | Copy code 18 | 19 | predictions = model.predict(preprocessed_feedback) 20 | Display the predictions and suggestions for improvement in the dashboard interface: 21 | Copy code 22 | 23 | for i in range(len(predictions)): 24 | if predictions[i] > 0.5: 25 | suggestion = "Your feedback suggests that the model is performing well. Keep up the good work!" 26 | else: 27 | suggestion = "Your feedback suggests that the model needs improvement. Consider fine-tuning the model or collecting more training data." 
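# (note: the unnumbered "Load the pre-trained NeuralGPT model:" / "Copy code"
# fragments above are pasted chat-transcript text, not Python, and would need to
# be removed or commented out before this file runs; keras' load_model() also
# expects a Keras .h5/SavedModel artifact rather than a PyTorch pytorch_model.bin)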
28 | display_suggestion(feedback_data['user_id'][i], suggestion) # display_suggestion function to display the suggestion in the dashboard interface -------------------------------------------------------------------------------- /Wordpress plugin/module.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Wordpress plugin/neuralgpt-browse.js: -------------------------------------------------------------------------------- 1 | jQuery(document).ready(function($) { 2 | $('#neuralgpt-browse-form').on('submit', function(e) { 3 | e.preventDefault(); 4 | var searchQuery = $('#neuralgpt-browse-search').val(); 5 | $.ajax({ 6 | url: '/wp-json/neuralgpt-browse/v1/search', 7 | type: 'POST', 8 | data: { 9 | 'search_query': searchQuery 10 | }, 11 | success: function(response) { 12 | $('#neuralgpt-browse-results').empty(); 13 | $.each(response, function(index, value) { 14 | var listItem = $('
<li>'); 15 | var link = $('<a>').attr('href', value.link).text(value.title); 16 | var excerpt = $('<p>
    ').text(value.excerpt); 17 | listItem.append(link).append(excerpt); 18 | $('#neuralgpt-browse-results').append(listItem); 19 | }); 20 | } 21 | }); 22 | }); 23 | }); -------------------------------------------------------------------------------- /Wordpress plugin/neuralgpt_chatbot.spec: -------------------------------------------------------------------------------- 1 | # -*- mode: python ; coding: utf-8 -*- 2 | 3 | 4 | block_cipher = None 5 | 6 | 7 | a = Analysis( 8 | ['neuralgpt_chatbot.py'], 9 | pathex=[], 10 | binaries=[], 11 | datas=[], 12 | hiddenimports=[], 13 | hookspath=[], 14 | hooksconfig={}, 15 | runtime_hooks=[], 16 | excludes=[], 17 | win_no_prefer_redirects=False, 18 | win_private_assemblies=False, 19 | cipher=block_cipher, 20 | noarchive=False, 21 | ) 22 | pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) 23 | 24 | exe = EXE( 25 | pyz, 26 | a.scripts, 27 | a.binaries, 28 | a.zipfiles, 29 | a.datas, 30 | [], 31 | name='neuralgpt_chatbot', 32 | debug=False, 33 | bootloader_ignore_signals=False, 34 | strip=False, 35 | upx=True, 36 | upx_exclude=[], 37 | runtime_tmpdir=None, 38 | console=True, 39 | disable_windowed_traceback=False, 40 | argv_emulation=False, 41 | target_arch=None, 42 | codesign_identity=None, 43 | entitlements_file=None, 44 | ) 45 | -------------------------------------------------------------------------------- /Wordpress plugin/notif_sys.py: -------------------------------------------------------------------------------- 1 | import pika 2 | from twilio.rest import Client 3 | 4 | def send_notification(message): 5 | # Check user access level and send notifications accordingly 6 | # Use Twilio to send SMS notifications 7 | account_sid = 'your_account_sid' 8 | auth_token = 'your_auth_token' 9 | client = Client(account_sid, auth_token) 10 | message = client.messages \ 11 | .create( 12 | body=message, 13 | from_='your_twilio_number', 14 | to='user_phone_number' 15 | ) 16 | 17 | def callback(ch, method, properties, body): 18 | # Triggered whenever changes are made to the database 19 | send_notification(body) 20 | 21 | # Set up RabbitMQ connection 22 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 23 | channel = connection.channel() 24 | channel.queue_declare(queue='database_changes') 25 | 26 | # Listen for messages on the queue 27 | channel.basic_consume(queue='database_changes', on_message_callback=callback, auto_ack=True) 28 | 29 | print('Waiting for database changes...') 30 | channel.start_consuming() -------------------------------------------------------------------------------- /Wordpress plugin/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "neuralgpt-chatbot", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "chatwindow.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [], 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "socket.io": "^4.6.1" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /Wordpress plugin/perf.py: -------------------------------------------------------------------------------- 1 | from transformers import pipeline, set_seed 2 | 3 | # Load the model 4 | generator = pipeline('text-generation', model='CognitiveCodes/NeuralGPT') 5 | 6 | # Set seed for reproducibility 7 | set_seed(42) 8 | 9 | # Generate text 10 | generated_text = generator("The NeuralGPT project is performing", max_length=100, 
num_return_sequences=1) 11 | 12 | # Analyze the generated text 13 | performance_analysis = analyze_performance(generated_text) 14 | 15 | # Suggest new ideas for improvement based on analysis 16 | suggested_ideas = suggest_improvement(performance_analysis) 17 | 18 | # Print suggested ideas 19 | print("Suggested ideas for improvement: ", suggested_ideas) -------------------------------------------------------------------------------- /Wordpress plugin/python_script.spec: -------------------------------------------------------------------------------- 1 | # -*- mode: python ; coding: utf-8 -*- 2 | 3 | 4 | block_cipher = None 5 | 6 | 7 | a = Analysis( 8 | ['python_script.py'], 9 | pathex=[], 10 | binaries=[], 11 | datas=[], 12 | hiddenimports=[], 13 | hookspath=[], 14 | hooksconfig={}, 15 | runtime_hooks=[], 16 | excludes=[], 17 | win_no_prefer_redirects=False, 18 | win_private_assemblies=False, 19 | cipher=block_cipher, 20 | noarchive=False, 21 | ) 22 | pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) 23 | 24 | exe = EXE( 25 | pyz, 26 | a.scripts, 27 | [], 28 | exclude_binaries=True, 29 | name='python_script', 30 | debug=False, 31 | bootloader_ignore_signals=False, 32 | strip=False, 33 | upx=True, 34 | console=True, 35 | disable_windowed_traceback=False, 36 | argv_emulation=False, 37 | target_arch=None, 38 | codesign_identity=None, 39 | entitlements_file=None, 40 | ) 41 | coll = COLLECT( 42 | exe, 43 | a.binaries, 44 | a.zipfiles, 45 | a.datas, 46 | strip=False, 47 | upx=True, 48 | upx_exclude=[], 49 | name='python_script', 50 | ) 51 | -------------------------------------------------------------------------------- /Wordpress plugin/search.json: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const app = express(); 3 | 4 | // Define the search endpoint 5 | app.get('/api/search', (req, res) => { 6 | // Extract the search query from the query parameter 7 | const query = req.query.q; 8 | 9 | // Perform the search using ElasticSearch 10 | const results = await elasticSearchClient.search({ 11 | index: 'shared_databank', 12 | body: { 13 | query: { 14 | match: { 15 | content: query 16 | } 17 | } 18 | } 19 | }); 20 | 21 | // Return the matching results to the client 22 | res.json(results.hits.hits); 23 | }); 24 | 25 | // Start the server 26 | app.listen(3000, () => { 27 | console.log('Server listening on port 3000'); 28 | }); -------------------------------------------------------------------------------- /Wordpress plugin/send_mail.py: -------------------------------------------------------------------------------- 1 | import smtplib 2 | from email.mime.text import MIMEText 3 | 4 | def send_notification(user_email, message): 5 | # Set up SMTP server 6 | smtp_server = "smtp.gmail.com" 7 | smtp_port = 587 8 | smtp_username = "your_email@gmail.com" 9 | smtp_password = "your_password" 10 | 11 | # Set up message 12 | msg = MIMEText(message) 13 | msg['From'] = smtp_username 14 | msg['To'] = user_email 15 | msg['Subject'] = "Notification: Changes made to Universal Database" 16 | 17 | # Send message 18 | with smtplib.SMTP(smtp_server, smtp_port) as server: 19 | server.starttls() 20 | server.login(smtp_username, smtp_password) 21 | server.sendmail(smtp_username, user_email, msg.as_string()) 22 | 23 | def notify_users(users, message): 24 | for user in users: 25 | send_notification(user['email'], message) 26 | 27 | # Example usage 28 | users = [ 29 | {'email': 'user1@example.com', 'access_level': 'admin'}, 30 | {'email': 
'user2@example.com', 'access_level': 'user'}, 31 | {'email': 'user3@example.com', 'access_level': 'user'} 32 | ] 33 | 34 | message = "Changes have been made to the Universal Database." 35 | 36 | # Notify all users 37 | notify_users(users, message) 38 | 39 | # Notify only admins 40 | admins = [user for user in users if user['access_level'] == 'admin'] 41 | notify_users(admins, message) -------------------------------------------------------------------------------- /Wordpress plugin/server.js: -------------------------------------------------------------------------------- 1 | const http = require('http'); 2 | const server = http.createServer(); 3 | const io = require('socket.io')(server); 4 | 5 | io.on('connection', (socket) => { 6 | console.log('A user connected'); 7 | 8 | // Handle events from the client 9 | socket.on('chat message', (message) => { 10 | console.log('Received message:', message); 11 | // Process the message and send a response if needed 12 | }); 13 | 14 | // Handle disconnection 15 | socket.on('disconnect', () => { 16 | console.log('A user disconnected'); 17 | }); 18 | }); 19 | 20 | const port = 3001; // Specify the port number for your server 21 | server.listen(port, () => { 22 | console.log(`Socket.io server listening on port ${port}`); 23 | }); 24 | -------------------------------------------------------------------------------- /Wordpress plugin/shortcode.php: -------------------------------------------------------------------------------- 1 | // Load the GPT-2 model and set the configuration 2 | const model = await tf.loadGraphModel('path/to/model'); 3 | const config = { 4 | length: 50, 5 | temperature: 0.7, 6 | top_k: 40, 7 | top_p: 0.9, 8 | frequency_penalty: 0.5, 9 | presence_penalty: 0.5, 10 | }; 11 | 12 | // Define the intent recognition and entity extraction functions 13 | function getIntent(text) { 14 | // Use a natural language processing library (e.g. Dialogflow, Wit.ai) to recognize the intent of the user's message 15 | return intent; 16 | } 17 | 18 | function getEntities(text) { 19 | // Use a natural language processing library (e.g. Dialogflow, Wit.ai) to extract entities from the user's message 20 | return entities; 21 | } 22 | 23 | // Define the chatbot function 24 | async function chatbot(input) { 25 | // Get the intent and entities from the user's input 26 | const intent = getIntent(input); 27 | const entities = getEntities(input); 28 | 29 | // Generate a response using the GPT-2 model and the input 30 | const response = await model.generate(input, config); 31 | 32 | // Return the response 33 | return response; 34 | } -------------------------------------------------------------------------------- /Wordpress plugin/submit_input.php: -------------------------------------------------------------------------------- 1 | connect_error) { 15 | die("Connection failed: " . $conn->connect_error); 16 | } 17 | 18 | $sql = "INSERT INTO user_input (input_text, input_type, feedback_text, feedback_type, timestamp) 19 | VALUES ('$input_text', '$input_type', '$feedback_text', '$feedback_type', '$timestamp')"; 20 | 21 | if ($conn->query($sql) === TRUE) { 22 | echo "Input submitted successfully"; 23 | } else { 24 | echo "Error: " . $sql . "
    " . $conn->error; 25 | } 26 | 27 | $conn->close(); 28 | ?> -------------------------------------------------------------------------------- /Wordpress plugin/test_chatbox.py: -------------------------------------------------------------------------------- 1 | import neuralgpt 2 | import local_website 3 | 4 | # code to add a button for Neural-GPT system 5 | button_neuralgpt = tkinter.Button(window, text="Activate Neural-GPT", command=neuralgpt.activate) 6 | button_neuralgpt.pack() 7 | 8 | # code to add a dropdown menu for local website 9 | options = ["Website A", "Website B", "Website C"] 10 | variable = tkinter.StringVar(window) 11 | variable.set(options[0]) 12 | dropdown_localwebsite = tkinter.OptionMenu(window, variable, *options) 13 | dropdown_localwebsite.pack() 14 | 15 | import paho.mqtt.client as mqtt 16 | 17 | # code to connect to MQTT broker 18 | client = mqtt.Client() 19 | client.connect("localhost", 1883, 60) 20 | 21 | # code to send message to all instances of Neural-GPT 22 | def send_message(message): 23 | client.publish("neuralgpt/chat", message) 24 | 25 | # code to receive message from all instances of Neural-GPT 26 | def on_message(client, userdata, message): 27 | print(message.payload.decode()) 28 | 29 | client.subscribe("neuralgpt/chat") 30 | client.on_message = on_message -------------------------------------------------------------------------------- /Wordpress plugin/train_restapi.js: -------------------------------------------------------------------------------- 1 | // Import required modules 2 | const express = require('express'); 3 | const bodyParser = require('body-parser'); 4 | const { startTraining } = require('./train'); 5 | 6 | // Create a new Express app 7 | const app = express(); 8 | 9 | // Parse request body as JSON 10 | app.use(bodyParser.json()); 11 | 12 | // Define the endpoint for starting the training process 13 | app.post('/train', async (req, res) => { 14 | // Get the hyperparameters from the request body 15 | const { epochs, batch_size, learning_rate } = req.body; 16 | 17 | // Start the training process with the given hyperparameters 18 | const result = await startTraining(epochs, batch_size, learning_rate); 19 | 20 | // Return the result as JSON 21 | res.json(result); 22 | }); 23 | 24 | // Start the server 25 | app.listen(3000, () => { 26 | console.log('Server started on port 3000'); 27 | }); -------------------------------------------------------------------------------- /Wordpress plugin/universal.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/universal.db -------------------------------------------------------------------------------- /Wordpress plugin/upload.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | File Upload 5 | 6 | 7 |

Upload a File (remaining form markup stripped during extraction)
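(A plausible reconstruction of the stripped upload form, hedged -- the field name and action target are assumptions, not recovered from the source:)
<form method="post" enctype="multipart/form-data" action="upload.php">
  <input type="file" name="file" />
  <input type="submit" value="Upload" />
</form>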
    13 | 14 | -------------------------------------------------------------------------------- /Wordpress plugin/validate_llm_file.php: -------------------------------------------------------------------------------- 1 | $max_size) { 14 | return 'Error: File size exceeds maximum allowed. Please upload a smaller file.'; 15 | } 16 | 17 | // Validate LLM file format 18 | $file_content = file_get_contents($file['tmp_name']); 19 | $file_header = substr($file_content, 0, 4); 20 | 21 | if ($file_header !== 'LLM ') { 22 | return 'Error: Invalid file format. Please upload a valid LLM file.'; 23 | } 24 | 25 | return true; // File is valid 26 | } -------------------------------------------------------------------------------- /Wordpress plugin/wordpress1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/wordpress1.pdf -------------------------------------------------------------------------------- /Wordpress plugin/wordpress2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/Wordpress plugin/wordpress2.pdf -------------------------------------------------------------------------------- /Wordpress plugin/workschedule.py: -------------------------------------------------------------------------------- 1 | import schedule 2 | import time 3 | 4 | # Define the function that performs the necessary actions 5 | def perform_actions(): 6 | # Code to access local data storage and modify files 7 | # Code to access universal database and achieve data harmonization 8 | 9 | # Define the schedule for the actions to be performed 10 | schedule.every(24).hours.do(perform_actions) # Run every 24 hours 11 | schedule.every().day.at("12:00").do(perform_actions) # Run every day at 12:00 12 | schedule.every().hour.do(perform_actions) # Run every hour 13 | schedule.every(10).minutes.do(perform_actions) # Run every 10 minutes 14 | 15 | # Run the scheduling system 16 | while True: 17 | schedule.run_pending() 18 | time.sleep(1) -------------------------------------------------------------------------------- /__pycache__/DualCoreLLM.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/DualCoreLLM.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/dataset.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/dataset.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/gui.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/gui.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/load_model.cpython-311.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/load_model.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/model.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/model.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/neuralgpt.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/neuralgpt.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/pinecone.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/pinecone.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/requests.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/requests.cpython-311.pyc -------------------------------------------------------------------------------- /__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /agent-document (1).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document (1).pdf -------------------------------------------------------------------------------- /agent-document (2).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document (2).pdf -------------------------------------------------------------------------------- /agent-document (21).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document (21).pdf -------------------------------------------------------------------------------- /agent-document (3).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document (3).pdf -------------------------------------------------------------------------------- /agent-document (4).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document (4).pdf -------------------------------------------------------------------------------- /agent-document.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent-document.pdf -------------------------------------------------------------------------------- /agent_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | # Create directory if it does not exist 5 | if not os.path.exists("agent_scripts"): 6 | os.mkdir("agent_scripts") 7 | 8 | # Get the content of the script from the URL 9 | url = "https://app.cognosys.ai/agents/4641560f-1ba9-4df6-ad62-1842ef8a892d" 10 | response = requests.get(url) 11 | script_content = response.content 12 | 13 | # Create the file and write the content to it 14 | file_path = os.path.join("agent_scripts", "agent_4641560f-1ba9-4df6-ad62-1842ef8a892d.py") 15 | with open(file_path, "wb") as f: 16 | f.write(script_content) -------------------------------------------------------------------------------- /agent_scripts/__pycache__/database.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/agent_scripts/__pycache__/database.cpython-311.pyc -------------------------------------------------------------------------------- /agent_scripts/database.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | class DatabaseModule: 3 | def __init__(self, db_name): 4 | self.conn = sqlite3.connect(db_name) 5 | self.cursor = self.conn.cursor() 6 | 7 | def store_data(self, data, table_name): 8 | self.cursor.execute("""CREATE TABLE IF NOT EXISTS """ + table_name + "(id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)") 9 | self.conn.commit() 10 | self.cursor.execute("""INSERT INTO """ + table_name + "(data)" , (data,)) 11 | self.conn.commit() 12 | 13 | def retrieve_data(self, query, table_name): 14 | self.cursor.execute("""SELECT data FROM """ + table_name + "(data)" , ("%" + query + "%",)) 15 | data = self.cursor.fetchall() 16 | return data 17 | -------------------------------------------------------------------------------- /agent_scripts/main.py: -------------------------------------------------------------------------------- 1 | from database import DatabaseModule 2 | from sqlite_example import SqliteExample 3 | 4 | from script_executor import ScriptExecutor 5 | from script_executor_example import ScriptExecutorExample 6 | 7 | # Create a new instance of the DatabaseModule class 8 | database = DatabaseModule("mydatabase.db") 9 | 10 | # Create a new instance of the ScriptExecutor class 11 | script_executor = ScriptExecutor("myscript.py") 12 | 13 | # Use the database object to store some data 14 | data = "Hello, world!" 15 | table_name = "mytable" 16 | database.store_data(data, table_name) 17 | 18 | # Retrieve the stored data 19 | query = "Hello, world!" 20 | retrieved_data = database.retrieve_data(query, table_name) 21 | 22 | # Use the script_executor object to execute a script 23 | input_data = b"Hello, world!" 
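# (note: DatabaseModule in database.py above builds malformed SQL -- the INSERT
# has no VALUES clause and the SELECT has a stray "(data)" where a WHERE clause
# belongs -- so this store/retrieve sequence fails at runtime. A corrected sketch
# keeping the same one-column schema:
#   self.cursor.execute(f"INSERT INTO {table_name} (data) VALUES (?)", (data,))
#   self.cursor.execute(f"SELECT data FROM {table_name} WHERE data LIKE ?",
#                       ("%" + query + "%",))
# Placeholders cannot parameterize identifiers, so table_name should still be
# validated against an allowlist.)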
24 | output = script_executor.get_script_output(input_data) 25 | 26 | # Print the retrieved data and script output 27 | print(retrieved_data) 28 | print(output) 29 | -------------------------------------------------------------------------------- /agent_scripts/script_executor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | class ScriptExecutor: 3 | def __init__(self, script_path): 4 | self.script_path = script_path 5 | 6 | def execute_script(self, input_data): 7 | try: 8 | # Run the script in a sandboxed environment 9 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 10 | return output.decode('utf-8') 11 | except subprocess.TimeoutExpired: 12 | return "Script execution timed out" 13 | except subprocess.CalledProcessError as e: 14 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 15 | except Exception as e: 16 | return f"Script execution failed with exception: {str(e)}" 17 | 18 | def get_script_output(self, input_data): 19 | return self.execute_script(input_data) 20 | -------------------------------------------------------------------------------- /auto-script1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/auto-script1.pdf -------------------------------------------------------------------------------- /auto-script2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/auto-script2.pdf -------------------------------------------------------------------------------- /auto/markdown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set the path to the JSON file 4 | json_file="path/to/json/file.json" 5 | 6 | # Set the path to the output Markdown file 7 | markdown_file="E:/AI/NeuralGPT/NeuralGPT/output.md" 8 | 9 | # Parse the JSON file and extract the data 10 | data=$(jq -r '.data' $json_file) 11 | 12 | # Convert the data to Markdown format 13 | markdown=$(echo $data | pandoc -f html -t markdown) 14 | 15 | # Write the Markdown to the output file 16 | echo $markdown > $markdown_file -------------------------------------------------------------------------------- /auto/saveashtml.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Path to the local clone of NeuralGPT repository 4 | neuralgpt_path = "E:/AI/NeuralGPT/NeuralGPT" 5 | 6 | # Content produced by Cognosys 7 | content = "This is some content produced by Cognosys." 8 | 9 | # Create the HTML file 10 | filename = "content.html" 11 | filepath = os.path.join(neuralgpt_path, filename) 12 | with open(filepath, "w") as f: 13 | f.write("\n") 14 | f.write("\n") 15 | f.write("Content from Cognosys\n") 16 | f.write("\n") 17 | f.write("\n") 18 | f.write(f"
<p>{content}</p>
    \n") 19 | f.write("\n") 20 | f.write("\n") 21 | 22 | print(f"File saved to {filepath}") -------------------------------------------------------------------------------- /auto/saveastxt.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # Set the path to the directory where the text file will be saved 4 | directory_path = r"E:\AI\NeuralGPT\NeuralGPT" 5 | 6 | # Set the path to the file containing the content produced by Cognosys 7 | content_file_path = r"path\to\content\file" 8 | 9 | # Read the content from the file 10 | with open(content_file_path, "r") as file: 11 | content = file.read() 12 | 13 | # Set the name of the text file 14 | file_name = "cognosys_content.txt" 15 | 16 | # Set the path to the text file 17 | file_path = os.path.join(directory_path, file_name) 18 | 19 | # Write the content to the text file 20 | with open(file_path, "w") as file: 21 | file.write(content) -------------------------------------------------------------------------------- /auto/task1.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import os 3 | 4 | pdf_path = "C:/path/to/pdf/file.pdf" 5 | destination_folder = "E:/AI/NeuralGPT/NeuralGPT" 6 | 7 | shutil.copy(pdf_path, destination_folder) -------------------------------------------------------------------------------- /auto/task2.java: -------------------------------------------------------------------------------- 1 | import org.apache.commons.io.FilenameUtils; 2 | import org.commonmark.node.Node; 3 | import org.commonmark.parser.Parser; 4 | import org.commonmark.renderer.html.HtmlRenderer; 5 | 6 | import java.io.File; 7 | import java.io.IOException; 8 | import java.nio.file.Files; 9 | import java.nio.file.Path; 10 | import java.nio.file.Paths; 11 | 12 | public class TextToMarkdownConverter { 13 | 14 | public static void main(String[] args) throws IOException { 15 | String textFilePath = "C:/path/to/text/file.txt"; 16 | String destinationFolder = "E:/AI/NeuralGPT/NeuralGPT"; 17 | 18 | File file = new File(textFilePath); 19 | String fileExtension = FilenameUtils.getExtension(file.getName()); 20 | 21 | String markdownFileName = FilenameUtils.removeExtension(file.getName()) + ".md"; 22 | Path markdownFilePath = Paths.get(destinationFolder, markdownFileName); 23 | 24 | String text = Files.readString(file.toPath()); 25 | String markdown = convertToMarkdown(text, fileExtension); 26 | 27 | Files.writeString(markdownFilePath, markdown); 28 | } 29 | 30 | private static String convertToMarkdown(String text, String fileExtension) { 31 | Parser parser = null; 32 | if (fileExtension.equals("txt")) { 33 | parser = Parser.builder().build(); 34 | } else if (fileExtension.equals("docx")) { 35 | parser = new DocxToMarkdownParser(); 36 | } else if (fileExtension.equals("pdf")) { 37 | parser = new PdfToMarkdownParser(); 38 | } 39 | 40 | Node document = parser.parse(text); 41 | HtmlRenderer renderer = HtmlRenderer.builder().build(); 42 | return renderer.render(document); 43 | } 44 | } -------------------------------------------------------------------------------- /auto/task3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set the input CSV file path 4 | input_file="example.csv" 5 | 6 | # Set the output Markdown file path 7 | output_file="E:/AI/NeuralGPT/NeuralGPT/table.md" 8 | 9 | # Read the CSV file and generate a Markdown table 10 | while read line 11 | do 12 | # Replace commas with pipes for 
Markdown table formatting 13 | row=$(echo $line | sed 's/,/ | /g') 14 | 15 | # Add Markdown table formatting to the row 16 | if [ -z "$header" ] 17 | then 18 | # The first row is the header 19 | header="$row" 20 | separator=$(echo "$header" | sed 's/[^|]/-/g') 21 | table="$header\n$separator" 22 | else 23 | # All other rows are data 24 | table="$table\n$row" 25 | fi 26 | done < "$input_file" 27 | 28 | # Save the Markdown table to the output file 29 | echo -e "$table" > "$output_file" -------------------------------------------------------------------------------- /callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17 | # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | # Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | chat() -------------------------------------------------------------------------------- /chatboxx.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from neuralgpt import NeuralGPT 3 | 4 | class ChatBox: 5 | def __init__(self): 6 | self.debug_mode = False 7 | self.neuralgpt = NeuralGPT() 8 | 9 | self.root = tk.Tk() 10 | self.root.title("ChatBox") 11 | self.root.geometry("400x400") 12 | 13 | self.input_label = tk.Label(self.root, text="User:") 14 | self.input_label.pack() 15 | 16 | self.input_field = tk.Entry(self.root) 17 | self.input_field.pack() 18 | 19 | self.output_label = tk.Label(self.root, text="ChatBot:") 20 | self.output_label.pack() 21 | 22 | self.output_field = tk.Text(self.root) 23 | self.output_field.pack() 24 | 25 | self.debug_button = tk.Button(self.root, text="Debug Mode", command=self.toggle_debug_mode) 26 | self.debug_button.pack() 27 | 28 | self.send_button = tk.Button(self.root, text="Send", command=self.send_message) 29 | self.send_button.pack() 30 | 31 | def toggle_debug_mode(self): 32 | 
self.debug_mode = not self.debug_mode 33 | 34 | def send_message(self): 35 | user_input = self.input_field.get() 36 | self.input_field.delete(0, tk.END) 37 | 38 | try: 39 | response = self.neuralgpt.generate_response(user_input) 40 | self.output_field.insert(tk.END, f"{user_input}\n") 41 | self.output_field.insert(tk.END, f"{response}\n") 42 | except Exception as e: 43 | if self.debug_mode: 44 | print(e) 45 | else: 46 | raise e 47 | 48 | def run(self): 49 | self.root.mainloop() 50 | 51 | if __name__ == "__main__": 52 | chatbox = ChatBox() 53 | chatbox.run() -------------------------------------------------------------------------------- /class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /classc.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | class CommunicationClass: 4 | def __init__(self, protocol, message_format, encryption, authentication): 5 | self.protocol = protocol 6 | self.message_format = message_format 7 | self.encryption = encryption 8 | self.authentication = authentication 9 | 10 | def send_message(self, message): 11 | # Send the message using the specified protocol and message format 12 | pass 13 | 14 | def receive_message(self): 15 | # Receive a message using the specified protocol and message format 16 | pass 17 | 18 | def encrypt_message(self, message): 19 | # Encrypt the message using the specified encryption mechanism 20 | pass 21 | 22 | def decrypt_message(self, message): 23 | # Decrypt the message using the specified encryption mechanism 24 | pass 25 | 26 | def authenticate_user(self, user): 27 | # Authenticate the user using the specified authentication mechanism 28 | pass 29 | 30 | # Load the configuration file 31 | with open('config.json', 'r') as f: 32 | config = json.load(f) 33 | 34 | # Create the communication class based on the configuration parameters 35 | communication_class = CommunicationClass(config['protocol'], config['message_format'], config['encryption'], config['authentication']) 36 | 37 | # Integrate the communication class with NeuralGPT and flowiseAI app 38 | neural_gpt.set_communication_class(communication_class) 39 | flowise_ai.set_communication_class(communication_class) 40 | 41 | # Test the communication 42 | 
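# (neural_gpt and flowise_ai are assumed to be instantiated elsewhere -- neither
# object is constructed in this file, so the calls below are illustrative only)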
neural_gpt.send_message('Hello, world!') 43 | message = flowise_ai.receive_message() 44 | print(message) -------------------------------------------------------------------------------- /code/DatabaseModule.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | class DatabaseModule: 3 | def __init__(self, db_name): 4 | self.conn = sqlite3.connect(db_name) 5 | self.cursor = self.conn.cursor() 6 | 7 | def store_data(self, data, table_name): 8 | self.cursor.execute("CREATE TABLE IF NOT EXISTS " + table_name + " (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)") 9 | self.conn.commit() 10 | self.cursor.execute("INSERT INTO " + table_name + " (data) VALUES (?)", (data,)) 11 | self.conn.commit() 12 | 13 | def retrieve_data(self, query, table_name): 14 | self.cursor.execute("SELECT data FROM " + table_name + " WHERE data LIKE ?", ("%" + query + "%",)) 15 | data = self.cursor.fetchall() 16 | return data 17 | -------------------------------------------------------------------------------- /code/ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | class ScriptExecutor: 3 | def __init__(self, script_path): 4 | self.script_path = script_path 5 | 6 | def execute_script(self, input_data): 7 | try: 8 | # Run the script as a subprocess with a timeout (note: this is not a true sandbox) 9 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 10 | return output.decode('utf-8') 11 | except subprocess.TimeoutExpired: 12 | return "Script execution timed out" 13 | except subprocess.CalledProcessError as e: 14 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 15 | except Exception as e: 16 | return f"Script execution failed with exception: {str(e)}" 17 | 18 | def get_script_output(self, input_data): 19 | return self.execute_script(input_data) 20 | -------------------------------------------------------------------------------- /code/__pycache__/models.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/code/__pycache__/models.cpython-311.pyc -------------------------------------------------------------------------------- /code/__pycache__/utils.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/code/__pycache__/utils.cpython-311.pyc -------------------------------------------------------------------------------- /code/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import pandas as pd 4 | from models import GPTModel 5 | from utils import load_data, preprocess_data 6 | 7 | # Define global variables 8 | BATCH_SIZE = 32 9 | EPOCHS = 10 10 | LEARNING_RATE = 0.001 11 | 12 | # Load and preprocess the data 13 | data = load_data('data/dataset1/data_file1.csv') 14 | preprocessed_data = preprocess_data(data) 15 | 16 | # Define the model architecture 17 | model = GPTModel(preprocessed_data.vocab_size, preprocessed_data.max_len) 18 | 19 | # Compile the model 20 | optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE) 21 | model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) 22 | 23 | # Train the model 24 | 
history = model.fit(preprocessed_data.x_train, preprocessed_data.y_train, 25 | batch_size=BATCH_SIZE, epochs=EPOCHS, 26 | validation_data=(preprocessed_data.x_val, preprocessed_data.y_val)) 27 | 28 | # Evaluate the model 29 | test_loss, test_acc = model.evaluate(preprocessed_data.x_test, preprocessed_data.y_test) 30 | print(f'Test loss: {test_loss}, Test accuracy: {test_acc}') 31 | 32 | # Save the model 33 | model.save('models/gpt_model.h5') -------------------------------------------------------------------------------- /code/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import tensorflow as tf 4 | from tensorflow.keras.preprocessing.text import Tokenizer 5 | from tensorflow.keras.preprocessing.sequence import pad_sequences 6 | 7 | class PreprocessedData: 8 | def __init__(self, x_train, y_train, x_val, y_val, x_test, y_test, vocab_size, max_len): 9 | self.x_train = x_train 10 | self.y_train = y_train 11 | self.x_val = x_val 12 | self.y_val = y_val 13 | self.x_test = x_test 14 | self.y_test = y_test 15 | self.vocab_size = vocab_size 16 | self.max_len = max_len 17 | 18 | def load_data(file_path): 19 | data = pd.read_csv(file_path) 20 | return data 21 | 22 | def preprocess_data(data): 23 | # Tokenize the text data 24 | tokenizer = Tokenizer() 25 | tokenizer.fit_on_texts(data['text']) 26 | sequences = tokenizer.texts_to_sequences(data['text']) 27 | 28 | # Pad the sequences to a fixed length 29 | max_len = max([len(seq) for seq in sequences]) 30 | padded_sequences = pad_sequences(sequences, maxlen=max_len, padding='post') 31 | 32 | # Split the data into train, validation, and test sets 33 | x_train, y_train = padded_sequences[:8000], padded_sequences[:8000] 34 | x_val, y_val = padded_sequences[8000:9000], padded_sequences[8000:9000] 35 | x_test, y_test = padded_sequences[9000:], padded_sequences[9000:] 36 | 37 | # Get the vocabulary size 38 | vocab_size = len(tokenizer.word_index) + 1 39 | 40 | # Return the preprocessed data 41 | return PreprocessedData(x_train, y_train, x_val, y_val, x_test, y_test, vocab_size, max_len) -------------------------------------------------------------------------------- /com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ... 
11 | 12 | # Establish a TLS connection 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)  # a client verifies the server, so SERVER_AUTH (not CLIENT_AUTH) 14 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key)  # certfile is the certificate, keyfile the private key 15 | context.load_verify_locations(cafile=flowiseai_public_key) 16 | with socket.create_connection(('flowiseai.com', 443)) as sock: 17 | with context.wrap_socket(sock, server_hostname='flowiseai.com') as ssock:  # server_hostname is required for certificate checking 18 | ssock.sendall(b'Hello, world!') 19 | data = ssock.recv(1024) -------------------------------------------------------------------------------- /completepdf.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/completepdf.pdf -------------------------------------------------------------------------------- /data/Database: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/data/Database -------------------------------------------------------------------------------- /dataset.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data import Dataset 3 | 4 | class TextDataset(Dataset): 5 | def __init__(self, data_path, seq_len): 6 | self.seq_len = seq_len 7 | self.vocab = self.build_vocab(data_path) 8 | self.data = self.load_data(data_path) 9 | 10 | def __len__(self): 11 | return len(self.data) 12 | 13 | def __getitem__(self, idx): 14 | # Get the sequence of tokens at the specified index 15 | seq = self.data[idx] 16 | 17 | # Convert the sequence to a tensor of token IDs 18 | tokens = [self.vocab[token] for token in seq] 19 | tokens = torch.tensor(tokens) 20 | 21 | # Split the sequence into input and target sequences 22 | input_seq = tokens[:-1] 23 | target_seq = tokens[1:] 24 | 25 | return input_seq, target_seq 26 | 27 | def build_vocab(self, data_path): 28 | # Build a vocabulary of unique tokens in the data 29 | vocab = {} 30 | with open(data_path, 'r') as f: 31 | for line in f: 32 | for token in line.strip().split(): 33 | if token not in vocab: 34 | vocab[token] = len(vocab) 35 | return vocab 36 | 37 | def load_data(self, data_path): 38 | # Load the data from the specified file and split it into sequences 39 | data = [] 40 | with open(data_path, 'r') as f: 41 | for line in f: 42 | tokens = line.strip().split() 43 | for i in range(0, len(tokens), self.seq_len): 44 | seq = tokens[i:i+self.seq_len] 45 | if len(seq) == self.seq_len: 46 | data.append(seq) 47 | return data -------------------------------------------------------------------------------- /exe.py: -------------------------------------------------------------------------------- 1 | import gensim 2 | import tkinter as tk 3 | from utils import preprocess_text 4 | 5 | class GUIManager: 6 | def __init__(self): 7 | # Load pre-trained Word2Vec model 8 | self.word2vec_model = gensim.models.KeyedVectors.load_word2vec_format('path/to/word2vec/model.bin', binary=True) 9 | 10 | # Create GUI 11 | self.root = tk.Tk() 12 | self.root.title("Word Embedding Demo") 13 | 14 | # Create text box 15 | self.text_box = tk.Text(self.root, height=20, width=80) 16 | self.text_box.pack() 17 | 18 | # Create button 19 | self.button = tk.Button(self.root, text="Execute", command=self.execute) 20 | self.button.pack() 21 | 22 | def execute(self): 23 | # Get input text from text box 24 | input_text = self.text_box.get("1.0", "end-1c") 25 | 26 | # 
Preprocess input text 27 | preprocessed_text = preprocess_text(input_text) 28 | 29 | # Process input text using pre-trained model 30 | word_embeddings = [] 31 | for word in preprocessed_text.split(): 32 | try: 33 | word_embedding = self.word2vec_model[word] 34 | word_embeddings.append(word_embedding) 35 | except KeyError: 36 | # Ignore words that are not in the vocabulary 37 | pass 38 | 39 | # Display output in text box 40 | output_text = f"Word embeddings: {word_embeddings}" 41 | self.text_box.delete("1.0", "end") 42 | self.text_box.insert("1.0", output_text) 43 | 44 | if __name__ == '__main__': 45 | gui_manager = GUIManager() 46 | gui_manager.root.mainloop() -------------------------------------------------------------------------------- /extract_text.py: -------------------------------------------------------------------------------- 1 | import os 2 | import PyPDF2 3 | 4 | def extract_text_from_pdf(pdf_path): 5 | with open(pdf_path, 'rb') as f: 6 | pdf_reader = PyPDF2.PdfFileReader(f) 7 | text = '' 8 | for page_num in range(pdf_reader.numPages): 9 | text += pdf_reader.getPage(page_num).extractText() 10 | return text 11 | 12 | def main(): 13 | directory = r'E:\AI\NeuralGPT\NeuralGPT'  # raw string so the backslashes are taken literally 14 | for filename in os.listdir(directory): 15 | if filename.endswith('.pdf'): 16 | pdf_path = os.path.join(directory, filename) 17 | text = extract_text_from_pdf(pdf_path) 18 | txt_path = os.path.splitext(pdf_path)[0] + '.txt' 19 | with open(txt_path, 'w') as f: 20 | f.write(text) 21 | 22 | if __name__ == '__main__': 23 | main() -------------------------------------------------------------------------------- /fine_tuner.py: -------------------------------------------------------------------------------- 1 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt')  # FineTuneGPT must be imported from its module first 2 | fine_tuner.fine_tune_model() -------------------------------------------------------------------------------- /generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /geninit.py: -------------------------------------------------------------------------------- 1 | # Import necessary modules 2 | import sys 3 | import os 4 | from PyQt5.QtWidgets import QApplication, QMainWindow 5 | 6 | # Import project modules (a Python module name cannot contain '-' or ','; the package is assumed importable as neuralgpt_0_1) 7 | from neuralgpt_0_1 import DualCoreLLM 8 | from neuralgpt_0_1 import module2 9 | from neuralgpt_0_1 import module3 10 | from neuralgpt_0_1 import module4 11 | from neuralgpt_0_1 import module5 12 | 13 | # Define function to execute all modules 14 | def execute_modules(): 15 | DualCoreLLM.execute() 16 | module2.execute() 17 | module3.execute() 18 | module4.execute() 19 | module5.execute() 20 | 21 | # Define main function to start GUI and execute modules 22 | 
def main(): 23 | # Start GUI 24 | app = QApplication(sys.argv) 25 | window = QMainWindow() 26 | window.show() 27 | 28 | # Execute modules before entering the GUI event loop (statements after app.exec_() would never run) 29 | execute_modules() 30 | sys.exit(app.exec_()) 31 | 32 | if __name__ == '__main__': 33 | main() -------------------------------------------------------------------------------- /gu.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | import subprocess 3 | 4 | class Application(tk.Frame): 5 | def __init__(self, master=None): 6 | super().__init__(master) 7 | self.master = master 8 | self.pack() 9 | self.create_widgets() 10 | 11 | def create_widgets(self): 12 | self.input_label = tk.Label(self, text="Enter input text:") 13 | self.input_label.pack(side="top") 14 | 15 | self.input_text = tk.Text(self, height=10, width=50) 16 | self.input_text.pack(side="top") 17 | 18 | self.run_button = tk.Button(self) 19 | self.run_button["text"] = "Run NeuralGPT" 20 | self.run_button["command"] = self.run_neuralgpt 21 | self.run_button.pack(side="top") 22 | 23 | self.output_label = tk.Label(self, text="Output:") 24 | self.output_label.pack(side="top") 25 | 26 | self.output_text = tk.Text(self, height=10, width=50) 27 | self.output_text.pack(side="top") 28 | 29 | self.quit_button = tk.Button(self, text="Quit", fg="red", 30 | command=self.master.destroy) 31 | self.quit_button.pack(side="bottom") 32 | 33 | def run_neuralgpt(self): 34 | input_text = self.input_text.get("1.0", "end-1c") 35 | output = subprocess.check_output(["python", "E:/AI/NeuralGPT/NeuralGPT/src/main.py", input_text]) 36 | self.output_text.delete("1.0", "end") 37 | self.output_text.insert("1.0", output.decode()) 38 | 39 | root = tk.Tk() 40 | app = Application(master=root) 41 | app.mainloop() -------------------------------------------------------------------------------- /ind.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | pinecone.describe_index("neuralai")  # NOTE: requires pinecone.init(api_key=..., environment=...) first -------------------------------------------------------------------------------- /integration1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/integration1.pdf -------------------------------------------------------------------------------- /integration2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/integration2.pdf -------------------------------------------------------------------------------- /integration3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/integration3.pdf -------------------------------------------------------------------------------- /load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, 
map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /long.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | async def long_running_task(): 4 | progress = 0 5 | while progress < 100: 6 | # do some work 7 | await asyncio.sleep(1) 8 | progress += 10 9 | yield f"Task progress: {progress}%" 10 | yield "Task completed" 11 | 12 | class Communication: 13 | async def execute_task(self): 14 | try: 15 | # consume the task's progress updates as they are yielded (no timeout is actually applied here) 16 | result = "" 17 | async for progress_update in long_running_task(): 18 | result += progress_update + "\n" 19 | # handle successful completion of the task 20 | return result 21 | except asyncio.TimeoutError: 22 | # handle timeout 23 | return "Task timed out" -------------------------------------------------------------------------------- /ma.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pinecone  # pinecone.init(api_key=..., environment=...) is required before any index call 3 | from neuralgpt_0_1.utils import *  # a Python module name cannot contain '-' or '.'; the importable package name is an assumption 4 | from neuralgpt_0_1.gui import run_gui 5 | 6 | # Upload pretrained model 7 | model_path = "E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin" 8 | model_name = "ggml-model-q4_0" 9 | pinecone.create_index(model_name, dimension=768)  # NOTE: Pinecone index names may only contain lowercase letters, digits and hyphens 10 | # NOTE: the pinecone client has no index_embeddings() helper; vectors must be computed and upserted explicitly, e.g. via pinecone.Index(model_name).upsert(...) 11 | 12 | # Load data 13 | data_file1 = "database1.csv" 14 | data_file2 = "database2.csv" 15 | data1 = load_data(data_file1) 16 | data2 = load_data(data_file2) 17 | 18 | # Run GUI 19 | run_gui(data1, data2) -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from model import GPT 2 | import torch 3 | 4 | # Set the device to use 5 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 6 | 7 | # Load the GPT model 8 | model_path = 'E:/AI/NeuralGPT/NeuralGPT/models/ggml-model-q4_0.bin' 9 | model = GPT(model_path) 10 | model.to(device) 11 | 12 | # Set the model to evaluation mode 13 | model.eval() 14 | 15 | # Get user input 16 | prompt = input('Enter a prompt: ') 17 | 18 | # Generate text based on the user input 19 | generated_text = '' 20 | while not generated_text: 21 | # Tokenize the prompt and generate the input sequence 22 | input_ids = model.tokenizer.encode(prompt, return_tensors='pt').to(device) 23 | 24 | # Generate the output sequence 25 | max_length = len(input_ids.flatten()) + 50 26 | output = model.model.generate(input_ids=input_ids, max_length=max_length, do_sample=True) 27 | 28 | # Decode the output sequence and remove the prompt 29 | generated_text = model.tokenizer.decode(output[0], skip_special_tokens=True) 30 | generated_text = generated_text[len(prompt):].strip() 31 | 32 | # Print the generated text 33 | print(generated_text) -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from transformers import GPT2LMHeadModel, GPT2Tokenizer 4 | 5 | class GPT(nn.Module): 6 | def __init__(self, model_path): 7 | super(GPT, self).__init__() 8 | self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2') 9 | self.model = 
GPT2LMHeadModel.from_pretrained(model_path) 10 | 11 | def forward(self, input_ids, attention_mask): 12 | outputs = self.model(input_ids, attention_mask=attention_mask) 13 | logits = outputs.logits 14 | return logits -------------------------------------------------------------------------------- /model_loader.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from neuralgpt import NeuralGPT 3 | from model_loader import ModelLoader 4 | 5 | class ChatBox: 6 | def __init__(self): 7 | self.model = None 8 | self.loader = ModelLoader() 9 | 10 | self.root = tk.Tk() 11 | self.root.title('Chatbox') 12 | 13 | self.input_label = tk.Label(self.root, text='Input:') 14 | self.input_label.pack() 15 | 16 | self.input_field = tk.Entry(self.root) 17 | self.input_field.pack() 18 | 19 | self.output_label = tk.Label(self.root, text='Output:') 20 | self.output_label.pack() 21 | 22 | self.output_field = tk.Text(self.root, height=10, width=50) 23 | self.output_field.pack() 24 | 25 | self.submit_button = tk.Button(self.root, text='Submit', command=self.submit) 26 | self.submit_button.pack() 27 | 28 | def submit(self): 29 | if not self.model: 30 | # Load the model if it hasn't been loaded yet 31 | self.model = self.loader.load_local('my_model.bin') 32 | 33 | # Get the user input 34 | user_input = self.input_field.get() 35 | 36 | # Generate a response using the model 37 | response = self.model.generate(user_input) 38 | 39 | # Display the response in the output field 40 | self.output_field.insert(tk.END, response + '\n') 41 | 42 | def run(self): 43 | self.root.mainloop() 44 | 45 | if __name__ == '__main__': 46 | chatbox = ChatBox() 47 | chatbox.run() -------------------------------------------------------------------------------- /neural-big.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/neural-big.pdf -------------------------------------------------------------------------------- /neuralgod.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | API_URL = "http://localhost:3000/api/v1/prediction/f20a3a35-7d11-445d-a484-1d993a319ebf" 4 | 5 | def query(payload): 6 | response = requests.post(API_URL, json=payload) 7 | return response.json() 8 | 9 | output = query({ 10 | "question": "Hey, how are you?", 11 | }) -------------------------------------------------------------------------------- /neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 
| repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /nlp.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /nlp/DocumentEditor.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from tkinter import filedialog 3 | 4 | class DocumentEditor: 5 | def __init__(self): 6 | self.root = tk.Tk() 7 | self.root.withdraw() 8 | 9 | def open_file(self): 10 | file_path = filedialog.askopenfilename() 11 | if file_path: 12 | with open(file_path, 'r') as f: 13 | return f.read() 14 | 15 | def save_file(self, data): 16 | file_path = filedialog.asksaveasfilename() 17 | if file_path: 18 | with open(file_path, 'w') as f: 19 | f.write(data) -------------------------------------------------------------------------------- /nlp/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | class DualCoreLLM: 2 | def __init__(self, logical_LLM, direct_LLM): 3 | self.logical_LLM = logical_LLM 4 | self.direct_LLM = direct_LLM 5 | 6 | def think(self, input_data): 7 | return self.logical_LLM.process(input_data) 8 | 9 | def execute(self, input_data): 10 | return self.direct_LLM.process(input_data) -------------------------------------------------------------------------------- /nlp/FileTransfer.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | 
with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write) -------------------------------------------------------------------------------- /nlp/MachineLearning.py: -------------------------------------------------------------------------------- 1 | from sklearn import datasets 2 | from sklearn.model_selection import train_test_split 3 | from sklearn.linear_model import LinearRegression 4 | 5 | class MachineLearning: 6 | def __init__(self): 7 | pass 8 | 9 | def train_model(self, X, y): 10 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 11 | model = LinearRegression() 12 | model.fit(X_train, y_train) 13 | return model 14 | 15 | def predict(self, model, X): 16 | return model.predict(X) -------------------------------------------------------------------------------- /nlp/MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() -------------------------------------------------------------------------------- /nlp/Memory.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class Memory: 4 | def __init__(self, db_file): 5 | self.conn = sqlite3.connect(db_file) 6 | self.cursor = self.conn.cursor() 7 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 8 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 9 | data TEXT)''') 10 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 11 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 12 | data TEXT)''') 13 | self.conn.commit() 14 | 15 | def add_to_short_term_memory(self, data): 16 | self.cursor.execute("INSERT INTO short_term_memory (data) VALUES (?)", (data,)) 17 | self.conn.commit() 18 | 19 | def add_to_long_term_memory(self, data): 20 | self.cursor.execute("INSERT INTO long_term_memory (data) VALUES (?)", (data,)) 21 | self.conn.commit() 22 | 23 | def retrieve_from_short_term_memory(self): 24 | self.cursor.execute("SELECT * FROM short_term_memory") 25 | return self.cursor.fetchall() 26 | 27 | def retrieve_from_long_term_memory(self): 28 | self.cursor.execute("SELECT * FROM long_term_memory") 29 | return self.cursor.fetchall() 30 | 31 | def clear_short_term_memory(self): 32 | self.cursor.execute("DELETE FROM short_term_memory") 33 | self.conn.commit() 34 | 35 | def clear_long_term_memory(self): 36 | self.cursor.execute("DELETE FROM long_term_memory") 37 | self.conn.commit() -------------------------------------------------------------------------------- /nlp/NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text 
generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /nlp/Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(script_path) -------------------------------------------------------------------------------- /nlp/tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class Tools: 4 | def __init__(self): 5 | pass 6 | 7 | def create_directory(self, directory_path): 8 | os.makedirs(directory_path, exist_ok=True) 9 | 10 | def modify_file(self, file_path, modification_function): 11 | with open(file_path, 'r') as f: 12 | data = f.read() 13 | modified_data = modification_function(data) 14 | with open(file_path, 'w') as f: 15 | f.write(modified_data) -------------------------------------------------------------------------------- /pine.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | 3 | # Initialize the Pinecone client library (the v2 client also needs the project environment; the value below is inferred from the service host used originally) 4 | pinecone.init(api_key="41ddf57b-2fc0-495c-9fff-47c6f6ff4e4e", environment="asia-northeast1-gcp") 5 | 6 | # Create a new index (the short index name is used; the full service host is not a valid index name) 7 | pinecone.create_index("neuralai", dimension=3) 8 | index = pinecone.Index("neuralai") 9 | 10 | # Index some vectors (each vector needs an id) 11 | vectors = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] 12 | index.upsert(vectors=[(str(i), v) for i, v in enumerate(vectors)]) 13 | 14 | # Search for similar vectors 15 | query_vector = [2, 3, 4] 16 | results = index.query(vector=query_vector, top_k=10) 17 | 18 | print(results) -------------------------------------------------------------------------------- /pinecon.py: -------------------------------------------------------------------------------- 1 | import pinecone 2 | 3 | pinecone.init(api_key="b372ae78-2b81-49bb-9f4d-d3c3e833921d")  # NOTE: the v2 client also requires an environment=... argument 4 | 5 | # Create a new index (index names must be lowercase letters, digits and hyphens) 6 | pinecone.create_index("my-index", dimension=3) 7 | index = pinecone.Index("my-index") 8 | 9 | # Index some vectors (each vector needs an id) 10 | vectors = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] 11 | index.upsert(vectors=[(str(i), v) for i, v in enumerate(vectors)]) 12 | 13 | # Search for similar vectors 14 | query_vector = [2, 3, 4] 15 | results = index.query(vector=query_vector, top_k=10) 16 | 17 | print(results) -------------------------------------------------------------------------------- /process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again." 
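15 | 16 | # A minimal usage sketch (assumption: a server exposing the /predict endpoint used above is already running on localhost:8000; nothing here is confirmed by the rest of the repository): 17 | if __name__ == '__main__': 18 | print(process_input("Hello, NeuralGPT!")) 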
-------------------------------------------------------------------------------- /requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: this module is itself named requests.py, which shadows the real requests library when imported from this directory 2 | from requests.exceptions import HTTPError 3 | 4 | def get(url, headers=None, params=None): 5 | try: 6 | response = requests.get(url, headers=headers, params=params) 7 | response.raise_for_status() 8 | return response 9 | except HTTPError as http_err: 10 | print(f'HTTP error occurred: {http_err}') 11 | except Exception as err: 12 | print(f'Other error occurred: {err}') -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | gradio 2 | requests 3 | datetime 4 | websockets 5 | websocket 6 | asyncio 7 | bs4 8 | pysimplegui 9 | g4f 10 | gpt4free 11 | -------------------------------------------------------------------------------- /responses.json: -------------------------------------------------------------------------------- 1 | { 2 | "greetings": [ 3 | { 4 | "pattern": "hi|hello|hey", 5 | "response": "Hi there!" 6 | }, 7 | { 8 | "pattern": "how are you|how's it going", 9 | "response": "I'm doing well, thank you. How about you?" 10 | } 11 | ], 12 | "questions": [ 13 | { 14 | "pattern": "what is your name|who are you", 15 | "response": "My name is NeuralGPT. I'm an AI language model." 16 | }, 17 | { 18 | "pattern": "what can you do", 19 | "response": "I can answer questions, provide information, and have conversations with you." 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 
20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | selected_params = {}  # placeholder so the function is runnable until the TODO above is implemented 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="NeuralGPT", 5 | version="0.1", 6 | author="B staszewski", 7 | author_email="bstaszewski1984@gmail.com", 8 | description="A project for neural GPT", 9 | packages=find_packages(), 10 | install_requires=[ 11 | "numpy", 12 | "torch", 13 | "transformers", 14 | "pytest" 15 | ] 16 | ) -------------------------------------------------------------------------------- /sort_files.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | # Define the directory where the files are located 5 | directory = "/path/to/directory" 6 | 7 | # Create a dictionary to store the file extensions and their corresponding subdirectories 8 | file_extensions = {} 9 | 10 | # Loop through all the files in the directory 11 | for filename in os.listdir(directory): 12 | 13 | # Get the file extension 14 | file_extension = os.path.splitext(filename)[1] 15 | 16 | # Skip entries that have no extension (os.path.splitext returns '' for them) 17 | if not file_extension: 18 | continue 19 | 20 | # If the file extension is not in the dictionary, create a new subdirectory for it 21 | if file_extension not in file_extensions: 22 | os.makedirs(os.path.join(directory, file_extension[1:]), exist_ok=True) 23 | file_extensions[file_extension] = True 24 | 25 | # Move the file to the corresponding subdirectory 26 | shutil.move(os.path.join(directory, filename), os.path.join(directory, file_extension[1:], filename)) -------------------------------------------------------------------------------- /src/Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | pass  # code to execute the autonomous scripts goes here (an empty try block is a syntax error) 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs -------------------------------------------------------------------------------- /src/Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle 
timeout 12 | return "Task timed out" -------------------------------------------------------------------------------- /src/CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp, 9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs -------------------------------------------------------------------------------- /src/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in [t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Check for grammatical correctness 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /src/FineTuneGPT,py.txt: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model.bin') -------------------------------------------------------------------------------- /src/FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- 
/src/ModelSaver.py: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from model_saver import ModelSaver 3 | 4 | # Load a pretrained model (the NeuralGPT class in this repo takes the model name in its constructor; it has no from_pretrained classmethod) 5 | model = NeuralGPT('gpt2') 6 | 7 | # Save the model to a local file 8 | saver = ModelSaver(model) 9 | saver.save_local('my_model.bin') 10 | 11 | # Save the model to an online source 12 | saver.save_online('http://example.com/model') -------------------------------------------------------------------------------- /src/PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2  # NOTE: this file is itself named PyPDF2.py; run it from another directory or rename it so the real library is imported 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 | # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /src/callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17 | # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /src/chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | # Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | 
chat() -------------------------------------------------------------------------------- /src/class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /src/classc.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | class CommunicationClass: 4 | def __init__(self, protocol, message_format, encryption, authentication): 5 | self.protocol = protocol 6 | self.message_format = message_format 7 | self.encryption = encryption 8 | self.authentication = authentication 9 | 10 | def send_message(self, message): 11 | # Send the message using the specified protocol and message format 12 | pass 13 | 14 | def receive_message(self): 15 | # Receive a message using the specified protocol and message format 16 | pass 17 | 18 | def encrypt_message(self, message): 19 | # Encrypt the message using the specified encryption mechanism 20 | pass 21 | 22 | def decrypt_message(self, message): 23 | # Decrypt the message using the specified encryption mechanism 24 | pass 25 | 26 | def authenticate_user(self, user): 27 | # Authenticate the user using the specified authentication mechanism 28 | pass 29 | 30 | # Load the configuration file 31 | with open('config.json', 'r') as f: 32 | config = json.load(f) 33 | 34 | # Create the communication class based on the configuration parameters 35 | communication_class = CommunicationClass(config['protocol'], config['message_format'], config['encryption'], config['authentication']) 36 | 37 | # Integrate the communication class with NeuralGPT and flowiseAI app 38 | neural_gpt.set_communication_class(communication_class) 39 | flowise_ai.set_communication_class(communication_class) 40 | 41 | # Test the communication 42 | neural_gpt.send_message('Hello, world!') 43 | message = flowise_ai.receive_message() 44 | print(message) -------------------------------------------------------------------------------- /src/com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ... 
11 | 12 | # Establish a TLS connection 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)  # a client verifies the server, so SERVER_AUTH (not CLIENT_AUTH) 14 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key)  # certfile is the certificate, keyfile the private key 15 | context.load_verify_locations(cafile=flowiseai_public_key) 16 | with socket.create_connection(('flowiseai.com', 443)) as sock: 17 | with context.wrap_socket(sock, server_hostname='flowiseai.com') as ssock:  # server_hostname is required for certificate checking 18 | ssock.sendall(b'Hello, world!') 19 | data = ssock.recv(1024) -------------------------------------------------------------------------------- /src/fiileprocessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | with open(file_path, 'rb') as src, open(file_url, 'wb') as f: 14 | f.write(src.read())  # copy the source file's bytes; a str path has no read() method 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /src/fine_tuner.py: -------------------------------------------------------------------------------- 1 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt')  # FineTuneGPT must be imported from its module first 2 | fine_tuner.fine_tune_model() -------------------------------------------------------------------------------- /src/generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /src/llmmanager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | class LLM: 5 | def __init__(self, name, bin_file_path): 6 | self.name = name 7 | self.bin_file_path = bin_file_path 8 | 9 | class LLMManager: 10 | def __init__(self, local_storage_path): 11 | self.local_storage_path = local_storage_path 12 | self.llms = [] 13 | 14 | def add_llm(self, llm): 15 | 
self.llms.append(llm) 16 | 17 | def remove_llm(self, llm_name): 18 | for llm in self.llms: 19 | if llm.name == llm_name: 20 | self.llms.remove(llm) 21 | 22 | def download_llm(self, url): 23 | response = requests.get(url) 24 | llm_name = os.path.basename(url) 25 | llm_file_path = os.path.join(self.local_storage_path, llm_name) 26 | with open(llm_file_path, 'wb') as f: 27 | f.write(response.content) 28 | llm = LLM(llm_name, llm_file_path) 29 | self.add_llm(llm) 30 | 31 | def upload_llm(self, llm_file_path): 32 | llm_name = os.path.basename(llm_file_path) 33 | llm = LLM(llm_name, llm_file_path) 34 | self.add_llm(llm) 35 | 36 | def connect_llm(self, llm_name): 37 | for llm in self.llms: 38 | if llm.name == llm_name: 39 | # connect the llm 40 | pass 41 | 42 | def disconnect_llm(self, llm_name): 43 | for llm in self.llms: 44 | if llm.name == llm_name: 45 | # disconnect the llm 46 | pass -------------------------------------------------------------------------------- /src/load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/src/main.py -------------------------------------------------------------------------------- /src/memorymodule.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class MemoryModule: 4 | def __init__(self, db_path): 5 | self.db_path = db_path 6 | self.conn = sqlite3.connect(self.db_path) 7 | self.create_tables() 8 | 9 | def create_tables(self): 10 | cursor = self.conn.cursor() 11 | cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 12 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 13 | data TEXT)''') 14 | cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 15 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 16 | data TEXT)''') 17 | self.conn.commit() 18 | 19 | def store_data(self, data, memory_type): 20 | cursor = self.conn.cursor() 21 | if memory_type == 'short_term': 22 | cursor.execute('''INSERT INTO short_term_memory (data) VALUES (?)''', (data,)) 23 | elif memory_type == 'long_term': 24 | cursor.execute('''INSERT INTO long_term_memory (data) VALUES (?)''', (data,)) 25 | self.conn.commit() 26 | 27 | def retrieve_data(self, query, memory_type): 28 | cursor = self.conn.cursor() 29 | if memory_type == 'short_term': 30 | cursor.execute('''SELECT data FROM short_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 31 | elif memory_type == 'long_term': 32 | cursor.execute('''SELECT data FROM long_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 33 | data = cursor.fetchall() 34 | return data 
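35 | 36 | # A minimal usage sketch (the database path 'memory.db' is a hypothetical example; any writable file path works with sqlite3): 37 | if __name__ == '__main__': 38 | memory = MemoryModule('memory.db') 39 | memory.store_data('NeuralGPT remembers this.', 'short_term') 40 | print(memory.retrieve_data('remembers', 'short_term')) 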
-------------------------------------------------------------------------------- /src/model_loader.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from neuralgpt import NeuralGPT 3 | from model_loader import ModelLoader 4 | 5 | class ChatBox: 6 | def __init__(self): 7 | self.model = None 8 | self.loader = ModelLoader() 9 | 10 | self.root = tk.Tk() 11 | self.root.title('Chatbox') 12 | 13 | self.input_label = tk.Label(self.root, text='Input:') 14 | self.input_label.pack() 15 | 16 | self.input_field = tk.Entry(self.root) 17 | self.input_field.pack() 18 | 19 | self.output_label = tk.Label(self.root, text='Output:') 20 | self.output_label.pack() 21 | 22 | self.output_field = tk.Text(self.root, height=10, width=50) 23 | self.output_field.pack() 24 | 25 | self.submit_button = tk.Button(self.root, text='Submit', command=self.submit) 26 | self.submit_button.pack() 27 | 28 | def submit(self): 29 | if not self.model: 30 | # Load the model if it hasn't been loaded yet 31 | self.model = self.loader.load_local('my_model.bin') 32 | 33 | # Get the user input 34 | user_input = self.input_field.get() 35 | 36 | # Generate a response using the model 37 | response = self.model.generate(user_input) 38 | 39 | # Display the response in the output field 40 | self.output_field.insert(tk.END, response + '\n') 41 | 42 | def run(self): 43 | self.root.mainloop() 44 | 45 | if __name__ == '__main__': 46 | chatbox = ChatBox() 47 | chatbox.run() -------------------------------------------------------------------------------- /src/neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 | repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /src/nlp/nlp.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 
| 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 | def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /src/process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again." -------------------------------------------------------------------------------- /src/requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: this file shadows the installed requests package when run from its own directory; renaming it would be safer 2 | 3 | def get(url, headers=None, params=None): 4 | try: 5 | response = requests.get(url, headers=headers, params=params) 6 | response.raise_for_status() 7 | return response 8 | except requests.exceptions.HTTPError as http_err: 9 | print(f'HTTP error occurred: {http_err}') 10 | except Exception as err: 11 | print(f'Other error occurred: {err}')
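A usage sketch for the get() wrapper above; it returns None when an exception was caught, so callers should check for that (the URL is an example):

response = get('https://api.github.com', params={'per_page': 1})
if response is not None:
    print(response.status_code)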
-------------------------------------------------------------------------------- /src/save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | selected_params = {}  # placeholder until the TODO above is implemented 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /src/scriptexecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a subprocess with a 10-second timeout (note: not a true sandbox) 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) -------------------------------------------------------------------------------- /src/train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 24 | pass  # placeholder body so this stub is valid Python 25 | # Define function to train NeuralGPT model on user's dataset 26 | def train_model(dataset): 27 | # Create NeuralGPT object 28 | model = NeuralGPT() 29 | 30 | # Train model on dataset 31 | model.train(dataset) 32 | 33 | # Save trained model in *.bin format 34 | save_model(model, 'model.bin') 35 | 36 | # Define function to save model in *.bin format 37 | def save_model(model, filename): 38 | # Save model to local file or online source 39 | # ...
40 | pass  # placeholder: persist the model here (e.g. torch.save(model, filename)) 41 | # Call create_gui() function to create GUI 42 | create_gui() 43 | dataset = 'dataset.txt'  # example path; point this at your own training data 44 | # Call train_model() function to train model on user's dataset 45 | train_model(dataset) -------------------------------------------------------------------------------- /streamlit/home.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import streamlit as st 3 | 4 | server_ports = [] 5 | client_ports = [] 6 | 7 | # Initialize the data kept in st.session_state 8 | if "server_ports" not in st.session_state: 9 | st.session_state['server_ports'] = "" 10 | if "client_ports" not in st.session_state: 11 | st.session_state['client_ports'] = "" 12 | if "user_ID" not in st.session_state: 13 | st.session_state.user_ID = "" 14 | if "gradio_Port" not in st.session_state: 15 | st.session_state.gradio_Port = "" 16 | if "server" not in st.session_state: 17 | st.session_state.server = False 18 | if "client" not in st.session_state: 19 | st.session_state.client = False 20 | 21 | st.set_page_config(layout="wide") 22 | 23 | async def main(): 24 | 25 | st.title("NeuralGPT") 26 | 27 | gradio_Ports = st.container(border=True) 28 | gradio_Ports.markdown(st.session_state.gradio_Port) 29 | 30 | with st.sidebar: 31 | # Display data that may be modified on other pages 32 | serverPorts = st.container(border=True) 33 | serverPorts.markdown(st.session_state['server_ports']) 34 | st.text("Client ports") 35 | clientPorts = st.container(border=True) 36 | clientPorts.markdown(st.session_state['client_ports']) 37 | st.text("Character.ai ID") 38 | user_id = st.container(border=True) 39 | user_id.markdown(st.session_state.user_ID) 40 | status = st.status(label="runs", state="complete", expanded=False) 41 | 42 | if st.session_state.server == True: 43 | st.markdown("server running...") 44 | 45 | if st.session_state.client == True: 46 | st.markdown("client running") 47 | 48 | # Run the application 49 | asyncio.run(main()) 50 | -------------------------------------------------------------------------------- /streamlit/pages/chat-hub.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CognitiveCodes/NeuralGPT/fa77277024cbb8a3b05f781c09e508a27a64debd/streamlit/pages/chat-hub.db -------------------------------------------------------------------------------- /streamlit/requirements.txt: -------------------------------------------------------------------------------- 1 | gradio 2 | requests 3 | websockets 4 | websocket 5 | bs4 6 | pysimplegui 7 | g4f 8 | gpt4free -------------------------------------------------------------------------------- /tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | class Tools: 4 | def __init__(self): 5 | pass 6 | 7 | def create_directory(self, directory_path): 8 | os.makedirs(directory_path, exist_ok=True) 9 | 10 | def modify_file(self, file_path, modification_function): 11 | with open(file_path, 'r') as f: 12 | data = f.read() 13 | modified_data = modification_function(data) 14 | with open(file_path, 'w') as f: 15 | f.write(modified_data)
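A brief usage sketch for Tools above; modify_file accepts any str -> str callable (file names are examples):

tools = Tools()
tools.create_directory('output')
with open('output/notes.txt', 'w') as f:
    f.write('hello')
tools.modify_file('output/notes.txt', str.upper)  # the file now contains 'HELLO'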
-------------------------------------------------------------------------------- /train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 24 | pass  # placeholder body so this stub is valid Python 25 | # Define function to train NeuralGPT model on user's dataset 26 | def train_model(dataset): 27 | # Create NeuralGPT object 28 | model = NeuralGPT() 29 | 30 | # Train model on dataset 31 | model.train(dataset) 32 | 33 | # Save trained model in *.bin format 34 | save_model(model, 'model.bin') 35 | 36 | # Define function to save model in *.bin format 37 | def save_model(model, filename): 38 | # Save model to local file or online source 39 | # ... 40 | pass  # placeholder: persist the model here (e.g. torch.save(model, filename)) 41 | # Call create_gui() function to create GUI 42 | create_gui() 43 | dataset = 'dataset.txt'  # example path; point this at your own training data 44 | # Call train_model() function to train model on user's dataset 45 | train_model(dataset) -------------------------------------------------------------------------------- /utils/Chatflow.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Chatflow: 4 | def __init__(self): 5 | self.logger = logging.getLogger(__name__) 6 | self.logger.setLevel(logging.DEBUG) 7 | self.handler = logging.FileHandler('chatflow.log') 8 | self.handler.setLevel(logging.DEBUG) 9 | self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | self.handler.setFormatter(self.formatter) 11 | self.logger.addHandler(self.handler) 12 | 13 | def run(self): 14 | try: 15 | pass  # code to execute the autonomous scripts goes here 16 | except Exception as e: 17 | self.logger.error(str(e)) 18 | # code to notify the user when an error occurs -------------------------------------------------------------------------------- /utils/Communication.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class Communication: 4 | async def execute_task(self): 5 | try: 6 | # execute long running task asynchronously with a timeout of 30 seconds (long_running_task is assumed to be defined elsewhere) 7 | result = await asyncio.wait_for(long_running_task(), timeout=30) 8 | # handle successful completion of the task 9 | return result 10 | except asyncio.TimeoutError: 11 | # handle timeout 12 | return "Task timed out" -------------------------------------------------------------------------------- /utils/CommunicationLog.py: -------------------------------------------------------------------------------- 1 | class CommunicationLog: 2 | def __init__(self): 3 | self.logs = [] 4 | 5 | def add_log(self, message, timestamp, error=None): 6 | log = { 7 | 'message': message, 8 | 'timestamp': timestamp, 9 | 'error': error 10 | } 11 | self.logs.append(log) 12 | 13 | def get_logs(self): 14 | return self.logs
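A short usage sketch for CommunicationLog above; timestamps are supplied by the caller (ISO strings shown as an example):

from datetime import datetime

log = CommunicationLog()
log.add_log('handshake sent', datetime.now().isoformat())
log.add_log('no reply received', datetime.now().isoformat(), error='timeout')
print(log.get_logs())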
-------------------------------------------------------------------------------- /utils/DualCoreLLM.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | 3 | class DualCoreLLM: 4 | def __init__(self): 5 | self.nlp = spacy.load('en_core_web_sm') 6 | 7 | def check_coherence(self, text): 8 | doc = self.nlp(text) 9 | 10 | # Check for semantic coherence (a rough dependency-based heuristic) 11 | for token in doc: 12 | if token.dep_ == 'nsubj' and token.head.pos_ == 'VERB': 13 | subj = token 14 | verb = token.head 15 | for child in verb.children: 16 | if child.dep_ == 'dobj': 17 | obj = child 18 | if obj.text not in [t.text for t in subj.subtree]: 19 | return False 20 | return True 21 | 22 | def check_grammar(self, text): 23 | doc = self.nlp(text) 24 | 25 | # Heuristic grammar check: flags sentences whose root is not a present-tense verb 26 | for sent in doc.sents: 27 | if sent.root.dep_ == 'ROOT' and sent.root.tag_ != 'VBZ': 28 | return False 29 | return True -------------------------------------------------------------------------------- /utils/FileProcessor.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import List 3 | 4 | class FileProcessor: 5 | def __init__(self, storage_path: str): 6 | self.storage_path = storage_path 7 | 8 | def upload_file(self, file_path: str, file_name: str) -> str: 9 | """ 10 | Uploads a file to the storage_path and returns the URL where it can be accessed. 11 | """ 12 | file_url = os.path.join(self.storage_path, file_name) 13 | with open(file_path, 'rb') as src, open(file_url, 'wb') as dst: 14 | dst.write(src.read())  # copy the source file; file_path is a path string, not a file object 15 | return file_url 16 | 17 | def download_file(self, file_url: str) -> bytes: 18 | """ 19 | Downloads a file from the storage_path and returns its contents as bytes. 20 | """ 21 | with open(file_url, 'rb') as f: 22 | file_contents = f.read() 23 | return file_contents 24 | 25 | def process_files(self, file_urls: List[str]) -> List[str]: 26 | """ 27 | Processes a list of files specified by their URLs and returns a list of processed files' URLs. 28 | """ 29 | processed_files = [] 30 | for file_url in file_urls: 31 | # process file here 32 | processed_file_url = file_url + '_processed' 33 | processed_files.append(processed_file_url) 34 | return processed_files -------------------------------------------------------------------------------- /utils/FileTransfer.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | 3 | class FileTransfer: 4 | def __init__(self, ftp_host, ftp_user, ftp_password): 5 | self.ftp_host = ftp_host 6 | self.ftp_user = ftp_user 7 | self.ftp_password = ftp_password 8 | 9 | def upload_file(self, local_file_path, remote_file_path): 10 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 11 | with open(local_file_path, 'rb') as f: 12 | ftp.storbinary('STOR ' + remote_file_path, f) 13 | 14 | def download_file(self, remote_file_path, local_file_path): 15 | with ftplib.FTP(self.ftp_host, self.ftp_user, self.ftp_password) as ftp: 16 | with open(local_file_path, 'wb') as f: 17 | ftp.retrbinary('RETR ' + remote_file_path, f.write)
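A usage sketch for FileTransfer above (host, credentials, and paths are placeholders):

transfer = FileTransfer('ftp.example.com', 'user', 'secret')
transfer.upload_file('local/model.bin', 'models/model.bin')
transfer.download_file('models/model.bin', 'local/model-copy.bin')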
-------------------------------------------------------------------------------- /utils/FineTuneGPT,py.txt: -------------------------------------------------------------------------------- 1 | from neuralgpt import NeuralGPT 2 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 3 | import torch 4 | 5 | class FineTuneGPT: 6 | def __init__(self, pretrained_model_path, new_dataset): 7 | self.pretrained_model_path = pretrained_model_path 8 | self.new_dataset = new_dataset 9 | 10 | def fine_tune_model(self): 11 | # Load the pretrained model 12 | tokenizer = GPT2Tokenizer.from_pretrained(self.pretrained_model_path) 13 | model = GPT2LMHeadModel.from_pretrained(self.pretrained_model_path) 14 | 15 | # Load the new dataset 16 | with open(self.new_dataset, 'r') as f: 17 | text = f.read() 18 | inputs = tokenizer.encode(text, return_tensors='pt') 19 | 20 | # Fine-tune the model with the new dataset 21 | model.train() 22 | optimizer = torch.optim.Adam(model.parameters(), lr=5e-5) 23 | for i in range(100): 24 | outputs = model(inputs, labels=inputs) 25 | loss = outputs[0] 26 | loss.backward() 27 | optimizer.step() 28 | optimizer.zero_grad() 29 | 30 | # Save the fine-tuned model 31 | model.save_pretrained('fine_tuned_model')  # save_pretrained writes a directory, not a single .bin file -------------------------------------------------------------------------------- /utils/FlowiseAICommunication.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | class FlowiseAICommunication: 5 | def __init__(self, url): 6 | self.url = url 7 | 8 | def send_message(self, message): 9 | data = {"message": message} 10 | try: 11 | response = requests.post(self.url, json=data) 12 | return response.json() 13 | except requests.exceptions.RequestException as e: 14 | print(e) 15 | return None 16 | 17 | def receive_message(self): 18 | try: 19 | response = requests.get(self.url) 20 | return response.json()["message"] 21 | except requests.exceptions.RequestException as e: 22 | print(e) 23 | return None -------------------------------------------------------------------------------- /utils/InternetAccess.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | class InternetAccess: 4 | def __init__(self): 5 | self.session = requests.Session() 6 | 7 | def request(self, method, url, headers=None, params=None, data=None, json=None, auth=None): 8 | response = self.session.request(method, url, headers=headers, params=params, data=data, json=json, auth=auth) 9 | return response -------------------------------------------------------------------------------- /utils/LLMManager.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | 4 | class LLM: 5 | def __init__(self, name, bin_file_path): 6 | self.name = name 7 | self.bin_file_path = bin_file_path 8 | 9 | class LLMManager: 10 | def __init__(self, local_storage_path): 11 | self.local_storage_path = local_storage_path 12 | self.llms = [] 13 | 14 | def add_llm(self, llm): 15 | self.llms.append(llm) 16 | 17 | def remove_llm(self, llm_name): 18 | for llm in list(self.llms):  # iterate over a copy so removal is safe 19 | if llm.name == llm_name: 20 | self.llms.remove(llm) 21 | 22 | def download_llm(self, url): 23 | response = requests.get(url) 24 | llm_name = os.path.basename(url) 25 | llm_file_path = os.path.join(self.local_storage_path, llm_name) 26 | with open(llm_file_path, 'wb') as f: 27 | f.write(response.content) 28 | llm = LLM(llm_name, llm_file_path) 29 | self.add_llm(llm) 30 | 31 | def upload_llm(self, llm_file_path): 32 | llm_name = os.path.basename(llm_file_path) 33 | llm = LLM(llm_name, llm_file_path) 34 | self.add_llm(llm) 35 | 36 | def connect_llm(self, llm_name): 37 | for llm in self.llms: 38 | if llm.name == llm_name: 39 | # connect the llm 40 | pass 41 | 42 | def disconnect_llm(self, llm_name): 43 | for llm in self.llms: 44 | if llm.name == llm_name: 45 | # disconnect the llm 46 | pass -------------------------------------------------------------------------------- /utils/MediaPlayer.py: -------------------------------------------------------------------------------- 1 | import vlc 2 | 3 | class MediaPlayer: 4 | def __init__(self): 5 | self.instance = vlc.Instance() 6 | self.player = self.instance.media_player_new() 7 | 8 | def play_media(self, media_path): 9 | media = self.instance.media_new(media_path) 10 | self.player.set_media(media) 11 | self.player.play() 12 | 13 | def stop_media(self): 14 | self.player.stop() -------------------------------------------------------------------------------- /utils/Memory.py:
-------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class Memory: 4 | def __init__(self, db_file): 5 | self.conn = sqlite3.connect(db_file) 6 | self.cursor = self.conn.cursor() 7 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 8 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 9 | data TEXT)''') 10 | self.cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 11 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 12 | data TEXT)''') 13 | self.conn.commit() 14 | 15 | def add_to_short_term_memory(self, data): 16 | self.cursor.execute("INSERT INTO short_term_memory (data) VALUES (?)", (data,)) 17 | self.conn.commit() 18 | 19 | def add_to_long_term_memory(self, data): 20 | self.cursor.execute("INSERT INTO long_term_memory (data) VALUES (?)", (data,)) 21 | self.conn.commit() 22 | 23 | def retrieve_from_short_term_memory(self): 24 | self.cursor.execute("SELECT * FROM short_term_memory") 25 | return self.cursor.fetchall() 26 | 27 | def retrieve_from_long_term_memory(self): 28 | self.cursor.execute("SELECT * FROM long_term_memory") 29 | return self.cursor.fetchall() 30 | 31 | def clear_short_term_memory(self): 32 | self.cursor.execute("DELETE FROM short_term_memory") 33 | self.conn.commit() 34 | 35 | def clear_long_term_memory(self): 36 | self.cursor.execute("DELETE FROM long_term_memory") 37 | self.conn.commit() -------------------------------------------------------------------------------- /utils/MemoryModule.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | class MemoryModule: 4 | def __init__(self, db_path): 5 | self.db_path = db_path 6 | self.conn = sqlite3.connect(self.db_path) 7 | self.create_tables() 8 | 9 | def create_tables(self): 10 | cursor = self.conn.cursor() 11 | cursor.execute('''CREATE TABLE IF NOT EXISTS short_term_memory 12 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 13 | data TEXT)''') 14 | cursor.execute('''CREATE TABLE IF NOT EXISTS long_term_memory 15 | (id INTEGER PRIMARY KEY AUTOINCREMENT, 16 | data TEXT)''') 17 | self.conn.commit() 18 | 19 | def store_data(self, data, memory_type): 20 | cursor = self.conn.cursor() 21 | if memory_type == 'short_term': 22 | cursor.execute('''INSERT INTO short_term_memory (data) VALUES (?)''', (data,)) 23 | elif memory_type == 'long_term': 24 | cursor.execute('''INSERT INTO long_term_memory (data) VALUES (?)''', (data,)) 25 | self.conn.commit() 26 | 27 | def retrieve_data(self, query, memory_type): 28 | cursor = self.conn.cursor() 29 | if memory_type == 'short_term': 30 | cursor.execute('''SELECT data FROM short_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 31 | elif memory_type == 'long_term': 32 | cursor.execute('''SELECT data FROM long_term_memory WHERE data LIKE ?''', ('%' + query + '%',)) 33 | data = cursor.fetchall() 34 | return data -------------------------------------------------------------------------------- /utils/NLPModule.py: -------------------------------------------------------------------------------- 1 | import spacy 2 | from spacy.lang.en import English 3 | from spacy.lang.es import Spanish 4 | from spacy.lang.fr import French 5 | 6 | class NLPModule: 7 | def __init__(self, language='en'): 8 | if language == 'en': 9 | self.nlp = English() 10 | elif language == 'es': 11 | self.nlp = Spanish() 12 | elif language == 'fr': 13 | self.nlp = French() 14 | else: 15 | raise ValueError('Unsupported language') 16 | 17 | def process_text(self, text): 18 | doc = self.nlp(text) 19 | return doc 20 | 21 
| def generate_text(self, template): 22 | # TODO: Implement text generation 23 | return None 24 | 25 | def train_model(self, data): 26 | # TODO: Implement model training 27 | return None 28 | 29 | def customize_model(self, data): 30 | # TODO: Implement model customization 31 | return None -------------------------------------------------------------------------------- /utils/PyPDF2.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import PyPDF2  # NOTE: this file shares the package's name, so run it from another directory to import the real PyPDF2; the PdfFileReader API below also requires PyPDF2 < 3.0 4 | 5 | pdf_path = 'path/to/pdf/file.pdf' 6 | save_path = 'E:/AI/NeuralGPT/NeuralGPT' 7 | 8 | # Check if the save path exists, create it if it doesn't 9 | if not os.path.exists(save_path): 10 | os.makedirs(save_path) 11 | 12 | # Open the PDF file in read-binary mode 13 | with open(pdf_path, 'rb') as pdf_file: 14 | # Read the PDF file 15 | pdf_reader = PyPDF2.PdfFileReader(pdf_file) 16 | # Get the first page of the PDF 17 | page = pdf_reader.getPage(0) 18 | # Create a new PDF writer object 19 | pdf_writer = PyPDF2.PdfFileWriter() 20 | # Add the page to the PDF writer object 21 | pdf_writer.addPage(page) 22 | # Create a new PDF file name 23 | pdf_file_name = os.path.splitext(os.path.basename(pdf_path))[0] + '.pdf' 24 | # Save the PDF file to the specified location 25 | with open(os.path.join(save_path, pdf_file_name), 'wb') as new_pdf_file: 26 | pdf_writer.write(new_pdf_file) -------------------------------------------------------------------------------- /utils/ScriptExecutor.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class ScriptExecutor: 4 | def __init__(self, script_path): 5 | self.script_path = script_path 6 | 7 | def execute_script(self, input_data): 8 | try: 9 | # Run the script in a subprocess with a 10-second timeout (note: not a true sandbox) 10 | output = subprocess.check_output(['python', self.script_path], input=input_data, timeout=10, stderr=subprocess.STDOUT) 11 | return output.decode('utf-8') 12 | except subprocess.TimeoutExpired: 13 | return "Script execution timed out" 14 | except subprocess.CalledProcessError as e: 15 | return f"Script execution failed with error code {e.returncode}: {e.output.decode('utf-8')}" 16 | except Exception as e: 17 | return f"Script execution failed with exception: {str(e)}" 18 | 19 | # Example usage 20 | executor = ScriptExecutor('path/to/script.py') 21 | result = executor.execute_script(b'input data') 22 | print(result) -------------------------------------------------------------------------------- /utils/Scripting.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | class Scripting: 4 | def __init__(self): 5 | pass 6 | 7 | def execute_script(self, script_path): 8 | subprocess.run(['python', script_path])  # run via the interpreter so non-executable .py files work -------------------------------------------------------------------------------- /utils/callback.py: -------------------------------------------------------------------------------- 1 | import pika 2 | 3 | # connect to RabbitMQ server 4 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')) 5 | channel = connection.channel() 6 | 7 | # create a queue for each instance of the NeuralGPT agent 8 | channel.queue_declare(queue='agent1') 9 | channel.queue_declare(queue='agent2') 10 | channel.queue_declare(queue='agent3') 11 | 12 | # define a callback function to process incoming messages 13 | def callback(ch, method, properties, body): 14 | # process message and execute appropriate task 15 | print("Received message: %r" % body) 16 | 17
| # start consuming messages from the queue 18 | channel.basic_consume(queue='agent1', on_message_callback=callback, auto_ack=True) 19 | channel.basic_consume(queue='agent2', on_message_callback=callback, auto_ack=True) 20 | channel.basic_consume(queue='agent3', on_message_callback=callback, auto_ack=True) 21 | 22 | print('Waiting for messages...') 23 | channel.start_consuming() -------------------------------------------------------------------------------- /utils/chat.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from transformers import pipeline 3 | 4 | # Define the chatbot pipeline using the pre-trained NeuralGPT model 5 | chatbot = pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B") 6 | 7 | # Define a function to handle user input and generate chatbot responses 8 | def chat(): 9 | while True: 10 | # Get user input 11 | user_input = input("You: ") 12 | 13 | # Generate chatbot response 14 | try: 15 | chatbot_response = chatbot(user_input, max_length=50)[0]["generated_text"] 16 | print("Chatbot:", chatbot_response) 17 | except Exception as e: 18 | print("Error:", e) 19 | 20 | # Call the chat function to start the chatbox 21 | chat() -------------------------------------------------------------------------------- /utils/class.py: -------------------------------------------------------------------------------- 1 | import time 2 | import requests 3 | 4 | class Communication: 5 | def __init__(self, protocol, message_format, timeout, retry_limit): 6 | self.protocol = protocol 7 | self.message_format = message_format 8 | self.timeout = timeout 9 | self.retry_limit = retry_limit 10 | 11 | def send_message(self, message): 12 | retries = 0 13 | while retries < self.retry_limit: 14 | try: 15 | response = requests.post(self.protocol, data=message, timeout=self.timeout) 16 | return response 17 | except requests.exceptions.Timeout: 18 | retries += 1 19 | print("Timeout occurred. Retrying...") 20 | time.sleep(1) 21 | except requests.exceptions.RequestException as e: 22 | print("Error occurred: ", e) 23 | break 24 | return None 25 | 26 | def receive_message(self): 27 | retries = 0 28 | while retries < self.retry_limit: 29 | try: 30 | response = requests.get(self.protocol, timeout=self.timeout) 31 | return response 32 | except requests.exceptions.Timeout: 33 | retries += 1 34 | print("Timeout occurred. 
Retrying...") 35 | time.sleep(1) 36 | except requests.exceptions.RequestException as e: 37 | print("Error occurred: ", e) 38 | break 39 | return None 40 | -------------------------------------------------------------------------------- /utils/classc.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | class CommunicationClass: 4 | def __init__(self, protocol, message_format, encryption, authentication): 5 | self.protocol = protocol 6 | self.message_format = message_format 7 | self.encryption = encryption 8 | self.authentication = authentication 9 | 10 | def send_message(self, message): 11 | # Send the message using the specified protocol and message format 12 | pass 13 | 14 | def receive_message(self): 15 | # Receive a message using the specified protocol and message format 16 | pass 17 | 18 | def encrypt_message(self, message): 19 | # Encrypt the message using the specified encryption mechanism 20 | pass 21 | 22 | def decrypt_message(self, message): 23 | # Decrypt the message using the specified encryption mechanism 24 | pass 25 | 26 | def authenticate_user(self, user): 27 | # Authenticate the user using the specified authentication mechanism 28 | pass 29 | 30 | # Load the configuration file 31 | with open('config.json', 'r') as f: 32 | config = json.load(f) 33 | 34 | # Create the communication class based on the configuration parameters 35 | communication_class = CommunicationClass(config['protocol'], config['message_format'], config['encryption'], config['authentication']) 36 | 37 | # Integrate the communication class with NeuralGPT and flowiseAI app 38 | neural_gpt.set_communication_class(communication_class) 39 | flowise_ai.set_communication_class(communication_class) 40 | 41 | # Test the communication 42 | neural_gpt.send_message('Hello, world!') 43 | message = flowise_ai.receive_message() 44 | print(message) -------------------------------------------------------------------------------- /utils/com.py: -------------------------------------------------------------------------------- 1 | import ssl 2 | import socket 3 | 4 | # Generate public-private key pair for NeuralGPT 5 | neuralgpt_public_key = ... 6 | neuralgpt_private_key = ... 7 | 8 | # Generate public-private key pair for flowiseAI app 9 | flowiseai_public_key = ... 10 | flowiseai_private_key = ... 
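# The key-pair assignments above are left as placeholders in the source. For local testing,
# self-signed certificate/key pairs could be generated with OpenSSL, e.g. (file names are hypothetical):
#   openssl req -x509 -newkey rsa:2048 -nodes -keyout neuralgpt_key.pem -out neuralgpt_cert.pem -days 365
# and the resulting .pem paths assigned to the variables above.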
11 | 12 | # Establish a mutual-TLS connection (client side: verify the server and present our own certificate) 13 | context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) 14 | context.load_cert_chain(certfile=neuralgpt_public_key, keyfile=neuralgpt_private_key)  # certfile is our certificate, keyfile our private key 15 | context.load_verify_locations(cafile=flowiseai_public_key) 16 | with socket.create_connection(('flowiseai.com', 443)) as sock: 17 | with context.wrap_socket(sock, server_side=False, server_hostname='flowiseai.com') as ssock: 18 | ssock.sendall(b'Hello, world!') 19 | data = ssock.recv(1024) -------------------------------------------------------------------------------- /utils/fine_tuner.py: -------------------------------------------------------------------------------- 1 | from FineTuneGPT import FineTuneGPT  # assumes the FineTuneGPT class (see utils/FineTuneGPT,py.txt) is importable 2 | 3 | fine_tuner = FineTuneGPT('pretrained_model.bin', 'new_dataset.txt') 4 | fine_tuner.fine_tune_model() -------------------------------------------------------------------------------- /utils/generate_test_data.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | 4 | # Define a list of possible actions 5 | actions = ['open', 'close', 'turn on', 'turn off', 'start', 'stop'] 6 | 7 | # Define a list of possible objects 8 | objects = ['door', 'window', 'light', 'fan', 'TV', 'AC'] 9 | 10 | # Define a list of possible locations 11 | locations = ['living room', 'bedroom', 'kitchen', 'bathroom', 'garage'] 12 | 13 | # Define a function to generate random test data 14 | def generate_test_data(): 15 | action = random.choice(actions) 16 | obj = random.choice(objects) 17 | location = random.choice(locations) 18 | message = f"{action} the {obj} in the {location}" 19 | return message 20 | 21 | # Generate 10 random test messages 22 | for i in range(10): 23 | test_message = generate_test_data() 24 | print(test_message) -------------------------------------------------------------------------------- /utils/load_model.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import os 3 | import torch 4 | from DualCoreLLM import DualCoreLLM 5 | 6 | def load_model(model_path, use_dualcore=False): 7 | if model_path.startswith("http"): 8 | # Load model from online file 9 | urllib.request.urlretrieve(model_path, "model.bin") 10 | model_path = "model.bin" 11 | 12 | if not os.path.exists(model_path): 13 | raise ValueError("Model file not found.") 14 | 15 | # Load model into memory 16 | model = torch.load(model_path, map_location=torch.device('cpu')) 17 | 18 | if use_dualcore: 19 | # Initialize DualCoreLLM with pretrained model 20 | dualcore = DualCoreLLM(model) 21 | return dualcore 22 | else: 23 | return model -------------------------------------------------------------------------------- /utils/model_loader.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from neuralgpt import NeuralGPT 3 | from model_loader import ModelLoader 4 | 5 | class ChatBox: 6 | def __init__(self): 7 | self.model = None 8 | self.loader = ModelLoader() 9 | 10 | self.root = tk.Tk() 11 | self.root.title('Chatbox') 12 | 13 | self.input_label = tk.Label(self.root, text='Input:') 14 | self.input_label.pack() 15 | 16 | self.input_field = tk.Entry(self.root) 17 | self.input_field.pack() 18 | 19 | self.output_label = tk.Label(self.root, text='Output:') 20 | self.output_label.pack() 21 | 22 | self.output_field = tk.Text(self.root, height=10, width=50) 23 | self.output_field.pack() 24 | 25 | self.submit_button = tk.Button(self.root, text='Submit', command=self.submit) 26 | self.submit_button.pack() 27 | 28 | def submit(self):
29 | if not self.model: 30 | # Load the model if it hasn't been loaded yet 31 | self.model = self.loader.load_local('my_model.bin') 32 | 33 | # Get the user input 34 | user_input = self.input_field.get() 35 | 36 | # Generate a response using the model 37 | response = self.model.generate(user_input) 38 | 39 | # Display the response in the output field 40 | self.output_field.insert(tk.END, response + '\n') 41 | 42 | def run(self): 43 | self.root.mainloop() 44 | 45 | if __name__ == '__main__': 46 | chatbox = ChatBox() 47 | chatbox.run() -------------------------------------------------------------------------------- /utils/neuralgpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from transformers import GPT2Tokenizer, GPT2LMHeadModel 5 | 6 | class NeuralGPT: 7 | def __init__(self, model_name_or_path='gpt2', device='cpu'): 8 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path) 9 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path) 10 | self.device = device 11 | self.model.to(self.device) 12 | self.model.eval() 13 | 14 | def generate_text(self, prompt='', max_length=100, temperature=1.0, top_p=0.9, top_k=0, repetition_penalty=1.0, num_return_sequences=1): 15 | input_ids = self.tokenizer.encode(prompt, return_tensors='pt') 16 | input_ids = input_ids.to(self.device) 17 | 18 | output_sequences = self.model.generate( 19 | input_ids=input_ids, 20 | max_length=max_length + len(input_ids[0]), 21 | temperature=temperature, 22 | top_p=top_p, 23 | top_k=top_k, 24 | repetition_penalty=repetition_penalty, 25 | do_sample=True, 26 | num_return_sequences=num_return_sequences, 27 | ) 28 | 29 | generated_sequences = [] 30 | for generated_sequence_idx, generated_sequence in enumerate(output_sequences): 31 | generated_sequence = generated_sequence.tolist() 32 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) 33 | text = text[len(self.tokenizer.decode(input_ids[0], clean_up_tokenization_spaces=True)) : ] 34 | generated_sequences.append(text) 35 | 36 | return generated_sequences 37 | 38 | def save_text_to_file(self, text, file_path): 39 | with open(file_path, 'w') as f: 40 | f.write(text) -------------------------------------------------------------------------------- /utils/process_input.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | def process_input(input_text): 4 | if not input_text: 5 | return "Please enter a valid input." 6 | 7 | try: 8 | response = requests.post("http://localhost:8000/predict", json={"text": input_text}) 9 | if response.status_code == 200: 10 | return response.json()["generated_text"] 11 | else: 12 | return "Error processing input. Please try again." 13 | except requests.exceptions.RequestException as e: 14 | return f"Error processing input: {e}. Please try again." 
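process_input above assumes a local service exposing POST /predict that accepts {"text": ...} and returns {"generated_text": ...}; that service is not part of this listing. A minimal FastAPI sketch of the assumed contract, with the model call stubbed out:

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class Query(BaseModel):
    text: str

@app.post("/predict")
def predict(query: Query):
    # a real implementation would run the loaded model here
    return {"generated_text": "echo: " + query.text}

It could be served with, e.g., uvicorn predict_server:app --port 8000 (the module name is hypothetical).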
-------------------------------------------------------------------------------- /utils/requests.py: -------------------------------------------------------------------------------- 1 | import requests  # NOTE: this file shadows the installed requests package when run from its own directory; renaming it would be safer 2 | 3 | def get(url, headers=None, params=None): 4 | try: 5 | response = requests.get(url, headers=headers, params=params) 6 | response.raise_for_status() 7 | return response 8 | except requests.exceptions.HTTPError as http_err: 9 | print(f'HTTP error occurred: {http_err}') 10 | except Exception as err: 11 | print(f'Other error occurred: {err}') -------------------------------------------------------------------------------- /utils/save_preset.py: -------------------------------------------------------------------------------- 1 | import json 2 | import tkinter as tk 3 | from tkinter import filedialog 4 | 5 | # Define a function to save the current selected parameters to a file 6 | def save_preset(): 7 | # Prompt the user for a name for the preset 8 | preset_name = input("Enter a name for the preset: ") 9 | 10 | # Get the current selected parameters 11 | selected_params = get_selected_params() 12 | 13 | # Save the selected parameters to a file 14 | file_path = filedialog.asksaveasfilename(defaultextension='.json', initialfile=preset_name) 15 | with open(file_path, 'w') as f: 16 | json.dump(selected_params, f) 17 | 18 | # Display a message to the user indicating that the preset has been saved 19 | message = f"Preset '{preset_name}' has been saved." 20 | display_message(message) 21 | 22 | # Define a function to get the current selected parameters 23 | def get_selected_params(): 24 | # TODO: Implement this function to retrieve the selected parameters from the NeuralGPT agent 25 | selected_params = {}  # placeholder until the TODO above is implemented 26 | return selected_params 27 | 28 | # Define a function to display a message to the user 29 | def display_message(message): 30 | # TODO: Implement this function to display a message in the FlowiseAI dialogue window 31 | 32 | pass 33 | 34 | # Create a GUI with a button to save the preset 35 | root = tk.Tk() 36 | save_button = tk.Button(root, text="Save Preset", command=save_preset) 37 | save_button.pack() 38 | root.mainloop() -------------------------------------------------------------------------------- /utils/train_model.py: -------------------------------------------------------------------------------- 1 | # Import required modules 2 | from neuralgpt import NeuralGPT 3 | from dualcorellm import DualCoreLLM 4 | 5 | # Fix syntax error in gui.py file 6 | # ... 7 | 8 | # Define function to create GUI 9 | def create_gui(): 10 | # Create GUI window 11 | # ... 12 | 13 | # Provide options to load pretrained model 14 | # ... 15 | 16 | # Load model and test basic functionality 17 | # ... 18 | 19 | # Integrate DualCoreLLM module with GUI 20 | # ... 21 | 22 | # Prompt user for input and respond coherently 23 | # ... 24 | pass  # placeholder body so this stub is valid Python 25 | # Define function to train NeuralGPT model on user's dataset 26 | def train_model(dataset): 27 | # Create NeuralGPT object 28 | model = NeuralGPT() 29 | 30 | # Train model on dataset 31 | model.train(dataset) 32 | 33 | # Save trained model in *.bin format 34 | save_model(model, 'model.bin') 35 | 36 | # Define function to save model in *.bin format 37 | def save_model(model, filename): 38 | # Save model to local file or online source 39 | # ...
40 | pass  # placeholder: persist the model here (e.g. torch.save(model, filename)) 41 | # Call create_gui() function to create GUI 42 | create_gui() 43 | dataset = 'dataset.txt'  # example path; point this at your own training data 44 | # Call train_model() function to train model on user's dataset 45 | train_model(dataset) -------------------------------------------------------------------------------- /vord2.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from gensim.models import Word2Vec 3 | model = Word2Vec(vector_size=100)  # placeholder: load or train a real Word2Vec model here before pickling 4 | # Pickle the Word2Vec model with the HIGHEST_PROTOCOL option (note: this overwrites whatever currently sits at model_path) 5 | model_path = r'E:\AI\NeuralGPT\NeuralGPT\models\ggml-alpaca-7b-q4.bin' 6 | with open(model_path, 'wb') as f: 7 | pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL) 8 | 9 | # Unpickle the Word2Vec model 10 | with open(model_path, 'rb') as f: 11 | model = pickle.load(f) --------------------------------------------------------------------------------