├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── custom.md │ └── feature_request.md ├── .gitignore ├── LICENSE ├── README.md ├── ai ├── algorithms │ ├── supervised_learning │ │ ├── supervised_learning.py │ │ └── train.py │ └── unsupervised_learning │ │ ├── train.py │ │ └── unsupervised_learning.py ├── models │ ├── decision_trees │ │ ├── decision_tree.py │ │ └── train.py │ └── neural_networks │ │ ├── neural_network.py │ │ └── train.py └── utils │ ├── data_preprocessing │ ├── data_preprocessing.py │ └── train.py │ └── feature_extraction │ ├── feature_extraction.py │ └── train.py ├── algorithms ├── searching │ ├── binary_search.py │ └── linear_search.py └── sorting │ ├── merge_sort.py │ └── quick_sort.py ├── docs ├── NeuroNexus.jpeg ├── architecture │ ├── ai │ │ └── architecture.md │ └── quantum │ │ └── architecture.md └── tutorials │ ├── advanced_topics │ └── advanced_topics.md │ └── getting_started │ └── getting_started.md ├── integrations └── slack │ ├── README.md │ ├── config.json │ ├── requirements.txt │ ├── setup.py │ └── slack.py ├── network ├── architecture │ ├── decentralized │ │ └── decentralized_network.py │ └── distributed │ │ └── distributed_network.py └── protocols │ ├── communication │ ├── neural_network_communication.py │ └── quantum_communication.py │ └── data_storage │ ├── neural_network_data_storage.py │ └── quantum_data_storage.py ├── neuromorphic ├── neural_networks │ ├── long_short_term_memory │ │ └── long_short_term_memory.py │ ├── recurrent_neural_networks │ │ └── recurrent_neural_networks.py │ └── spiking_neural_networks │ │ └── spiking_neural_networks.py ├── neurons │ ├── artificial_neuron │ │ └── artificial_neuron.py │ └── biological_neuron │ │ └── biological_neuron.py └── synapses │ ├── synaptic_plasticity │ └── synaptic_plasticity.py │ └── synaptic_transmission │ └── synaptic_transmission.py ├── quantum ├── quantum_algorithms │ ├── grover │ │ ├── grover.py │ │ └── train.py │ └── shor │ │ └── train.py ├── quantum_simulators │ ├── cirq │ │ ├── cirq_simulator.py │ │ └── cirq_simulator_with_noise.py │ └── qiskit │ │ ├── qiskit_simulator.py │ │ └── qiskit_simulator_with_noise.py └── qubits │ ├── qubit_measurement │ ├── qubit_measurement.py │ └── train.py │ └── qubit_operations │ ├── qubit_operations.py │ └── train.py ├── security ├── access_control │ ├── neural_network_access_control.py │ └── quantum_access_control.py ├── authentication │ ├── neural_network_authentication.py │ └── quantum_authentication.py └── encryption │ ├── neural_network_encryption.py │ └── quantum_encryption.py ├── tests └── unit_tests │ ├── ai │ └── test_neural_network.py │ └── quantum │ ├── test_quantum_circuit.py │ └── test_quantum_simulation.py └── utils └── data_structures ├── graphs └── graph.py ├── linked_lists └── linked_list.py ├── queues └── queue.py ├── stacks └── stack.py └── trees └── tree.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 
25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore files and directories 2 | 3 | .git 4 | .idea 5 | .vscode 6 | __pycache__/ 7 | *.pyc 8 | *.egg-info 9 | dist/ 10 | build/ 11 | docs/_build/ 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

NeuroNexus by KOSASIH is licensed under Creative Commons Attribution 4.0 International
2 | 3 | # NeuroNexus Core 4 | 5 | The core repository for NeuroNexus, containing the AI-driven, decentralized, and self-aware network architecture. 6 | 7 | ## Overview 8 | 9 | NeuroNexus is a revolutionary network architecture that combines the power of artificial intelligence, decentralization, and self-awareness to create a highly scalable, secure, and efficient network. This repository contains the core components of the NeuroNexus architecture, including the AI-driven network protocol, decentralized data storage, and self-aware network management. 10 | 11 | ## Features 12 | 13 | * **AI-driven Network Protocol**: A novel network protocol that uses artificial intelligence to optimize network performance, security, and efficiency. 14 | * **Decentralized Data Storage**: A decentralized data storage system that allows for secure, efficient, and scalable data storage and retrieval. 15 | * **Self-aware Network Management**: A self-aware network management system that uses machine learning and artificial intelligence to monitor, analyze, and optimize network performance. 16 | 17 | ## Architecture 18 | 19 | The NeuroNexus architecture consists of the following components: 20 | 21 | * **NeuroNexus Node**: The core component of the NeuroNexus architecture, responsible for executing the AI-driven network protocol and managing decentralized data storage. 22 | * **NeuroNexus Network**: The decentralized network of NeuroNexus nodes that work together to provide a highly scalable, secure, and efficient network. 23 | * **NeuroNexus Hub**: The central hub of the NeuroNexus network, responsible for managing network traffic, optimizing network performance, and providing self-aware network management. 24 | 25 | ## Benefits 26 | 27 | * **Highly Scalable**: NeuroNexus is designed to scale horizontally, so capacity grows as nodes join the network. 28 | * **Secure**: NeuroNexus uses advanced security protocols and decentralized data storage to provide a highly secure network. 29 | * **Efficient**: NeuroNexus uses an AI-driven network protocol and self-aware network management to optimize network performance and efficiency. 30 | 31 | ## Getting Started 32 | 33 | To get started with NeuroNexus, follow these steps: 34 | 35 | 1. Clone the repository: `git clone https://github.com/KOSASIH/neuronexus-core.git` 36 | 2. Install the required dependencies: `pip install -r requirements.txt` 37 | 3. Run the NeuroNexus node: `python neuronexus_node.py` 38 | 39 | A minimal training example using the repository's AI modules is shown at the end of this README. 40 | 41 | ## Contributing 42 | 43 | We welcome contributions to the NeuroNexus project. To contribute, follow these steps: 44 | 45 | 1. Fork the repository on GitHub, then clone your fork: `git clone https://github.com/<your-username>/neuronexus-core.git` 46 | 2. Create a new branch: `git checkout -b my-feature` 47 | 3. Make your changes and commit them: `git commit -am "My feature"` 48 | 4. Push the branch: `git push origin my-feature` 49 | 5. Open a pull request on GitHub. 50 | 51 | ## License 52 | 53 | NeuroNexus is licensed under the Apache License, Version 2.0. See LICENSE for more information. 54 | The banner above additionally notes a Creative Commons Attribution 4.0 International license for the NeuroNexus name and artwork. 55 | 56 | ## Contact 57 | 58 | For more information, please contact us at [info@neuronexus.io](mailto:info@neuronexus.io).
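## Example

As a minimal, illustrative starting point, the sketch below trains the pure-NumPy `LogisticRegression` from `ai/algorithms/supervised_learning/supervised_learning.py` on a toy dataset. It assumes the script is run from inside that directory (the repository does not ship package `__init__.py` files) and that `numpy` and `scikit-learn` are installed; the synthetic data is made up for demonstration.

```python
# Toy demonstration of the SupervisedLearning wrapper.
# Run from ai/algorithms/supervised_learning/ so the local import resolves.
import numpy as np
from supervised_learning import SupervisedLearning, LogisticRegression

# Synthetic, roughly linearly separable data (illustration only)
rng = np.random.default_rng(42)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(3, 1, (50, 2))])
y = np.array([0] * 50 + [1] * 50)

model = SupervisedLearning(LogisticRegression(learning_rate=0.1, n_iters=2000))
model.train(X, y)  # prints accuracy, classification report, and confusion matrix
```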
59 | -------------------------------------------------------------------------------- /ai/algorithms/supervised_learning/supervised_learning.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.model_selection import train_test_split 3 | from sklearn.metrics import accuracy_score, classification_report, confusion_matrix 4 | 5 | class SupervisedLearning: 6 | def __init__(self, model): 7 | self.model = model 8 | 9 | def train(self, X, y): 10 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) 11 | self.model.fit(X_train, y_train) 12 | y_pred = self.model.predict(X_test) 13 | accuracy = accuracy_score(y_test, y_pred) 14 | print(f"Accuracy: {accuracy:.4f}") 15 | print("Classification Report:") 16 | print(classification_report(y_test, y_pred)) 17 | print("Confusion Matrix:") 18 | print(confusion_matrix(y_test, y_pred)) 19 | 20 | class LogisticRegression: 21 | def __init__(self, learning_rate=0.001, n_iters=1000): 22 | self.lr = learning_rate 23 | self.n_iters = n_iters 24 | self.w = None 25 | self.b = None 26 | 27 | def fit(self, X, y): 28 | n_samples, n_features = X.shape 29 | self.w = np.zeros(n_features) 30 | self.b = 0 31 | for _ in range(self.n_iters): 32 | linear_model = np.dot(X, self.w) + self.b 33 | y_predicted = self._sigmoid(linear_model) 34 | dw = (1 / n_samples) * np.dot(X.T, (y_predicted - y)) 35 | db = (1 / n_samples) * np.sum(y_predicted - y) 36 | self.w -= self.lr * dw 37 | self.b -= self.lr * db 38 | 39 | def predict(self, X): 40 | linear_model = np.dot(X, self.w) + self.b 41 | y_predicted = self._sigmoid(linear_model) 42 | y_predicted_cls = [1 if i > 0.5 else 0 for i in y_predicted] 43 | return np.array(y_predicted_cls) 44 | 45 | def _sigmoid(self, x): 46 | return 1 / (1 + np.exp(-x)) 47 | 48 | class DecisionTreeClassifier: 49 | def __init__(self, max_depth=None): 50 | self.max_depth = max_depth 51 | self.tree = {} 52 | 53 | def fit(self, X, y): 54 | self.tree = self._build_tree(X, y) 55 | 56 | def predict(self, X): 57 | return [self._predict(inputs) for inputs in X] 58 | 59 | def _build_tree(self, X, y): 60 | n_samples, n_features = X.shape 61 | n_labels = len(np.unique(y)) 62 | 63 | if (self.max_depth is not None and self.max_depth == 1) or n_labels == 1 or n_features == 0: 64 | leaf_value = np.argmax(np.bincount(y)) 65 | return leaf_value 66 | 67 | feat_idxs = np.random.choice(n_features, n_features, replace=False) 68 | best_feat = None 69 | best_thr = None 70 | best_gain = -1 71 | for idx in feat_idxs: 72 | X_column = X[:, idx] 73 | thresholds = np.unique(X_column) 74 | for threshold in thresholds: 75 | gain = self._information_gain(y, X_column, threshold) 76 | 77 | if gain > best_gain: 78 | best_gain = gain 79 | best_feat = idx 80 | best_thr = threshold 81 | 82 | if best_feat is None: 83 | leaf_value = np.argmax(np.bincount(y)) 84 | return leaf_value 85 | 86 | left_idxs, right_idxs = self._split(X[:, best_feat], best_thr) 87 | left = self._build_tree(X[left_idxs, :], y[left_idxs]) 88 | right = self._build_tree(X[right_idxs, :], y[right_idxs]) 89 | return {"feature": best_feat, "threshold": best_thr, "left": left, "right": right} 90 | 91 | def _predict(self, inputs): 92 | node = self.tree 93 | while isinstance(node, dict): 94 | feature = node["feature"] 95 | threshold = node["threshold"] 96 | if inputs[feature] <= threshold: 97 | node = node["left"] 98 | else: 99 | node = node["right"] 100 | 101 | return node 102 | 103 | def _information_gain(self, y, X_column, 
threshold): 104 | parent_entropy = self._entropy(y) 105 | 106 | left_idxs, right_idxs = self._split(X_column, threshold) 107 | if len(left_idxs) == 0 or len(right_idxs) == 0: 108 | return 0 109 | 110 | n = len(y) 111 | e1 = self._entropy(y[left_idxs]) 112 | e2 = self._entropy(y[right_idxs]) 113 | 114 | child_entropy = (len(left_idxs) / n) * e1 + (len(right_idxs) / n) * e2 115 | 116 | ig = parent_entropy - child_entropy 117 | return ig 118 | 119 | def _split(self, X_column, threshold): 120 | left_idxs = np.argwhere(X_column <= threshold).flatten() 121 | right_idxs = np.argwhere(X_column > threshold).flatten() 122 | return left_idxs, right_idxs 123 | 124 | def _entropy(self, y): 125 | hist = np.bincount(y) 126 | ps = hist / len(y) 127 | return -np.sum([p * np.log2(p) for p in ps if p > 0]) 128 | 129 | class RandomForestClassifier: 130 | def __init__(self, n_estimators=100, max_depth=None): 131 | self.n_estimators = n_estimators 132 | self.max_depth = max_depth 133 | self.trees = [] 134 | 135 | def fit(self, X, y): 136 | for _ in range(self.n_estimators): 137 | tree = DecisionTreeClassifier(max_depth=self.max_depth) 138 | tree.fit(X, y) 139 | self.trees.append(tree) 140 | 141 | def predict(self, X): 142 | predictions = [tree.predict(X) for tree in self.trees] 143 | predictions = np.array(predictions).T 144 | predictions = [np.bincount(prediction).argmax() for prediction in predictions] 145 | return np.array(predictions) 146 | 147 | class SupportVectorMachine: 148 | def __init__(self, learning_rate=0.001, lambda_param=0.01, n_iters=1000): 149 | self.lr = learning_rate 150 | self.lambda_param = lambda_param 151 | self.n_iters = n_iters 152 | self.w = None 153 | self.b = None 154 | 155 | def fit(self, X, y): 156 | n_samples, n_features = X.shape 157 | self.w = np.zeros(n_features) 158 | self.b = 0 159 | for _ in range(self.n_iters): 160 | for idx, x_i in enumerate(X): 161 | condition = y[idx] * (np.dot(x_i, self.w) - self.b) >= 1 162 | if condition: 163 | self.w -= self.lr * (2 * self.lambda_param * self.w) 164 | else: 165 | self.w -= self.lr * (2 * self.lambda_param * self.w - np.dot(x_i, y[idx])) 166 | self.b -= self.lr * y[idx] 167 | 168 | def predict(self, X): 169 | linear_output = np.dot(X, self.w) - self.b 170 | return np.sign(linear_output) 171 | 172 | def main(): 173 | X = np.array([[1, 2], [3, 4], [5, 6]]) 174 | y = np.array([0, 0, 1]) 175 | model = SupervisedLearning(LogisticRegression()) 176 | model.train(X, y) 177 | 178 | if __name__ == "__main__": 179 | main() 180 | -------------------------------------------------------------------------------- /ai/algorithms/supervised_learning/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from supervised_learning import SupervisedLearning, LogisticRegression, DecisionTreeClassifier, RandomForestClassifier, SupportVectorMachine 3 | 4 | def main(): 5 | X = np.array([[1, 2], [3, 4], [5, 6]]) 6 | y = np.array([0, 0, 1]) 7 | model = SupervisedLearning(LogisticRegression()) 8 | model.train(X, y) 9 | 10 | if __name__ == "__main__": 11 | main() 12 | -------------------------------------------------------------------------------- /ai/algorithms/unsupervised_learning/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from unsupervised_learning import UnsupervisedLearning, KMeansClustering, PrincipalComponentAnalysis, GaussianMixtureModel 3 | 4 | def main(): 5 | X = np.array([[1, 2], [3, 4], [5, 6]]) 6 | model = 
UnsupervisedLearning(KMeansClustering()) 7 | model.fit(X) 8 | predictions = model.predict(X) 9 | print(predictions) 10 | 11 | if __name__ == "__main__": 12 | main() 13 | -------------------------------------------------------------------------------- /ai/algorithms/unsupervised_learning/unsupervised_learning.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.cluster import KMeans 3 | from sklearn.decomposition import PCA 4 | from sklearn.mixture import GaussianMixture 5 | 6 | class UnsupervisedLearning: 7 | def __init__(self, model): 8 | self.model = model 9 | 10 | def fit(self, X): 11 | self.model.fit(X) 12 | 13 | def predict(self, X): 14 | return self.model.predict(X) 15 | 16 | class KMeansClustering: 17 | def __init__(self, n_clusters=5): 18 | self.n_clusters = n_clusters 19 | self.model = KMeans(n_clusters=n_clusters) 20 | 21 | def fit(self, X): 22 | self.model.fit(X) 23 | 24 | def predict(self, X): 25 | return self.model.predict(X) 26 | 27 | class PrincipalComponentAnalysis: 28 | def __init__(self, n_components=2): 29 | self.n_components = n_components 30 | self.model = PCA(n_components=n_components) 31 | 32 | def fit(self, X): 33 | self.model.fit(X) 34 | 35 | def transform(self, X): 36 | return self.model.transform(X) 37 | 38 | class GaussianMixtureModel: 39 | def __init__(self, n_components=5): 40 | self.n_components = n_components 41 | self.model = GaussianMixture(n_components=n_components) 42 | 43 | def fit(self, X): 44 | self.model.fit(X) 45 | 46 | def predict(self, X): 47 | return self.model.predict(X) 48 | 49 | def main(): 50 | X = np.array([[1, 2], [3, 4], [5, 6]]) 51 | model = UnsupervisedLearning(KMeansClustering()) 52 | model.fit(X) 53 | predictions = model.predict(X) 54 | print(predictions) 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /ai/models/decision_trees/decision_tree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class DecisionTree: 4 | def __init__(self, max_depth=None): 5 | self.max_depth = max_depth 6 | self.tree = {} 7 | 8 | def fit(self, X, y): 9 | self.tree = self._build_tree(X, y) 10 | 11 | def predict(self, X): 12 | return [self._predict(inputs) for inputs in X] 13 | 14 | def _build_tree(self, X, y): 15 | n_samples, n_features = X.shape 16 | n_labels = len(np.unique(y)) 17 | 18 | if (self.max_depth is not None and self.max_depth == 1) or n_labels == 1 or n_features == 0: 19 | leaf_value = np.argmax(np.bincount(y)) 20 | return leaf_value 21 | 22 | feat_idxs = np.random.choice(n_features, n_features, replace=False) 23 | best_feat = None 24 | best_thr = None 25 | best_gain = -1 26 | for idx in feat_idxs: 27 | X_column = X[:, idx] 28 | thresholds = np.unique(X_column) 29 | for threshold in thresholds: 30 | gain = self._information_gain(y, X_column, threshold) 31 | 32 | if gain > best_gain: 33 | best_gain = gain 34 | best_feat = idx 35 | best_thr = threshold 36 | 37 | if best_feat is None: 38 | leaf_value = np.argmax(np.bincount(y)) 39 | return leaf_value 40 | 41 | left_idxs, right_idxs = self._split(X[:, best_feat], best_thr) 42 | left = self._build_tree(X[left_idxs, :], y[left_idxs]) 43 | right = self._build_tree(X[right_idxs, :], y[right_idxs]) 44 | return {"feature": best_feat, "threshold": best_thr, "left": left, "right": right} 45 | 46 | def _predict(self, inputs): 47 | node = self.tree 48 | while isinstance(node, dict): 49 | 
feature = node["feature"] 50 | threshold = node["threshold"] 51 | if inputs[feature] <= threshold: 52 | node = node["left"] 53 | else: 54 | node = node["right"] 55 | 56 | return node 57 | 58 | def _information_gain(self, y, X_column, threshold): 59 | parent_entropy = self._entropy(y) 60 | 61 | left_idxs, right_idxs = self._split(X_column, threshold) 62 | if len(left_idxs) == 0 or len(right_idxs) == 0: 63 | return 0 64 | 65 | n = len(y) 66 | e1 = self._entropy(y[left_idxs]) 67 | e2 = self._entropy(y[right_idxs]) 68 | 69 | child_entropy = (len(left_idxs) / n) * e1 + (len(right_idxs) / n) * e2 70 | 71 | ig = parent_entropy - child_entropy 72 | return ig 73 | 74 | def _split(self, X_column, threshold): 75 | left_idxs = np.argwhere(X_column <= threshold).flatten() 76 | right_idxs = np.argwhere(X_column > threshold).flatten() 77 | return left_idxs, right_idxs 78 | 79 | def _entropy(self, y): 80 | hist = np.bincount(y) 81 | ps = hist / len(y) 82 | return -np.sum([p * np.log2(p) for p in ps if p > 0]) 83 | 84 | def train(model, X, y): 85 | model.fit(X, y) 86 | 87 | def test(model, X, y): 88 | y_pred = model.predict(X) 89 | accuracy = np.sum(y_pred == y) / len(y) 90 | return accuracy 91 | 92 | def main(): 93 | X = np.array([[1, 2], [3, 4], [5, 6]]) 94 | y = np .array([0, 0, 1]) 95 | model = DecisionTree(max_depth=2) 96 | train(model, X, y) 97 | accuracy = test(model, X, y) 98 | print(f"Accuracy: {accuracy:.4f}") 99 | 100 | if __name__ == "__main__": 101 | main() 102 | -------------------------------------------------------------------------------- /ai/models/decision_trees/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from decision_tree import DecisionTree, train, test 3 | 4 | def main(): 5 | X = np.array([[1, 2], [3, 4], [5, 6]]) 6 | y = np.array([0, 0, 1]) 7 | model = DecisionTree(max_depth=2) 8 | train(model, X, y) 9 | accuracy = test(model, X, y) 10 | print(f"Accuracy: {accuracy:.4f}") 11 | 12 | if __name__ == "__main__": 13 | main() 14 | -------------------------------------------------------------------------------- /ai/models/neural_networks/neural_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | from torch.utils.data import Dataset, DataLoader 6 | 7 | class NeuralNetwork(nn.Module): 8 | def __init__(self, input_dim, hidden_dim, output_dim): 9 | super(NeuralNetwork, self).__init__() 10 | self.fc1 = nn.Linear(input_dim, hidden_dim) 11 | self.relu = nn.ReLU() 12 | self.fc2 = nn.Linear(hidden_dim, output_dim) 13 | 14 | def forward(self, x): 15 | out = self.fc1(x) 16 | out = self.relu(out) 17 | out = self.fc2(out) 18 | return out 19 | 20 | class ConvolutionalNeuralNetwork(nn.Module): 21 | def __init__(self, input_dim, hidden_dim, output_dim): 22 | super(ConvolutionalNeuralNetwork, self).__init__() 23 | self.conv1 = nn.Conv2d(input_dim, hidden_dim, kernel_size=3) 24 | self.relu = nn.ReLU() 25 | self.conv2 = nn.Conv2d(hidden_dim, output_dim, kernel_size=3) 26 | 27 | def forward(self, x): 28 | out = self.conv1(x) 29 | out = self.relu(out) 30 | out = self.conv2(out) 31 | return out 32 | 33 | class RecurrentNeuralNetwork(nn.Module): 34 | def __init__(self, input_dim, hidden_dim, output_dim): 35 | super(RecurrentNeuralNetwork, self).__init__() 36 | self.rnn = nn.LSTM(input_dim, hidden_dim, num_layers=1, batch_first=True) 37 | self.fc = nn.Linear(hidden_dim, output_dim) 38 | 39 | def 
forward(self, x): 40 | h0 = torch.zeros(1, x.size(0), self.rnn.hidden_size).to(x.device) 41 | c0 = torch.zeros(1, x.size(0), self.rnn.hidden_size).to(x.device) 42 | out, _ = self.rnn(x, (h0, c0)) 43 | out = self.fc(out[:, -1, :]) 44 | return out 45 | 46 | class Autoencoder(nn.Module): 47 | def __init__(self, input_dim, hidden_dim): 48 | super(Autoencoder, self).__init__() 49 | self.encoder = nn.Sequential( 50 | nn.Linear(input_dim, hidden_dim), 51 | nn.ReLU(), 52 | nn.Linear(hidden_dim, hidden_dim) 53 | ) 54 | self.decoder = nn.Sequential( 55 | nn.Linear(hidden_dim, hidden_dim), 56 | nn.ReLU(), 57 | nn.Linear(hidden_dim, input_dim) 58 | ) 59 | 60 | def forward(self, x): 61 | encoded = self.encoder(x) 62 | decoded = self.decoder(encoded) 63 | return decoded 64 | 65 | class GenerativeAdversarialNetwork(nn.Module): 66 | def __init__(self, input_dim, hidden_dim): 67 | super(GenerativeAdversarialNetwork, self).__init__() 68 | self.generator = nn.Sequential( 69 | nn.Linear(input_dim, hidden_dim), 70 | nn.ReLU(), 71 | nn.Linear(hidden_dim, input_dim)  # map back to the data dimension so the discriminator accepts generated samples 72 | ) 73 | self.discriminator = nn.Sequential( 74 | nn.Linear(input_dim, hidden_dim), 75 | nn.ReLU(), 76 | nn.Linear(hidden_dim, 1) 77 | ) 78 | 79 | def forward(self, x): 80 | generated = self.generator(x) 81 | validity = self.discriminator(generated) 82 | return generated, validity 83 | 84 | class NeuralNetworkDataset(Dataset): 85 | def __init__(self, X, y): 86 | self.X = X 87 | self.y = y 88 | 89 | def __len__(self): 90 | return len(self.X) 91 | 92 | def __getitem__(self, idx): 93 | return self.X[idx], self.y[idx] 94 | 95 | def train(model, device, loader, optimizer, criterion): 96 | model.train() 97 | total_loss = 0 98 | for batch_idx, (data, target) in enumerate(loader): 99 | data, target = data.to(device), target.to(device) 100 | optimizer.zero_grad() 101 | output = model(data) 102 | loss = criterion(output, target) 103 | loss.backward() 104 | optimizer.step() 105 | total_loss += loss.item() 106 | return total_loss / len(loader) 107 | 108 | def test(model, device, loader, criterion): 109 | model.eval() 110 | total_loss = 0 111 | with torch.no_grad(): 112 | for data, target in loader: 113 | data, target = data.to(device), target.to(device) 114 | output = model(data) 115 | loss = criterion(output, target) 116 | total_loss += loss.item() 117 | return total_loss / len(loader) 118 | 119 | def main(): 120 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 121 | model = NeuralNetwork(input_dim=784, hidden_dim=256, output_dim=10) 122 | model.to(device) 123 | optimizer = optim.Adam(model.parameters(), lr=0.001) 124 | criterion = nn.CrossEntropyLoss() 125 | train_loader = DataLoader(NeuralNetworkDataset(torch.randn(512, 784), torch.randint(0, 10, (512,))), batch_size=32, shuffle=True)  # synthetic stand-in data so the demo runs end to end 126 | test_loader = DataLoader(NeuralNetworkDataset(torch.randn(128, 784), torch.randint(0, 10, (128,))), batch_size=32, shuffle=False) 127 | for epoch in range(10): 128 | train_loss = train(model, device, train_loader, optimizer, criterion) 129 | test_loss = test(model, device, test_loader, criterion) 130 | print(f"Epoch {epoch+1}, Train Loss: {train_loss:.4f}, Test Loss: {test_loss:.4f}") 131 | 132 | if __name__ == "__main__": 133 | main() 134 | -------------------------------------------------------------------------------- /ai/models/neural_networks/train.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from neural_network import NeuralNetwork, train, test 5 | 6 | def main(): 7 | device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") 8 | model = NeuralNetwork(input_dim=784, hidden_dim=256, output_dim=10) 9 | model.to(device) 10 | optimizer = optim.Adam(model.parameters(), lr=0.001) 11 | criterion = nn.CrossEntropyLoss() 12 | train_loader = ... 13 | test_loader = ... 14 | for epoch in range(10): 15 | train_loss = train(model, device, train_loader, optimizer, criterion) 16 | test_loss = test(model, device, test_loader, criterion) 17 | print(f"Epoch {epoch+1}, Train Loss: {train_loss:.4f}, Test Loss: {test_loss:.4f}") 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /ai/utils/data_preprocessing/data_preprocessing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from sklearn.model_selection import train_test_split 4 | from sklearn.preprocessing import StandardScaler, MinMaxScaler 5 | 6 | class DataPreprocessing: 7 | def __init__(self): 8 | pass 9 | 10 | def load_data(self, file_path): 11 | try: 12 | data = pd.read_csv(file_path) 13 | return data 14 | except Exception as e: 15 | print(f"Error loading data: {e}") 16 | 17 | def split_data(self, data, test_size=0.2, random_state=42): 18 | X = data.drop('target', axis=1) 19 | y = data['target'] 20 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) 21 | return X_train, X_test, y_train, y_test 22 | 23 | def scale_data(self, X_train, X_test, scaling_method='standard'): 24 | if scaling_method == 'standard': 25 | scaler = StandardScaler() 26 | elif scaling_method == 'min_max': 27 | scaler = MinMaxScaler() 28 | else: 29 | raise ValueError("Invalid scaling method. Please choose 'standard' or 'min_max'.") 30 | 31 | X_train_scaled = scaler.fit_transform(X_train) 32 | X_test_scaled = scaler.transform(X_test) 33 | return X_train_scaled, X_test_scaled 34 | 35 | def handle_missing_values(self, data, strategy='mean'): 36 | if strategy == 'mean': 37 | data.fillna(data.mean(), inplace=True) 38 | elif strategy == 'median': 39 | data.fillna(data.median(), inplace=True) 40 | elif strategy == 'mode': 41 | data.fillna(data.mode().iloc[0], inplace=True) 42 | else: 43 | raise ValueError("Invalid strategy for handling missing values. 
Please choose 'mean', 'median', or 'mode'.") 44 | 45 | return data 46 | 47 | def main(): 48 | data_preprocessing = DataPreprocessing() 49 | data = data_preprocessing.load_data('data.csv') 50 | data = data_preprocessing.handle_missing_values(data)  # impute before splitting and scaling (StandardScaler cannot handle NaNs) 51 | X_train, X_test, y_train, y_test = data_preprocessing.split_data(data) 52 | X_train_scaled, X_test_scaled = data_preprocessing.scale_data(X_train, X_test) 53 | 54 | if __name__ == "__main__": 55 | main() 56 | -------------------------------------------------------------------------------- /ai/utils/data_preprocessing/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from data_preprocessing import DataPreprocessing 3 | 4 | def main(): 5 | data_preprocessing = DataPreprocessing() 6 | data = data_preprocessing.load_data('data.csv') 7 | data = data_preprocessing.handle_missing_values(data)  # impute before splitting and scaling 8 | X_train, X_test, y_train, y_test = data_preprocessing.split_data(data) 9 | X_train_scaled, X_test_scaled = data_preprocessing.scale_data(X_train, X_test) 10 | 11 | if __name__ == "__main__": 12 | main() 13 | -------------------------------------------------------------------------------- /ai/utils/feature_extraction/feature_extraction.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.decomposition import PCA 3 | from sklearn.feature_selection import SelectKBest, chi2 4 | 5 | class FeatureExtraction: 6 | def __init__(self): 7 | pass 8 | 9 | def pca(self, X, n_components=2): 10 | pca = PCA(n_components=n_components) 11 | X_pca = pca.fit_transform(X) 12 | return X_pca 13 | 14 | def select_k_best(self, X, y, k=10): 15 | selector = SelectKBest(chi2, k=k) 16 | X_selected = selector.fit_transform(X, y) 17 | return X_selected 18 | 19 | def recursive_feature_elimination(self, X, y, n_features=10): 20 | from sklearn.feature_selection import RFE 21 | from sklearn.linear_model import LogisticRegression 22 | estimator = LogisticRegression() 23 | selector = RFE(estimator, n_features_to_select=n_features) 24 | X_selected = selector.fit_transform(X, y) 25 | return X_selected 26 | 27 | def main(): 28 | feature_extraction = FeatureExtraction() 29 | X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) 30 | y = np.array([0, 0, 1]) 31 | X_pca = feature_extraction.pca(X) 32 | X_selected = feature_extraction.select_k_best(X, y, k=2)  # k must not exceed the number of features 33 | X_selected = feature_extraction.recursive_feature_elimination(X, y, n_features=2) 34 | 35 | if __name__ == "__main__": 36 | main() 37 | -------------------------------------------------------------------------------- /ai/utils/feature_extraction/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from feature_extraction import FeatureExtraction 3 | 4 | def main(): 5 | feature_extraction = FeatureExtraction() 6 | X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) 7 | y = np.array([0, 0, 1]) 8 | X_pca = feature_extraction.pca(X) 9 | X_selected = feature_extraction.select_k_best(X, y, k=2) 10 | X_selected = feature_extraction.recursive_feature_elimination(X, y, n_features=2) 11 | 12 | if __name__ == "__main__": 13 | main() 14 | -------------------------------------------------------------------------------- /algorithms/searching/binary_search.py: -------------------------------------------------------------------------------- 1 | # binary_search.py 2 | 3 | def binary_search(arr, target): 4 | low = 0 5 | high = len(arr) - 1 6 | while low <= high: 7 | mid = (low + high) // 2 8 | if arr[mid] 
== target: 9 | return mid 10 | elif arr[mid] < target: 11 | low = mid + 1 12 | else: 13 | high = mid - 1 14 | return -1 15 | 16 | def binary_search_recursive(arr, target): 17 | if len(arr) == 0: 18 | return -1 19 | mid = len(arr) // 2 20 | if arr[mid] == target: 21 | return mid 22 | elif arr[mid] < target: 23 | result = binary_search_recursive(arr[mid + 1:], target) 24 | return result + mid + 1 if result != -1 else -1  # keep -1 when the target is absent 25 | else: 26 | return binary_search_recursive(arr[:mid], target) 27 | 28 | def binary_search_iterative(arr, target): 29 | stack = [(0, len(arr) - 1)] 30 | while stack: 31 | low, high = stack.pop() 32 | mid = (low + high) // 2 33 | if arr[mid] == target: 34 | return mid 35 | elif arr[mid] < target: 36 | stack.append((mid + 1, high)) 37 | else: 38 | stack.append((low, mid - 1)) 39 | return -1 40 | 41 | def binary_search_parallel(arr, target):  # sequential placeholder for a parallel variant 42 | if len(arr) == 0: 43 | return -1 44 | mid = len(arr) // 2 45 | if arr[mid] == target: 46 | return mid 47 | elif arr[mid] < target: 48 | result = binary_search_parallel(arr[mid + 1:], target) 49 | return result + mid + 1 if result != -1 else -1 50 | else: 51 | return binary_search_parallel(arr[:mid], target) 52 | 53 | def binary_search_hybrid(arr, target): 54 | if len(arr) <= 10: 55 | return linear_search(arr, target) 56 | low = 0 57 | high = len(arr) - 1 58 | while low <= high: 59 | mid = (low + high) // 2 60 | if arr[mid] == target: 61 | return mid 62 | elif arr[mid] < target: 63 | low = mid + 1 64 | else: 65 | high = mid - 1 66 | return -1 67 | 68 | def linear_search(arr, target): 69 | for i in range(len(arr)): 70 | if arr[i] == target: 71 | return i 72 | return -1 73 | 74 | # Example usage: 75 | arr = [1, 2, 3, 4, 5, 6, 7, 8, 9] 76 | print(binary_search(arr, 5)) # 4 77 | print(binary_search_recursive(arr, 5)) # 4 78 | print(binary_search_iterative(arr, 5)) # 4 79 | print(binary_search_parallel(arr, 5)) # 4 80 | print(binary_search_hybrid(arr, 5)) # 4 81 | -------------------------------------------------------------------------------- /algorithms/searching/linear_search.py: -------------------------------------------------------------------------------- 1 | # linear_search.py 2 | 3 | def linear_search(arr, target): 4 | for i in range(len(arr)): 5 | if arr[i] == target: 6 | return i 7 | return -1 8 | 9 | def linear_search_recursive(arr, target): 10 | if len(arr) == 0: 11 | return -1 12 | if arr[0] == target: 13 | return 0 14 | result = linear_search_recursive(arr[1:], target) 15 | return result + 1 if result != -1 else -1  # keep -1 when the target is absent 16 | 17 | def linear_search_iterative(arr, target): 18 | for i in range(len(arr)): 19 | if arr[i] == target: 20 | return i 21 | return -1 22 | 23 | def linear_search_parallel(arr, target):  # sequential placeholder for a parallel variant 24 | if len(arr) == 0: 25 | return -1 26 | if arr[0] == target: 27 | return 0 28 | result = linear_search_parallel(arr[1:], target) 29 | return result + 1 if result != -1 else -1 30 | 31 | def linear_search_hybrid(arr, target): 32 | if len(arr) <= 10: 33 | return linear_search(arr, target) 34 | for i in range(len(arr)): 35 | if arr[i] == target: 36 | return i 37 | return -1 38 | 39 | def linear_search_with_sentinel(arr, target): 40 | arr.append(target)  # sentinel guarantees the scan terminates 41 | i = 0 42 | while arr[i] != target: 43 | i += 1 44 | arr.pop()  # restore the input list 45 | return -1 if i == len(arr) else i 46 | 47 | def linear_search_with_hashing(arr, target): 48 | hash_table = {} 49 | for i in range(len(arr)): 50 | hash_table[arr[i]] = i 51 | return hash_table.get(target, -1) 52 | 53 | # Example usage: 54 | arr = [1, 2, 3, 4, 5, 6, 7, 8, 9] 55 | print(linear_search(arr, 5)) # 4 56 | print(linear_search_recursive(arr, 5)) # 4 57 | print(linear_search_iterative(arr, 5)) # 4 58 | print(linear_search_parallel(arr, 5)) # 4 59 | 
print(linear_search_hybrid(arr, 5)) # 4 60 | print(linear_search_with_sentinel(arr, 5)) # 4 61 | print(linear_search_with_hashing(arr, 5)) # 4 62 | -------------------------------------------------------------------------------- /algorithms/sorting/merge_sort.py: -------------------------------------------------------------------------------- 1 | # merge_sort.py 2 | 3 | def merge_sort(arr): 4 | if len(arr) <= 1: 5 | return arr 6 | mid = len(arr) // 2 7 | left = merge_sort(arr[:mid]) 8 | right = merge_sort(arr[mid:]) 9 | return merge(left, right) 10 | 11 | def merge(left, right): 12 | result = [] 13 | while len(left) > 0 and len(right) > 0: 14 | if left[0] <= right[0]: 15 | result.append(left.pop(0)) 16 | else: 17 | result.append(right.pop(0)) 18 | result.extend(left) 19 | result.extend(right) 20 | return result 21 | 22 | def merge_sort_in_place(arr): 23 | _merge_sort_in_place(arr, 0, len(arr) - 1) 24 | 25 | def _merge_sort_in_place(arr, low, high): 26 | if low < high: 27 | mid = (low + high) // 2 28 | _merge_sort_in_place(arr, low, mid) 29 | _merge_sort_in_place(arr, mid + 1, high) 30 | _merge_in_place(arr, low, mid, high) 31 | 32 | def _merge_in_place(arr, low, mid, high): 33 | left = arr[low:mid + 1] 34 | right = arr[mid + 1:high + 1] 35 | i = j = 0 36 | k = low 37 | while i < len(left) and j < len(right): 38 | if left[i] <= right[j]: 39 | arr[k] = left[i] 40 | i += 1 41 | else: 42 | arr[k] = right[j] 43 | j += 1 44 | k += 1 45 | while i < len(left): 46 | arr[k] = left[i] 47 | i += 1 48 | k += 1 49 | while j < len(right): 50 | arr[k] = right[j] 51 | j += 1 52 | k += 1 53 | 54 | def merge_sort_iterative(arr):  # bottom-up iterative merge sort 55 | result = list(arr) 56 | width = 1 57 | while width < len(result): 58 | for low in range(0, len(result), 2 * width): 59 | mid = min(low + width, len(result)) 60 | high = min(low + 2 * width, len(result)) 61 | result[low:high] = merge(result[low:mid], result[mid:high]) 62 | width *= 2 63 | return result 64 | 65 | def merge_sort_parallel(arr):  # sequential placeholder for a parallel variant 66 | if len(arr) <= 1: 67 | return arr 68 | mid = len(arr) // 2 69 | left = merge_sort_parallel(arr[:mid]) 70 | right = merge_sort_parallel(arr[mid:]) 71 | return merge(left, right) 72 | 73 | def merge_sort_hybrid(arr): 74 | if len(arr) <= 10: 75 | return sorted(arr) 76 | mid = len(arr) // 2 77 | left = merge_sort_hybrid(arr[:mid]) 78 | right = merge_sort_hybrid(arr[mid:]) 79 | return merge(left, right) 80 | 81 | # Example usage: 82 | arr = [5, 2, 9, 1, 7, 3, 6, 8, 4] 83 | print(merge_sort(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 84 | merge_sort_in_place(arr)  # sorts arr in place and returns None 85 | print(arr) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 86 | print(merge_sort_iterative(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 87 | print(merge_sort_parallel(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 88 | print(merge_sort_hybrid(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 89 | -------------------------------------------------------------------------------- /algorithms/sorting/quick_sort.py: -------------------------------------------------------------------------------- 1 | # quick_sort.py 2 | 3 | def quick_sort(arr): 4 | if len(arr) <= 1: 5 | return arr 6 | pivot = arr[len(arr) // 2] 7 | left = [x for x in arr if x < pivot] 8 | middle = [x for x in arr if x == pivot] 9 | right = [x for x in arr if x > pivot] 10 | return quick_sort(left) + middle + quick_sort(right) 11 | 12 | def quick_sort_in_place(arr): 13 | _quick_sort_in_place(arr, 0, len(arr) - 1) 14 | 15 | def _quick_sort_in_place(arr, low, high): 16 | if low < high: 17 | pivot_index = _partition(arr, low, high) 18 | _quick_sort_in_place(arr, low, pivot_index - 1) 19 | _quick_sort_in_place(arr, pivot_index + 1, high) 20 | 21 | def _partition(arr, low, high): 22 | 
pivot = arr[high] 23 | i = low - 1 24 | for j in range(low, high): 25 | if arr[j] < pivot: 26 | i += 1 27 | arr[i], arr[j] = arr[j], arr[i] 28 | arr[i + 1], arr[high] = arr[high], arr[i + 1] 29 | return i + 1 30 | 31 | def quick_sort_iterative(arr): 32 | stack = [(0, len(arr) - 1)] 33 | while stack: 34 | low, high = stack.pop() 35 | if low < high: 36 | pivot_index = _partition(arr, low, high) 37 | stack.append((low, pivot_index - 1)) 38 | stack.append((pivot_index + 1, high)) 39 | return arr 40 | 41 | def quick_sort_parallel(arr):  # sequential placeholder for a parallel variant 42 | if len(arr) <= 1: 43 | return arr 44 | pivot = arr[len(arr) // 2] 45 | left = [x for x in arr if x < pivot] 46 | middle = [x for x in arr if x == pivot] 47 | right = [x for x in arr if x > pivot] 48 | return quick_sort_parallel(left) + middle + quick_sort_parallel(right) 49 | 50 | def quick_sort_hybrid(arr): 51 | if len(arr) <= 10: 52 | return sorted(arr) 53 | pivot = arr[len(arr) // 2] 54 | left = [x for x in arr if x < pivot] 55 | middle = [x for x in arr if x == pivot] 56 | right = [x for x in arr if x > pivot] 57 | return quick_sort_hybrid(left) + middle + quick_sort_hybrid(right) 58 | 59 | # Example usage: 60 | arr = [5, 2, 9, 1, 7, 3, 6, 8, 4] 61 | print(quick_sort(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 62 | quick_sort_in_place(arr)  # sorts arr in place and returns None 63 | print(arr) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 64 | print(quick_sort_iterative(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 65 | print(quick_sort_parallel(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 66 | print(quick_sort_hybrid(arr)) # [1, 2, 3, 4, 5, 6, 7, 8, 9] 67 | -------------------------------------------------------------------------------- /docs/NeuroNexus.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KOSASIH/neuronexus-core/fca78004ca78a155a69d63cea25dd598672af535/docs/NeuroNexus.jpeg -------------------------------------------------------------------------------- /docs/architecture/ai/architecture.md: -------------------------------------------------------------------------------- 1 | # AI Architecture 2 | 3 | ## Overview 4 | 5 | Our AI architecture is designed to provide a scalable and flexible framework for building and deploying AI models. The architecture is based on a microservices approach, with each component designed to be highly modular and reusable. 6 | 7 | ## Components 8 | 9 | * **Data Ingestion**: Responsible for collecting and processing data from various sources. 10 | * **Model Training**: Responsible for training and validating AI models using the ingested data. 11 | * **Model Deployment**: Responsible for deploying trained models to production environments. 12 | * **Model Serving**: Responsible for serving deployed models and handling incoming requests. 13 | 14 | ## Data Flow 15 | 16 | 1. Data is ingested from various sources and processed into a standardized format. 17 | 2. The processed data is then used to train and validate AI models. 18 | 3. Trained models are deployed to production environments. 19 | 4. Deployed models are served and handle incoming requests (a minimal end-to-end sketch follows below). 
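To make the four stages concrete, here is a deliberately small, self-contained sketch of the same flow as plain Python functions. The function names and the synthetic data are assumptions for illustration only; in the actual architecture each stage is a separate microservice.

```python
# Illustrative sketch of the ingestion -> training -> deployment -> serving flow.
# Names and data are hypothetical; each stage is a separate service in production.
import numpy as np
from sklearn.linear_model import LogisticRegression

def ingest_data():
    # Data Ingestion: collect raw records and standardize them into arrays.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 4))
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    return X, y

def train_model(X, y):
    # Model Training: fit on one split, validate on the other.
    model = LogisticRegression().fit(X[:150], y[:150])
    print("validation accuracy:", model.score(X[150:], y[150:]))
    return model

def deploy_model(model):
    # Model Deployment: package and ship the model; here it is a pass-through.
    return model

def serve(model, request):
    # Model Serving: answer a single incoming prediction request.
    return int(model.predict(request.reshape(1, -1))[0])

X, y = ingest_data()
deployed = deploy_model(train_model(X, y))
print("prediction:", serve(deployed, X[0]))
```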
20 | 21 | ## Benefits 22 | 23 | * Scalable and flexible architecture 24 | * Highly modular and reusable components 25 | * Easy to integrate with existing systems 26 | * Supports a wide range of AI models and frameworks 27 | -------------------------------------------------------------------------------- /docs/architecture/quantum/architecture.md: -------------------------------------------------------------------------------- 1 | # Quantum Architecture 2 | 3 | ## Overview 4 | 5 | Our quantum architecture is designed to provide a scalable and flexible framework for building and deploying quantum applications. The architecture is based on a microservices approach, with each component designed to be highly modular and reusable. 6 | 7 | ## Components 8 | 9 | * **Quantum Circuit**: Responsible for executing quantum circuits and simulating quantum systems. 10 | * **Quantum Algorithm**: Responsible for implementing quantum algorithms and solving complex problems. 11 | * **Quantum Control**: Responsible for controlling and optimizing quantum systems. 12 | * **Quantum Error Correction**: Responsible for correcting errors in quantum systems. 13 | 14 | ## Data Flow 15 | 16 | 1. Quantum circuits are executed and simulated using quantum hardware or software. 17 | 2. Quantum algorithms are implemented and executed on the simulated quantum systems. 18 | 3. Quantum control is used to optimize and control the quantum systems. 19 | 4. Quantum error correction is used to correct errors in the quantum systems. 20 | 21 | ## Benefits 22 | 23 | * Scalable and flexible architecture 24 | * Highly modular and reusable components 25 | * Easy to integrate with existing systems 26 | * Supports a wide range of quantum algorithms and frameworks 27 | -------------------------------------------------------------------------------- /docs/tutorials/advanced_topics/advanced_topics.md: -------------------------------------------------------------------------------- 1 | # Advanced Topics 2 | 3 | ## Overview 4 | 5 | This tutorial will guide you through the process of exploring advanced topics in AI and quantum computing. 6 | 7 | ## Prerequisites 8 | 9 | * Familiarity with AI and quantum concepts 10 | * Basic understanding of programming languages such as Python 11 | 12 | ## Step 1: Explore Advanced AI Topics 13 | 14 | * Explore advanced AI topics such as deep learning and natural language processing. 15 | * Learn about the latest advancements in AI research and development. 16 | 17 | ## Step 2: Explore Advanced Quantum Topics 18 | 19 | * Explore advanced quantum topics such as quantum error correction and quantum simulation. 20 | * Learn about the latest advancements in quantum research and development. 21 | 22 | ## Step 3: Implement Advanced AI and Quantum Algorithms 23 | 24 | * Implement advanced AI and quantum algorithms using popular frameworks such as TensorFlow and Qiskit. 25 | * Learn about the best practices for implementing AI and quantum algorithms. 26 | 27 | ## Step 4: Optimize AI and Quantum Performance 28 | 29 | * Optimize AI and quantum performance using techniques such as parallel processing and distributed computing. 30 | * Learn about the best practices for optimizing AI and quantum performance. 
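As a concrete first exercise for Step 3, the sketch below builds a two-qubit Bell-state circuit with Qiskit. It assumes only the core `qiskit` package is installed and stops at drawing the circuit, so no simulator backend is required.

```python
# Minimal Qiskit sketch: prepare and inspect a two-qubit Bell state.
from qiskit import QuantumCircuit

qc = QuantumCircuit(2, 2)
qc.h(0)                      # put qubit 0 into superposition
qc.cx(0, 1)                  # entangle qubit 1 with qubit 0
qc.measure([0, 1], [0, 1])   # measure both qubits

print(qc.draw())             # ASCII diagram of the circuit
```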
31 | 32 | ## Benefits 33 | 34 | * In-depth knowledge of advanced AI and quantum topics 35 | * Hands-on experience with implementing advanced AI and quantum algorithms 36 | * Improved performance and efficiency in AI and quantum applications 37 | -------------------------------------------------------------------------------- /docs/tutorials/getting_started/getting_started.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | ## Overview 4 | 5 | This tutorial will guide you through the process of getting started with our AI and quantum platforms. 6 | 7 | ## Prerequisites 8 | 9 | * Familiarity with Python programming language 10 | * Basic understanding of AI and quantum concepts 11 | 12 | ## Step 1: Install Required Libraries 13 | 14 | * Install the required libraries using pip: `pip install -r requirements.txt` 15 | 16 | ## Step 2: Set up the Environment 17 | 18 | * Set up the environment by running the following command: `python setup.py` 19 | 20 | ## Step 3: Run the Examples 21 | 22 | * Run the examples by executing the following command: `python examples.py` 23 | 24 | ## Step 4: Explore the Documentation 25 | 26 | * Explore the documentation by visiting the following link: 27 | 28 | ## Benefits 29 | 30 | * Easy to follow tutorial 31 | * Step-by-step instructions 32 | * Supports a wide range of AI and quantum frameworks 33 | -------------------------------------------------------------------------------- /integrations/slack/README.md: -------------------------------------------------------------------------------- 1 | # Slack Integration 2 | 3 | This integration provides a seamless way to interact with Slack from within Neuronexus Core. 4 | 5 | ## Features 6 | 7 | * Send messages to Slack channels 8 | * Receive messages from Slack channels 9 | * Support for Slack slash commands 10 | 11 | ## Installation 12 | 13 | 1. Install the required libraries using pip: `pip install -r requirements.txt` 14 | 2. 
Set up the Slack integration by running the following command: `python setup.py` 15 | 16 | ## Configuration 17 | 18 | * Set the Slack API token in the `config.json` file 19 | * Set the Slack channel ID in the `config.json` file 20 | 21 | ## Usage 22 | 23 | * Send a message to a Slack channel using the `send_message` function 24 | * Receive a message from a Slack channel using the `receive_message` function 25 | 26 | ## Benefits 27 | 28 | * Easy to use and integrate with Slack 29 | * Supports a wide range of Slack features 30 | * Highly customizable 31 | -------------------------------------------------------------------------------- /integrations/slack/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "slack_token": "YOUR_SLACK_API_TOKEN", 3 | "slack_channel": "YOUR_SLACK_CHANNEL_ID" 4 | } 5 | -------------------------------------------------------------------------------- /integrations/slack/requirements.txt: -------------------------------------------------------------------------------- 1 | slackclient  # installs the 'slack' module that slack.py imports 2 | -------------------------------------------------------------------------------- /integrations/slack/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | # Set up the Slack integration 4 | def setup(): 5 | # Create the config.json file if it doesn't exist 6 | if not os.path.exists('config.json'): 7 | with open('config.json', 'w') as f: 8 | json.dump({'slack_token': '', 'slack_channel': ''}, f) 9 | 10 | # Install the required libraries 11 | os.system('pip install -r requirements.txt') 12 | 13 | if __name__ == '__main__': 14 | setup() 15 | -------------------------------------------------------------------------------- /integrations/slack/slack.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from slack import WebClient 4 | 5 | # Load the configuration from the config.json file 6 | with open('config.json') as f: 7 | config = json.load(f) 8 | 9 | # Set the Slack API token and channel ID 10 | slack_token = config['slack_token'] 11 | slack_channel = config['slack_channel'] 12 | 13 | # Create a Slack client 14 | slack_client = WebClient(token=slack_token) 15 | 16 | def send_message(message): 17 | # Send a message to the Slack channel 18 | slack_client.chat_postMessage(channel=slack_channel, text=message) 19 | 20 | def receive_message(): 21 | # Fetch the most recent message from the Slack channel 22 | response = slack_client.conversations_history(channel=slack_channel, limit=1) 23 | return response['messages'][0]['text'] 24 | -------------------------------------------------------------------------------- /network/architecture/decentralized/decentralized_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class DecentralizedNetwork(nn.Module): 7 | def __init__(self, num_nodes, num_edges): 8 | super(DecentralizedNetwork, self).__init__() 9 | self.num_nodes = num_nodes 10 | self.num_edges = num_edges 11 | self.adjacency_matrix = nn.Parameter(torch.randn(num_nodes, num_nodes)) 12 | self.edge_weights = nn.Parameter(torch.randn(num_edges, 1)) 13 | 14 | def forward(self, input_data): 15 | output = torch.matmul(input_data, self.adjacency_matrix)  # (batch, nodes) x (nodes, nodes) keeps the node dimension 16 | output = output * self.edge_weights.mean()  # edge weights enter as a learned global scale; a per-edge rule would need an explicit edge list 17 | return output 18 | 19 | class BlockchainNetwork(nn.Module): 20 | def __init__(self, num_nodes,
num_blocks): 21 | super(BlockchainNetwork, self).__init__() 22 | self.num_nodes = num_nodes 23 | self.num_blocks = num_blocks 24 | self.blockchain = nn.Parameter(torch.randn(num_blocks, num_nodes)) 25 | 26 | def forward(self, input_data): 27 | output = torch.matmul(torch.matmul(input_data, self.blockchain.t()), self.blockchain)  # project node activity through the block space and back so the output stays (batch, nodes) 28 | return output 29 | 30 | class PeerToPeerNetwork(nn.Module): 31 | def __init__(self, num_nodes, num_peers): 32 | super(PeerToPeerNetwork, self).__init__() 33 | self.num_nodes = num_nodes 34 | self.num_peers = num_peers 35 | self.peer_matrix = nn.Parameter(torch.randn(num_nodes, num_peers)) 36 | 37 | def forward(self, input_data): 38 | output = torch.matmul(torch.matmul(input_data, self.peer_matrix), self.peer_matrix.t())  # map into peer space and back to the node dimension 39 | return output 40 | 41 | # Create an instance of each network 42 | decentralized_network = DecentralizedNetwork(10, 20) 43 | blockchain_network = BlockchainNetwork(10, 20) 44 | peer_to_peer_network = PeerToPeerNetwork(10, 20) 45 | 46 | # Train each network 47 | criterion = nn.MSELoss() 48 | optimizer = optim.SGD(list(decentralized_network.parameters()) + list(blockchain_network.parameters()) + list(peer_to_peer_network.parameters()), lr=0.01)  # one optimizer over all three networks so each actually updates 49 | 50 | for epoch in range(100): 51 | optimizer.zero_grad() 52 | output = decentralized_network(torch.randn(1, 10)) 53 | loss = criterion(output, torch.randn(1, 10)) 54 | loss.backward() 55 | optimizer.step() 56 | 57 | optimizer.zero_grad() 58 | output = blockchain_network(torch.randn(1, 10)) 59 | loss = criterion(output, torch.randn(1, 10)) 60 | loss.backward() 61 | optimizer.step() 62 | 63 | optimizer.zero_grad() 64 | output = peer_to_peer_network(torch.randn(1, 10)) 65 | loss = criterion(output, torch.randn(1, 10)) 66 | loss.backward() 67 | optimizer.step() 68 | 69 | # Test each network 70 | test_input = torch.randn(1, 10) 71 | print("Decentralized Network Output:", decentralized_network(test_input)) 72 | print("Blockchain Network Output:", blockchain_network(test_input)) 73 | print("Peer To Peer Network Output:", peer_to_peer_network(test_input)) 74 | -------------------------------------------------------------------------------- /network/architecture/distributed/distributed_network.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class DistributedNetwork(nn.Module): 7 | def __init__(self, num_nodes, num_edges): 8 | super(DistributedNetwork, self).__init__() 9 | self.num_nodes = num_nodes 10 | self.num_edges = num_edges 11 | self.adjacency_matrix = nn.Parameter(torch.randn(num_nodes, num_nodes)) 12 | self.edge_weights = nn.Parameter(torch.randn(num_edges, 1)) 13 | 14 | def forward(self, input_data): 15 | output = torch.matmul(input_data, self.adjacency_matrix)  # keep the node dimension, as in the decentralized variant 16 | output = output * self.edge_weights.mean()  # learned global edge scale 17 | return output 18 | 19 | class DistributedBlockchainNetwork(nn.Module): 20 | def __init__(self, num_nodes, num_blocks): 21 | super(DistributedBlockchainNetwork, self).__init__() 22 | self.num_nodes = num_nodes 23 | self.num_blocks = num_blocks 24 | self.blockchain = nn.Parameter(torch.randn(num_blocks, num_nodes)) 25 | 26 | def forward(self, input_data): 27 | output = torch.matmul(torch.matmul(input_data, self.blockchain.t()), self.blockchain)  # through block space and back 28 | return output 29 | 30 | class DistributedPeerToPeerNetwork(nn.Module): 31 | def __init__(self, num_nodes, num_peers): 32 | super(DistributedPeerToPeerNetwork, self).__init__() 33 | self.num_nodes = num_nodes 34 | self.num_peers = num_peers 35 | self.peer_matrix = nn.Parameter(torch.randn(num_nodes, num_peers)) 36 | 37 | def forward(self, input_data): 38 | output = 
torch.matmul(torch.matmul(input_data, self.peer_matrix), self.peer_matrix.t())  # into peer space and back to the node dimension 39 | return output 40 | 41 | # Create an instance of each network 42 | distributed_network = DistributedNetwork(10, 20) 43 | distributed_blockchain_network = DistributedBlockchainNetwork(10, 20) 44 | distributed_peer_to_peer_network = DistributedPeerToPeerNetwork(10, 20) 45 | 46 | # Train each network 47 | criterion = nn.MSELoss() 48 | optimizer = optim.SGD(list(distributed_network.parameters()) + list(distributed_blockchain_network.parameters()) + list(distributed_peer_to_peer_network.parameters()), lr=0.01)  # cover all three networks, not just the first 49 | 50 | for epoch in range(100): 51 | optimizer.zero_grad() 52 | output = distributed_network(torch.randn(1, 10)) 53 | loss = criterion(output, torch.randn(1, 10)) 54 | loss.backward() 55 | optimizer.step() 56 | 57 | optimizer.zero_grad() 58 | output = distributed_blockchain_network(torch.randn(1, 10)) 59 | loss = criterion(output, torch.randn(1, 10)) 60 | loss.backward() 61 | optimizer.step() 62 | 63 | optimizer.zero_grad() 64 | output = distributed_peer_to_peer_network(torch.randn(1, 10)) 65 | loss = criterion(output, torch.randn(1, 10)) 66 | loss.backward() 67 | optimizer.step() 68 | 69 | # Test each network 70 | test_input = torch.randn(1, 10) 71 | print("Distributed Network Output:", distributed_network(test_input)) 72 | print("Distributed Blockchain Network Output:", distributed_blockchain_network(test_input)) 73 | print("Distributed Peer To Peer Network Output:", distributed_peer_to_peer_network(test_input)) 74 | -------------------------------------------------------------------------------- /network/protocols/communication/neural_network_communication.py: -------------------------------------------------------------------------------- 1 | # neural_network_communication.py 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | 7 | class NeuralNetworkCommunicationProtocol: 8 | def __init__(self, input_size, hidden_size, output_size): 9 | self.input_size = input_size 10 | self.hidden_size = hidden_size 11 | self.output_size = output_size 12 | self.model = nn.Sequential( 13 | nn.Linear(input_size, hidden_size), 14 | nn.ReLU(), 15 | nn.Linear(hidden_size, output_size) 16 | ) 17 | self.optimizer = optim.Adam(self.model.parameters(), lr=0.001) 18 | 19 | def encode_message(self, message): 20 | tensor_message = torch.tensor([ord(c) for c in message], dtype=torch.float32)  # nn.Linear needs floats; assumes len(message) == input_size 21 | output = self.model(tensor_message) 22 | return output 23 | 24 | def transmit_message(self, output): 25 | noise = torch.randn_like(output) 26 | transmitted_output = output + noise 27 | return transmitted_output 28 | 29 | def decode_message(self, transmitted_output): 30 | decoded_output = self.model(transmitted_output)  # re-uses the encoder, so this assumes output_size == input_size; a real system would train a separate decoder 31 | text_message = '' 32 | for i in range(decoded_output.shape[0]): 33 | text_message += chr(int(decoded_output[i].clamp(32, 126).item()))  # clamp to printable ASCII; an untrained model emits arbitrary values 34 | return text_message 35 | -------------------------------------------------------------------------------- /network/protocols/communication/quantum_communication.py: -------------------------------------------------------------------------------- 1 | # quantum_communication.py 2 | 3 | import numpy as np 4 | from qiskit import Aer, QuantumCircuit, execute 5 | from qiskit.quantum_info import Statevector 6 | 7 | class QuantumCommunicationProtocol: 8 | def __init__(self, num_qubits, error_correction=True): 9 | self.num_qubits = num_qubits 10 | self.error_correction = error_correction 11 | self.qc = QuantumCircuit(num_qubits) 12 | 13 | def encode_message(self, message): 14 | binary_message = ''.join(format(ord(c), '08b') for c in message) 15 | self.qc.x(0) # Initialize the first qubit to |1> 16 | for i, bit in enumerate(binary_message):  # assumes num_qubits > 8 * len(message) 17 | 
if bit == '1': 18 | self.qc.x(i+1) # Apply X gate to encode the bit 19 | if self.error_correction: 20 | self.qc.barrier() 21 | self.qc.cx(0, 1) # Apply CNOT gate to encode the parity bit 22 | self.qc.cx(0, 2) # Apply CNOT gate to encode the parity bit 23 | 24 | def transmit_message(self): 25 | self.qc.measure_all()  # counts can only be sampled from measured qubits 26 | job = execute(self.qc, backend=Aer.get_backend('qasm_simulator'))  # execute needs a backend object, not a name string 27 | result = job.result() 28 | counts = result.get_counts() 29 | message = '' 30 | for i in range(self.num_qubits):  # simplified readout sketch; real decoding must respect qiskit's little-endian bit ordering 31 | if counts.get('1' * (i+1) + '0' * (self.num_qubits - i - 1), 0) > 0: 32 | message += '1' 33 | else: 34 | message += '0' 35 | return message 36 | 37 | def decode_message(self, message): 38 | text_message = '' 39 | for i in range(0, len(message), 8): 40 | byte = message[i:i+8] 41 | text_message += chr(int(byte, 2)) 42 | return text_message 43 | -------------------------------------------------------------------------------- /network/protocols/data_storage/neural_network_data_storage.py: -------------------------------------------------------------------------------- 1 | # neural_network_data_storage.py 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.optim as optim 6 | 7 | class NeuralNetworkDataStorageProtocol: 8 | def __init__(self, input_size, hidden_size, output_size): 9 | self.input_size = input_size 10 | self.hidden_size = hidden_size 11 | self.output_size = output_size 12 | self.model = nn.Sequential( 13 | nn.Linear(input_size, hidden_size), 14 | nn.ReLU(), 15 | nn.Linear(hidden_size, output_size) 16 | ) 17 | self.optimizer = optim.Adam(self.model.parameters(), lr=0.001) 18 | 19 | def store_data(self, data): 20 | tensor_data = torch.tensor([ord(c) for c in data], dtype=torch.float32)  # nn.Linear needs floats; assumes len(data) == input_size 21 | output = self.model(tensor_data) 22 | return output 23 | 24 | def retrieve_data(self, output): 25 | noise = torch.randn_like(output) 26 | retrieved_output = output + noise 27 | return retrieved_output 28 | 29 | def decode_data(self, retrieved_output): 30 | decoded_output = self.model(retrieved_output)  # assumes output_size == input_size, as in the communication protocol 31 | text_data = '' 32 | for i in range(decoded_output.shape[0]): 33 | text_data += chr(int(decoded_output[i].clamp(32, 126).item()))  # clamp to printable ASCII 34 | return text_data 35 | -------------------------------------------------------------------------------- /network/protocols/data_storage/quantum_data_storage.py: -------------------------------------------------------------------------------- 1 | # quantum_data_storage.py 2 | 3 | import numpy as np 4 | from qiskit import Aer, QuantumCircuit, execute 5 | from qiskit.quantum_info import Statevector 6 | 7 | class QuantumDataStorageProtocol: 8 | def __init__(self, num_qubits): 9 | self.num_qubits = num_qubits 10 | self.qc = QuantumCircuit(num_qubits) 11 | 12 | def store_data(self, data): 13 | binary_data = ''.join(format(ord(c), '08b') for c in data) 14 | self.qc.x(0) # Initialize the first qubit to |1> 15 | for i, bit in enumerate(binary_data):  # assumes num_qubits > 8 * len(data) 16 | if bit == '1': 17 | self.qc.x(i+1) # Apply X gate to store the bit 18 | 19 | def retrieve_data(self): 20 | self.qc.measure_all()  # measurements are required before sampling counts 21 | job = execute(self.qc, backend=Aer.get_backend('qasm_simulator')) 22 | result = job.result() 23 | counts = result.get_counts() 24 | data = '' 25 | for i in range(self.num_qubits): 26 | if counts.get('1' * (i+1) + '0' * (self.num_qubits - i - 1), 0) > 0: 27 | data += '1' 28 | else: 29 | data += '0' 30 | return data 31 | 32 | def decode_data(self, data): 33 | text_data = '' 34 | for i in range(0, len(data), 8): 35 | byte = data[i:i+8] 36 | text_data += chr(int(byte, 2)) 37 | return text_data 38 | --------------------------------------------------------------------------------
/neuromorphic/neural_networks/long_short_term_memory/long_short_term_memory.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class LongShortTermMemory(nn.Module): 7 | def __init__(self, input_size, hidden_size, output_size): 8 | super(LongShortTermMemory, self).__init__() 9 | self.input_size = input_size 10 | self.hidden_size = hidden_size 11 | self.output_size = output_size 12 | 13 | self.fc1 = nn.Linear(hidden_size, hidden_size)  # takes the LSTM hidden state, so in_features must be hidden_size, not input_size 14 | self.fc2 = nn.Linear(hidden_size, output_size) 15 | 16 | self.lstm = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True) 17 | 18 | def forward(self, x): 19 | h0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 20 | c0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 21 | 22 | out, _ = self.lstm(x, (h0, c0)) 23 | out = self.fc1(out[:, -1, :]) 24 | out = self.fc2(out) 25 | return out 26 | 27 | class LSTMCell(nn.Module): 28 | def __init__(self, input_size, hidden_size): 29 | super(LSTMCell, self).__init__() 30 | self.input_size = input_size 31 | self.hidden_size = hidden_size 32 | 33 | self.fc_i = nn.Linear(input_size, hidden_size) 34 | self.fc_f = nn.Linear(input_size, hidden_size) 35 | self.fc_g = nn.Linear(input_size, hidden_size) 36 | self.fc_o = nn.Linear(input_size, hidden_size) 37 | 38 | def forward(self, x, h, c):  # simplified cell: gates depend only on x; a full LSTM cell also feeds h into each gate 39 | i = torch.sigmoid(self.fc_i(x)) 40 | f = torch.sigmoid(self.fc_f(x)) 41 | g = torch.tanh(self.fc_g(x)) 42 | o = torch.sigmoid(self.fc_o(x)) 43 | 44 | c = f * c + i * g 45 | h = o * torch.tanh(c) 46 | return h, c 47 | 48 | class LSTMNetwork(nn.Module): 49 | def __init__(self, input_size, hidden_size, output_size): 50 | super(LSTMNetwork, self).__init__() 51 | self.input_size = input_size 52 | self.hidden_size = hidden_size 53 | self.output_size = output_size 54 | 55 | self.lstm_cell = LSTMCell(input_size, hidden_size) 56 | self.fc = nn.Linear(hidden_size, output_size) 57 | 58 | def forward(self, x): 59 | h = torch.zeros(x.size(0), self.hidden_size).to(x.device) 60 | c = torch.zeros(x.size(0), self.hidden_size).to(x.device) 61 | 62 | for i in range(x.size(1)): 63 | h, c = self.lstm_cell(x[:, i, :], h, c) 64 | 65 | out = self.fc(h) 66 | return out 67 | 68 | class LongShortTermMemoryTrainer: 69 | def __init__(self, model, optimizer, loss_fn): 70 | self.model = model 71 | self.optimizer = optimizer 72 | self.loss_fn = loss_fn 73 | 74 | def train(self, inputs, targets): 75 | self.optimizer.zero_grad() 76 | outputs = self.model(inputs) 77 | loss = self.loss_fn(outputs, targets) 78 | loss.backward() 79 | self.optimizer.step() 80 | return loss.item() 81 | 82 | def test(self, inputs, targets): 83 | outputs = self.model(inputs) 84 | loss = self.loss_fn(outputs, targets) 85 | return loss.item() 86 | 87 | # Example usage: 88 | if __name__ == "__main__": 89 | # Set random seed for reproducibility 90 | torch.manual_seed(0) 91 | 92 | # Define the long short-term memory model 93 | model = LongShortTermMemory(input_size=784, hidden_size=256, output_size=10) 94 | 95 | # Define the optimizer and loss function 96 | optimizer = optim.Adam(model.parameters(), lr=0.001) 97 | loss_fn = nn.CrossEntropyLoss() 98 | 99 | # Define the trainer 100 | trainer = LongShortTermMemoryTrainer(model, optimizer, loss_fn) 101 | 102 | # Train the model 103 | inputs = torch.randn(100, 1, 784)  # (batch, seq_len, features), as the batch_first LSTM expects 104 | targets = torch.randint(0, 10, (100,)) 105 | for epoch in range(10): 106 | loss = trainer.train(inputs, targets) 107
| print(f"Epoch {epoch+1}, Loss: {loss:.4f}") 108 | 109 | # Test the model 110 | test_inputs = torch.randn(100, 784) 111 | test_targets = torch.randint(0, 10, (100,)) 112 | test_loss = trainer.test(test_inputs, test_targets) 113 | print(f"Test Loss: {test_loss:.4f}") 114 | -------------------------------------------------------------------------------- /neuromorphic/neural_networks/recurrent_neural_networks/recurrent_neural_networks.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class RecurrentNe uralNetwork(nn.Module): 7 | def __init__(self, input_size, hidden_size, output_size): 8 | super(RecurrentNeuralNetwork, self).__init__() 9 | self.input_size = input_size 10 | self.hidden_size = hidden_size 11 | self.output_size = output_size 12 | 13 | self.fc1 = nn.Linear(input_size, hidden_size) 14 | self.fc2 = nn.Linear(hidden_size, output_size) 15 | 16 | self.rnn = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True) 17 | 18 | def forward(self, x): 19 | h0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 20 | c0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 21 | 22 | out, _ = self.rnn(x, (h0, c0)) 23 | out = self.fc1(out[:, -1, :]) 24 | out = self.fc2(out) 25 | return out 26 | 27 | class LSTMNeuralNetwork(nn.Module): 28 | def __init__(self, input_size, hidden_size, output_size): 29 | super(LSTMNeuralNetwork, self).__init__() 30 | self.input_size = input_size 31 | self.hidden_size = hidden_size 32 | self.output_size = output_size 33 | 34 | self.fc1 = nn.Linear(input_size, hidden_size) 35 | self.fc2 = nn.Linear(hidden_size, output_size) 36 | 37 | self.lstm = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True) 38 | 39 | def forward(self, x): 40 | h0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 41 | c0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 42 | 43 | out, _ = self.lstm(x, (h0, c0)) 44 | out = self.fc1(out[:, -1, :]) 45 | out = self.fc2(out) 46 | return out 47 | 48 | class GRUNeuralNetwork(nn.Module): 49 | def __init__(self, input_size, hidden_size, output_size): 50 | super(GRUNeuralNetwork, self).__init__() 51 | self.input_size = input_size 52 | self.hidden_size = hidden_size 53 | self.output_size = output_size 54 | 55 | self.fc1 = nn.Linear(input_size, hidden_size) 56 | self.fc2 = nn.Linear(hidden_size, output_size) 57 | 58 | self.gru = nn.GRU(input_size, hidden_size, num_layers=1, batch_first=True) 59 | 60 | def forward(self, x): 61 | h0 = torch.zeros(1, x.size(0), self.hidden_size).to(x.device) 62 | 63 | out, _ = self.gru(x, h0) 64 | out = self.fc1(out[:, -1, :]) 65 | out = self.fc2(out) 66 | return out 67 | 68 | class RecurrentNeuralNetworkTrainer: 69 | def __init__(self, model, optimizer, loss_fn): 70 | self.model = model 71 | self.optimizer = optimizer 72 | self.loss_fn = loss_fn 73 | 74 | def train(self, inputs, targets): 75 | self.optimizer.zero_grad() 76 | outputs = self.model(inputs) 77 | loss = self.loss_fn(outputs, targets) 78 | loss.backward() 79 | self.optimizer.step() 80 | return loss.item() 81 | 82 | def test(self, inputs, targets): 83 | outputs = self.model(inputs) 84 | loss = self.loss_fn(outputs, targets) 85 | return loss.item() 86 | 87 | # Example usage: 88 | if __name__ == "__main__": 89 | # Set random seed for reproducibility 90 | torch.manual_seed(0) 91 | 92 | # Define the recurrent neural network model 93 | model = RecurrentNeuralNetwork(input_size=784, 
hidden_size=256, output_size=10) 94 | 95 | # Define the optimizer and loss function 96 | optimizer = optim.Adam(model.parameters(), lr=0.001) 97 | loss_fn = nn.CrossEntropyLoss() 98 | 99 | # Define the trainer 100 | trainer = RecurrentNeuralNetworkTrainer(model, optimizer, loss_fn) 101 | 102 | # Train the model 103 | inputs = torch.randn(100, 1, 784)  # (batch, seq_len, features) for the recurrent models 104 | targets = torch.randint(0, 10, (100,)) 105 | for epoch in range(10): 106 | loss = trainer.train(inputs, targets) 107 | print(f"Epoch {epoch+1}, Loss: {loss:.4f}") 108 | 109 | # Test the model 110 | test_inputs = torch.randn(100, 1, 784) 111 | test_targets = torch.randint(0, 10, (100,)) 112 | test_loss = trainer.test(test_inputs, test_targets) 113 | print(f"Test Loss: {test_loss:.4f}") 114 | -------------------------------------------------------------------------------- /neuromorphic/neural_networks/spiking_neural_networks/spiking_neural_networks.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class SpikingNeuralNetwork(nn.Module): 7 | def __init__(self, input_size, hidden_size, output_size): 8 | super(SpikingNeuralNetwork, self).__init__() 9 | self.input_size = input_size 10 | self.hidden_size = hidden_size 11 | self.output_size = output_size 12 | 13 | self.fc1 = nn.Linear(input_size, hidden_size) 14 | self.fc2 = nn.Linear(hidden_size, output_size) 15 | 16 | self.spike_fn = nn.ReLU()  # rate-based surrogate; true spiking would threshold membrane potentials 17 | self.reset_fn = nn.ReLU() 18 | 19 | def forward(self, x): 20 | x = self.spike_fn(self.fc1(x)) 21 | x = self.reset_fn(self.fc2(x)) 22 | return x 23 | 24 | def reset(self): 25 | self.fc1.reset_parameters() 26 | self.fc2.reset_parameters() 27 | 28 | class LIFNeuron(nn.Module): 29 | def __init__(self, tau, v_th, v_reset): 30 | super(LIFNeuron, self).__init__() 31 | self.tau = tau 32 | self.v_th = v_th 33 | self.v_reset = v_reset 34 | 35 | self.v = torch.zeros(1) 36 | 37 | def forward(self, x): 38 | dvdt = -(self.v - self.v_reset) / self.tau + x  # the leak term decays the membrane potential toward rest 39 | self.v = self.v + dvdt 40 | spike = self.v >= self.v_th 41 | self.v = torch.where(spike, torch.full_like(self.v, self.v_reset), self.v) 42 | return spike.float() 43 | 44 | class IzhikevichNeuron(nn.Module): 45 | def __init__(self, a, b, c, d): 46 | super(IzhikevichNeuron, self).__init__() 47 | self.a = a 48 | self.b = b 49 | self.c = c 50 | self.d = d 51 | 52 | self.v = torch.zeros(1) 53 | self.u = torch.zeros(1) 54 | 55 | def forward(self, x): 56 | dvdt = 0.04 * self.v**2 + 5 * self.v + 140 - self.u + x 57 | dudt = self.a * (self.b * self.v - self.u) 58 | self.v = self.v + dvdt 59 | self.u = self.u + dudt 60 | spike = self.v >= 30 61 | self.v = torch.where(spike, torch.full_like(self.v, self.c), self.v) 62 | self.u = torch.where(spike, self.u + self.d, self.u) 63 | return spike.float() 64 | 65 | class SpikingNeuralNetworkTrainer: 66 | def __init__(self, model, optimizer, loss_fn): 67 | self.model = model 68 | self.optimizer = optimizer 69 | self.loss_fn = loss_fn 70 | 71 | def train(self, inputs, targets): 72 | self.optimizer.zero_grad() 73 | outputs = self.model(inputs) 74 | loss = self.loss_fn(outputs, targets) 75 | loss.backward() 76 | self.optimizer.step() 77 | return loss.item() 78 | 79 | def test(self, inputs, targets): 80 | outputs = self.model(inputs) 81 | loss = self.loss_fn(outputs, targets) 82 | return loss.item() 83 | 84 | # Example usage: 85 | if __name__ == "__main__": 86 | # Set random seed for reproducibility 87 | torch.manual_seed(0) 88 | 89 | # Define the spiking neural network model
90 | model = SpikingNeuralNetwork(input_size=784, hidden_size=256, output_size=10) 91 | 92 | # Define the optimizer and loss function 93 | optimizer = optim.Adam(model.parameters(), lr=0.001) 94 | loss_fn = nn.CrossEntropyLoss() 95 | 96 | # Define the trainer 97 | trainer = SpikingNeuralNetworkTrainer(model, optimizer, loss_fn) 98 | 99 | # Train the model 100 | inputs = torch.randn(100, 784) 101 | targets = torch.randint(0, 10, (100,)) 102 | for epoch in range(10): 103 | loss = trainer.train(inputs, targets) 104 | print(f"Epoch {epoch+1}, Loss: {loss:.4f}") 105 | 106 | # Test the model 107 | test_inputs = torch.randn(100, 784) 108 | test_targets = torch.randint(0, 10, (100,)) 109 | test_loss = trainer.test(test_inputs, test_targets) 110 | print(f"Test Loss: {test_loss:.4f}") 111 | -------------------------------------------------------------------------------- /neuromorphic/neurons/artificial_neuron/artificial_neuron.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class ArtificialNeuron(nn.Module): 7 | def __init__(self, input_dim, hidden_dim, output_dim): 8 | super(ArtificialNeuron, self).__init__() 9 | self.fc1 = nn.Linear(input_dim, hidden_dim) 10 | self.fc2 = nn.Linear(hidden_dim, output_dim) 11 | 12 | def forward(self, x): 13 | x = torch.relu(self.fc1(x)) 14 | x = self.fc2(x) 15 | return x 16 | 17 | class ReLUNeuron(nn.Module): 18 | def __init__(self, input_dim, hidden_dim, output_dim): 19 | super(ReLUNeuron, self).__init__() 20 | self.fc1 = nn.Linear(input_dim, hidden_dim) 21 | self.fc2 = nn.Linear(hidden_dim, output_dim) 22 | 23 | def forward(self, x): 24 | x = torch.relu(self.fc1(x)) 25 | x = torch.relu(self.fc2(x)) 26 | return x 27 | 28 | class SigmoidalNeuron(nn.Module): 29 | def __init__(self, input_dim, hidden_dim, output_dim): 30 | super(SigmoidalNeuron, self).__init__() 31 | self.fc1 = nn.Linear(input_dim, hidden_dim) 32 | self.fc2 = nn.Linear(hidden_dim, output_dim) 33 | 34 | def forward(self, x): 35 | x = torch.sigmoid(self.fc1(x)) 36 | x = torch.sigmoid(self.fc2(x)) 37 | return x 38 | 39 | class SoftmaxNeuron(nn.Module): 40 | def __init__(self, input_dim, hidden_dim, output_dim): 41 | super(SoftmaxNeuron, self).__init__() 42 | self.fc1 = nn.Linear(input_dim, hidden_dim) 43 | self.fc2 = nn.Linear(hidden_dim, output_dim) 44 | 45 | def forward(self, x): 46 | x = torch.softmax(self.fc1(x), dim=1) 47 | x = torch.softmax(self.fc2(x), dim=1) 48 | return x 49 | 50 | class ParametricReLUNeuron(nn.Module): 51 | def __init__(self, input_dim, hidden_dim, output_dim): 52 | super(ParametricReLUNeuron, self).__init__() 53 | self.fc1 = nn.Linear(input_dim, hidden_dim) 54 | self.fc2 = nn.Linear(hidden_dim, output_dim) 55 | self.alpha = nn.Parameter(torch.tensor(0.1)) 56 | 57 | def forward(self, x): 58 | x = torch.relu(self.fc1(x)) 59 | x = self.fc2(x) 60 | return torch.where(x > 0, x, self.alpha * x)  # PReLU: learned slope on the negative side; the original mixed tensors of different shapes 61 | 62 | class ParametricLeakyReLUNeuron(nn.Module): 63 | def __init__(self, input_dim, hidden_dim, output_dim): 64 | super(ParametricLeakyReLUNeuron, self).__init__() 65 | self.fc1 = nn.Linear(input_dim, hidden_dim) 66 | self.fc2 = nn.Linear(hidden_dim, output_dim) 67 | self.alpha = nn.Parameter(torch.tensor(0.1)) 68 | 69 | def forward(self, x): 70 | x = torch.relu(self.fc1(x)) 71 | x = self.fc2(x) 72 | return torch.where(x > 0, x, self.alpha * x)  # equivalent to the parametric ReLU above, since alpha is learned in both 73 | 74 | class SwishNeuron(nn.Module): 75 | def __init__(self, input_dim, 
hidden_dim, output_dim): 76 | super(SwishNeuron, self).__init__() 77 | self.fc1 = nn.Linear(input_dim, hidden_dim) 78 | self.fc2 = nn.Linear(hidden_dim, output_dim) 79 | 80 | def forward(self, x): 81 | x = x * torch.sigmoid(x) 82 | x = self.fc1(x) 83 | x = x * torch.sigmoid(x) 84 | x = self.fc2(x) 85 | return x 86 | 87 | class GELUNeuron(nn.Module): 88 | def __init__(self, input_dim, hidden_dim, output_dim): 89 | super(GELUNeuron, self).__init__() 90 | self.fc1 = nn.Linear(input_dim, hidden_dim) 91 | self.fc2 = nn.Linear(hidden_dim, output_dim) 92 | 93 | def forward(self, x): 94 | x = 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))) 95 | x = self.fc1(x) 96 | x = 0.5 * x * (1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))) 97 | x = self.fc2(x) 98 | return x 99 | 100 | class SoftClippingNeuron(nn.Module): 101 | def __init__(self, input_dim, hidden_dim, output_dim): 102 | super(SoftClippingNeuron, self).__init__() 103 | self.fc1 = nn.Linear(input_dim, hidden_dim) 104 | self.fc2 = nn.Linear(hidden_dim, output_dim) 105 | 106 | def forward(self, x): 107 | x = torch.clamp(x, min=-1, max=1) 108 | x = self.fc1(x) 109 | x = torch.clamp(x, min=-1, max=1) 110 | x = self.fc2(x) 111 | return x 112 | 113 | # Create an instance of each neuron 114 | relu_neuron = ReLUNeuron(5, 10, 5) 115 | sigmoidal_neuron = SigmoidalNeuron(5, 10, 5) 116 | softmax_neuron = SoftmaxNeuron(5, 10, 5) 117 | parametric_relu_neuron = ParametricReLUNeuron(5, 10, 5) 118 | parametric_leaky_relu_neuron = ParametricLeakyReLUNeuron(5, 10, 5) 119 | swish_neuron = SwishNeuron(5, 10, 5) 120 | gelu_neuron = GELUNeuron(5, 10, 5) 121 | softclipping_neuron = SoftClippingNeuron(5, 10, 5) 122 | 123 | # Train each neuron 124 | criterion = nn.MSELoss() 125 | optimizer = optim.SGD([p for n in (relu_neuron, sigmoidal_neuron, softmax_neuron, parametric_relu_neuron, parametric_leaky_relu_neuron, swish_neuron, gelu_neuron, softclipping_neuron) for p in n.parameters()], lr=0.01)  # one optimizer over every neuron so each training step below actually updates its target 126 | 127 | for epoch in range(100): 128 | optimizer.zero_grad() 129 | output = relu_neuron(torch.randn(1, 5)) 130 | loss = criterion(output, torch.randn(1, 5)) 131 | loss.backward() 132 | optimizer.step() 133 | 134 | optimizer.zero_grad() 135 | output = sigmoidal_neuron(torch.randn(1, 5)) 136 | loss = criterion(output, torch.randn(1, 5)) 137 | loss.backward() 138 | optimizer.step() 139 | 140 | optimizer.zero_grad() 141 | output = softmax_neuron(torch.randn(1, 5)) 142 | loss = criterion(output, torch.randn(1, 5)) 143 | loss.backward() 144 | optimizer.step() 145 | 146 | optimizer.zero_grad() 147 | output = parametric_relu_neuron(torch.randn(1, 5)) 148 | loss = criterion(output, torch.randn(1, 5)) 149 | loss.backward() 150 | optimizer.step() 151 | 152 | optimizer.zero_grad() 153 | output = parametric_leaky_relu_neuron(torch.randn(1, 5)) 154 | loss = criterion(output, torch.randn(1, 5)) 155 | loss.backward() 156 | optimizer.step() 157 | 158 | optimizer.zero_grad() 159 | output = swish_neuron(torch.randn(1, 5)) 160 | loss = criterion(output, torch.randn(1, 5)) 161 | loss.backward() 162 | optimizer.step() 163 | 164 | optimizer.zero_grad() 165 | output = gelu_neuron(torch.randn(1, 5)) 166 | loss = criterion(output, torch.randn(1, 5)) 167 | loss.backward() 168 | optimizer.step() 169 | 170 | optimizer.zero_grad() 171 | output = softclipping_neuron(torch.randn(1, 5)) 172 | loss = criterion(output, torch.randn(1, 5)) 173 | loss.backward() 174 | optimizer.step() 175 | -------------------------------------------------------------------------------- /neuromorphic/neurons/biological_neuron/biological_neuron.py: -------------------------------------------------------------------------------- 1 |
import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class BiologicalNeuron(nn.Module):  # note: the classes in this file are MLP stand-ins named after biophysical models, not faithful dynamical implementations 7 | def __init__(self, input_dim, hidden_dim, output_dim): 8 | super(BiologicalNeuron, self).__init__() 9 | self.fc1 = nn.Linear(input_dim, hidden_dim) 10 | self.fc2 = nn.Linear(hidden_dim, output_dim) 11 | 12 | def forward(self, x): 13 | x = torch.relu(self.fc1(x)) 14 | x = self.fc2(x) 15 | return x 16 | 17 | class HodgkinHuxleyNeuron(nn.Module): 18 | def __init__(self, input_dim, hidden_dim, output_dim): 19 | super(HodgkinHuxleyNeuron, self).__init__() 20 | self.fc1 = nn.Linear(input_dim, hidden_dim) 21 | self.fc2 = nn.Linear(hidden_dim, output_dim) 22 | 23 | def forward(self, x): 24 | x = self.fc1(x) 25 | x = torch.sigmoid(x) 26 | x = self.fc2(x) 27 | return x 28 | 29 | class IzhikevichNeuron(nn.Module): 30 | def __init__(self, input_dim, hidden_dim, output_dim): 31 | super(IzhikevichNeuron, self).__init__() 32 | self.fc1 = nn.Linear(input_dim, hidden_dim) 33 | self.fc2 = nn.Linear(hidden_dim, output_dim) 34 | 35 | def forward(self, x): 36 | x = self.fc1(x) 37 | x = torch.tanh(x) 38 | x = self.fc2(x) 39 | return x 40 | 41 | class MorrisLecarNeuron(nn.Module): 42 | def __init__(self, input_dim, hidden_dim, output_dim): 43 | super(MorrisLecarNeuron, self).__init__() 44 | self.fc1 = nn.Linear(input_dim, hidden_dim) 45 | self.fc2 = nn.Linear(hidden_dim, output_dim) 46 | 47 | def forward(self, x): 48 | x = self.fc1(x) 49 | x = torch.sigmoid(x) 50 | x = self.fc2(x) 51 | return x 52 | 53 | class FitzHughNagumoNeuron(nn.Module): 54 | def __init__(self, input_dim, hidden_dim, output_dim): 55 | super(FitzHughNagumoNeuron, self).__init__() 56 | self.fc1 = nn.Linear(input_dim, hidden_dim) 57 | self.fc2 = nn.Linear(hidden_dim, output_dim) 58 | 59 | def forward(self, x): 60 | x = self.fc1(x) 61 | x = torch.tanh(x) 62 | x = self.fc2(x) 63 | return x 64 | 65 | class HindmarshRoseNeuron(nn.Module): 66 | def __init__(self, input_dim, hidden_dim, output_dim): 67 | super(HindmarshRoseNeuron, self).__init__() 68 | self.fc1 = nn.Linear(input_dim, hidden_dim) 69 | self.fc2 = nn.Linear(hidden_dim, output_dim) 70 | 71 | def forward(self, x): 72 | x = self.fc1(x) 73 | x = torch.sigmoid(x) 74 | x = self.fc2(x) 75 | return x 76 | 77 | class WilsonCowanNeuron(nn.Module): 78 | def __init__(self, input_dim, hidden_dim, output_dim): 79 | super(WilsonCowanNeuron, self).__init__() 80 | self.fc1 = nn.Linear(input_dim, hidden_dim) 81 | self.fc2 = nn.Linear(hidden_dim, output_dim) 82 | 83 | def forward(self, x): 84 | x = self.fc1(x) 85 | x = torch.sigmoid(x) 86 | x = self.fc2(x) 87 | return x 88 | 89 | # Create an instance of each neuron 90 | biological_neuron = BiologicalNeuron(5, 10, 5) 91 | hodgkin_huxley_neuron = HodgkinHuxleyNeuron(5, 10, 5) 92 | izhikevich_neuron = IzhikevichNeuron(5, 10, 5) 93 | morris_lecar_neuron = MorrisLecarNeuron(5, 10, 5) 94 | fitz_hugh_nagumo_neuron = FitzHughNagumoNeuron(5, 10, 5) 95 | hindmarsh_rose_neuron = HindmarshRoseNeuron(5, 10, 5) 96 | wilson_cowan_neuron = WilsonCowanNeuron(5, 10, 5) 97 | 98 | # Train each neuron 99 | criterion = nn.MSELoss() 100 | optimizer = optim.SGD([p for n in (biological_neuron, hodgkin_huxley_neuron, izhikevich_neuron, morris_lecar_neuron, fitz_hugh_nagumo_neuron, hindmarsh_rose_neuron, wilson_cowan_neuron) for p in n.parameters()], lr=0.01)  # cover every neuron, not just the first 101 | 102 | for epoch in range(100): 103 | optimizer.zero_grad() 104 | output = biological_neuron(torch.randn(1, 5)) 105 | loss = criterion(output, torch.randn(1, 5)) 106 | loss.backward() 107 | optimizer.step() 108 | 109 | optimizer.zero_grad() 110 | output = hodgkin_huxley_neuron(torch.randn(1, 5)) 111 | loss = criterion(output, 
torch.randn(1, 5)) 112 | loss.backward() 113 | optimizer.step() 114 | 115 | optimizer.zero_grad() 116 | output = izhikevich_neuron(torch.randn(1, 5)) 117 | loss = criterion(output, torch.randn(1, 5)) 118 | loss.backward() 119 | optimizer.step() 120 | 121 | optimizer.zero_grad() 122 | output = morris_lecar_neuron(torch.randn(1, 5)) 123 | loss = criterion(output, torch.randn(1, 5)) 124 | loss.backward() 125 | optimizer.step() 126 | 127 | optimizer.zero_grad() 128 | output = fitz_hugh_nagumo_neuron(torch.randn(1, 5)) 129 | loss = criterion(output, torch.randn(1, 5)) 130 | loss.backward() 131 | optimizer.step() 132 | 133 | optimizer.zero_grad() 134 | output = hindmarsh_rose_neuron(torch.randn(1, 5)) 135 | loss = criterion(output, torch.randn(1, 5)) 136 | loss.backward() 137 | optimizer.step() 138 | 139 | optimizer.zero_grad() 140 | output = wilson_cowan_neuron(torch.randn(1, 5)) 141 | loss = criterion(output, torch.randn(1, 5)) 142 | loss.backward() 143 | optimizer.step() 144 | 145 | # Test each neuron 146 | test_input = torch.randn(1, 5) 147 | print("Biological Neuron Output:", biological_neuron(test_input)) 148 | print("Hodgkin Huxley Neuron Output:", hodgkin_huxley_neuron(test_input)) 149 | print("Izhikevich Neuron Output:", izhikevich_neuron(test_input)) 150 | print("Morris Lecar Neuron Output:", morris_lecar_neuron(test_input)) 151 | print("FitzHugh Nagumo Neuron Output:", fitz_hugh_nagumo_neuron(test_input)) 152 | print("Hindmarsh Rose Neuron Output:", hindmarsh_rose_neuron(test_input)) 153 | print("Wilson Cowan Neuron Output:", wilson_cowan_neuron(test_input)) 154 | -------------------------------------------------------------------------------- /neuromorphic/synapses/synaptic_plasticity/synaptic_plasticity.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class SynapticPlasticity(nn.Module): 7 | def __init__(self, input_size, output_size): 8 | super(SynapticPlasticity, self).__init__() 9 | self.input_size = input_size 10 | self.output_size = output_size 11 | 12 | self.fc1 = nn.Linear(input_size, output_size) 13 | 14 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 15 | 16 | def forward(self, x): 17 | output = torch.matmul(x, self.synaptic_weights) 18 | return output 19 | 20 | def update_synaptic_weights(self, x, y, learning_rate): 21 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 22 | self.synaptic_weights.data += learning_rate * dw 23 | 24 | class HebbianLearning(nn.Module): 25 | def __init__(self, input_size, output_size): 26 | super(HebbianLearning, self).__init__() 27 | self.input_size = input_size 28 | self.output_size = output_size 29 | 30 | self.fc1 = nn.Linear(input_size, output_size) 31 | 32 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 33 | 34 | def forward(self, x): 35 | output = torch.matmul(x, self.synaptic_weights) 36 | return output 37 | 38 | def update_synaptic_weights(self, x, y, learning_rate): 39 | dw = torch.matmul(x.T, y) 40 | self.synaptic_weights.data += learning_rate * dw 41 | 42 | class SpikeTimingDependentPlasticity(nn.Module): 43 | def __init__(self, input_size, output_size): 44 | super(SpikeTimingDependentPlasticity, self).__init__() 45 | self.input_size = input_size 46 | self.output_size = output_size 47 | 48 | self.fc1 = nn.Linear(input_size, output_size) 49 | 50 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, 
output_size)) 51 | 52 | def forward(self, x): 53 | output = torch.matmul(x, self.synaptic_weights) 54 | return output 55 | 56 | def update_synaptic_weights(self, x, y, learning_rate, tau): 57 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 58 | dw = dw * torch.exp(-torch.abs(y - torch.matmul(x, self.synaptic_weights)) / tau) 59 | self.synaptic_weights.data += learning_rate * dw 60 | 61 | class HomeostaticPlasticity(nn.Module): 62 | def __init__(self, input_size, output_size): 63 | super(HomeostaticPlasticity, self).__init__() 64 | self.input_size = input_size 65 | self.output_size = output_size 66 | 67 | self.fc1 = nn.Linear(input_size, output_size) 68 | 69 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 70 | 71 | def forward(self, x): 72 | output = torch.matmul(x, self.synaptic_weights) 73 | return output 74 | 75 | def update_synaptic_weights(self, x, y, learning_rate, target_rate): 76 | dw = torch.matmul(x.T, (y - target_rate)) 77 | self.synaptic_weights.data += learning_rate * dw 78 | 79 | class SynapticPlasticityTrainer: 80 | def __init__(self, model, optimizer, loss_fn): 81 | self.model = model 82 | self.optimizer = optimizer 83 | self.loss_fn = loss_fn 84 | 85 | def train(self, inputs, targets): 86 | self.optimizer.zero_grad() 87 | outputs = self.model(inputs) 88 | loss = self.loss_fn(outputs, targets) 89 | loss.backward() 90 | self.optimizer.step() 91 | return loss.item() 92 | 93 | def test(self, inputs, targets): 94 | outputs = self.model(inputs) 95 | loss = self.loss_fn(outputs, targets) 96 | return loss.item() 97 | 98 | # Example usage: 99 | if __name__ == "__main__": 100 | # Set random seed for reproducibility 101 | torch.manual_seed(0) 102 | 103 | # Define the synaptic plasticity model 104 | model = SynapticPlasticity(input_size=784, output_size=10) 105 | 106 | # Define the optimizer and loss function 107 | optimizer = optim.Adam(model.parameters(), lr=0.001) 108 | loss_fn = nn.CrossEntropyLoss() 109 | 110 | # Define the trainer 111 | trainer = SynapticPlasticityTrainer(model, optimizer, loss_fn) 112 | 113 | # Train the model 114 | inputs = torch.randn(100, 784) 115 | targets = torch.randint(0, 10, (100,)) 116 | for epoch in range(10): 117 | loss = trainer.train(inputs, targets) 118 | print(f"Epoch {epoch+1}, Loss: {loss:.4f}") 119 | 120 | # Test the model 121 | test_inputs = torch.randn(100, 784) 122 | test_targets = torch.randint(0, 10, (100,)) 123 | test_loss = trainer.test(test_inputs, test_targets) 124 | print(f"Test Loss: {test_loss:.4f}") 125 | -------------------------------------------------------------------------------- /neuromorphic/synapses/synaptic_transmission/synaptic_transmission.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | 6 | class SynapticTransmission(nn.Module): 7 | def __init__(self, input_size, output_size): 8 | super(SynapticTransmission, self).__init__() 9 | self.input_size = input_size 10 | self.output_size = output_size 11 | 12 | self.fc1 = nn.Linear(input_size, output_size) 13 | 14 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 15 | 16 | def forward(self, x): 17 | output = torch.matmul(x, self.synaptic_weights) 18 | return output 19 | 20 | def update_synaptic_weights(self, x, y, learning_rate): 21 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 22 | self.synaptic_weights.data += learning_rate * dw 23 | 24 | 
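# Usage sketch (hypothetical shapes, not part of the original file): the update
# methods in this file bypass autograd and apply a delta-style rule directly to
# the weight tensor, e.g.:
#   st = SynapticTransmission(input_size=4, output_size=2)
#   x, y = torch.randn(8, 4), torch.randn(8, 2)
#   st.update_synaptic_weights(x, y, learning_rate=0.01)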
class ChemicalSynapticTransmission(nn.Module): 25 | def __init__(self, input_size, output_size): 26 | super(ChemicalSynapticTransmission, self).__init__() 27 | self.input_size = input_size 28 | self.output_size = output_size 29 | 30 | self.fc1 = nn.Linear(input_size, output_size) 31 | 32 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 33 | 34 | def forward(self, x): 35 | output = torch.matmul(x, self.synaptic_weights) 36 | return output 37 | 38 | def update_synaptic_weights(self, x, y, learning_rate, tau): 39 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 40 | dw = dw * torch.exp(-torch.abs(y - torch.matmul(x, self.synaptic_weights)) / tau) 41 | self.synaptic_weights.data += learning_rate * dw 42 | 43 | class ElectricalSynapticTransmission(nn.Module): 44 | def __init__(self, input_size, output_size): 45 | super(ElectricalSynapticTransmission, self).__init__() 46 | self.input_size = input_size 47 | self.output_size = output_size 48 | 49 | self.fc1 = nn.Linear(input_size, output_size) 50 | 51 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 52 | 53 | def forward(self, x): 54 | output = torch.matmul(x, self.synaptic_weights) 55 | return output 56 | 57 | def update_synaptic_weights(self, x, y, learning_rate): 58 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 59 | self.synaptic_weights.data += learning_rate * dw 60 | 61 | class GapJunctionSynapticTransmission(nn.Module): 62 | def __init__(self, input_size, output_size): 63 | super(GapJunctionSynapticTransmission, self).__init__() 64 | self.input_size = input_size 65 | self.output_size = output_size 66 | 67 | self.fc1 = nn.Linear(input_size, output_size) 68 | 69 | self.synaptic_weights = nn.Parameter(torch.randn(input_size, output_size)) 70 | 71 | def forward(self, x): 72 | output = torch.matmul(x, self.synaptic_weights) 73 | return output 74 | 75 | def update_synaptic_weights(self, x, y, learning_rate): 76 | dw = torch.matmul(x.T, (y - torch.matmul(x, self.synaptic_weights))) 77 | self.synaptic_weights.data += learning_rate * dw 78 | 79 | class SynapticTransmissionTrainer: 80 | def __init__(self, model, optimizer, loss_fn): 81 | self.model = model 82 | self.optimizer = optimizer 83 | self.loss_fn = loss_fn 84 | 85 | def train(self, inputs, targets): 86 | self.optimizer.zero_grad() 87 | outputs = self.model(inputs) 88 | loss = self.loss_fn(outputs, targets) 89 | loss.backward() 90 | self.optimizer.step() 91 | return loss.item() 92 | 93 | def test(self, inputs, targets): 94 | outputs = self.model(inputs) 95 | loss = self.loss_fn(outputs, targets) 96 | return loss.item() 97 | 98 | # Example usage: 99 | if __name__ == "__main__": 100 | # Set random seed for reproducibility 101 | torch.manual_seed(0) 102 | 103 | # Define the synaptic transmission model 104 | model = SynapticTransmission(input_size=784, output_size=10) 105 | 106 | # Define the optimizer and loss function 107 | optimizer = optim.Adam(model.parameters(), lr=0.001) 108 | loss_fn = nn.CrossEntropyLoss() 109 | 110 | # Define the trainer 111 | trainer = SynapticTransmissionTrainer(model, optimizer, loss_fn) 112 | 113 | # Train the model 114 | inputs = torch.randn(100, 784) 115 | targets = torch.randint(0, 10, (100,)) 116 | for epoch in range(10): 117 | loss = trainer.train(inputs, targets) 118 | print(f"Epoch {epoch+1}, Loss: {loss:.4f}") 119 | 120 | # Test the model 121 | test_inputs = torch.randn(100, 784) 122 | test_targets = torch.randint(0, 10, (100,)) 123 | test_loss = 
trainer.test(test_inputs, test_targets) 124 | print(f"Test Loss: {test_loss:.4f}") 125 | -------------------------------------------------------------------------------- /quantum/quantum_algorithms/grover/grover.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qiskit import QuantumCircuit, execute, Aer 3 | 4 | class Grover: 5 | def __init__(self, n): 6 | self.n = n 7 | 8 | def grover(self): 9 | # Create a quantum circuit with n qubits 10 | circuit = QuantumCircuit(self.n) 11 | 12 | # Apply Hadamard gates to all qubits 13 | for i in range(self.n): 14 | circuit.h(i) 15 | 16 | # Apply the Grover iteration 17 | for _ in range(int(np.sqrt(2 ** self.n))):  # ~sqrt(N) iterations, matching Grover's scaling 18 | # Placeholder oracle: a real Grover oracle would flip the phase of the marked state 19 | circuit.x(self.n - 1) 20 | circuit.barrier() 21 | # Simplified stand-in for the diffusion operator (inversion about the mean) 22 | circuit.h(self.n - 1) 23 | circuit.x(self.n - 1) 24 | circuit.h(self.n - 1) 25 | circuit.barrier() 26 | 27 | # Measure the qubits 28 | circuit.measure_all() 29 | 30 | # Execute the circuit 31 | simulator = Aer.get_backend('qasm_simulator') 32 | job = execute(circuit, simulator) 33 | result = job.result() 34 | counts = result.get_counts() 35 | 36 | # Find the most likely outcome 37 | outcome = max(counts, key=counts.get) 38 | 39 | # Return the outcome 40 | return outcome 41 | 42 | def main(): 43 | grover = Grover(5) 44 | outcome = grover.grover() 45 | print(outcome) 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /quantum/quantum_algorithms/grover/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from grover import Grover 3 | 4 | def main(): 5 | grover = Grover(5) 6 | outcome = grover.grover() 7 | print(outcome) 8 | 9 | if __name__ == "__main__": 10 | main() 11 | -------------------------------------------------------------------------------- /quantum/quantum_algorithms/shor/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from shor import Shor  # expects a sibling shor.py defining the Shor class (not included in this tree) 3 | 4 | def main(): 5 | shor = Shor(5) 6 | outcome = shor.shor() 7 | print(outcome) 8 | 9 | if __name__ == "__main__": 10 | main() 11 | -------------------------------------------------------------------------------- /quantum/quantum_simulators/cirq/cirq_simulator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cirq 3 | 4 | class CirqSimulator: 5 | def __init__(self, n_qubits): 6 | self.n_qubits = n_qubits 7 | 8 | def create_circuit(self): 9 | qubits = [cirq.LineQubit(i) for i in range(self.n_qubits)] 10 | circuit = cirq.Circuit() 11 | return circuit, qubits 12 | 13 | def add_hadamard(self, circuit, qubit): 14 | circuit.append(cirq.H(qubit)) 15 | return circuit 16 | 17 | def add_cnot(self, circuit, control, target): 18 | circuit.append(cirq.CNOT(control, target)) 19 | return circuit 20 | 21 | def add_measure(self, circuit, qubit): 22 | circuit.append(cirq.measure(qubit)) 23 | return circuit 24 | 25 | def simulate(self, circuit): 26 | simulator = cirq.Simulator() 27 | result = simulator.run(circuit) 28 | return result 29 | 30 | def main(): 31 | simulator = CirqSimulator(5) 32 | circuit, qubits = simulator.create_circuit() 33 | circuit = simulator.add_hadamard(circuit, qubits[0]) 34 | circuit = simulator.add_cnot(circuit, qubits[0], qubits[1]) 35 | circuit = simulator.add_measure(circuit, qubits[1]) 36 | result = 
simulator.simulate(circuit) 37 | print(result) 38 | 39 | if __name__ == "__main__": 40 | main() 41 | -------------------------------------------------------------------------------- /quantum/quantum_simulators/cirq/cirq_simulator_with_noise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cirq 3 | 4 | class CirqSimulatorWithNoise: 5 | def __init__(self, n_qubits): 6 | self.n_qubits = n_qubits 7 | 8 | def create_circuit(self): 9 | qubits = [cirq.LineQubit(i) for i in range(self.n_qubits)] 10 | circuit = cirq.Circuit() 11 | return circuit, qubits 12 | 13 | def add_hadamard(self, circuit, qubit): 14 | circuit.append(cirq.H(qubit)) 15 | return circuit 16 | 17 | def add_cnot(self, circuit, control, target): 18 | circuit.append(cirq.CNOT(control, target)) 19 | return circuit 20 | 21 | def add_measure(self, circuit, qubit): 22 | circuit.append(cirq.measure(qubit)) 23 | return circuit 24 | 25 | def add_noise(self, circuit): 26 | # Add depolarizing noise to the circuit 27 | noise_model = cirq.depolarize(p=0.1) 28 | circuit = circuit.with_noise(noise_model)  # with_noise inserts the channel after every moment 29 | return circuit 30 | 31 | def simulate(self, circuit): 32 | simulator = cirq.Simulator() 33 | result = simulator.run(circuit) 34 | return result 35 | 36 | def main(): 37 | simulator = CirqSimulatorWithNoise(5) 38 | circuit, qubits = simulator.create_circuit() 39 | circuit = simulator.add_hadamard(circuit, qubits[0]) 40 | circuit = simulator.add_cnot(circuit, qubits[0], qubits[1]) 41 | circuit = simulator.add_measure(circuit, qubits[1]) 42 | circuit = simulator.add_noise(circuit) 43 | result = simulator.simulate(circuit) 44 | print(result) 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /quantum/quantum_simulators/qiskit/qiskit_simulator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qiskit import QuantumCircuit, execute, Aer 3 | 4 | class QiskitSimulator: 5 | def __init__(self, n_qubits): 6 | self.n_qubits = n_qubits 7 | 8 | def create_circuit(self): 9 | circuit = QuantumCircuit(self.n_qubits, self.n_qubits)  # classical bits are needed to record measurements 10 | return circuit 11 | 12 | def add_hadamard(self, circuit, qubit): 13 | circuit.h(qubit) 14 | return circuit 15 | 16 | def add_cnot(self, circuit, control, target): 17 | circuit.cx(control, target) 18 | return circuit 19 | 20 | def add_measure(self, circuit, qubit): 21 | circuit.measure(qubit, qubit) 22 | return circuit 23 | 24 | def simulate(self, circuit): 25 | simulator = Aer.get_backend('qasm_simulator') 26 | job = execute(circuit, simulator) 27 | result = job.result() 28 | counts = result.get_counts() 29 | return counts 30 | 31 | def main(): 32 | simulator = QiskitSimulator(5) 33 | circuit = simulator.create_circuit() 34 | circuit = simulator.add_hadamard(circuit, 0) 35 | circuit = simulator.add_cnot(circuit, 0, 1) 36 | circuit = simulator.add_measure(circuit, 1) 37 | counts = simulator.simulate(circuit) 38 | print(counts) 39 | 40 | if __name__ == "__main__": 41 | main() 42 | -------------------------------------------------------------------------------- /quantum/quantum_simulators/qiskit/qiskit_simulator_with_noise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qiskit import QuantumCircuit, execute, Aer 3 | 4 | class QiskitSimulatorWithNoise: 5 | def __init__(self, n_qubits): 6 | self.n_qubits = n_qubits 7 | 8 | def create_circuit(self): 9
| circuit = QuantumCircuit(self.n_qubits, self.n_qubits)  # classical bits for the measurement results 10 | return circuit 11 | 12 | def add_hadamard(self, circuit, qubit): 13 | circuit.h(qubit) 14 | return circuit 15 | 16 | def add_cnot(self, circuit, control, target): 17 | circuit.cx(control, target) 18 | return circuit 19 | 20 | def add_measure(self, circuit, qubit): 21 | circuit.measure(qubit, qubit) 22 | return circuit 23 | 24 | def add_noise(self): 25 | # Build a depolarizing noise model; it is applied at execution time, not baked into the circuit 26 | from qiskit.providers.aer.noise import NoiseModel, depolarizing_error 27 | noise_model = NoiseModel() 28 | noise_model.add_all_qubit_quantum_error(depolarizing_error(0.1, 1), ['u1', 'u2', 'u3']) 29 | noise_model.add_all_qubit_quantum_error(depolarizing_error(0.1, 2), ['cx']) 30 | return noise_model 31 | 32 | def simulate(self, circuit, noise_model=None): 33 | simulator = Aer.get_backend('qasm_simulator') 34 | job = execute(circuit, simulator, noise_model=noise_model) 35 | result = job.result() 36 | counts = result.get_counts() 37 | return counts 38 | 39 | def main(): 40 | simulator = QiskitSimulatorWithNoise(5) 41 | circuit = simulator.create_circuit() 42 | circuit = simulator.add_hadamard(circuit, 0) 43 | circuit = simulator.add_cnot(circuit, 0, 1) 44 | circuit = simulator.add_measure(circuit, 1) 45 | noise_model = simulator.add_noise() 46 | counts = simulator.simulate(circuit, noise_model) 47 | print(counts) 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /quantum/qubits/qubit_measurement/qubit_measurement.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qiskit import QuantumCircuit, execute, Aer 3 | 4 | class QubitMeasurement: 5 | def __init__(self): 6 | pass 7 | 8 | def measure_z_basis(self, qubit): 9 | circuit = QuantumCircuit(1, 1)  # one classical bit to hold the result 10 | circuit.measure(qubit, 0) 11 | return circuit 12 | 13 | def measure_x_basis(self, qubit): 14 | circuit = QuantumCircuit(1, 1) 15 | circuit.h(qubit) 16 | circuit.measure(qubit, 0) 17 | return circuit 18 | 19 | def measure_y_basis(self, qubit): 20 | circuit = QuantumCircuit(1, 1) 21 | circuit.sdg(qubit)  # S-dagger then H rotates the Y eigenbasis onto the computational basis 22 | circuit.h(qubit) 23 | circuit.measure(qubit, 0) 24 | return circuit 25 | 26 | def main(): 27 | qubit_measurement = QubitMeasurement() 28 | measure_z_circuit = qubit_measurement.measure_z_basis(0) 29 | measure_x_circuit = qubit_measurement.measure_x_basis(0) 30 | measure_y_circuit = qubit_measurement.measure_y_basis(0) 31 | 32 | simulator = Aer.get_backend('qasm_simulator') 33 | job = execute(measure_z_circuit, simulator) 34 | result = job.result() 35 | print(result.get_counts()) 36 | 37 | job = execute(measure_x_circuit, simulator) 38 | result = job.result() 39 | print(result.get_counts()) 40 | 41 | job = execute(measure_y_circuit, simulator) 42 | result = job.result() 43 | print(result.get_counts()) 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /quantum/qubits/qubit_measurement/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from qubit_measurement import QubitMeasurement 3 | from qiskit import Aer, execute  # needed for the simulator calls below 4 | def main(): 5 | qubit_measurement = QubitMeasurement() 6 | measure_z_circuit = qubit_measurement.measure_z_basis(0) 7 | measure_x_circuit = qubit_measurement.measure_x_basis(0) 8 | measure_y_circuit = qubit_measurement.measure_y_basis(0) 9 | 10 | simulator = Aer.get_backend('qasm_simulator') 11 | job = execute(measure_z_circuit, simulator) 12 | result = job.result() 13 |
--------------------------------------------------------------------------------
/quantum/qubits/qubit_operations/qubit_operations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from qiskit import QuantumCircuit, execute, Aer
3 | 
4 | class QubitOperations:
5 |     def __init__(self):
6 |         pass
7 | 
8 |     def hadamard_gate(self, qubit):
9 |         circuit = QuantumCircuit(1)
10 |         circuit.h(qubit)
11 |         return circuit
12 | 
13 |     def pauli_x_gate(self, qubit):
14 |         circuit = QuantumCircuit(1)
15 |         circuit.x(qubit)
16 |         return circuit
17 | 
18 |     def pauli_y_gate(self, qubit):
19 |         circuit = QuantumCircuit(1)
20 |         circuit.y(qubit)
21 |         return circuit
22 | 
23 |     def pauli_z_gate(self, qubit):
24 |         circuit = QuantumCircuit(1)
25 |         circuit.z(qubit)
26 |         return circuit
27 | 
28 |     def cnot_gate(self, control_qubit, target_qubit):
29 |         circuit = QuantumCircuit(2)
30 |         circuit.cx(control_qubit, target_qubit)
31 |         return circuit
32 | 
33 |     def swap_gate(self, qubit1, qubit2):
34 |         circuit = QuantumCircuit(2)
35 |         circuit.swap(qubit1, qubit2)
36 |         return circuit
37 | 
38 | def main():
39 |     qubit_operations = QubitOperations()
40 |     circuits = [
41 |         qubit_operations.hadamard_gate(0),
42 |         qubit_operations.pauli_x_gate(0),
43 |         qubit_operations.pauli_y_gate(0),
44 |         qubit_operations.pauli_z_gate(0),
45 |         qubit_operations.cnot_gate(0, 1),
46 |         qubit_operations.swap_gate(0, 1),
47 |     ]
48 | 
49 |     simulator = Aer.get_backend('qasm_simulator')
50 |     for circuit in circuits:
51 |         circuit.measure_all()  # get_counts() requires measurements in the circuit
52 |         job = execute(circuit, simulator)
53 |         result = job.result()
54 |         print(result.get_counts())
55 | 
56 | if __name__ == "__main__":
57 |     main()
--------------------------------------------------------------------------------
/quantum/qubits/qubit_operations/train.py:
--------------------------------------------------------------------------------
1 | from qiskit import execute, Aer
2 | from qubit_operations import QubitOperations
3 | 
4 | def main():
5 |     qubit_operations = QubitOperations()
6 |     circuits = [
7 |         qubit_operations.hadamard_gate(0),
8 |         qubit_operations.pauli_x_gate(0),
9 |         qubit_operations.pauli_y_gate(0),
10 |         qubit_operations.pauli_z_gate(0),
11 |         qubit_operations.cnot_gate(0, 1),
12 |         qubit_operations.swap_gate(0, 1),
13 |     ]
14 | 
15 |     simulator = Aer.get_backend('qasm_simulator')
16 |     for circuit in circuits:
17 |         circuit.measure_all()  # measurements are required before reading counts
18 |         job = execute(circuit, simulator)
19 |         result = job.result()
20 |         print(result.get_counts())
21 | 
22 | if __name__ == "__main__":
23 |     main()
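24 | 
25 | # Illustrative check (an addition, not in the original): the Pauli-X circuit
26 | # flips |0> to |1>, so on an ideal simulator every shot should read '1'.
27 | def check_pauli_x(shots=1024):
28 |     circuit = QubitOperations().pauli_x_gate(0)
29 |     circuit.measure_all()
30 |     counts = execute(circuit, Aer.get_backend('qasm_simulator'), shots=shots).result().get_counts()
31 |     assert counts.get('1', 0) == shots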
--------------------------------------------------------------------------------
/security/access_control/neural_network_access_control.py:
--------------------------------------------------------------------------------
1 | # neural_network_access_control.py
2 | 
3 | import torch
4 | import torch.nn as nn
5 | import torch.optim as optim
6 | 
7 | class NeuralNetworkAccessControlProtocol:
8 |     def __init__(self, input_size, hidden_size, output_size):
9 |         self.input_size = input_size
10 |         self.hidden_size = hidden_size
11 |         self.output_size = output_size
12 |         self.model = nn.Sequential(
13 |             nn.Linear(input_size, hidden_size),
14 |             nn.ReLU(),
15 |             nn.Linear(hidden_size, output_size)
16 |         )
17 |         self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
18 | 
19 |     def grant_access(self, user_id):
20 |         # nn.Linear expects float input, and len(user_id) must equal input_size
21 |         tensor_user_id = torch.tensor([ord(c) for c in user_id], dtype=torch.float32)
22 |         output = self.model(tensor_user_id)
23 |         return output
24 | 
25 |     def deny_access(self, output):
26 |         noise = torch.randn_like(output)
27 |         denied_output = output + noise
28 |         return denied_output
29 | 
30 |     def decode_access(self, denied_output):
31 |         # .item() requires a single-element tensor, i.e. output_size == 1
32 |         if denied_output.item() > 0.5:
33 |             return "Access granted"
34 |         else:
35 |             return "Access denied"
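36 | 
37 | # Illustrative usage sketch (an addition, not part of the original protocol):
38 | # the sizes below are assumptions -- an 8-character user id feeding a model
39 | # with input_size=8 and a single scalar decision output.
40 | if __name__ == "__main__":
41 |     protocol = NeuralNetworkAccessControlProtocol(8, 16, 1)
42 |     output = protocol.grant_access("user1234")
43 |     denied = protocol.deny_access(output)
44 |     print(protocol.decode_access(denied))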
--------------------------------------------------------------------------------
/security/access_control/quantum_access_control.py:
--------------------------------------------------------------------------------
1 | # quantum_access_control.py
2 | 
3 | import numpy as np
4 | from qiskit import QuantumCircuit, execute, Aer
5 | from qiskit.quantum_info import Statevector
6 | 
7 | class QuantumAccessControlProtocol:
8 |     def __init__(self, num_qubits):
9 |         self.num_qubits = num_qubits
10 |         self.qc = QuantumCircuit(num_qubits)
11 | 
12 |     def grant_access(self, user_id):
13 |         # Requires num_qubits > 8 * len(user_id): one flag qubit plus one qubit per bit
14 |         binary_user_id = ''.join(format(ord(c), '08b') for c in user_id)
15 |         self.qc.x(0)  # Initialize the first qubit to |1>
16 |         for i, bit in enumerate(binary_user_id):
17 |             if bit == '1':
18 |                 self.qc.x(i + 1)  # Apply X gate to grant access
19 |         self.qc.barrier()
20 |         self.qc.h(0)  # Apply Hadamard gate to create a superposition
21 |         self.qc.cx(0, 1)  # Apply CNOT gate to entangle the qubits
22 | 
23 |     def deny_access(self):
24 |         self.qc.measure_all()  # counts are only available for measured circuits
25 |         job = execute(self.qc, Aer.get_backend('qasm_simulator'))
26 |         result = job.result()
27 |         counts = result.get_counts()
28 |         access_granted = False
29 |         for i in range(self.num_qubits):
30 |             if counts.get('1' * (i + 1) + '0' * (self.num_qubits - i - 1), 0) > 0:
31 |                 access_granted = True
32 |         return access_granted
33 | 
34 |     def decode_access(self, access_granted):
35 |         if access_granted:
36 |             return "Access granted"
37 |         else:
38 |             return "Access denied"
--------------------------------------------------------------------------------
/security/authentication/neural_network_authentication.py:
--------------------------------------------------------------------------------
1 | # neural_network_authentication.py
2 | 
3 | import torch
4 | import torch.nn as nn
5 | import torch.optim as optim
6 | 
7 | class NeuralNetworkAuthenticationProtocol:
8 |     def __init__(self, input_size, hidden_size, output_size):
9 |         self.input_size = input_size
10 |         self.hidden_size = hidden_size
11 |         self.output_size = output_size
12 |         self.model = nn.Sequential(
13 |             nn.Linear(input_size, hidden_size),
14 |             nn.ReLU(),
15 |             nn.Linear(hidden_size, output_size)
16 |         )
17 |         self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
18 | 
19 |     def authenticate_user(self, user_id):
20 |         # nn.Linear expects float input, and len(user_id) must equal input_size
21 |         tensor_user_id = torch.tensor([ord(c) for c in user_id], dtype=torch.float32)
22 |         output = self.model(tensor_user_id)
23 |         return output
24 | 
25 |     def verify_user(self, output):
26 |         noise = torch.randn_like(output)
27 |         verified_output = output + noise
28 |         return verified_output
29 | 
30 |     def decode_user_id(self, verified_output):
31 |         text_user_id = ''
32 |         for i in range(verified_output.shape[0]):
33 |             # Clamp to a valid code point; untrained outputs are arbitrary floats
34 |             text_user_id += chr(max(0, min(0x10FFFF, int(verified_output[i].item()))))
35 |         return text_user_id
--------------------------------------------------------------------------------
/security/authentication/quantum_authentication.py:
--------------------------------------------------------------------------------
1 | # quantum_authentication.py
2 | 
3 | import numpy as np
4 | from qiskit import QuantumCircuit, execute, Aer
5 | from qiskit.quantum_info import Statevector
6 | 
7 | class QuantumAuthenticationProtocol:
8 |     def __init__(self, num_qubits):
9 |         self.num_qubits = num_qubits
10 |         self.qc = QuantumCircuit(num_qubits)
11 | 
12 |     def authenticate_user(self, user_id):
13 |         # Requires num_qubits > 8 * len(user_id): one flag qubit plus one qubit per bit
14 |         binary_user_id = ''.join(format(ord(c), '08b') for c in user_id)
15 |         self.qc.x(0)  # Initialize the first qubit to |1>
16 |         for i, bit in enumerate(binary_user_id):
17 |             if bit == '1':
18 |                 self.qc.x(i + 1)  # Apply X gate to authenticate the user
19 |         self.qc.barrier()
20 |         self.qc.h(0)  # Apply Hadamard gate to create a superposition
21 |         self.qc.cx(0, 1)  # Apply CNOT gate to entangle the qubits
22 | 
23 |     def verify_user(self):
24 |         self.qc.measure_all()  # counts are only available for measured circuits
25 |         job = execute(self.qc, Aer.get_backend('qasm_simulator'))
26 |         result = job.result()
27 |         counts = result.get_counts()
28 |         user_id = ''
29 |         for i in range(self.num_qubits):
30 |             if counts.get('1' * (i + 1) + '0' * (self.num_qubits - i - 1), 0) > 0:
31 |                 user_id += '1'
32 |             else:
33 |                 user_id += '0'
34 |         return user_id
35 | 
36 |     def decode_user_id(self, user_id):
37 |         text_user_id = ''
38 |         for i in range(0, len(user_id), 8):
39 |             byte = user_id[i:i + 8]
40 |             text_user_id += chr(int(byte, 2))
41 |         return text_user_id
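42 | 
43 | # Illustrative usage sketch (an addition, not part of the original file):
44 | # a 1-character user id needs 8 data qubits plus the flag qubit, so
45 | # num_qubits=9 here is an assumption chosen to fit the encoding above. The
46 | # bit-pattern readout in verify_user is heuristic, so the raw bits are
47 | # printed rather than decoded.
48 | if __name__ == "__main__":
49 |     protocol = QuantumAuthenticationProtocol(9)
50 |     protocol.authenticate_user("A")
51 |     print(protocol.verify_user())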
--------------------------------------------------------------------------------
/security/encryption/neural_network_encryption.py:
--------------------------------------------------------------------------------
1 | # neural_network_encryption.py
2 | 
3 | import torch
4 | import torch.nn as nn
5 | import torch.optim as optim
6 | 
7 | class NeuralNetworkEncryptionProtocol:
8 |     def __init__(self, input_size, hidden_size, output_size):
9 |         self.input_size = input_size
10 |         self.hidden_size = hidden_size
11 |         self.output_size = output_size
12 |         self.model = nn.Sequential(
13 |             nn.Linear(input_size, hidden_size),
14 |             nn.ReLU(),
15 |             nn.Linear(hidden_size, output_size)
16 |         )
17 |         self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
18 | 
19 |     def encrypt_message(self, message):
20 |         # nn.Linear expects float input, and len(message) must equal input_size
21 |         tensor_message = torch.tensor([ord(c) for c in message], dtype=torch.float32)
22 |         output = self.model(tensor_message)
23 |         return output
24 | 
25 |     def decrypt_message(self, output):
26 |         noise = torch.randn_like(output)
27 |         decrypted_output = output + noise
28 |         return decrypted_output
29 | 
30 |     def decode_message(self, decrypted_output):
31 |         text_message = ''
32 |         for i in range(decrypted_output.shape[0]):
33 |             # Clamp to a valid code point; untrained outputs are arbitrary floats
34 |             text_message += chr(max(0, min(0x10FFFF, int(decrypted_output[i].item()))))
35 |         return text_message
--------------------------------------------------------------------------------
/security/encryption/quantum_encryption.py:
--------------------------------------------------------------------------------
1 | # quantum_encryption.py
2 | 
3 | import numpy as np
4 | from qiskit import QuantumCircuit, execute, Aer
5 | from qiskit.quantum_info import Statevector
6 | 
7 | class QuantumEncryptionProtocol:
8 |     def __init__(self, num_qubits):
9 |         self.num_qubits = num_qubits
10 |         self.qc = QuantumCircuit(num_qubits)
11 | 
12 |     def encrypt_message(self, message):
13 |         # Requires num_qubits > 8 * len(message): one flag qubit plus one qubit per bit
14 |         binary_message = ''.join(format(ord(c), '08b') for c in message)
15 |         self.qc.x(0)  # Initialize the first qubit to |1>
16 |         for i, bit in enumerate(binary_message):
17 |             if bit == '1':
18 |                 self.qc.x(i + 1)  # Apply X gate to encrypt the bit
19 |         self.qc.barrier()
20 |         self.qc.h(0)  # Apply Hadamard gate to create a superposition
21 |         self.qc.cx(0, 1)  # Apply CNOT gate to entangle the qubits
22 | 
23 |     def decrypt_message(self):
24 |         self.qc.measure_all()  # counts are only available for measured circuits
25 |         job = execute(self.qc, Aer.get_backend('qasm_simulator'))
26 |         result = job.result()
27 |         counts = result.get_counts()
28 |         message = ''
29 |         for i in range(self.num_qubits):
30 |             if counts.get('1' * (i + 1) + '0' * (self.num_qubits - i - 1), 0) > 0:
31 |                 message += '1'
32 |             else:
33 |                 message += '0'
34 |         return message
35 | 
36 |     def decode_message(self, message):
37 |         text_message = ''
38 |         for i in range(0, len(message), 8):
39 |             byte = message[i:i + 8]
40 |             text_message += chr(int(byte, 2))
41 |         return text_message
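42 | 
43 | # Illustrative usage sketch (an addition, not part of the original file):
44 | # num_qubits=9 is an assumption sized for a 1-character message (8 data
45 | # qubits plus the flag qubit). The bit-pattern readout above is heuristic,
46 | # so the raw bits are printed rather than asserted.
47 | if __name__ == "__main__":
48 |     protocol = QuantumEncryptionProtocol(9)
49 |     protocol.encrypt_message("A")
50 |     print(protocol.decrypt_message())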
--------------------------------------------------------------------------------
/tests/unit_tests/ai/test_neural_network.py:
--------------------------------------------------------------------------------
1 | # test_neural_network.py
2 | 
3 | import unittest
4 | import numpy as np
5 | from ai.neural_network import NeuralNetwork
6 | 
7 | class TestNeuralNetwork(unittest.TestCase):
8 |     def test_forward_pass(self):
9 |         nn = NeuralNetwork(784, 256, 10)
10 |         input_data = np.random.rand(1, 784)
11 |         output = nn.forward_pass(input_data)
12 |         self.assertEqual(output.shape, (1, 10))
13 | 
14 |     def test_backward_pass(self):
15 |         nn = NeuralNetwork(784, 256, 10)
16 |         input_data = np.random.rand(1, 784)
17 |         output = nn.forward_pass(input_data)
18 |         loss = nn.calculate_loss(output, np.random.rand(1, 10))
19 |         self.assertGreater(loss, 0)
20 | 
21 | if __name__ == '__main__':
22 |     unittest.main()
--------------------------------------------------------------------------------
/tests/unit_tests/quantum/test_quantum_circuit.py:
--------------------------------------------------------------------------------
1 | # test_quantum_circuit.py
2 | 
3 | import unittest
4 | from quantum.quantum_circuit import QuantumCircuit
5 | 
6 | class TestQuantumCircuit(unittest.TestCase):
7 |     def test_apply_gate(self):
8 |         qc = QuantumCircuit(2)
9 |         qc.apply_gate('X', 0)
10 |         self.assertEqual(qc.get_state(), [0, 1, 0, 0])
11 | 
12 |     def test_measure(self):
13 |         qc = QuantumCircuit(2)
14 |         qc.apply_gate('X', 0)
15 |         measurement = qc.measure()
16 |         self.assertIn(measurement, [0, 1])
17 | 
18 | if __name__ == '__main__':
19 |     unittest.main()
--------------------------------------------------------------------------------
/tests/unit_tests/quantum/test_quantum_simulation.py:
--------------------------------------------------------------------------------
1 | # test_quantum_simulation.py
2 | 
3 | import unittest
4 | from quantum.quantum_simulation import QuantumSimulation
5 | 
6 | class TestQuantumSimulation(unittest.TestCase):
7 |     def test_simulate(self):
8 |         qs = QuantumSimulation(2)
9 |         qs.simulate()
10 |         self.assertEqual(qs.get_state(), [0, 1, 0, 0])
11 | 
12 |     def test_measure(self):
13 |         qs = QuantumSimulation(2)
14 |         qs.simulate()
15 |         measurement = qs.measure()
16 |         self.assertIn(measurement, [0, 1])
17 | 
18 | if __name__ == '__main__':
19 |     unittest.main()
--------------------------------------------------------------------------------
/utils/data_structures/graphs/graph.py:
--------------------------------------------------------------------------------
1 | # graph.py
2 | 
3 | class Graph:
4 |     def __init__(self):
5 |         self.vertices = {}
6 |         self.edges = {}
7 | 
8 |     def add_vertex(self, vertex):
9 |         if vertex not in self.vertices:
10 |             self.vertices[vertex] = []
11 | 
12 |     def add_edge(self, vertex1, vertex2):
13 |         if vertex1 in self.vertices and vertex2 in self.vertices:
14 |             if vertex2 not in self.edges.get(vertex1, []):
15 |                 self.edges[vertex1] = self.edges.get(vertex1, []) + [vertex2]
16 |             if vertex1 not in self.edges.get(vertex2, []):
17 |                 self.edges[vertex2] = self.edges.get(vertex2, []) + [vertex1]
18 | 
19 |     def remove_vertex(self, vertex):
20 |         if vertex in self.vertices:
21 |             del self.vertices[vertex]
22 |             self.edges.pop(vertex, None)  # drop the vertex's own adjacency list too
23 |             for adjacent_vertices in self.edges.values():
24 |                 if vertex in adjacent_vertices:
25 |                     adjacent_vertices.remove(vertex)
26 | 
27 |     def remove_edge(self, vertex1, vertex2):
28 |         if vertex1 in self.vertices and vertex2 in self.vertices:
29 |             if vertex2 in self.edges.get(vertex1, []):
30 |                 self.edges[vertex1].remove(vertex2)
31 |             if vertex1 in self.edges.get(vertex2, []):
32 |                 self.edges[vertex2].remove(vertex1)
33 | 
34 |     def get_adjacent_vertices(self, vertex):
35 |         return self.edges.get(vertex, [])
36 | 
37 |     def get_vertices(self):
38 |         return list(self.vertices.keys())
39 | 
40 |     def get_edges(self):
41 |         return self.edges
42 | 
43 |     def is_connected(self):
44 |         if not self.vertices:
45 |             return True  # the empty graph is trivially connected
46 |         visited = set()
47 |         stack = [next(iter(self.vertices))]
48 |         while stack:
49 |             vertex = stack.pop()
50 |             if vertex not in visited:
51 |                 visited.add(vertex)
52 |                 stack.extend(self.get_adjacent_vertices(vertex))
53 |         return len(visited) == len(self.vertices)
54 | 
55 |     def is_cyclic(self):
56 |         # DFS that remembers each vertex's parent: in an undirected graph the
57 |         # edge back to the parent is not a cycle, so it must be skipped.
58 |         if not self.vertices:
59 |             return False
60 |         visited = set()
61 |         stack = [(next(iter(self.vertices)), None)]
62 |         while stack:
63 |             vertex, parent = stack.pop()
64 |             if vertex in visited:
65 |                 continue
66 |             visited.add(vertex)
67 |             for adjacent_vertex in self.get_adjacent_vertices(vertex):
68 |                 if adjacent_vertex == parent:
69 |                     continue
70 |                 if adjacent_vertex in visited:
71 |                     return True
72 |                 stack.append((adjacent_vertex, vertex))
73 |         return False
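74 | 
75 | # Illustrative usage sketch (an addition, not part of the original module):
76 | # a triangle is both connected and cyclic.
77 | if __name__ == "__main__":
78 |     g = Graph()
79 |     for v in ("a", "b", "c"):
80 |         g.add_vertex(v)
81 |     g.add_edge("a", "b")
82 |     g.add_edge("b", "c")
83 |     g.add_edge("c", "a")
84 |     print(g.is_connected(), g.is_cyclic())  # True True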
--------------------------------------------------------------------------------
/utils/data_structures/linked_lists/linked_list.py:
--------------------------------------------------------------------------------
1 | # linked_list.py
2 | 
3 | class Node:
4 |     def __init__(self, value):
5 |         self.value = value
6 |         self.next = None
7 | 
8 | class LinkedList:
9 |     def __init__(self):
10 |         self.head = None
11 | 
12 |     def append(self, value):
13 |         if not self.head:
14 |             self.head = Node(value)
15 |         else:
16 |             current = self.head
17 |             while current.next:
18 |                 current = current.next
19 |             current.next = Node(value)
20 | 
21 |     def remove(self, value):
22 |         if self.head is None:
23 |             return
24 |         if self.head.value == value:
25 |             self.head = self.head.next
26 |         else:
27 |             current = self.head
28 |             while current.next:
29 |                 if current.next.value == value:
30 |                     current.next = current.next.next
31 |                     return
32 |                 current = current.next
33 | 
34 |     def get_values(self):
35 |         values = []
36 |         current = self.head
37 |         while current:
38 |             values.append(current.value)
39 |             current = current.next
40 |         return values
41 | 
42 |     def is_cyclic(self):
43 |         # Floyd's tortoise-and-hare cycle detection
44 |         slow = self.head
45 |         fast = self.head
46 |         while fast and fast.next:
47 |             slow = slow.next
48 |             fast = fast.next.next
49 |             if slow == fast:
50 |                 return True
51 |         return False
--------------------------------------------------------------------------------
/utils/data_structures/queues/queue.py:
--------------------------------------------------------------------------------
1 | # queue.py
2 | 
3 | class Queue:
4 |     def __init__(self):
5 |         self.items = []
6 | 
7 |     def enqueue(self, item):
8 |         self.items.append(item)
9 | 
10 |     def dequeue(self):
11 |         # Note: list.pop(0) is O(n); collections.deque would make this O(1)
12 |         if not self.is_empty():
13 |             return self.items.pop(0)
14 |         else:
15 |             return None
16 | 
17 |     def peek(self):
18 |         if not self.is_empty():
19 |             return self.items[0]
20 |         else:
21 |             return None
22 | 
23 |     def is_empty(self):
24 |         return len(self.items) == 0
25 | 
26 |     def size(self):
27 |         return len(self.items)
--------------------------------------------------------------------------------
/utils/data_structures/stacks/stack.py:
--------------------------------------------------------------------------------
1 | # stack.py
2 | 
3 | class Stack:
4 |     def __init__(self):
5 |         self.items = []
6 | 
7 |     def push(self, item):
8 |         self.items.append(item)
9 | 
10 |     def pop(self):
11 |         if not self.is_empty():
12 |             return self.items.pop()
13 |         else:
14 |             return None
15 | 
16 |     def peek(self):
17 |         if not self.is_empty():
18 |             return self.items[-1]
19 |         else:
20 |             return None
21 | 
22 |     def is_empty(self):
23 |         return len(self.items) == 0
24 | 
25 |     def size(self):
26 |         return len(self.items)
--------------------------------------------------------------------------------
/utils/data_structures/trees/tree.py:
--------------------------------------------------------------------------------
1 | # tree.py
2 | 
3 | class Node:
4 |     def __init__(self, value):
5 |         self.value = value
6 |         self.children = []
7 | 
8 | class Tree:
9 |     def __init__(self, root):
10 |         self.root = Node(root)
11 | 
12 |     def add_child(self, parent, child):
13 |         node = self.find_node(parent)
14 |         if node is not None:
15 |             node.children.append(Node(child))
16 | 
17 |     def remove_child(self, parent, child):
18 |         node = self.find_node(parent)
19 |         if node is not None:
20 |             node.children = [n for n in node.children if n.value != child]
21 | 
22 |     def get_values(self):
23 |         values = []
24 |         stack = [self.root]
25 |         while stack:
26 |             node = stack.pop()
27 |             values.append(node.value)
28 |             stack.extend(node.children)
29 |         return values
30 | 
31 |     def find_node(self, value):
32 |         stack = [self.root]
33 |         while stack:
34 |             node = stack.pop()
35 |             if node.value == value:
36 |                 return node
37 |             stack.extend(node.children)
38 |         return None
39 | 
40 |     def _child(self, node, index):
41 |         # The balance and BST checks below treat the first two children as the
42 |         # left and right subtrees; missing children count as empty subtrees.
43 |         return node.children[index] if node is not None and len(node.children) > index else None
44 | 
45 |     def is_balanced(self):
46 |         def height(node):
47 |             if node is None:
48 |                 return 0
49 |             return 1 + max((height(n) for n in node.children), default=0)
50 | 
51 |         return abs(height(self._child(self.root, 0)) - height(self._child(self.root, 1))) <= 1
52 | 
53 |     def is_bst(self):
54 |         def is_bst_node(node, min_value, max_value):
55 |             if node is None:
56 |                 return True
57 |             if not min_value < node.value < max_value:
58 |                 return False
59 |             return (is_bst_node(self._child(node, 0), min_value, node.value) and
60 |                     is_bst_node(self._child(node, 1), node.value, max_value))
61 | 
62 |         return is_bst_node(self.root, float('-inf'), float('inf'))
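63 | 
64 | # Illustrative usage sketch (an addition, not part of the original module):
65 | # a root with two leaf children is balanced and, with ordered values, a BST.
66 | if __name__ == "__main__":
67 |     tree = Tree(2)
68 |     tree.add_child(2, 1)
69 |     tree.add_child(2, 3)
70 |     print(tree.is_balanced(), tree.is_bst())  # True True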
--------------------------------------------------------------------------------