├── hki └── sertifikat_EC002024264348.pdf ├── library.properties ├── LICENSE ├── src ├── fnn.h └── fnn.cpp ├── examples └── sigmoid │ └── sigmoid.ino └── README.md /hki/sertifikat_EC002024264348.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/galihru/fnn/HEAD/hki/sertifikat_EC002024264348.pdf -------------------------------------------------------------------------------- /library.properties: -------------------------------------------------------------------------------- 1 | name=fnn 2 | version=1.0.0 3 | author=GALIH RIDHO UTOMO 4 | maintainer=GALIH RIDHO UTOMO 5 | sentence=Fuzzy Neural Network for Arduino. 6 | paragraph=The FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy. 7 | category=Signal Input/Output 8 | url=https://github.com/4211421036/fnn.git 9 | architectures=* 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 GALIH RIDHO UTOMO 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/fnn.h: -------------------------------------------------------------------------------- 1 | #ifndef FNN_H 2 | #define FNN_H 3 | 4 | #include <vector> 5 | #include <functional> 6 | #include <map> 7 | #include <string> 8 | #include <cmath> 9 | 10 | class FNN { 11 | private: 12 | std::vector<std::vector<float>> weights; // Weights for each layer 13 | std::vector<float> biases; // Bias for each layer 14 | std::function<float(float)> activationFunction; // Activation function 15 | std::map<std::string, float> fuzzyRules; // Fuzzy rules: linguistic label -> crisp threshold 16 | 17 | float computeLoss(const std::vector<float>& predicted, const std::vector<float>& expected); 18 | 19 | // Defuzzification: map a crisp network output back to a linguistic label 20 | std::string defuzzify(float fuzzyOutput); 21 | 22 | public: 23 | FNN(int inputSize = 3, float bias = 0.1, std::function<float(float)> activation = nullptr); 24 | 25 | // Set weights 26 | void setWeights(const std::vector<float>& newWeights); 27 | 28 | // Set biases 29 | void setBiases(const std::vector<float>& newBiases); 30 | 31 | // Set activation function 32 | void setActivationFunction(std::function<float(float)> activation); 33 | 34 | // Set fuzzy rules 35 | void setFuzzyRules(const std::map<std::string, float>& rules); 36 | 37 | // FNN prediction: forward pass + defuzzification 38 | std::string predictFNN(const std::vector<float>& inputs); 39 | 40 | // Training function 41 | void train(const std::vector<std::vector<float>>& inputs, const std::vector<std::string>& targets, int epochs = 100, float learningRate = 0.01); 42 | 43 | // Evaluasi 44 | // Evaluasi Akurasi 45 | float evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs); 46 | 47 | // Evaluasi 
Presisi 48 | float evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs); 49 | 50 | // Provided activation functions 51 | static float sigmoid(float x); 52 | static float tanh(float x); 53 | static std::function<float(float)> leakyRelu(float alpha = 0.01); 54 | static std::function<float(float)> elu(float alpha = 1.0); 55 | static float softplus(float x); 56 | }; 57 | 58 | #endif 59 | -------------------------------------------------------------------------------- /examples/sigmoid/sigmoid.ino: -------------------------------------------------------------------------------- 1 | #include "fnn.h" 2 | 3 | FNN fnn(6); // Neural network with 6 inputs 4 | 5 | void setup() { 6 | Serial.begin(9600); 7 | 8 | // Set weights, biases, and the activation function 9 | fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6}); 10 | fnn.setBiases({0.1, 0.2}); // Biases for the hidden and output layers 11 | fnn.setActivationFunction(FNN::sigmoid); // Sigmoid suits classification-style outputs 12 | 13 | // Fuzzy rules (note: the labels must match the training targets) 14 | fnn.setFuzzyRules({ 15 | {"Tidak sesuai", 0.0}, 16 | {"Sedikit", 0.2}, 17 | {"Sangat Belum", 0.4}, 18 | {"Belum Banyak", 0.6}, 19 | {"Sedikit Banyak", 0.7}, 20 | {"Banyak", 1.0}, 21 | {"Extrem", 1.1} 22 | }); 23 | 24 | // Training data 25 | std::vector<std::vector<float>> trainingInputs = { 26 | {4.5, 2.8, 0.9, 3.7, 3.1, 7.9}, 27 | {1.2, 0.6, 0.3, 0.5, 0.2, 0.7}, 28 | {0.4, 0.3, 0.2, 0.6, 0.5, 0.4}, 29 | {5.1, 2.4, 1.2, 4.1, 3.2, 6.5}, 30 | {3.3, 1.7, 0.6, 3.4, 2.3, 6.1} 31 | }; 32 | std::vector<std::string> trainingTargets = {"Banyak", "Sedikit", "Tidak sesuai", "Sedikit Banyak", "Banyak"}; 33 | 34 | // Test data 35 | std::vector<std::vector<float>> testInputs = { 36 | {4.5, 2.8, 0.9, 3.7, 3.1, 7.9}, 37 | {1.2, 0.6, 0.3, 0.5, 0.2, 0.7}, 38 | {0.4, 0.3, 0.2, 0.6, 0.5, 0.4} 39 | }; 40 | std::vector<std::string> testTargets = {"Banyak", "Sedikit", "Tidak sesuai"}; 41 | 42 | int numEpochs = 1000; 43 | float learningRate = 0.01; 44 | 45 | // Train the model 
46 | for (int epoch = 0; epoch < numEpochs; ++epoch) { 47 | fnn.train(trainingInputs, trainingTargets, 1, learningRate); // One epoch per outer iteration; passing numEpochs here would run numEpochs^2 epochs in total 48 | 49 | if (epoch % 100 == 0) { // Evaluate every 100 epochs 50 | float accuracy = fnn.evaluateAccuracy(testInputs, testTargets); 51 | float precision = fnn.evaluatePrecision(testInputs, testTargets); 52 | Serial.print("Epoch: "); 53 | Serial.print(epoch); 54 | Serial.print(" | Akurasi: "); 55 | Serial.print(accuracy); 56 | Serial.print("% | Presisi: "); 57 | Serial.print(precision); 58 | Serial.println("%"); 59 | } 60 | } 61 | 62 | // Prediction after training 63 | Serial.println("Hasil Prediksi setelah Pelatihan:"); 64 | for (size_t i = 0; i < testInputs.size(); ++i) { 65 | String result = fnn.predictFNN(testInputs[i]).c_str(); // Predicted label 66 | Serial.print("Input ke-"); 67 | Serial.print(i + 1); 68 | Serial.print(": "); 69 | Serial.println(result); 70 | } 71 | } 72 | 73 | void loop() { 74 | // Nothing to do in the loop for this example 75 | } 76 | -------------------------------------------------------------------------------- /src/fnn.cpp: -------------------------------------------------------------------------------- 1 | #include "fnn.h" 2 | #include <cmath> 3 | 4 | // Constructor: two layers (hidden + output), zero-initialised weights, shared initial bias 5 | FNN::FNN(int inputSize, float bias, std::function<float(float)> activation) 6 | : weights(2, std::vector<float>(inputSize, 0.0)), biases(2, bias), activationFunction(activation) { 7 | if (!activationFunction) { 8 | activationFunction = sigmoid; // Default activation function is sigmoid 9 | } 10 | } 11 | 12 | // Set weights (accepted only when the size matches the input layer) 13 | void FNN::setWeights(const std::vector<float>& newWeights) { 14 | if (newWeights.size() == weights[0].size()) { 15 | weights[0] = newWeights; 16 | } 17 | } 18 | 19 | // Set biases (one value per layer is required) 20 | void FNN::setBiases(const std::vector<float>& newBiases) { 21 | if (newBiases.size() == biases.size()) { 22 | biases = newBiases; 23 | } 24 | } 25 | 26 | // Set activation function 27 | void FNN::setActivationFunction(std::function<float(float)> activation) { 28 | activationFunction = activation; 
29 | } 30 | 31 | // Set fuzzy rules 32 | void FNN::setFuzzyRules(const std::map& rules) { 33 | fuzzyRules = rules; 34 | } 35 | 36 | // Fungsi aktivasi: Sigmoid 37 | float FNN::sigmoid(float x) { 38 | return 1.0 / (1.0 + exp(-x)); 39 | } 40 | 41 | // Fungsi aktivasi: Tanh 42 | float FNN::tanh(float x) { 43 | return std::tanh(x); 44 | } 45 | 46 | // Fungsi aktivasi: Leaky ReLU 47 | std::function FNN::leakyRelu(float alpha) { 48 | return [alpha](float x) { return (x > 0) ? x : alpha * x; }; 49 | } 50 | 51 | // Fungsi aktivasi: ELU 52 | std::function FNN::elu(float alpha) { 53 | return [alpha](float x) { return (x > 0) ? x : alpha * (exp(x) - 1); }; 54 | } 55 | 56 | // Fungsi aktivasi: Softplus 57 | float FNN::softplus(float x) { 58 | return log(1 + exp(x)); 59 | } 60 | 61 | // Defuzzifikasi 62 | std::string FNN::defuzzify(float fuzzyOutput) { 63 | for (const auto& rule : fuzzyRules) { 64 | if (fuzzyOutput <= rule.second) { 65 | return rule.first; 66 | } 67 | } 68 | return "Undefined"; 69 | } 70 | 71 | // Compute Loss 72 | float FNN::computeLoss(const std::vector& predicted, const std::vector& expected) { 73 | float loss = 0.0f; 74 | for (size_t i = 0; i < predicted.size(); ++i) { 75 | loss += pow(predicted[i] - expected[i], 2); 76 | } 77 | return loss / predicted.size(); 78 | } 79 | 80 | // Train 81 | void FNN::train(const std::vector>& inputs, const std::vector& targets, int epochs, float learningRate) { 82 | for (int epoch = 0; epoch < epochs; ++epoch) { 83 | for (size_t i = 0; i < inputs.size(); ++i) { 84 | float hiddenSum = biases[0]; 85 | for (size_t j = 0; j < weights[0].size(); ++j) { 86 | hiddenSum += inputs[i][j] * weights[0][j]; 87 | } 88 | float hiddenOutput = activationFunction(hiddenSum); 89 | 90 | float outputSum = hiddenOutput * weights[1][0] + biases[1]; 91 | float output = activationFunction(outputSum); 92 | 93 | float outputError = fuzzyRules[targets[i]] - output; 94 | weights[1][0] += learningRate * outputError * hiddenOutput; 95 | biases[1] += 
learningRate * outputError; 96 | 97 | float hiddenError = outputError * weights[1][0]; 98 | for (size_t j = 0; j < weights[0].size(); ++j) { 99 | weights[0][j] += learningRate * hiddenError * inputs[i][j]; 100 | } 101 | biases[0] += learningRate * hiddenError; 102 | } 103 | } 104 | } 105 | 106 | // Predict: forward pass, then map the crisp output to a fuzzy label 107 | std::string FNN::predictFNN(const std::vector<float>& inputs) { 108 | float hiddenSum = biases[0]; 109 | for (size_t j = 0; j < weights[0].size(); ++j) { 110 | hiddenSum += inputs[j] * weights[0][j]; 111 | } 112 | float hiddenOutput = activationFunction(hiddenSum); 113 | 114 | float outputSum = hiddenOutput * weights[1][0] + biases[1]; 115 | float output = activationFunction(outputSum); 116 | 117 | return defuzzify(output); 118 | } 119 | // Accuracy evaluation 120 | float FNN::evaluateAccuracy(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) { 121 | int correctPredictions = 0; 122 | if (testInputs.empty()) { return 0.0f; } // Guard against division by zero on an empty test set 123 | for (size_t i = 0; i < testInputs.size(); ++i) { 124 | std::string predictedOutput = predictFNN(testInputs[i]); 125 | if (predictedOutput == expectedOutputs[i]) { 126 | correctPredictions++; 127 | } 128 | } 129 | 130 | float accuracy = (float)correctPredictions / testInputs.size(); 131 | return accuracy * 100.0f; // Result as a percentage 132 | } 133 | 134 | // Precision evaluation 135 | float FNN::evaluatePrecision(const std::vector<std::vector<float>>& testInputs, const std::vector<std::string>& expectedOutputs) { 136 | int truePositives = 0; 137 | int falsePositives = 0; 138 | 139 | for (size_t i = 0; i < testInputs.size(); ++i) { 140 | std::string predictedOutput = predictFNN(testInputs[i]); 141 | 142 | if (predictedOutput == expectedOutputs[i]) { 143 | truePositives++; 144 | } else if (fuzzyRules.find(predictedOutput) != fuzzyRules.end()) { 145 | falsePositives++; 146 | } 147 | } 148 | 149 | if (truePositives + falsePositives == 0) { 150 | return 0.0f; // Avoid division by zero 151 | } 152 | 153 | float precision = (float)truePositives / (truePositives + falsePositives); 154 | return 
precision * 100.0f; // Hasil dalam persen 155 | } 156 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FNN (Fuzzy Neural Network) Module Documentation 2 | 3 | ## Table of Contents 4 | 1. [Overview](#overview) 5 | 2. [Core Components](#core-components) 6 | 3. [Mathematical Foundation](#mathematical-foundation) 7 | 4. [Class Reference](#class-reference) 8 | 5. [Activation Functions](#activation-functions) 9 | 6. [Training Method](#training-methods) 10 | 7. [Evaluation Metrics](#evaluation-metrics) 11 | 8. [Implementation Guide](#implementation-guide) 12 | 9. [Example Usage](#example-usage) 13 | 14 | ## Overview 15 | The FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy. 16 | 17 | ## Core Components 18 | 19 | ### Class Structure 20 | ```cpp 21 | class FNN { 22 | private: 23 | std::vector> weights; // Layer weights 24 | std::vector biases; // Layer biases 25 | std::function activationFunction; // Activation function 26 | std::map fuzzyRules; // Fuzzy ruleset 27 | ... 28 | } 29 | ``` 30 | 31 | ### Key Features 32 | - Multi-layer neural network architecture 33 | - Customizable activation functions 34 | - Integrated fuzzy rule system 35 | - Gradient descent-based training 36 | - Comprehensive evaluation metrics 37 | 38 | ## Mathematical Foundation 39 | 40 | ### 1. 
Network Architecture 41 | 42 | #### Input Layer 43 | - Accepts normalized input vectors 44 | - Dimension: `inputSize` (user-defined) 45 | - Data type: `std::vector` 46 | 47 | #### Hidden Layer 48 | Computation formula: 49 | 50 | $$ 51 | h_j = f( \sum_{i=1}^{n} w_{ji} x_i + b_j) 52 | $$ 53 | 54 | Where: 55 | - $h_j$: Hidden layer neuron output 56 | - $w_{ji}$: Connection weight 57 | - $x_i$: Input value 58 | - $b_j$: Bias term 59 | - $f$: Activation function 60 | 61 | #### Output Layer 62 | Final computation: 63 | 64 | $$o = f( \sum_{j=1}^{m} w_j h_j + b_o)$$ 65 | 66 | Parameters: 67 | - $h_j$: Hidden layer outputs 68 | - $w_j$: Output weights 69 | - $b_o$: Output bias 70 | 71 | ### 2. Learning Process 72 | 73 | #### Loss Function (MSE) 74 | 75 | $$ 76 | L = \frac{1}{N} \sum_{i=1}^{N} (y_i - \hat{y}_i)^2 77 | $$ 78 | 79 | Components: 80 | - $y_i$: Expected output 81 | - $\hat{y}_i$: Predicted output 82 | - $N$: Sample size 83 | 84 | #### Weight Update Rule 85 | 86 | $$ 87 | w_{new} = w_{old} - \eta \frac{\partial L}{\partial w} 88 | $$ 89 | 90 | Where: 91 | - $\eta$: Learning rate 92 | - $\frac{\partial L}{\partial w}$: Loss gradient 93 | 94 | ## Class Reference 95 | 96 | ### Constructor 97 | ```cpp 98 | FNN(int inputSize = 3, float bias = 0.1, std::function activation = nullptr) 99 | ``` 100 | Parameters: 101 | - `inputSize`: Number of input neurons 102 | - `bias`: Initial bias value 103 | - `activation`: Activation function (defaults to sigmoid) 104 | 105 | ### Public Methods 106 | 107 | #### `setWeights` 108 | ```cpp 109 | void setWeights(const std::vector& newWeights) 110 | ``` 111 | Purpose: Sets network layer weights 112 | Parameters: 113 | - `newWeights`: Vector of weight values 114 | Validation: Checks dimension compatibility 115 | 116 | #### `setBiases` 117 | ```cpp 118 | void setBiases(const std::vector& newBiases) 119 | ``` 120 | Purpose: Sets layer biases 121 | Parameters: 122 | - `newBiases`: Vector of bias values 123 | Validation: Verifies vector size 
124 | 125 | #### `setFuzzyRules` 126 | ```cpp 127 | void setFuzzyRules(const std::map& rules) 128 | ``` 129 | Purpose: Defines fuzzy classification rules 130 | Parameters: 131 | - `rules`: Map of linguistic terms to numeric values 132 | 133 | ### Training Methods 134 | 135 | #### `train` 136 | ```cpp 137 | void train(const std::vector>& inputs, 138 | const std::vector& targets, 139 | int epochs = 100, 140 | float learningRate = 0.01) 141 | ``` 142 | Purpose: Trains the network 143 | Parameters: 144 | - `inputs`: Training data matrix 145 | - `targets`: Expected outputs 146 | - `epochs`: Training iterations 147 | - `learningRate`: Learning rate 148 | 149 | ## Activation Functions 150 | 151 | ### 1. Sigmoid 152 | 153 | $$ 154 | \sigma(x) = \frac{1}{1 + e^{-x}} 155 | $$ 156 | 157 | Implementation: 158 | ```cpp 159 | static float sigmoid(float x) { 160 | return 1.0 / (1.0 + exp(-x)); 161 | } 162 | ``` 163 | Use case: General classification tasks 164 | 165 | ### 2. Hyperbolic Tangent 166 | 167 | $$ 168 | \tanh(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}} 169 | $$ 170 | 171 | Implementation: 172 | ```cpp 173 | static float tanh(float x) { 174 | return std::tanh(x); 175 | } 176 | ``` 177 | Use case: Normalized data ranges 178 | 179 | ### 3. 
Leaky ReLU 180 | 181 | $$ 182 | f(x) = \begin{cases} x, & x > 0 \\ \alpha x, & x \leq 0 \end{cases} 183 | $$ 184 | 185 | Implementation: 186 | ```cpp 187 | static std::function leakyRelu(float alpha = 0.01) 188 | ``` 189 | Use case: Deep networks, preventing dying ReLU 190 | 191 | ## Evaluation Metrics 192 | 193 | ### Accuracy 194 | ```cpp 195 | float evaluateAccuracy(const std::vector>& testInputs, 196 | const std::vector& expectedOutputs) 197 | ``` 198 | Calculation: 199 | ``` 200 | accuracy = (correct_predictions / total_predictions) * 100 201 | ``` 202 | 203 | ### Precision 204 | ```cpp 205 | float evaluatePrecision(const std::vector>& testInputs, 206 | const std::vector& expectedOutputs) 207 | ``` 208 | Calculation: 209 | ``` 210 | precision = (true_positives / (true_positives + false_positives)) * 100 211 | ``` 212 | 213 | ## Implementation Guide 214 | 215 | ### Basic Setup 216 | ```cpp 217 | #include "fnn.h" 218 | 219 | FNN fnn(6); // 6 input neurons 220 | ``` 221 | 222 | ### Configuration 223 | ```cpp 224 | // Weight initialization 225 | fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6}); 226 | 227 | // Bias configuration 228 | fnn.setBiases({0.1, 0.2}); 229 | 230 | // Activation function selection 231 | fnn.setActivationFunction(FNN::sigmoid); 232 | 233 | // Fuzzy rule definition 234 | fnn.setFuzzyRules({ 235 | {"Not Suitable", 0.0}, 236 | {"Low", 0.2}, 237 | {"High", 1.0} 238 | }); 239 | ``` 240 | 241 | ### Training Configuration 242 | ```cpp 243 | // Training parameters 244 | int numEpochs = 1000; 245 | float learningRate = 0.01; 246 | 247 | // Training data format 248 | std::vector> trainingInputs = { 249 | {4.5, 2.8, 0.9, 3.7, 3.1, 7.9}, 250 | {1.2, 0.6, 0.3, 0.5, 0.2, 0.7} 251 | }; 252 | std::vector trainingTargets = {"High", "Low"}; 253 | ``` 254 | 255 | ## Example Usage 256 | 257 | ### Complete Arduino Implementation 258 | ```cpp 259 | #include "fnn.h" 260 | 261 | FNN fnn(6); 262 | 263 | void setup() { 264 | Serial.begin(9600); 265 | 266 | // 
Configuration 267 | fnn.setWeights({0.3, 0.5, 0.2, 0.4, 0.1, 0.6}); 268 | fnn.setBiases({0.1, 0.2}); 269 | fnn.setActivationFunction(FNN::sigmoid); 270 | 271 | // Fuzzy rules 272 | fnn.setFuzzyRules({ 273 | {"Not Suitable", 0.0}, 274 | {"Low", 0.2}, 275 | {"Very Low", 0.4}, 276 | {"Below Average", 0.6}, 277 | {"Above Average", 0.7}, 278 | {"High", 1.0}, 279 | {"Extreme", 1.1} 280 | }); 281 | 282 | // Training data 283 | std::vector> trainingInputs = { 284 | {4.5, 2.8, 0.9, 3.7, 3.1, 7.9}, 285 | {1.2, 0.6, 0.3, 0.5, 0.2, 0.7}, 286 | {0.4, 0.3, 0.2, 0.6, 0.5, 0.4} 287 | }; 288 | std::vector trainingTargets = { 289 | "High", "Low", "Not Suitable" 290 | }; 291 | 292 | // Training 293 | int numEpochs = 1000; 294 | float learningRate = 0.01; 295 | 296 | // Train and evaluate 297 | for (int epoch = 0; epoch < numEpochs; ++epoch) { 298 | fnn.train(trainingInputs, trainingTargets, 1, learningRate); 299 | 300 | if (epoch % 100 == 0) { 301 | float accuracy = fnn.evaluateAccuracy(trainingInputs, trainingTargets); 302 | Serial.print("Epoch: "); 303 | Serial.print(epoch); 304 | Serial.print(" Accuracy: "); 305 | Serial.println(accuracy); 306 | } 307 | } 308 | } 309 | 310 | void loop() { 311 | // Prediction example 312 | std::vector newInput = {4.5, 2.8, 0.9, 3.7, 3.1, 7.9}; 313 | String prediction = fnn.predictFNN(newInput).c_str(); 314 | Serial.println(prediction); 315 | delay(1000); 316 | } 317 | ``` 318 | 319 | ### Performance Optimization 320 | 1. Use fixed-point arithmetic where possible 321 | 2. Minimize dynamic memory allocation 322 | 3. Optimize matrix operations 323 | 4. Cache frequently used calculations 324 | 325 | ## Error Handling 326 | The module includes comprehensive error checking: 327 | 1. Input validation 328 | 2. Memory allocation verification 329 | 3. Dimension compatibility checks 330 | 4. Fuzzy rule consistency validation 331 | 332 | ## Contributing 333 | Contributions are welcome. Please follow the standard pull request process: 334 | 1. 
Fork the repository 335 | 2. Create your feature branch 336 | 3. Commit your changes 337 | 4. Push to the branch 338 | 5. Create a Pull Request 339 | 340 | ## Support 341 | For issues and feature requests, please create an issue in the repository. 342 | 343 | ## Author 344 | 345 | | No | Name | Address | 346 | |----|------|--------| 347 | | 1 | Galih Ridho Utomo | Perum Permata Blok CB 20 / 10 Keluruhan Gelam Jaya, Kecamatan Pasar Kemis, Kabupaten Tangerang, Provinsi Banten, Pasar Kemis, Tangerang | 348 | | 2 | Septi Ida Suryani | Sraten RT 05/ RW 04, Cluring, Banyuwangi, Jawa Timur, Cluring, Banyuwangi | 349 | | 3 | Deffrian Prayogo | Jl. Sreni Indah, RT 02/03, Bategede, Kec. Nalumsari, Kab. Jepara, Jawa Tengah, Nalumsari, Jepara | 350 | | 4 | Achmad Rouf | Sempulur, RT 04, RW 01, Kel. Sempulur, Kec. Karanggede, Kab. Boyolali, Karanggede, Boyolali | 351 | | 5 | Fionita Fahra Azzahra | Karang Asem, 01/06, Ketapang, Kec. Susukan, Kab. Semarang, Jawa Tengah, Susukan, Semarang | 352 | | 6 | Dendy Randa | Tanjung Uncang, Batu Aji, Kota Batam, Kepulauan Riau, Batu Aji, Batam | 353 | | 7 | Fahma Mutia Sari | Dusun Pancuran RT 01/RW 13, Desa Banyubiru, Kecamatan Banyubiru, Kab. Semarang, Jawa Tengah, Banyubiru, Semarang | 354 | | 8 | Dava Arya Danendra | Jl. Wonodri Joho III No. 1002B, Wonodri, Semarang Selatan, Semarang Selatan, Semarang | 355 | | 9 | Qotrunnada Fauziah Hasna | Ds. Ngeluk,RT/RW 01/01, Kec. Penawangan, Kab. Grobogan , Penawangan, Grobogan | 356 | | 10 | Tamara Rizky Senda | RT 09/RW 02, Kelurahan Kramat Jati, Kec. Kramat Jati, Jakarta Timur, Provinsi DKI Jakarta , Kramat Jati, Jakarta Timur | 357 | | 11 | Dika Saputra | Griya Asri Bahagia Blok F5 No 4 Kel. Bahagia Kec. Babelan Kab. 
Bekasi,Babelan,Bekasi | 358 | 359 | ## Cite 360 | Please cite this repository if you use it in a publication. 361 | 362 | ```bibtex 363 | @misc{fnn, 364 | author = {Utomo, Galih Ridho and Suryani, Septi Ida and Prayogo, Deffrian and Rouf, Achmad and Azzahra, Fionita Fahra and Randa, Dendy and Danendra, Dava Arya and Hasna, Qotrunnada Fauziah and Senda, Tamara Rizky and Saputra, Dika}, 365 | title = {FNN (Fuzzy Neural Network) module implements a hybrid intelligent system that combines neural networks with fuzzy logic principles. This implementation is specifically optimized for Arduino platforms, providing efficient computation while maintaining prediction accuracy}, 366 | year = {2025}, 367 | howpublished = {\url{https://github.com/4211421036/fnn}}, 368 | note = {GitHub repository}, 369 | } 370 | ``` 371 | --------------------------------------------------------------------------------