├── .github
│   ├── FUNDING.yml
│   └── workflows
│       └── ci.yml
├── .gitignore
├── LICENCE.md
├── Makefile
├── README.md
├── docs
│   ├── DesignGuide.md
│   ├── README.md
│   ├── contributing.md
│   ├── dark-mode.svg
│   ├── development_guidelines.md
│   ├── features.md
│   ├── img.svg
│   ├── index.md
│   ├── installation.md
│   ├── license.md
│   ├── light-mode.svg
│   ├── modules
│   │   ├── activations.md
│   │   ├── core.md
│   │   ├── layers.md
│   │   ├── loss_functions.md
│   │   ├── metrics.md
│   │   ├── optimizers.md
│   │   ├── preprocessing.md
│   │   └── regularizers.md
│   ├── testing.md
│   └── usage.md
├── examples
│   ├── nn_training_example.c
│   └── or_gate.c
├── include
│   ├── Activations
│   │   ├── elu.h
│   │   ├── gelu.h
│   │   ├── leaky_relu.h
│   │   ├── linear.h
│   │   ├── relu.h
│   │   ├── sigmoid.h
│   │   ├── softmax.h
│   │   └── tanh.h
│   ├── Core
│   │   ├── dataset.h
│   │   ├── error_codes.h
│   │   ├── logging.h
│   │   ├── memory_management.h
│   │   └── training.h
│   ├── Layers
│   │   ├── dense.h
│   │   ├── dropout.h
│   │   ├── flatten.h
│   │   ├── maxpooling.h
│   │   └── pooling.h
│   ├── Loss_Functions
│   │   ├── binary_cross_entropy_loss.h
│   │   ├── cosine_similarity_loss.h
│   │   ├── focal_loss.h
│   │   ├── huber_loss.h
│   │   ├── kld_loss.h
│   │   ├── log_cosh_loss.h
│   │   ├── mean_squared_error.h
│   │   ├── poisson_loss.h
│   │   ├── smooth_l1_loss.h
│   │   └── tversky_loss.h
│   ├── Metrics
│   │   ├── accuracy.h
│   │   ├── balanced_accuracy.h
│   │   ├── cohens_kappa.h
│   │   ├── f1_score.h
│   │   ├── iou.h
│   │   ├── mcc.h
│   │   ├── mean_absolute_error.h
│   │   ├── mean_absolute_percentage_error.h
│   │   ├── precision.h
│   │   ├── r2_score.h
│   │   ├── recall.h
│   │   ├── reduce_mean.h
│   │   ├── root_mean_squared_error.h
│   │   └── specificity.h
│   ├── Optimizers
│   │   ├── adam.h
│   │   ├── rmsprop.h
│   │   └── sgd.h
│   └── Preprocessing
│       ├── label_encoder.h
│       ├── min_max_scaler.h
│       ├── one_hot_encoder.h
│       └── standard_scaler.h
├── main.c
├── mkdocs.yml
├── src
│   ├── Activations
│   │   ├── elu.c
│   │   ├── gelu.c
│   │   ├── leaky_relu.c
│   │   ├── linear.c
│   │   ├── relu.c
│   │   ├── sigmoid.c
│   │   ├── softmax.c
│   │   └── tanh.c
│   ├── Core
│   │   ├── dataset.c
│   │   ├── logging.c
│   │   ├── memory_management.c
│   │   └── training.c
│   ├── Layers
│   │   ├── dense.c
│   │   ├── dropout.c
│   │   ├── flatten.c
│   │   ├── maxpooling.c
│   │   └── pooling.c
│   ├── Loss_Functions
│   │   ├── binary_cross_entropy_loss.c
│   │   ├── cosine_similarity_loss.c
│   │   ├── focal_loss.c
│   │   ├── huber_loss.c
│   │   ├── kld_loss.c
│   │   ├── log_cosh_loss.c
│   │   ├── mean_squared_error.c
│   │   ├── poisson_loss.c
│   │   ├── smooth_l1_loss.c
│   │   └── tversky_loss.c
│   ├── Metrics
│   │   ├── accuracy.c
│   │   ├── balanced_accuracy.c
│   │   ├── cohens_kappa.c
│   │   ├── f1_score.c
│   │   ├── iou.c
│   │   ├── mcc.c
│   │   ├── mean_absolute_error.c
│   │   ├── mean_absolute_percentage_error.c
│   │   ├── precision.c
│   │   ├── r2_score.c
│   │   ├── recall.c
│   │   ├── reduce_mean.c
│   │   ├── root_mean_squared_error.c
│   │   └── specificity.c
│   ├── Optimizers
│   │   ├── adam.c
│   │   ├── rmsprop.c
│   │   └── sgd.c
│   └── Preprocessing
│       ├── label_encoder.c
│       ├── min_max_scaler.c
│       ├── one_hot_encoder.c
│       └── standard_scaler.c
└── test
    ├── Activations
    │   ├── test_elu.c
    │   ├── test_gelu.c
    │   ├── test_leaky_relu.c
    │   ├── test_linear.c
    │   ├── test_relu.c
    │   ├── test_sigmoid.c
    │   ├── test_softmax.c
    │   └── test_tanh.c
    ├── Core
    │   ├── test_logging.c
    │   ├── test_memory_management.c
    │   └── test_training.c
    ├── Layers
    │   ├── test_dense.c
    │   ├── test_dropout.c
    │   ├── test_flatten.c
    │   ├── test_maxpooling.c
    │   └── test_pooling.c
    ├── Loss_Functions
    │   ├── test_binary_cross_entropy_loss.c
    │   ├── test_focal_loss.c
    │   └── test_mean_squared_error.c
    ├── Metrics
    │   ├── test_mean_absolute_error.c
    │   ├── test_mean_absolute_percentage_error.c
    │   ├── test_reduce_mean.c
    │   └── test_root_mean_squared_error.c
    ├── Optimizers
    │   ├── test_adam.c
    │   ├── test_rmsprop.c
    │   └── test_sgd.c
    └── Preprocessing
        ├── test_label_encoder.c
        ├── test_min_max_scaler.c
        ├── test_one_hot_encoder.c
        └── test_standard_scaler.c
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [jaywyawhare]
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 | on:
3 | push:
4 | branches:
5 | - master
6 | permissions:
7 | contents: write
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v3
13 | - uses: actions/setup-python@v4
14 | with:
15 | python-version: 3.x
16 | - uses: actions/cache@v3
17 | with:
18 | key: ${{ github.ref }}
19 | path: .cache
20 | - run: pip install mkdocs
21 | - run: pip install mkdocs-material
22 | - run: pip install pillow cairosvg
23 | - run: mkdocs gh-deploy --force
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Prerequisites
2 | *.d
3 |
4 | # Object files
5 | *.o
6 | *.ko
7 | *.obj
8 | *.elf
9 |
10 | # Linker output
11 | *.ilk
12 | *.map
13 | *.exp
14 |
15 | # Precompiled Headers
16 | *.gch
17 | *.pch
18 |
19 | # Libraries
20 | *.lib
21 | *.a
22 | *.la
23 | *.lo
24 |
25 | # Shared objects (inc. Windows DLLs)
26 | *.dll
27 | *.so
28 | *.so.*
29 | *.dylib
30 |
31 | # Executables
32 | *.exe
33 | *.out
34 | *.app
35 | *.i*86
36 | *.x86_64
37 | *.hex
38 |
39 | # Binary files
40 | *.bin
41 |
42 | # Debug files
43 | *.dSYM/
44 | *.su
45 | *.idb
46 | *.pdb
47 |
48 | # VSCode
49 | .VSCode
50 | .vscode/
51 |
52 | # Project-specific Artefacts
53 | examples_bin/**
54 | test_bin/**
55 | bin/**
56 | lib/**
--------------------------------------------------------------------------------
/LICENCE.md:
--------------------------------------------------------------------------------
1 | # The "Don't Be a Jerk" Non-Commercial Care-Free License (DBaJ-NC-CFL)
2 |
3 | **Version 0.69, January 1st 2020**
4 |
5 | *In the World of Schrödinger's Jerks: Where Sarcasm, Fun, and "Don't Be a Jerk" Collide*
6 |
7 | Welcome to the enigmatic realm of software licenses, where we acknowledge that we're all potential Schrödinger's Jerks. In this quirky quantum playground, the rules are as elusive as a cat in a box – simultaneously serious and whimsical. Are you ready for a rollercoaster of paradoxical non-jerkiness? Let's dive in!
8 |
9 | **Terms and Conditions**
10 |
11 | 1. **Golden Rule**: The cornerstone of this license – don't be a jerk. It's not rocket science. Fail to follow this fundamental principle, and the universe will cook up an unspecified punishment just for you. Severity? I get to play judge, jury, and cosmic executioner.
12 |
13 | 2. **"As Is" Clause**: This software comes "as is." No warranties, no guarantees, nada. Take it or leave it.
14 |
15 | 3. **No Sales Pitch**: Selling this software? Really? You're probably being a jerk. And we don't like jerks.
16 |
17 | 4. **Jerk-O-Meter**: If you're wondering whether you're being a jerk, guess what? You probably are.
18 |
19 | 5. **Jerk Behavior Alert**: If using this software hurts someone or something, guess what? Yep, you're being a jerk.
20 |
21 | 6. **Almost-Jerk Exception**: If you use this software for good, you might not be a jerk. But hold your horses, you might still be one. Check rules 1 through 5 to be sure.
22 |
23 | 7. **Jerk-o-Tron**: If you're unsure if you're being a jerk, you're likely being a jerk.
24 |
25 | 8. **Jerk-safety Net**: Still not sure? Yep, still a jerk.
26 |
27 | 9. **You're Trapped**: By reading this, you're locked into these terms, jerk or not.
28 |
29 | 10. **Flexi-Rules**: I can change these rules whenever, for whatever reason. If you keep using the software after a change, well, you can probably guess the verdict.
30 |
31 | 11. **Jerk Helpline**: Got jerk-related questions? Consult rules 1-10 for enlightenment.
32 |
33 | 12. **License Curiosity**: Wondering about this license? Surprise! You're probably being a jerk.
34 |
35 | 13. **Support the Creator**: If you read this far, you're obligated to sponsor the creator in a way that makes you not a mega jerk. Remember, mega jerks are never cool.
36 |
37 | *You are bound to this license, whether you read it or not. Remember, the universe has its eyes on you.*
38 |
39 | *RunTimeJerks*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # C-ML
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | ---
11 |
12 | C-ML is a lightweight machine learning library written in C. It provides implementations for various neural network components.
13 |
14 | ## Features
15 |
16 | - **Layers**: Dense, Dropout, Flatten, Pooling, Max-Pooling
17 | - **Activations**: ReLU, Sigmoid, Tanh, Softmax, ELU, Leaky ReLU, Linear, GELU
18 | - **Loss Functions**: Mean Squared Error, Binary Cross-Entropy, Focal Loss, etc.
19 | - **Metrics**: Accuracy, Precision, Recall, F1 Score, etc.
20 | - **Optimizers**: SGD, Adam, RMSprop
21 | - **Preprocessing**: Label Encoding, One-Hot Encoding, Standard Scaler, Min-Max Scaler
22 | - **Regularizers**: L1, L2, Combined L1-L2
23 |
24 | ## Prerequisites
25 |
26 | - GCC (GNU Compiler Collection)
27 | - `make` build tool
28 |
29 |
30 | ## Build Instructions
31 |
32 | 1. Clone the repository:
33 | ```bash
34 | git clone https://github.com/jaywyawhare/C-ML.git
35 | cd C-ML
36 | ```
37 |
38 | 2. Build the project:
39 | ```bash
40 | make
41 | ```
42 |
43 | 3. Run the example program:
44 | ```bash
45 | ./bin/main
46 | ```
47 |
48 | 4. Run the tests:
49 | ```bash
50 | make test
51 | ```
52 |
53 | 5. Run the examples:
54 | ```bash
55 | make examples
56 | ```
57 |
58 | 6. Clean the build artifacts:
59 | ```bash
60 | make clean
61 | ```
62 |
63 |
64 | ## Example Usage
65 |
66 | The `main.c` file demonstrates how to use the library to create a simple neural network with a dense layer, ReLU activation, and mean squared error loss.
67 |
68 | ```c
69 | #include <stdio.h>
70 | #include <stdlib.h>
71 | #include "include/Core/training.h"
72 | #include "include/Core/dataset.h"
73 |
74 | int main()
75 | {
76 | NeuralNetwork *network = create_neural_network(2);
77 | build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
78 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
79 | model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
80 | model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
81 |
82 | float X_data[4][2] = {{0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}};
83 | float y_data[4][1] = {{0.0f}, {1.0f}, {1.0f}, {1.0f}};
84 |
85 | Dataset *dataset = dataset_create();
86 | dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
87 |
88 | summary(network);
89 |
90 | train_network(network, dataset, 30);
91 | test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
92 |
93 | dataset_free(dataset);
94 | free_neural_network(network);
95 |
96 | return 0;
97 | }
98 | ```
99 |
100 |
101 | ## Contributing
102 |
103 | Contributions are welcome! Feel free to open issues or submit pull requests.
104 |
105 |
106 | ## License
107 |
108 | This project is licensed under the DBaJ-NC-CFL [License](./LICENCE.md).
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Contributions are welcome! Please follow these steps:
4 |
5 | 1. Fork the repository.
6 | 2. Create a new branch for your feature or bug fix.
7 | 3. Commit your changes with clear messages.
8 | 4. Submit a pull request.
9 |
--------------------------------------------------------------------------------
/docs/development_guidelines.md:
--------------------------------------------------------------------------------
1 | # Development Guidelines
2 |
3 | ## Code Style
4 | - Follow consistent indentation and naming conventions.
5 | - Use meaningful variable names.
6 | - Add comments to explain complex logic.
7 |
8 | ## Error Handling
9 | - Check for null pointers and invalid inputs.
10 | - Use the `LOG_ERROR` macro to log errors.
11 | - Return a `CM_Error` status code if the error is recoverable.
12 | - Exit gracefully if the error is not recoverable (see the sketch at the end of this page).
13 |
14 | ## Memory Management
15 | - Allocate memory dynamically where necessary.
16 | - Free allocated memory to prevent leaks.
17 |
18 | ## Testing
19 | - Write unit tests for all functions.
20 | - Use assertions to validate expected behavior.
21 |
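22 | ## Example: Error Handling and Memory Management
23 |
24 | A minimal sketch of the pattern described above. The `scale_copy` helper is
25 | hypothetical; the logging, error-code, and memory functions are the ones
26 | declared under `include/Core/`.
27 |
28 | ```c
29 | #include <stddef.h>
30 | #include "include/Core/error_codes.h"
31 | #include "include/Core/logging.h"
32 | #include "include/Core/memory_management.h"
33 |
34 | /* Hypothetical helper: writes a scaled copy of `input` into a new buffer. */
35 | CM_Error scale_copy(const float *input, float **output, int size, float factor)
36 | {
37 |     if (input == NULL || output == NULL || size <= 0)
38 |     {
39 |         LOG_ERROR("Invalid input: null pointer or non-positive size");
40 |         return CM_INVALID_INPUT_ERROR;
41 |     }
42 |
43 |     /* cm_safe_malloc logs the failure site and exits on allocation failure. */
44 |     *output = cm_safe_malloc(size * sizeof(float), __FILE__, __LINE__);
45 |     for (int i = 0; i < size; i++)
46 |     {
47 |         (*output)[i] = input[i] * factor;
48 |     }
49 |     return CM_SUCCESS;
50 | }
51 | ```
52 |
53 | The caller releases the buffer with `cm_safe_free((void **)&buffer)`, which
54 | also resets the pointer to `NULL`.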
--------------------------------------------------------------------------------
/docs/features.md:
--------------------------------------------------------------------------------
1 | # Features
2 |
3 | - 🧩 **Layers**: Dense, Dropout, Flatten, Pooling, Max-Pooling
4 | - ⚡ **Activations**: ReLU, Sigmoid, Tanh, Softmax, ELU, Leaky ReLU, Linear, GeLU
5 | - 📉 **Loss Functions**: Mean Squared Error, Binary Cross-Entropy, Focal Loss, etc.
6 | - 🚀 **Optimizers**: SGD, Adam, RMSprop
7 | - 🔧 **Preprocessing**: Label Encoding, One-Hot Encoding, Standard Scaler, Min-Max Scaler
8 | - 🛡️ **Regularizers**: L1, L2, Combined L1-L2
9 | - ✅ **Test Coverage**: Comprehensive unit tests for all modules.
10 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Welcome to C-ML
2 |
3 | C-ML is a lightweight and modular machine learning library written in C. It is designed for educational purposes and lightweight applications, providing essential components for building and training neural networks.
4 |
5 | ## Why Choose C-ML?
6 |
7 | - **Lightweight**: Minimal dependencies and optimized for performance.
8 | - **Modular**: Use only the components you need.
9 | - **Educational**: Learn the fundamentals of machine learning by exploring the source code.
10 |
11 | ## Get Started
12 |
13 | - [Installation Guide](installation.md)
14 | - [Features Overview](features.md)
15 | - [Usage Examples](usage.md)
16 |
17 | ## Contribute
18 |
19 | We welcome contributions! Check out the [Contributing Guidelines](contributing.md) to get started.
20 |
21 | > **Tip**: Use the navigation bar on the left to explore the documentation.
22 |
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | ## Prerequisites
4 |
5 | - GCC (GNU Compiler Collection)
6 | - `make` build tool
7 | - Supported Platforms: Linux, macOS, Windows (via WSL)
8 |
9 | ## Steps
10 |
11 | 1. Clone the repository:
12 | ```bash
13 | git clone https://github.com/jaywyawhare/C-ML.git
14 | cd C-ML
15 | ```
16 |
17 | 2. Build the project:
18 | ```bash
19 | make
20 | ```
21 |
22 | 3. Run the example program:
23 | ```bash
24 | ./bin/main
25 | ```
26 |
27 | 4. Run all tests:
28 | ```bash
29 | make test
30 | ```
31 |
32 | Run specific tests:
33 | ```bash
34 | make test TEST_SRCS="test/Layers/test_dense.c"
35 | ```
36 |
37 |
38 | > **Note**: Ensure that GCC and `make` are installed and available in your system's PATH.
39 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | This project is licensed under the DBaJ-NC-CFL License. See the [LICENSE](https://github.com/jaywyawhare/C-ML/blob/master/LICENCE.md) file for details.
4 |
--------------------------------------------------------------------------------
/docs/modules/activations.md:
--------------------------------------------------------------------------------
1 | # Activations
2 |
3 | ## ReLU
4 | - **Description**: Rectified Linear Unit activation function.
5 | - **Function**: `relu(float x)`
6 | - **File**: [`relu.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/relu.c)
7 |
8 | ## Sigmoid
9 | - **Description**: Sigmoid activation function.
10 | - **Function**: `sigmoid(float x)`
11 | - **File**: [`sigmoid.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/sigmoid.c)
12 |
13 | ## Tanh
14 | - **Description**: Hyperbolic tangent activation function.
15 | - **Function**: `tanH(float x)`
16 | - **File**: [`tanh.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/tanh.c)
17 |
18 | ## Softmax
19 | - **Description**: Converts logits into probabilities.
20 | - **Function**: `softmax(float *z, int n)`
21 | - **File**: [`softmax.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/softmax.c)
22 |
23 | ## ELU
24 | - **Description**: Exponential Linear Unit activation function.
25 | - **Function**: `elu(float x, float alpha)`
26 | - **File**: [`elu.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/elu.c)
27 |
28 | ## Leaky ReLU
29 | - **Description**: Leaky version of ReLU to allow small gradients for negative inputs.
30 | - **Function**: `leaky_relu(float x)`
31 | - **File**: [`leaky_relu.c`](https://github.com/jaywyawhare/C-ML/blob/master/src/Activations/leaky_relu.c)
32 |
33 | ## Linear
34 | - **Description**: Linear activation function (identity function).
35 | - **Function**: `linear(float x)`
36 | - **File**: [`linear.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/linear.c)
37 |
38 | ## GELU
39 | - **Description**: Gaussian Error Linear Unit activation function.
40 | - **Function**: `gelu(float x)`
41 | - **File**: [`gelu.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Activations/gelu.c)
42 |
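43 | ## Usage Sketch
44 |
45 | A minimal sketch of calling the activations above (header paths follow the
46 | repository layout; input values are illustrative):
47 |
48 | ```c
49 | #include <stdio.h>
50 | #include "include/Activations/relu.h"
51 | #include "include/Activations/sigmoid.h"
52 | #include "include/Activations/softmax.h"
53 |
54 | int main(void)
55 | {
56 |     printf("relu(-0.5) = %f\n", relu(-0.5f));   /* clamps negatives to 0 */
57 |     printf("sigmoid(0) = %f\n", sigmoid(0.0f)); /* 0.5 */
58 |
59 |     float logits[3] = {1.0f, 2.0f, 3.0f};
60 |     float *probs = softmax(logits, 3); /* heap-allocated result */
61 |     for (int i = 0; i < 3; i++)
62 |     {
63 |         printf("p[%d] = %f\n", i, probs[i]);
64 |     }
65 |     free_softmax(&probs); /* frees the buffer and NULLs the pointer */
66 |     return 0;
67 | }
68 | ```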
--------------------------------------------------------------------------------
/docs/modules/core.md:
--------------------------------------------------------------------------------
1 | # Core Modules
2 |
3 | This section documents the core modules of the C-ML library, which provide essential functionalities for building and training neural networks.
4 |
5 | ## Training Module
6 |
7 | - **Description**: Provides functions for creating, building, training, and evaluating neural networks.
8 | - **Functions**:
9 | - `NeuralNetwork *create_neural_network(int input_size)`: Creates a new neural network.
10 | - `CM_Error build_network(NeuralNetwork *network, OptimizerType optimizer_type, float learning_rate, int loss_function, float l1_lambda, float l2_lambda)`: Builds the neural network by setting the optimizer, loss function, and regularization parameters.
11 | - `CM_Error add_layer(NeuralNetwork *network, LayerConfig config)`: Adds a layer to the neural network.
12 | - `CM_Error model_add(NeuralNetwork *network, LayerType type, ActivationType activation, int input_size, int output_size, float rate, int kernel_size, int stride)`: Adds a layer to the neural network using a simplified interface.
13 | - `CM_Error forward_pass(NeuralNetwork *network, float *input, float *output, int input_size, int output_size, int is_training)`: Performs a forward pass through the network.
14 | - `float calculate_loss(float *predicted, float *actual, int size, LossType loss_type)`: Calculates the loss between predicted and actual values.
15 | - `void calculate_loss_gradient(float *predicted, float *actual, float *gradient, int size, LossType loss_type)`: Calculates the gradient of the loss function.
16 | - `CM_Error train_network(NeuralNetwork *network, float **X_train, float **y_train, int num_samples, int input_size, int output_size, int batch_size, int epochs)`: Trains the neural network.
17 | - `CM_Error evaluate_network(NeuralNetwork *network, float **X_test, float **y_test, int num_samples, int input_size, int output_size, int *metrics, int num_metrics, float *results)`: Evaluates the neural network on a given dataset.
18 | - `CM_Error test_network(NeuralNetwork *network, float **X_test, float **y_test, int num_samples, int input_size, int output_size, int *metrics, int num_metrics, float *results)`: Tests the neural network (alias for evaluate_network).
19 | - `CM_Error free_neural_network(NeuralNetwork *network)`: Frees memory allocated for the neural network.
20 | - `void summary(NeuralNetwork *network)`: Prints a summary of the neural network architecture.
21 | - **File**: [`training.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Core/training.c)
22 |
23 | ## Memory Management Module
24 |
25 | - **Description**: Provides safe memory allocation and deallocation functions.
26 | - **Functions**:
27 | - `void *cm_safe_malloc(size_t size, const char *file, int line)`: Allocates memory safely and logs the file and line number in case of failure.
28 | - `void cm_safe_free(void **ptr)`: Frees allocated memory safely and sets the pointer to NULL.
29 | - **File**: [`memory_management.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Core/memory_management.c)
30 |
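31 | ## Usage Sketch: Memory Management
32 |
33 | A minimal sketch of the safe allocation pattern (buffer size and contents are
34 | illustrative):
35 |
36 | ```c
37 | #include <stddef.h>
38 | #include "include/Core/memory_management.h"
39 |
40 | int main(void)
41 | {
42 |     /* Allocates 16 floats, or logs the failure site and exits. */
43 |     float *buffer = cm_safe_malloc(16 * sizeof(float), __FILE__, __LINE__);
44 |
45 |     for (int i = 0; i < 16; i++)
46 |     {
47 |         buffer[i] = 0.0f;
48 |     }
49 |
50 |     /* Frees the memory and sets `buffer` to NULL. */
51 |     cm_safe_free((void **)&buffer);
52 |     return 0;
53 | }
54 | ```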
--------------------------------------------------------------------------------
/docs/modules/layers.md:
--------------------------------------------------------------------------------
1 | # Layers
2 |
3 | ## Dense Layer
4 | - **Description**: Fully connected layer where each input is connected to each output.
5 | - **Function**:
6 | - `initialize_dense(DenseLayer *layer, int input_size, int output_size)`
7 | - `forward_dense(DenseLayer *layer, float *input, float *output)`
8 | - `backward_dense(DenseLayer *layer, float *input, float *output, float *d_output, float *d_input, float *d_weights, float *d_biases)`
9 | - `update_dense(DenseLayer *layer, float *d_weights, float *d_biases, float learning_rate)`
10 | - `free_dense(DenseLayer *layer)`
11 | - **File**: [`dense.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Layers/dense.c)
12 |
13 | ## Dropout Layer
14 | - **Description**: Randomly sets a fraction of input units to zero during training to prevent overfitting.
15 | - **Function**:
16 | - `initialize_dropout(DropoutLayer *layer, float dropout_rate)`
17 | - `forward_dropout(DropoutLayer *layer, float *input, float *output, int size)`
18 | - `backward_dropout(DropoutLayer *layer, float *input, float *output, float *d_output, float *d_input, int size)`
19 | - **File**: [`dropout.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Layers/dropout.c)
20 |
21 | ## Flatten Layer
22 | - **Description**: Flattens the input without affecting the batch size.
23 | - **Function**:
24 | - `initializeFlatten(FlattenLayer *layer, int input_size)`
25 | - `forwardFlatten(FlattenLayer *layer, float *input, float *output)`
26 | - `backwardFlatten(FlattenLayer *layer, float *input, float *output, float *d_output, float *d_input)`
27 | - `freeFlatten(FlattenLayer *layer)`
28 | - **File**: [`flatten.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Layers/flatten.c)
29 |
30 | ## Pooling Layer
31 | - **Description**: Reduces the spatial size of the input volume.
32 | - **Function**:
33 | - `initialize_pooling(PoolingLayer *layer, int kernel_size, int stride)`
34 | - `compute_pooling_output_size(int input_size, int kernel_size, int stride)`
35 | - `forward_pooling(PoolingLayer *layer, const float *input, float *output, int input_size)`
36 | - `free_pooling(PoolingLayer *layer)`
37 | - **File**: [`pooling.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Layers/pooling.c)
38 |
39 | ## Max-Pooling Layer
40 | - **Description**: Applies max pooling operation to the input.
41 | - **Function**:
42 | - `initialize_maxpooling(MaxPoolingLayer *layer, int kernel_size, int stride)`
43 | - `compute_maxpooling_output_size(int input_size, int kernel_size, int stride)`
44 | - `forward_maxpooling(MaxPoolingLayer *layer, const float *input, float *output, int input_size)`
45 | - `free_maxpooling(MaxPoolingLayer *layer)`
46 | - **File**: [`maxpooling.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Layers/maxpooling.c)
47 |
48 |
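49 | ## Usage Sketch: Dense Layer
50 |
51 | A minimal forward-pass sketch using the `DenseLayer` functions above (layer
52 | sizes and inputs are illustrative; the zero-initializer mirrors the one used
53 | in `examples/or_gate.c`):
54 |
55 | ```c
56 | #include <stdio.h>
57 | #include "include/Layers/dense.h"
58 |
59 | int main(void)
60 | {
61 |     DenseLayer layer = {NULL, NULL, 0, 0};
62 |     initialize_dense(&layer, 2, 3); /* 2 inputs -> 3 outputs */
63 |
64 |     float input[2] = {0.5f, -1.0f};
65 |     float output[3];
66 |     forward_dense(&layer, input, output);
67 |
68 |     for (int i = 0; i < 3; i++)
69 |     {
70 |         printf("output[%d] = %f\n", i, output[i]);
71 |     }
72 |
73 |     free_dense(&layer);
74 |     return 0;
75 | }
76 | ```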
--------------------------------------------------------------------------------
/docs/modules/loss_functions.md:
--------------------------------------------------------------------------------
1 | # Loss Functions
2 |
3 | ## Mean Squared Error
4 | - **Description**: Measures the average squared difference between predictions and actual values.
5 | - **Function**: `mean_squared_error(float *y, float *yHat, int n)`
6 | - **File**: [`mean_squared_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/mean_squared_error.c)
7 |
8 | ## Binary Cross-Entropy
9 | - **Description**: Loss function for binary classification tasks.
10 | - **Function**: `binary_cross_entropy_loss(float *yHat, float *y, int size)`
11 | - **File**: [`binary_cross_entropy_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/binary_cross_entropy_loss.c)
12 |
13 | ## Focal Loss
14 | - **Description**: Focuses on hard-to-classify examples by down-weighting easy examples.
15 | - **Function**: `focal_loss(float *y, float *yHat, int n, float gamma)`
16 | - **File**: [`focal_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/focal_loss.c)
17 |
18 | ## Mean Absolute Error
19 | - **Description**: Measures the average absolute difference between predictions and actual values.
20 | - **Function**: `mean_absolute_error(float *y, float *yHat, int n)`
21 | - **File**: [`mean_absolute_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/mean_absolute_error.c)
22 |
23 | ## Mean Absolute Percentage Error
24 | - **Description**: Measures the percentage difference between predictions and actual values.
25 | - **Function**: `mean_absolute_percentage_error(float *y, float *yHat, int n)`
26 | - **File**: [`mean_absolute_percentage_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/mean_absolute_percentage_error.c)
27 |
28 | ## Root Mean Squared Error
29 | - **Description**: Square root of the mean squared error.
30 | - **Function**: `root_mean_squared_error(float *y, float *yHat, int n)`
31 | - **File**: [`root_mean_squared_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/root_mean_squared_error.c)
32 |
33 | ## Reduce Mean
34 | - **Description**: Computes the mean of an array of values.
35 | - **Function**: `reduce_mean(float *loss, int size)`
36 | - **File**: [`reduce_mean.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/reduce_mean.c)
37 |
38 | ## Huber Loss
39 | - **Description**: Huber Loss function.
40 | - **Function**: `huber_loss(float *y, float *yHat, int n)`
41 | - **File**: [`huber_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/huber_loss.c)
42 |
43 | ## KLD Loss
44 | - **Description**: Kullback-Leibler Divergence Loss function.
45 | - **Function**: `kld_loss(float *p, float *q, int n)`
46 | - **File**: [`kld_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/kld_loss.c)
47 |
48 | ## Log-Cosh Loss
49 | - **Description**: Log-Cosh Loss function.
50 | - **Function**: `log_cosh_loss(float *y, float *yHat, int n)`
51 | - **File**: [`log_cosh_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/log_cosh_loss.c)
52 |
53 | ## Poisson Loss
54 | - **Description**: Poisson Loss function.
55 | - **Function**: `poisson_loss(float *y, float *yHat, int n)`
56 | - **File**: [`poisson_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/poisson_loss.c)
57 |
58 | ## Smooth L1 Loss
59 | - **Description**: Smooth L1 Loss function.
60 | - **Function**: `smooth_l1_loss(float *y, float *yHat, int n)`
61 | - **File**: [`smooth_l1_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/smooth_l1_loss.c)
62 |
63 | ## Tversky Loss
64 | - **Description**: Tversky Loss function.
65 | - **Function**: `tversky_loss(float *y, float *yHat, int n)`
66 | - **File**: [`tversky_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/tversky_loss.c)
67 |
68 | ## Cosine Similarity Loss
69 | - **Description**: Cosine Similarity Loss function.
70 | - **Function**: `cosine_similarity_loss(float *y, float *yHat, int n)`
71 | - **File**: [`cosine_similarity_loss.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Loss_Functions/cosine_similarity_loss.c)
72 |
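73 | ## Usage Sketch
74 |
75 | A minimal sketch comparing two of the losses above on the same vectors (values
76 | are illustrative):
77 |
78 | ```c
79 | #include <stdio.h>
80 | #include "include/Loss_Functions/mean_squared_error.h"
81 | #include "include/Loss_Functions/binary_cross_entropy_loss.h"
82 |
83 | int main(void)
84 | {
85 |     float y[4] = {0.0f, 1.0f, 1.0f, 0.0f};    /* targets */
86 |     float yHat[4] = {0.1f, 0.9f, 0.8f, 0.2f}; /* predictions */
87 |
88 |     printf("MSE: %f\n", mean_squared_error(y, yHat, 4));
89 |     printf("BCE: %f\n", binary_cross_entropy_loss(yHat, y, 4));
90 |     return 0;
91 | }
92 | ```
93 |
94 | Note the argument order: `mean_squared_error` takes `(y, yHat, n)` while
95 | `binary_cross_entropy_loss` takes `(yHat, y, size)`.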
--------------------------------------------------------------------------------
/docs/modules/metrics.md:
--------------------------------------------------------------------------------
1 | # Metrics
2 |
3 | ## Accuracy
4 | - **Description**: Calculates the accuracy of the model.
5 | - **Function**: `accuracy(float *y, float *yHat, int n)`
6 | - **File**: [`accuracy.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/accuracy.c)
7 |
8 | ---
9 |
10 | ## Balanced Accuracy
11 | - **Description**: Calculates the balanced accuracy of the model.
12 | - **Function**: `balanced_accuracy(float *y, float *yHat, int n)`
13 | - **File**: [`balanced_accuracy.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/balanced_accuracy.c)
14 |
15 | ---
16 |
17 | ## Cohen's Kappa
18 | - **Description**: Calculates Cohen's Kappa coefficient.
19 | - **Function**: `cohens_kappa(float *y, float *yHat, int n)`
20 | - **File**: [`cohens_kappa.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/cohens_kappa.c)
21 |
22 | ---
23 |
24 | ## F1 Score
25 | - **Description**: Calculates the F1 score of the model.
26 | - **Function**: `f1_score(float *y, float *yHat, int n)`
27 | - **File**: [`f1_score.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/f1_score.c)
28 |
29 | ---
30 |
31 | ## IOU (Intersection over Union)
32 | - **Description**: Calculates the Intersection over Union.
33 | - **Function**: `iou(float *y, float *yHat, int n)`
34 | - **File**: [`iou.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/iou.c)
35 |
36 | ---
37 |
38 | ## MCC (Matthews Correlation Coefficient)
39 | - **Description**: Calculates the Matthews Correlation Coefficient.
40 | - **Function**: `mcc(float *y, float *yHat, int n)`
41 | - **File**: [`mcc.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/mcc.c)
42 |
43 | ---
44 |
45 | ## Mean Absolute Error
46 | - **Description**: Calculates the Mean Absolute Error.
47 | - **Function**: `mean_absolute_error(float *y, float *yHat, int n)`
48 | - **File**: [`mean_absolute_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/mean_absolute_error.c)
49 |
50 | ---
51 |
52 | ## Mean Absolute Percentage Error
53 | - **Description**: Calculates the Mean Absolute Percentage Error.
54 | - **Function**: `mean_absolute_percentage_error(float *y, float *yHat, int n)`
55 | - **File**: [`mean_absolute_percentage_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/mean_absolute_percentage_error.c)
56 |
57 | ---
58 |
59 | ## Precision
60 | - **Description**: Calculates the precision of the model.
61 | - **Function**: `precision(float *y, float *yHat, int n)`
62 | - **File**: [`precision.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/precision.c)
63 |
64 | ---
65 |
66 | ## R2 Score
67 | - **Description**: Calculates the R2 score (coefficient of determination).
68 | - **Function**: `r2_score(float *y_true, float *y_pred, int size)`
69 | - **File**: [`r2_score.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/r2_score.c)
70 |
71 | ---
72 |
73 | ## Recall
74 | - **Description**: Calculates the recall of the model.
75 | - **Function**: `recall(float *y, float *yHat, int n)`
76 | - **File**: [`recall.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/recall.c)
77 |
78 | ---
79 |
80 | ## Root Mean Squared Error
81 | - **Description**: Calculates the Root Mean Squared Error.
82 | - **Function**: `root_mean_squared_error(float *y, float *yHat, int n)`
83 | - **File**: [`root_mean_squared_error.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/root_mean_squared_error.c)
84 |
85 | ---
86 |
87 | ## Specificity
88 | - **Description**: Calculates the Specificity.
89 | - **Function**: `specificity(float *y, float *yHat, int n)`
90 | - **File**: [`specificity.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Metrics/specificity.c)
91 |
92 | ---
93 |
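94 | ## Usage Sketch
95 |
96 | A minimal sketch of calling the classification metrics above (labels and
97 | predictions are illustrative binary values):
98 |
99 | ```c
100 | #include <stdio.h>
101 | #include "include/Metrics/accuracy.h"
102 | #include "include/Metrics/precision.h"
103 | #include "include/Metrics/recall.h"
104 |
105 | int main(void)
106 | {
107 |     float y[6] = {1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f};    /* ground truth */
108 |     float yHat[6] = {1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f}; /* predictions */
109 |
110 |     printf("accuracy:  %f\n", accuracy(y, yHat, 6));
111 |     printf("precision: %f\n", precision(y, yHat, 6));
112 |     printf("recall:    %f\n", recall(y, yHat, 6));
113 |     return 0;
114 | }
115 | ```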
--------------------------------------------------------------------------------
/docs/modules/optimizers.md:
--------------------------------------------------------------------------------
1 | # Optimizers
2 |
3 | ## SGD (Stochastic Gradient Descent)
4 | - **Description**: Basic optimizer that updates weights using gradients.
5 | - **Function**: `float sgd(float x, float y, float lr, float *w, float *b)`
6 | - **File**: [`sgd.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Optimizers/sgd.c)
7 |
8 | ## Adam
9 | - **Description**: Adaptive optimizer combining momentum and RMSprop.
10 | - **Function**: `float adam(float x, float y, float lr, float *w, float *b, float *v_w, float *v_b, float *s_w, float *s_b, float beta1, float beta2, float epsilon)`
11 | - **File**: [`adam.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Optimizers/adam.c)
12 |
13 | ## RMSprop
14 | - **Description**: Optimizer that scales learning rates based on recent gradients.
15 | - **Function**: `float rms_prop(float x, float y, float lr, float *w, float *b, float *cache_w, float *cache_b, float epsilon, float beta)`
16 | - **File**: [`rmsprop.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Optimizers/rmsprop.c)
17 |
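18 | ## Usage Sketch
19 |
20 | A minimal sketch of the scalar `sgd` interface above. It assumes each call
21 | performs one gradient step that moves `w` and `b` toward mapping the input
22 | `x` to the target `y`; the exact update rule lives in `sgd.c`.
23 |
24 | ```c
25 | #include <stdio.h>
26 | #include "include/Optimizers/sgd.h"
27 |
28 | int main(void)
29 | {
30 |     float w = 0.0f, b = 0.0f;
31 |
32 |     /* Repeatedly step toward the mapping x = 1.0 -> y = 2.0. */
33 |     for (int i = 0; i < 100; i++)
34 |     {
35 |         sgd(1.0f, 2.0f, 0.1f, &w, &b);
36 |     }
37 |
38 |     printf("w = %f, b = %f\n", w, b);
39 |     return 0;
40 | }
41 | ```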
--------------------------------------------------------------------------------
/docs/modules/preprocessing.md:
--------------------------------------------------------------------------------
1 | # Preprocessing
2 |
3 | ## Standard Scaler
4 | - **Description**: Scales data to have zero mean and unit variance.
5 | - **Functions**:
6 | - `float *standard_scaler(float *x, int size)`
7 | - **File**: [`standard_scaler.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Preprocessing/standard_scaler.c)
8 |
9 | ## Min-Max Scaler
10 | - **Description**: Scales data to a specified range (default: [0, 1]).
11 | - **Functions**:
12 | - `float *min_max_scaler(float *x, int size)`
13 | - **File**: [`min_max_scaler.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Preprocessing/min_max_scaler.c)
14 |
15 | ## Label Encoder
16 | - **Description**: Encodes categorical labels as integers.
17 | - **Functions**:
18 | - `int *label_encoder(char *x, int size, CharMap **map, int *mapSize)`
19 | - `char *label_decoder(int *x, int size, CharMap *map, int mapSize)`
20 | - `void free_label_memory(CharMap *map, int *encoded, char *decoded)`
21 | - **File**: [`label_encoder.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Preprocessing/label_encoder.c)
22 |
23 | ## One-Hot Encoder
24 | - **Description**: Encodes categorical labels as one-hot vectors.
25 | - **Functions**:
26 | - `int *one_hot_encoding(char *x, int size, CharMap **map, int *mapSize)`
27 | - `char *one_hot_decoding(int *x, int size, CharMap *map, int mapSize)`
28 | - `void free_one_hot_memory(int *x, char *y, CharMap *map)`
29 | - **File**: [`one_hot_encoder.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Preprocessing/one_hot_encoder.c)
30 |
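31 | ## Usage Sketch
32 |
33 | A minimal sketch of the two scalers above. Both return a newly allocated
34 | array; this sketch assumes the caller owns and frees that buffer.
35 |
36 | ```c
37 | #include <stdio.h>
38 | #include <stdlib.h>
39 | #include "include/Preprocessing/standard_scaler.h"
40 | #include "include/Preprocessing/min_max_scaler.h"
41 |
42 | int main(void)
43 | {
44 |     float data[5] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
45 |
46 |     float *standardized = standard_scaler(data, 5); /* zero mean, unit variance */
47 |     float *normalized = min_max_scaler(data, 5);    /* scaled into [0, 1] */
48 |
49 |     for (int i = 0; i < 5; i++)
50 |     {
51 |         printf("%f -> %f / %f\n", data[i], standardized[i], normalized[i]);
52 |     }
53 |
54 |     free(standardized);
55 |     free(normalized);
56 |     return 0;
57 | }
58 | ```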
--------------------------------------------------------------------------------
/docs/modules/regularizers.md:
--------------------------------------------------------------------------------
1 | # Regularizers
2 |
3 | ## L1 Regularization
4 | - **Description**: Adds the absolute value of weights to the loss function to encourage sparsity.
5 | - **Function**: `float l1(float x, float y, float lr, float *w, float *b, float *v_w, float *v_b, float *s_w, float *s_b, float beta1, float beta2, float epsilon)`
6 | - **File**: [`l1.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Regularizers/l1.c)
7 |
8 | ## L2 Regularization
9 | - **Description**: Adds the squared value of weights to the loss function to prevent overfitting.
10 | - **Function**: `float l2(float x, float y, float lr, float *w, float *b, float *v_w, float *v_b, float *s_w, float *s_b, float beta1, float beta2, float epsilon, float reg_l2)`
11 | - **File**: [`l2.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Regularizers/l2.c)
12 |
13 | ## Combined L1-L2 Regularization
14 | - **Description**: Combines L1 and L2 regularization techniques.
15 | - **Function**: `float l1_l2(float *w, float *dw, float l1, float l2, int n)`
16 | - **File**: [`l1_l2.c`](https://github.com/jaywyawhare/C-ML/tree/master/src/Regularizers/l1_l2.c)
17 |
--------------------------------------------------------------------------------
/docs/testing.md:
--------------------------------------------------------------------------------
1 | # Testing
2 |
3 | Run the tests using the `make test` command:
4 | ```bash
5 | make test
6 | ```
7 |
8 | Each module has a corresponding test file in the `test/` directory. The tests validate the correctness of the implementation and ensure robustness.
9 |
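10 | A test is typically a small `main` that exercises one module with assertions.
11 | A minimal sketch in that style (not an actual file from `test/`):
12 |
13 | ```c
14 | #include <assert.h>
15 | #include <math.h>
16 | #include "include/Activations/relu.h"
17 |
18 | int main(void)
19 | {
20 |     /* ReLU passes positives through and clamps negatives to zero. */
21 |     assert(relu(2.0f) == 2.0f);
22 |     assert(relu(-2.0f) == 0.0f);
23 |     assert(fabsf(relu(0.0f)) < 1e-6f);
24 |     return 0;
25 | }
26 | ```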
--------------------------------------------------------------------------------
/docs/usage.md:
--------------------------------------------------------------------------------
1 | # Usage
2 |
3 | This page provides an example of how to use the C-ML library to create and train a simple neural network.
4 |
5 | ## Neural Network Training Example
6 |
7 | ```c
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 | #include "include/Core/training.h"
11 | #include "include/Core/dataset.h"
12 |
13 | int main()
14 | {
15 | NeuralNetwork *network = create_neural_network(2);
16 | build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
17 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
18 | model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
19 | model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
20 |
21 | float X_data[4][2] = {{0.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}};
22 | float y_data[4][1] = {{0.0f}, {1.0f}, {1.0f}, {1.0f}};
23 |
24 | Dataset *dataset = dataset_create();
25 | dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
26 |
27 | summary(network);
28 |
29 | train_network(network, dataset, 30);
30 | test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
31 |
32 | dataset_free(dataset);
33 | free_neural_network(network);
34 |
35 | return 0;
36 | }
37 | ```
38 |
39 | This example demonstrates how to:
40 | - Create a neural network with three dense layers
41 | - Use different activation functions (ReLU, Tanh, Sigmoid)
42 | - Create and load a dataset
43 | - Train the network using the Adam optimizer
44 | - Test the network's performance
45 |
--------------------------------------------------------------------------------
/examples/nn_training_example.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "../include/Core/training.h"
4 | #include "../include/Core/dataset.h"
5 | #include "../include/Core/logging.h"
6 | #include <time.h>
7 |
8 | int main()
9 | {
10 | set_log_level(LOG_LEVEL_INFO);
11 | srand(time(NULL));
12 |
13 | NeuralNetwork *network = create_neural_network(2);
14 | build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
15 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
16 | model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
17 | model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
18 |
19 | float X_data[4][2] = {
20 | {0.0f, 0.0f},
21 | {0.0f, 1.0f},
22 | {1.0f, 0.0f},
23 | {1.0f, 1.0f}};
24 |
25 | float y_data[4][1] = {
26 | {0.0f},
27 | {1.0f},
28 | {1.0f},
29 | {1.0f}};
30 |
31 | Dataset *dataset = dataset_create();
32 | dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
33 |
34 | summary(network);
35 |
36 | train_network(network, dataset, 30);
37 | test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
38 |
39 | dataset_free(dataset);
40 | free_neural_network(network);
41 |
42 | return 0;
43 | }
44 |
--------------------------------------------------------------------------------
/examples/or_gate.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../include/Layers/dense.h"
5 | #include "../include/Activations/sigmoid.h"
6 | #include "../include/Loss_Functions/mean_squared_error.h"
7 | #include "../include/Core/error_codes.h"
8 | #include "../include/Core/logging.h"
9 |
10 | #define INPUT_SIZE 2
11 | #define HIDDEN_SIZE 1
12 | #define OUTPUT_SIZE 1
13 | #define LEARNING_RATE 0.1
14 | #define EPOCHS 1000
15 | #define TRAINING_SAMPLES 4
16 |
17 | int main() {
18 | float training_data[TRAINING_SAMPLES][INPUT_SIZE] = {
19 | {0.0, 0.0},
20 | {0.0, 1.0},
21 | {1.0, 0.0},
22 | {1.0, 1.0}
23 | };
24 | float training_labels[TRAINING_SAMPLES][OUTPUT_SIZE] = {
25 | {0.0},
26 | {1.0},
27 | {1.0},
28 | {1.0}
29 | };
30 |
31 | DenseLayer hidden_layer = {NULL, NULL, 0, 0};
32 | initialize_dense(&hidden_layer, INPUT_SIZE, HIDDEN_SIZE);
33 |
34 | DenseLayer output_layer = {NULL, NULL, 0, 0};
35 | initialize_dense(&output_layer, HIDDEN_SIZE, OUTPUT_SIZE);
36 |
37 | for (int epoch = 0; epoch < EPOCHS; epoch++) {
38 | float total_loss = 0.0;
39 |
40 | for (int i = 0; i < TRAINING_SAMPLES; i++) {
41 | float hidden_layer_input[INPUT_SIZE] = {training_data[i][0], training_data[i][1]};
42 | float hidden_layer_output[HIDDEN_SIZE];
43 | forward_dense(&hidden_layer, hidden_layer_input, hidden_layer_output);
44 |
45 | for (int j = 0; j < HIDDEN_SIZE; j++) {
46 | hidden_layer_output[j] = sigmoid(hidden_layer_output[j]);
47 | }
48 |
49 | float output_layer_output[OUTPUT_SIZE];
50 | forward_dense(&output_layer, hidden_layer_output, output_layer_output);
51 |
52 | for (int j = 0; j < OUTPUT_SIZE; j++) {
53 | output_layer_output[j] = sigmoid(output_layer_output[j]);
54 | }
55 |
56 | float loss = mean_squared_error(training_labels[i], output_layer_output, OUTPUT_SIZE);
57 | total_loss += loss;
58 |
59 | float d_output[OUTPUT_SIZE] = {(output_layer_output[0] - training_labels[i][0]) * sigmoid_derivative(output_layer_output[0])}; /* MSE gradient through the output sigmoid */
60 | float d_hidden[HIDDEN_SIZE] = {0.0};
61 | float d_output_weights[HIDDEN_SIZE * OUTPUT_SIZE] = {0.0};
62 | float d_output_biases[OUTPUT_SIZE] = {0.0};
63 |
64 | backward_dense(&output_layer, hidden_layer_output, output_layer_output, d_output, d_hidden, d_output_weights, d_output_biases);
65 |
66 | float d_hidden_input[INPUT_SIZE] = {0.0};
67 | float d_hidden_weights[INPUT_SIZE * HIDDEN_SIZE] = {0.0};
68 | float d_hidden_biases[HIDDEN_SIZE] = {0.0};
69 |
70 | for (int j = 0; j < HIDDEN_SIZE; j++) {
71 | float sigmoid_derivative = hidden_layer_output[j] * (1 - hidden_layer_output[j]);
72 | d_hidden[j] *= sigmoid_derivative;
73 | }
74 |
75 | backward_dense(&hidden_layer, hidden_layer_input, hidden_layer_output, d_hidden, d_hidden_input, d_hidden_weights, d_hidden_biases);
76 |
77 | update_dense(&output_layer, d_output_weights, d_output_biases, LEARNING_RATE);
78 | update_dense(&hidden_layer, d_hidden_weights, d_hidden_biases, LEARNING_RATE);
79 | }
80 |
81 | LOG_INFO("Epoch %d, Loss: %f", epoch, total_loss / TRAINING_SAMPLES);
82 | }
83 |
84 | printf("\nTesting the trained network:\n");
85 | for (int i = 0; i < TRAINING_SAMPLES; i++) {
86 | float input[INPUT_SIZE] = {training_data[i][0], training_data[i][1]};
87 |
88 | float hidden_output[HIDDEN_SIZE];
89 | forward_dense(&hidden_layer, input, hidden_output);
90 | for (int j = 0; j < HIDDEN_SIZE; j++) {
91 | hidden_output[j] = sigmoid(hidden_output[j]);
92 | }
93 |
94 | float output[OUTPUT_SIZE];
95 | forward_dense(&output_layer, hidden_output, output);
96 | for (int j = 0; j < OUTPUT_SIZE; j++) {
97 | output[j] = sigmoid(output[j]);
98 | }
99 |
100 | LOG_INFO("Input: %f %f, Output: %f, Expected: %f", input[0], input[1], output[0], training_labels[i][0]);
101 | }
102 |
103 | free_dense(&hidden_layer);
104 | free_dense(&output_layer);
105 |
106 | return CM_SUCCESS;
107 | }
108 |
--------------------------------------------------------------------------------
/include/Activations/elu.h:
--------------------------------------------------------------------------------
1 | #ifndef ELU_H
2 | #define ELU_H
3 |
4 | /**
5 | * @brief Applies the Exponential Linear Unit (ELU) activation function.
6 | *
7 | * @param x The input value.
8 | * @param alpha The scaling factor for negative values.
9 | * @return The result of the ELU activation function.
10 | */
11 | float elu(float x, float alpha);
12 |
13 | /**
14 | * @brief Computes the derivative of the ELU activation function.
15 | *
16 | * @param x The input value.
17 | * @param alpha The scaling factor for negative values.
18 | * @return The derivative of the ELU function.
19 | */
20 | float elu_derivative(float x, float alpha);
21 |
22 | #endif
23 |
--------------------------------------------------------------------------------
/include/Activations/gelu.h:
--------------------------------------------------------------------------------
1 | #ifndef C_ML_GELU_H
2 | #define C_ML_GELU_H
3 |
4 | /**
5 | * @brief Applies the Gaussian Error Linear Unit (GELU) activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the GELU activation function.
9 | */
10 | float gelu(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the GELU activation function.
14 | *
15 | * @param x The input value.
16 | * @return The derivative of the GELU function.
17 | */
18 | float gelu_derivative(float x);
19 | #endif
20 |
--------------------------------------------------------------------------------
/include/Activations/leaky_relu.h:
--------------------------------------------------------------------------------
1 | #ifndef LEAKY_RELU_H
2 | #define LEAKY_RELU_H
3 |
4 | /**
5 | * @brief Applies the Leaky Rectified Linear Unit (Leaky ReLU) activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the Leaky ReLU activation function.
9 | */
10 | float leaky_relu(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the Leaky ReLU activation function.
14 | *
15 | * @param x The input value.
16 | * @return The derivative of the Leaky ReLU function.
17 | */
18 | float leaky_relu_derivative(float x);
19 |
20 | #endif
21 |
--------------------------------------------------------------------------------
/include/Activations/linear.h:
--------------------------------------------------------------------------------
1 | #ifndef LINEAR_H
2 | #define LINEAR_H
3 |
4 | /**
5 | * @brief Applies the linear activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the linear activation function.
9 | */
10 | float linear(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the linear activation function.
14 | *
15 | * @param x The input value.
16 | * @return The derivative of the linear function.
17 | */
18 | float linear_derivative(float x);
19 |
20 | #endif
21 |
--------------------------------------------------------------------------------
/include/Activations/relu.h:
--------------------------------------------------------------------------------
1 | #ifndef RELU_H
2 | #define RELU_H
3 |
4 | /**
5 | * @brief Applies the Rectified Linear Unit (ReLU) activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the ReLU activation function.
9 | */
10 | float relu(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the ReLU activation function.
14 | *
15 | * @param x The input value.
16 | * @return The derivative of the ReLU function.
17 | */
18 | float relu_derivative(float x);
19 |
20 | #endif
21 |
--------------------------------------------------------------------------------
/include/Activations/sigmoid.h:
--------------------------------------------------------------------------------
1 | #ifndef SIGMOID_H
2 | #define SIGMOID_H
3 |
4 | /**
5 | * @brief Applies the sigmoid activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the sigmoid activation function.
9 | */
10 | float sigmoid(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the sigmoid activation function.
14 | *
15 | * @param sigmoid_output The output of the sigmoid function (f(x)).
16 | * @return The derivative of the sigmoid function.
17 | */
18 | float sigmoid_derivative(float sigmoid_output);
19 |
20 | #endif
21 |
--------------------------------------------------------------------------------
/include/Activations/softmax.h:
--------------------------------------------------------------------------------
1 | #ifndef SOFTMAX_H
2 | #define SOFTMAX_H
3 |
4 | /**
5 | * @brief Applies the softmax activation function.
6 | *
7 | * @param z Pointer to the input array.
8 | * @param n The number of elements in the input array.
9 | * @return Pointer to the output array containing the softmax values.
10 | */
11 | float *softmax(float *z, int n);
12 |
13 | /**
14 | * @brief Frees the memory allocated for the softmax output.
15 | *
16 | * @param output Pointer to the pointer of the output array to be freed.
17 | */
18 | void free_softmax(float **output);
19 |
20 | /**
21 | * @brief Computes the derivative of the softmax activation function.
22 | *
23 | * @param softmax_output Pointer to the softmax output array.
24 | * @param n The number of elements in the output array.
25 | * @return Pointer to the Jacobian matrix (n x n) or error code.
26 | */
27 | float *softmax_derivative(float *softmax_output, int n);
28 |
29 | /**
30 | * @brief Frees the memory allocated for the softmax derivative Jacobian.
31 | *
32 | * @param jacobian Pointer to the Jacobian matrix to be freed.
33 | */
34 | void free_softmax_derivative(float **jacobian);
35 |
36 | #endif
37 |
--------------------------------------------------------------------------------
/include/Activations/tanh.h:
--------------------------------------------------------------------------------
1 | #ifndef TANH_H
2 | #define TANH_H
3 |
4 | /**
5 | * @brief Applies the hyperbolic tangent (tanh) activation function.
6 | *
7 | * @param x The input value.
8 | * @return The result of the tanh activation function.
9 | */
10 | float tanH(float x);
11 |
12 | /**
13 | * @brief Computes the derivative of the tanh activation function.
14 | *
15 | * @param x The input value.
16 | * @return The derivative of the tanh function.
17 | */
18 | float tanH_derivative(float x);
19 |
20 | #endif
21 |
--------------------------------------------------------------------------------
/include/Core/dataset.h:
--------------------------------------------------------------------------------
1 | #ifndef C_ML_DATASET_H
2 | #define C_ML_DATASET_H
3 |
4 | #include <stddef.h>
5 | #include "error_codes.h"
6 |
7 | /**
8 | * @brief Structure representing a dataset.
9 | *
10 | * Contains input and output data arrays, along with metadata such as the number of samples
11 | * and the dimensions of input and output features.
12 | */
13 | typedef struct Dataset
14 | {
15 | float **X; /**< Pointer to the input data array. */
16 | float **y; /**< Pointer to the output data array. */
17 | int num_samples; /**< Number of samples in the dataset. */
18 | int input_dim; /**< Dimension of input features. */
19 | int output_dim; /**< Dimension of output features. */
20 | } Dataset;
21 |
22 | /**
23 | * @brief Create a new dataset.
24 | *
25 | * @return Dataset* Pointer to the newly created dataset.
26 | */
27 | Dataset *dataset_create(void);
28 |
29 | /**
30 | * @brief Free memory allocated for the dataset.
31 | *
32 | * @param dataset Pointer to the dataset to free.
33 | */
34 | void dataset_free(Dataset *dataset);
35 |
36 | /**
37 | * @brief Load dataset from arrays.
38 | *
39 | * @param dataset Pointer to the dataset structure.
40 | * @param X_array Pointer to the input data array.
41 | * @param y_array Pointer to the output data array.
42 | * @param num_samples Number of samples.
43 | * @param input_dim Dimension of input features.
44 | * @param output_dim Dimension of output features.
45 | * @return CM_Error Error code.
46 | */
47 | CM_Error dataset_load_arrays(Dataset *dataset, float *X_array, float *y_array, int num_samples, int input_dim, int output_dim);
48 |
49 | #endif
50 |
--------------------------------------------------------------------------------
/include/Core/error_codes.h:
--------------------------------------------------------------------------------
1 | #ifndef C_ML_ERROR_CODES_H
2 | #define C_ML_ERROR_CODES_H
3 |
4 | /**
5 | * @brief Defines the error codes for the C-ML library.
6 | */
7 | typedef enum
8 | {
9 | CM_SUCCESS = 0,
10 | CM_NULL_ERROR = -1,
11 | CM_NULL_POINTER_ERROR = -2,
12 | CM_UNDERFLOW_ERROR = -3,
13 | CM_OVERFLOW_ERROR = -4,
14 | CM_INVALID_INPUT_ERROR = -5,
15 | CM_MEMORY_ALLOCATION_ERROR = -6,
16 | CM_INVALID_PARAMETER_ERROR = -7,
17 | CM_INVALID_STRIDE_ERROR = -8,
18 | CM_INVALID_KERNEL_SIZE_ERROR = -9,
19 | CM_INPUT_SIZE_SMALLER_THAN_KERNEL_ERROR = -10,
20 | CM_DIVISION_BY_ZERO_ERROR = -11,
21 | CM_FILE_OPEN_ERROR = -12,
22 | CM_FILE_READ_ERROR = -13,
23 | CM_FILE_WRITE_ERROR = -14,
24 | CM_INVALID_FILE_FORMAT_ERROR = -15,
25 | CM_INVALID_INPUT_DIMENSIONS_ERROR = -16,
26 | CM_INVALID_OUTPUT_DIMENSIONS_ERROR = -17,
27 | CM_INCOMPATIBLE_DATA_TYPES_ERROR = -18,
28 | CM_MATH_DOMAIN_ERROR = -19,
29 | CM_SINGULAR_MATRIX_ERROR = -20,
30 | CM_NON_POSITIVE_DEFINITE_MATRIX_ERROR = -21,
31 | CM_NOT_IMPLEMENTED_ERROR = -22,
32 | CM_LAYER_NOT_INITIALIZED_ERROR = -23,
33 | CM_INVALID_LAYER_DIMENSIONS_ERROR = -24,
34 | CM_NULL_LAYER_ERROR = -25,
35 | CM_OPTIMIZER_NOT_INITIALIZED_ERROR = -26
36 |
37 | } CM_Error;
38 |
39 | #endif
--------------------------------------------------------------------------------
/include/Core/logging.h:
--------------------------------------------------------------------------------
1 | #ifndef C_ML_LOGGING_H
2 | #define C_ML_LOGGING_H
3 |
4 | #include <stdio.h>
5 |
6 | /**
7 | * @brief Log levels for the C-ML library
8 | */
9 | typedef enum {
10 | LOG_LEVEL_DEBUG, // Detailed information, typically of interest only when diagnosing problems
11 | LOG_LEVEL_INFO, // Confirmation that things are working as expected
12 | LOG_LEVEL_WARNING, // Indication that something unexpected happened, but the program is still working
13 | LOG_LEVEL_ERROR // Due to a more serious problem, the program has not been able to perform a function
14 | } LogLevel;
15 |
16 | // Global log level (can be set at runtime)
17 | extern LogLevel g_log_level;
18 |
19 | /**
20 | * @brief Set the global log level
21 | *
22 | * @param level The log level to set
23 | */
24 | void set_log_level(LogLevel level);
25 |
26 | /**
27 | * @brief Core logging function
28 | *
29 | * @param level Log level
30 | * @param file Source file name
31 | * @param line Line number
32 | * @param func Function name
33 | * @param format Format string
34 | * @param ... Variable arguments
35 | */
36 | void log_message(LogLevel level, const char *file, int line, const char *func, const char *format, ...);
37 |
38 | // Logging macros
39 | #define LOG_DEBUG(format, ...) \
40 | log_message(LOG_LEVEL_DEBUG, __FILE__, __LINE__, __func__, format, ##__VA_ARGS__)
41 |
42 | #define LOG_INFO(format, ...) \
43 | log_message(LOG_LEVEL_INFO, __FILE__, __LINE__, __func__, format, ##__VA_ARGS__)
44 |
45 | #define LOG_WARNING(format, ...) \
46 | log_message(LOG_LEVEL_WARNING, __FILE__, __LINE__, __func__, format, ##__VA_ARGS__)
47 |
48 | #define LOG_ERROR(format, ...) \
49 | log_message(LOG_LEVEL_ERROR, __FILE__, __LINE__, __func__, format, ##__VA_ARGS__)
50 |
51 | #endif // C_ML_LOGGING_H
52 |
--------------------------------------------------------------------------------
/include/Core/memory_management.h:
--------------------------------------------------------------------------------
1 | #ifndef MEMORY_MANAGEMENT_H
2 | #define MEMORY_MANAGEMENT_H
3 |
4 | #include <stddef.h>
5 |
6 | /**
7 | * @brief Allocates memory safely and logs the file and line number in case of failure.
8 | *
9 | * @param size The size of memory to allocate in bytes.
10 | * @param file The name of the file where the allocation is requested.
11 | * @param line The line number in the file where the allocation is requested.
12 | * @return A pointer to the allocated memory, or exits the program on failure.
13 | */
14 | void *cm_safe_malloc(size_t size, const char *file, int line);
15 |
16 | /**
17 | * @brief Frees allocated memory safely and sets the pointer to NULL.
18 | *
19 | * @param ptr A double pointer to the memory to be freed.
20 | */
21 | void cm_safe_free(void **ptr);
22 |
23 | #endif
--------------------------------------------------------------------------------
/include/Layers/dense.h:
--------------------------------------------------------------------------------
1 | #ifndef DENSE_H
2 | #define DENSE_H
3 |
4 | /**
5 | * @brief Structure representing a Dense Layer.
6 | *
7 | * @param weights Pointer to the weights matrix.
8 | * @param biases Pointer to the biases vector.
9 | * @param input_size Number of input neurons.
10 | * @param output_size Number of output neurons.
11 | * @param rmsprop_cache_w Cache for RMSProp weights.
12 | * @param rmsprop_cache_b Cache for RMSProp biases.
13 | * @param adam_v_w Adam first moment vector for weights.
14 | * @param adam_v_b Adam first moment vector for biases.
15 | * @param adam_s_w Adam second moment vector for weights.
16 | * @param adam_s_b Adam second moment vector for biases.
17 | */
18 | typedef struct
19 | {
20 | float *weights;
21 | float *biases;
22 | int input_size;
23 | int output_size;
24 |
25 | float *rmsprop_cache_w;
26 | float *rmsprop_cache_b;
27 | float *adam_v_w;
28 | float *adam_v_b;
29 | float *adam_s_w;
30 | float *adam_s_b;
31 | } DenseLayer;
32 |
33 | /**
34 | * @brief Initializes a Dense Layer with random weights and biases.
35 | *
36 | * @param layer Pointer to the DenseLayer structure.
37 | * @param input_size Number of input neurons.
38 | * @param output_size Number of output neurons.
39 | * @return int Error code.
40 | */
41 | int initialize_dense(DenseLayer *layer, int input_size, int output_size);
42 |
43 | /**
44 | * @brief Performs the forward pass for the Dense Layer.
45 | *
46 | * @param layer Pointer to the DenseLayer structure.
47 | * @param input Input data array.
48 | * @param output Output data array.
49 | * @return int Error code.
50 | */
51 | int forward_dense(DenseLayer *layer, float *input, float *output);
52 |
53 | /**
54 | * @brief Performs the backward pass for the Dense Layer.
55 | *
56 | * @param layer Pointer to the DenseLayer structure.
57 | * @param input Input data array.
58 | * @param output Output data array.
59 | * @param d_output Gradient of the output.
60 | * @param d_input Gradient of the input.
61 | * @param d_weights Gradient of the weights.
62 | * @param d_biases Gradient of the biases.
63 | * @return int Error code.
64 | */
65 | int backward_dense(DenseLayer *layer, float *input, float *output, float *d_output, float *d_input, float *d_weights, float *d_biases);
66 |
67 | /**
68 | * @brief Updates the weights and biases of the Dense Layer.
69 | *
70 | * @param layer Pointer to the DenseLayer structure.
71 | * @param d_weights Gradient of the weights.
72 | * @param d_biases Gradient of the biases.
73 | * @param learning_rate Learning rate for the update.
74 | * @return int Error code.
75 | */
76 | int update_dense(DenseLayer *layer, float *d_weights, float *d_biases, float learning_rate);
77 |
78 | /**
79 | * @brief Frees the memory allocated for the Dense Layer.
80 | *
81 | * @param layer Pointer to the DenseLayer structure.
82 | * @return int Error code.
83 | */
84 | int free_dense(DenseLayer *layer);
85 |
86 | #endif
87 |
--------------------------------------------------------------------------------
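Usage note: a minimal lifecycle sketch for the dense layer API above (sizes and values are illustrative; this assumes a zero return code signals success, per the library's error-code convention):

#include "include/Layers/dense.h"

int main(void)
{
    DenseLayer layer;
    float input[2] = {1.0f, 0.5f};
    float output[3];

    if (initialize_dense(&layer, 2, 3) != 0)   /* 2 inputs -> 3 outputs, random init */
        return 1;
    forward_dense(&layer, input, output);      /* output = weights * input + biases */
    free_dense(&layer);
    return 0;
}
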
/include/Layers/dropout.h:
--------------------------------------------------------------------------------
1 | #ifndef DROPOUT_H
2 | #define DROPOUT_H
3 |
4 | /**
5 | * @brief Structure representing a Dropout Layer.
6 | *
7 | * @param dropout_rate Dropout rate (0.0 to 1.0).
8 | */
9 | typedef struct
10 | {
11 | float dropout_rate;
12 | } DropoutLayer;
13 |
14 | /**
15 | * @brief Initializes a Dropout Layer with a given dropout rate.
16 | *
17 | * @param layer Pointer to the DropoutLayer structure.
18 | * @param dropout_rate Dropout rate (0.0 to 1.0).
19 | * @return int Error code.
20 | */
21 | int initialize_dropout(DropoutLayer *layer, float dropout_rate);
22 |
23 | /**
24 | * @brief Performs the forward pass for the Dropout Layer.
25 | *
26 | * @param layer Pointer to the DropoutLayer structure.
27 | * @param input Input data array.
28 | * @param output Output data array.
29 | * @param size Size of the input/output arrays.
30 | * @return int Error code.
31 | */
32 | int forward_dropout(DropoutLayer *layer, float *input, float *output, int size);
33 |
34 | /**
35 | * @brief Performs the backward pass for the Dropout Layer.
36 | *
37 | * @param layer Pointer to the DropoutLayer structure.
38 | * @param input Input data array.
39 | * @param output Output data array.
40 | * @param d_output Gradient of the output.
41 | * @param d_input Gradient of the input.
42 | * @param size Size of the input/output arrays.
43 | * @return int Error code.
44 | */
45 | int backward_dropout(DropoutLayer *layer, float *input, float *output, float *d_output, float *d_input, int size);
46 |
47 | #endif
48 |
--------------------------------------------------------------------------------
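Usage note: a minimal sketch of the dropout forward pass above (rate and values are illustrative; whether surviving activations are rescaled is up to the implementation in src/Layers/dropout.c):

#include "include/Layers/dropout.h"

int main(void)
{
    DropoutLayer drop;
    float in[4] = {0.1f, 0.2f, 0.3f, 0.4f};
    float out[4];

    initialize_dropout(&drop, 0.5f);       /* drop roughly half the activations */
    forward_dropout(&drop, in, out, 4);    /* zeroes dropped positions in out */
    return 0;
}
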
/include/Layers/flatten.h:
--------------------------------------------------------------------------------
1 | #ifndef FLATTEN_H
2 | #define FLATTEN_H
3 |
4 | #include <stddef.h>
5 |
6 | /**
7 | * @brief Structure representing a Flatten Layer.
8 | *
9 | * @param input_size Size of the input data.
10 | * @param output_size Size of the output data (same as input size).
11 | */
12 | typedef struct
13 | {
14 | int input_size;
15 | int output_size;
16 | } FlattenLayer;
17 |
18 | /**
19 | * @brief Initializes a Flatten Layer.
20 | *
21 | * @param layer Pointer to the FlattenLayer structure.
22 | * @param input_size Size of the input data.
23 | * @return int Error code.
24 | */
25 | int initialize_flatten(FlattenLayer *layer, int input_size);
26 |
27 | /**
28 | * @brief Performs the forward pass for the Flatten Layer.
29 | *
30 | * @param layer Pointer to the FlattenLayer structure.
31 | * @param input Input data array.
32 | * @param output Output data array.
33 | * @return int Error code.
34 | */
35 | int forward_flatten(FlattenLayer *layer, float *input, float *output);
36 |
37 | /**
38 | * @brief Performs the backward pass for the Flatten Layer.
39 | *
40 | * @param layer Pointer to the FlattenLayer structure.
41 | * @param input Input data array.
42 | * @param output Output data array.
43 | * @param d_output Gradient of the output.
44 | * @param d_input Gradient of the input.
45 | * @return int Error code.
46 | */
47 | int backward_flatten(FlattenLayer *layer, float *input, float *output, float *d_output, float *d_input);
48 |
49 | /**
50 | * @brief Frees the memory allocated for the Flatten Layer.
51 | *
52 | * @param layer Pointer to the FlattenLayer structure.
53 | * @return int Error code.
54 | */
55 | int free_flatten(FlattenLayer *layer);
56 |
57 | #endif
--------------------------------------------------------------------------------
/include/Layers/maxpooling.h:
--------------------------------------------------------------------------------
1 | #ifndef MAXPOOLING_H
2 | #define MAXPOOLING_H
3 |
4 | #include "../../include/Core/memory_management.h"
5 |
6 | /**
7 | * @brief Structure representing a MaxPooling Layer.
8 | *
9 | * @param kernel_size Size of the pooling kernel.
10 | * @param stride Stride of the pooling operation.
11 | */
12 | typedef struct
13 | {
14 | int kernel_size;
15 | int stride;
16 | } MaxPoolingLayer;
17 |
18 | /**
19 | * @brief Initializes a MaxPooling Layer.
20 | *
21 | * @param layer Pointer to the MaxPoolingLayer structure.
22 | * @param kernel_size Size of the kernel (must be > 0).
23 | * @param stride Stride of the kernel (must be > 0).
24 | * @return int Error code.
25 | */
26 | int initialize_maxpooling(MaxPoolingLayer *layer, int kernel_size, int stride);
27 |
28 | /**
29 | * @brief Computes the output size for the MaxPooling Layer.
30 | *
31 | * @param input_size Size of the input data.
32 | * @param kernel_size Size of the kernel.
33 | * @param stride Stride of the kernel.
34 | * @return int Output size, or an error code on invalid input.
35 | */
36 | int compute_maxpooling_output_size(int input_size, int kernel_size, int stride);
37 |
38 | /**
39 | * @brief Performs the forward pass for the MaxPooling Layer.
40 | *
41 | * @param layer Pointer to the MaxPoolingLayer structure.
42 | * @param input Input data array.
43 | * @param output Output data array.
44 | * @param input_size Size of the input data.
45 | * @return int Number of output elements, or an error code on failure.
46 | */
47 | int forward_maxpooling(MaxPoolingLayer *layer, const float *input, float *output, int input_size);
48 |
49 | /**
50 | * @brief Frees the memory allocated for the MaxPooling Layer.
51 | *
52 | * @param layer Pointer to the MaxPoolingLayer structure.
53 | * @return int Error code.
54 | */
55 | int free_maxpooling(MaxPoolingLayer *layer);
56 |
57 | #endif
--------------------------------------------------------------------------------
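Usage note: a minimal sketch of the max-pooling API above, assuming the usual output-size convention (input_size - kernel_size) / stride + 1:

#include "include/Layers/maxpooling.h"

int main(void)
{
    MaxPoolingLayer pool;
    float input[6] = {1.0f, 3.0f, 2.0f, 5.0f, 4.0f, 0.0f};
    float output[3];

    initialize_maxpooling(&pool, 2, 2);
    int n = compute_maxpooling_output_size(6, 2, 2);   /* (6 - 2) / 2 + 1 = 3 */
    forward_maxpooling(&pool, input, output, 6);       /* expected output: {3, 5, 4} */
    free_maxpooling(&pool);
    return (n == 3) ? 0 : 1;
}
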
/include/Layers/pooling.h:
--------------------------------------------------------------------------------
1 | #ifndef POOLING_H
2 | #define POOLING_H
3 |
4 | #include "../../include/Core/memory_management.h"
5 |
6 | /**
7 | * @brief Structure representing a Pooling Layer.
8 | *
9 | * @param kernel_size Size of the pooling kernel.
10 | * @param stride Stride of the pooling operation.
11 | */
12 | typedef struct
13 | {
14 | int kernel_size;
15 | int stride;
16 | } PoolingLayer;
17 |
18 | /**
19 | * @brief Initializes a Pooling Layer.
20 | *
21 | * @param layer Pointer to the PoolingLayer structure.
22 | * @param kernel_size Size of the kernel (must be > 0).
23 | * @param stride Stride of the kernel (must be > 0).
24 | * @return int Error code.
25 | */
26 | int initialize_pooling(PoolingLayer *layer, int kernel_size, int stride);
27 |
28 | /**
29 | * @brief Computes the output size for the Pooling Layer.
30 | *
31 | * @param input_size Size of the input data.
32 | * @param kernel_size Size of the kernel.
33 | * @param stride Stride of the kernel.
34 | * @return int Output size, or an error code on invalid input.
35 | */
36 | int compute_pooling_output_size(int input_size, int kernel_size, int stride);
37 |
38 | /**
39 | * @brief Performs the forward pass for the Pooling Layer.
40 | *
41 | * @param layer Pointer to the PoolingLayer structure.
42 | * @param input Input data array.
43 | * @param output Output data array.
44 | * @param input_size Size of the input data.
45 | * @return int Number of output elements, or an error code on failure.
46 | */
47 | int forward_pooling(PoolingLayer *layer, const float *input, float *output, int input_size);
48 |
49 | /**
50 | * @brief Frees the memory allocated for the Pooling Layer.
51 | *
52 | * @param layer Pointer to the PoolingLayer structure.
53 | * @return int Error code.
54 | */
55 | int free_pooling(PoolingLayer *layer);
56 |
57 | #endif
--------------------------------------------------------------------------------
/include/Loss_Functions/binary_cross_entropy_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef BINARY_CROSS_ENTROPY_LOSS_H
2 | #define BINARY_CROSS_ENTROPY_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Binary Cross-Entropy Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float binary_cross_entropy_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Binary Cross-Entropy Loss.
16 | *
17 | * @param y Ground truth value.
18 | * @param yHat Predicted value.
19 | * @return The derivative value.
20 | */
21 | float binary_cross_entropy_loss_derivative(float y, float yHat);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
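Usage note: a minimal sketch of the loss above (labels and probabilities are illustrative; yHat values are expected to lie in (0, 1)):

#include <stdio.h>
#include "include/Loss_Functions/binary_cross_entropy_loss.h"

int main(void)
{
    float y[3]    = {1.0f, 0.0f, 1.0f};   /* ground truth labels */
    float yHat[3] = {0.9f, 0.1f, 0.8f};   /* predicted probabilities */
    printf("BCE = %f\n", binary_cross_entropy_loss(y, yHat, 3));
    return 0;
}
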
/include/Loss_Functions/cosine_similarity_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef COSINE_SIMILARITY_LOSS_H
2 | #define COSINE_SIMILARITY_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Cosine Similarity Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float cosine_similarity_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Cosine Similarity Loss.
16 | *
17 | * @param y Pointer to the ground truth values.
18 | * @param yHat Pointer to the predicted values.
19 | * @param n The number of elements in y and yHat.
20 | * @return The derivative value, or an error code if inputs are invalid.
21 | */
22 | float cosine_similarity_loss_derivative(float *y, float *yHat, int n);
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
/include/Loss_Functions/focal_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef FOCAL_LOSS_H
2 | #define FOCAL_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Focal Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @param gamma The focusing parameter to adjust the rate at which easy examples are down-weighted.
11 | * @return The computed loss, or an error code if inputs are invalid.
12 | */
13 | float focal_loss(float *y, float *yHat, int n, float gamma);
14 |
15 | /**
16 | * @brief Computes the derivative of the Focal Loss.
17 | *
18 | * @param y Ground truth value.
19 | * @param yHat Predicted value.
20 | * @param gamma The focusing parameter.
21 | * @return The derivative value.
22 | */
23 | float focal_loss_derivative(float y, float yHat, float gamma);
24 |
25 | #endif
26 |
--------------------------------------------------------------------------------
/include/Loss_Functions/huber_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef HUBER_LOSS_H
2 | #define HUBER_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Huber Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float huber_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Huber Loss.
16 | *
17 | * @param y Ground truth value.
18 | * @param yHat Predicted value.
19 | * @return The derivative value.
20 | */
21 | float huber_loss_derivative(float y, float yHat);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Loss_Functions/kld_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef KLD_LOSS_H
2 | #define KLD_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Kullback-Leibler Divergence Loss.
6 | *
7 | * @param p Pointer to the true distribution.
8 | * @param q Pointer to the predicted distribution.
9 | * @param n The number of elements in p and q.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float kld_loss(float *p, float *q, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Kullback-Leibler Divergence Loss.
16 | *
17 | * @param p True distribution value.
18 | * @param q Predicted distribution value.
19 | * @return The derivative value.
20 | */
21 | float kld_loss_derivative(float p, float q);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Loss_Functions/log_cosh_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef LOG_COSH_LOSS_H
2 | #define LOG_COSH_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Log-Cosh Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float log_cosh_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Log-Cosh Loss.
16 | *
17 | * @param y Ground truth value.
18 | * @param yHat Predicted value.
19 | * @return The derivative value.
20 | */
21 | float log_cosh_loss_derivative(float y, float yHat);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Loss_Functions/mean_squared_error.h:
--------------------------------------------------------------------------------
1 | #ifndef MEAN_SQUARED_ERROR_H
2 | #define MEAN_SQUARED_ERROR_H
3 |
4 | /**
5 | * @brief Computes the Mean Squared Error (MSE).
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed MSE, or an error code.
11 | */
12 | float mean_squared_error(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of Mean Squared Error (MSE) for a single sample.
16 | *
17 | * @param predicted Predicted value.
18 | * @param actual Actual value.
19 | * @param n Number of elements in the sample.
20 | * @return The derivative value.
21 | */
22 | float mean_squared_error_derivative(float predicted, float actual, int n);
23 |
24 | #endif
25 |
--------------------------------------------------------------------------------
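Usage note: a minimal sketch of the MSE function above; for these values the loss works out by hand:

#include <stdio.h>
#include "include/Loss_Functions/mean_squared_error.h"

int main(void)
{
    float y[2]    = {1.0f, 2.0f};
    float yHat[2] = {1.5f, 1.5f};
    /* ((1.5 - 1)^2 + (1.5 - 2)^2) / 2 = 0.25 */
    printf("MSE = %f\n", mean_squared_error(y, yHat, 2));
    return 0;
}
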
/include/Loss_Functions/poisson_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef POISSON_LOSS_H
2 | #define POISSON_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Poisson Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float poisson_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Poisson Loss.
16 | *
17 | * @param y Ground truth value.
18 | * @param yHat Predicted value.
19 | * @return The derivative value.
20 | */
21 | float poisson_loss_derivative(float y, float yHat);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Loss_Functions/smooth_l1_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef SMOOTH_L1_LOSS_H
2 | #define SMOOTH_L1_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Smooth L1 Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float smooth_l1_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Smooth L1 Loss.
16 | *
17 | * @param y Ground truth value.
18 | * @param yHat Predicted value.
19 | * @return The derivative value.
20 | */
21 | float smooth_l1_loss_derivative(float y, float yHat);
22 |
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Loss_Functions/tversky_loss.h:
--------------------------------------------------------------------------------
1 | #ifndef TVERSKY_LOSS_H
2 | #define TVERSKY_LOSS_H
3 |
4 | /**
5 | * @brief Computes the Tversky Loss.
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed loss, or an error code if inputs are invalid.
11 | */
12 | float tversky_loss(float *y, float *yHat, int n);
13 |
14 | /**
15 | * @brief Computes the derivative of the Tversky Loss.
16 | *
17 | * @param y Pointer to the ground truth values.
18 | * @param yHat Pointer to the predicted values.
19 | * @param n The number of elements in y and yHat.
20 | * @return The derivative value, or an error code if inputs are invalid.
21 | */
22 | float tversky_loss_derivative(float *y, float *yHat, int n);
23 | #endif
24 |
--------------------------------------------------------------------------------
/include/Metrics/accuracy.h:
--------------------------------------------------------------------------------
1 | #ifndef ACCURACY_H
2 | #define ACCURACY_H
3 |
4 | /**
5 | * @brief Computes the accuracy of predictions.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed accuracy, or an error code if inputs are invalid.
12 | */
13 | float accuracy(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/balanced_accuracy.h:
--------------------------------------------------------------------------------
1 | #ifndef BALANCED_ACCURACY_H
2 | #define BALANCED_ACCURACY_H
3 |
4 | /**
5 | * @brief Computes the Balanced Accuracy metric.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed balanced accuracy, or an error code if inputs are invalid.
12 | */
13 | float balanced_accuracy(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/cohens_kappa.h:
--------------------------------------------------------------------------------
1 | #ifndef COHENS_KAPPA_H
2 | #define COHENS_KAPPA_H
3 |
4 | /**
5 | * @brief Computes Cohen's Kappa statistic.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed Cohen's Kappa, or an error code if inputs are invalid.
12 | */
13 | float cohens_kappa(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/f1_score.h:
--------------------------------------------------------------------------------
1 | #ifndef F1_SCORE_H
2 | #define F1_SCORE_H
3 |
4 | /**
5 | * @brief Computes the F1 Score metric.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed F1 Score, or an error code if inputs are invalid.
12 | */
13 | float f1_score(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/iou.h:
--------------------------------------------------------------------------------
1 | #ifndef IOU_H
2 | #define IOU_H
3 |
4 | /**
5 | * @brief Computes the Intersection over Union (IoU) metric.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed IoU, or an error code if inputs are invalid.
12 | */
13 | float iou(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/mcc.h:
--------------------------------------------------------------------------------
1 | #ifndef MCC_H
2 | #define MCC_H
3 |
4 | /**
5 | * @brief Computes the Matthews Correlation Coefficient (MCC).
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed MCC, or an error code if inputs are invalid.
12 | */
13 | float mcc(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/mean_absolute_error.h:
--------------------------------------------------------------------------------
1 | #ifndef MEAN_ABSOLUTE_ERROR_H
2 | #define MEAN_ABSOLUTE_ERROR_H
3 |
4 | /**
5 | * @brief Computes the Mean Absolute Error (MAE).
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed error, or an error code if inputs are invalid.
11 | */
12 | float mean_absolute_error(float *y, float *yHat, int n);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/include/Metrics/mean_absolute_percentage_error.h:
--------------------------------------------------------------------------------
1 | #ifndef MEAN_ABSOLUTE_PERCENTAGE_ERROR_H
2 | #define MEAN_ABSOLUTE_PERCENTAGE_ERROR_H
3 |
4 | /**
5 | * @brief Computes the Mean Absolute Percentage Error (MAPE).
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements in y and yHat.
10 | * @return The computed error as a percentage, or an error code if inputs are invalid.
11 | */
12 | float mean_absolute_percentage_error(float *y, float *yHat, int n);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/include/Metrics/precision.h:
--------------------------------------------------------------------------------
1 | #ifndef PRECISION_H
2 | #define PRECISION_H
3 |
4 | /**
5 | * @brief Computes the Precision metric.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed Precision, or an error code if inputs are invalid.
12 | */
13 | float precision(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/r2_score.h:
--------------------------------------------------------------------------------
1 | #ifndef R2_SCORE_H
2 | #define R2_SCORE_H
3 |
4 | /**
5 |  * @brief Computes the R2 score (coefficient of determination).
6 | *
7 | * @param y_true Array of true values.
8 | * @param y_pred Array of predicted values.
9 | * @param size Number of elements in the arrays.
10 | * @return float The R2 score.
11 | */
12 | float r2_score(const float *y_true, const float *y_pred, int size);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/include/Metrics/recall.h:
--------------------------------------------------------------------------------
1 | #ifndef RECALL_H
2 | #define RECALL_H
3 |
4 | /**
5 | * @brief Computes the Recall metric.
6 | *
7 | * @param y Pointer to the ground truth labels.
8 | * @param yHat Pointer to the predicted labels.
9 | * @param n The number of elements in y and yHat.
10 | * @param threshold The threshold for binary classification.
11 | * @return The computed Recall, or an error code if inputs are invalid.
12 | */
13 | float recall(float *y, float *yHat, int n, float threshold);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/include/Metrics/reduce_mean.h:
--------------------------------------------------------------------------------
1 | #ifndef REDUCE_MEAN_H
2 | #define REDUCE_MEAN_H
3 |
4 | /**
5 | * @brief Computes the mean of an array of loss values.
6 | *
7 | * @param loss Pointer to the array of loss values.
8 | * @param size The number of elements in the loss array.
9 | * @return The computed mean, or an error code if inputs are invalid.
10 | */
11 | float reduce_mean(float *loss, int size);
12 |
13 | #endif
14 |
--------------------------------------------------------------------------------
/include/Metrics/root_mean_squared_error.h:
--------------------------------------------------------------------------------
1 | #ifndef ROOT_MEAN_SQUARED_ERROR_H
2 | #define ROOT_MEAN_SQUARED_ERROR_H
3 |
4 | /**
5 | * @brief Computes the Root Mean Squared Error (RMSE).
6 | *
7 | * @param y Pointer to the ground truth values.
8 | * @param yHat Pointer to the predicted values.
9 | * @param n The number of elements.
10 | * @return The computed RMSE, or an error code.
11 | */
12 | float root_mean_squared_error(float *y, float *yHat, int n);
13 |
14 | #endif
15 |
--------------------------------------------------------------------------------
/include/Metrics/specificity.h:
--------------------------------------------------------------------------------
1 | #ifndef SPECIFICITY_H
2 | #define SPECIFICITY_H
3 |
4 | /**
5 | * @brief Computes the Specificity metric.
6 | *
7 |  * Specificity measures the proportion of actual negatives that are correctly identified, i.e. TN / (TN + FP).
8 | *
9 | * @param y Pointer to the ground truth labels.
10 | * @param yHat Pointer to the predicted labels.
11 | * @param n The number of elements in y and yHat.
12 | * @param threshold The threshold for binary classification.
13 | * @return The computed Specificity, or an error code if inputs are invalid.
14 | */
15 | float specificity(float *y, float *yHat, int n, float threshold);
16 |
17 | #endif
18 |
--------------------------------------------------------------------------------
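Usage note: the threshold-based metrics above all share the same signature, so a single sketch covers the family (scores and threshold are illustrative):

#include <stdio.h>
#include "include/Metrics/accuracy.h"
#include "include/Metrics/precision.h"
#include "include/Metrics/recall.h"
#include "include/Metrics/f1_score.h"

int main(void)
{
    float y[4]    = {1.0f, 0.0f, 1.0f, 0.0f};   /* ground truth labels */
    float yHat[4] = {0.8f, 0.3f, 0.4f, 0.1f};   /* predicted scores */
    float t = 0.5f;                             /* classification threshold */

    printf("accuracy  = %f\n", accuracy(y, yHat, 4, t));
    printf("precision = %f\n", precision(y, yHat, 4, t));
    printf("recall    = %f\n", recall(y, yHat, 4, t));
    printf("f1        = %f\n", f1_score(y, yHat, 4, t));
    return 0;
}
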
/include/Optimizers/adam.h:
--------------------------------------------------------------------------------
1 | #ifndef ADAM_H
2 | #define ADAM_H
3 |
4 | /**
5 | * @brief Performs the Adam optimization algorithm.
6 | *
7 | * @param x The input feature value.
8 | * @param y The target value.
9 | * @param lr The learning rate.
10 | * @param w Pointer to the weight parameter.
11 | * @param b Pointer to the bias parameter.
12 | * @param v_w Pointer to the first moment vector for the weight.
13 | * @param v_b Pointer to the first moment vector for the bias.
14 | * @param s_w Pointer to the second moment vector for the weight.
15 | * @param s_b Pointer to the second moment vector for the bias.
16 | * @param beta1 The exponential decay rate for the first moment estimates.
17 | * @param beta2 The exponential decay rate for the second moment estimates.
18 | * @param epsilon A small constant to prevent division by zero.
19 | * @return The computed loss value, or an error code.
20 | */
21 | float adam(float x, float y, float lr, float *w, float *b, float *v_w, float *v_b, float *s_w, float *s_b, float beta1, float beta2, float epsilon);
22 |
23 | /**
24 | * @brief Update weights and biases using Adam optimizer.
25 | *
26 | * @param w Pointer to the weight.
27 | * @param b Pointer to the bias.
28 | * @param v_w Pointer to the weight momentum.
29 | * @param v_b Pointer to the bias momentum.
30 | * @param s_w Pointer to the weight second moment.
31 | * @param s_b Pointer to the bias second moment.
32 | * @param gradient Gradient value.
33 | * @param input Input value.
34 | * @param learning_rate Learning rate.
35 | * @param beta1 Momentum decay rate.
36 | * @param beta2 Second moment decay rate.
37 | * @param epsilon Small value to prevent division by zero.
38 | * @param epoch Current epoch (used for bias correction).
39 | */
40 | void update_adam(float *w, float *b, float *v_w, float *v_b, float *s_w, float *s_b, float gradient, float input, float learning_rate, float beta1, float beta2, float epsilon, int epoch);
41 |
42 | #endif
43 |
--------------------------------------------------------------------------------
/include/Optimizers/rmsprop.h:
--------------------------------------------------------------------------------
1 | #ifndef RMSPROP_H
2 | #define RMSPROP_H
3 |
4 | /**
5 | * @brief Performs the RMSProp optimization algorithm.
6 | *
7 | * @param x The input feature value.
8 | * @param y The target value.
9 | * @param lr The learning rate.
10 | * @param w Pointer to the weight parameter.
11 | * @param b Pointer to the bias parameter.
12 | * @param cache_w Pointer to the cache for the weight parameter.
13 | * @param cache_b Pointer to the cache for the bias parameter.
14 | * @param epsilon A small constant to prevent division by zero.
15 | * @param beta The decay rate for the moving average of squared gradients.
16 | * @return The computed loss value, or an error code.
17 | */
18 | float rms_prop(float x, float y, float lr, float *w, float *b, float *cache_w, float *cache_b, float epsilon, float beta);
19 |
20 | /**
21 | * @brief Update weights and biases using RMSProp optimizer.
22 | *
23 | * @param w Pointer to the weight.
24 | * @param b Pointer to the bias.
25 | * @param cache_w Pointer to the weight cache.
26 | * @param cache_b Pointer to the bias cache.
27 | * @param gradient Gradient value.
28 | * @param input Input value.
29 | * @param learning_rate Learning rate.
30 | * @param beta Decay rate for RMSProp.
31 | * @param epsilon Small value to prevent division by zero.
32 | */
33 | void update_rmsprop(float *w, float *b, float *cache_w, float *cache_b, float gradient, float input, float learning_rate, float beta, float epsilon);
34 |
35 | #endif
36 |
--------------------------------------------------------------------------------
/include/Optimizers/sgd.h:
--------------------------------------------------------------------------------
1 | #ifndef SGD_H
2 | #define SGD_H
3 |
4 | /**
5 | * @brief Performs the Stochastic Gradient Descent (SGD) optimization algorithm.
6 | *
7 | * @param x The input feature value.
8 | * @param y The target value.
9 | * @param lr The learning rate.
10 | * @param w Pointer to the weight parameter.
11 | * @param b Pointer to the bias parameter.
12 | * @return The computed loss value, or an error code.
13 | */
14 | float sgd(float x, float y, float lr, float *w, float *b);
15 |
16 | /**
17 | * @brief Update weights and biases using SGD optimizer.
18 | *
19 | * @param w Pointer to the weight.
20 | * @param b Pointer to the bias.
21 | * @param gradient Gradient value.
22 | * @param input Input value.
23 | * @param learning_rate Learning rate.
24 | */
25 | void update_sgd(float *w, float *b, float gradient, float input, float learning_rate);
26 |
27 | #endif
28 |
--------------------------------------------------------------------------------
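Usage note: a minimal sketch of the single-parameter SGD interface above, fitting y = 2x (learning rate and epoch count are illustrative; sgd is assumed to perform one update per call and return that sample's loss):

#include <stdio.h>
#include "include/Optimizers/sgd.h"

int main(void)
{
    float w = 0.0f, b = 0.0f;

    for (int epoch = 0; epoch < 100; epoch++)
    {
        sgd(1.0f, 2.0f, 0.1f, &w, &b);   /* sample (x=1, y=2) */
        sgd(2.0f, 4.0f, 0.1f, &w, &b);   /* sample (x=2, y=4) */
    }
    printf("w = %f, b = %f\n", w, b);    /* w should approach 2, b should approach 0 */
    return 0;
}
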
/include/Preprocessing/label_encoder.h:
--------------------------------------------------------------------------------
1 | #ifndef LABEL_ENCODER_H
2 | #define LABEL_ENCODER_H
3 | /* NOTE: an identical CharMap is also defined in one_hot_encoder.h; include only one of the two headers per translation unit. */
4 | typedef struct
5 | {
6 | char character;
7 | int encodedValue;
8 | } CharMap;
9 |
10 | /**
11 | * @brief Maps characters to unique integer labels.
12 | *
13 | * @param x The input character array.
14 | * @param size The size of the input array.
15 | * @param map A pointer to the character-to-integer mapping.
16 | * @param mapSize A pointer to store the size of the mapping.
17 | * @return A pointer to the encoded integer array.
18 | */
19 | int *label_encoder(char *x, int size, CharMap **map, int *mapSize);
20 |
21 | /**
22 | * @brief Decodes integer labels back into characters.
23 | *
24 | * @param x The encoded integer array.
25 | * @param size The size of the input array.
26 | * @param map The character-to-integer mapping.
27 | * @param mapSize The size of the mapping.
28 | * @return A pointer to the decoded character array.
29 | */
30 | char *label_decoder(int *x, int size, CharMap *map, int mapSize);
31 |
32 | /**
33 | * @brief Frees the memory allocated for label encoding and decoding.
34 | *
35 | * @param map The character-to-integer mapping.
36 | * @param encoded The encoded integer array.
37 | * @param decoded The decoded character array.
38 | */
39 | void free_label_memory(CharMap *map, int *encoded, char *decoded);
40 |
41 | #endif
42 |
--------------------------------------------------------------------------------
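Usage note: a round-trip sketch of the encoder above (input characters are illustrative):

#include <stdio.h>
#include "include/Preprocessing/label_encoder.h"

int main(void)
{
    char data[] = {'a', 'b', 'a', 'c'};
    CharMap *map = NULL;
    int mapSize = 0;

    int *encoded = label_encoder(data, 4, &map, &mapSize);
    char *decoded = label_decoder(encoded, 4, map, mapSize);
    printf("encoded[0] = %d, round-trip = %c\n", encoded[0], decoded[0]);

    free_label_memory(map, encoded, decoded);
    return 0;
}
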
/include/Preprocessing/min_max_scaler.h:
--------------------------------------------------------------------------------
1 | #ifndef MIN_MAX_SCALER_H
2 | #define MIN_MAX_SCALER_H
3 |
4 | /**
5 | * @brief Scales an array of floats to a range of [0, 1] using min-max scaling.
6 | *
7 | * @param x The input array of floats.
8 | * @param size The size of the input array.
9 | * @return A pointer to the scaled array, or NULL if an error occurs.
10 | */
11 | float *min_max_scaler(float *x, int size);
12 |
13 | #endif
14 |
--------------------------------------------------------------------------------
/include/Preprocessing/one_hot_encoder.h:
--------------------------------------------------------------------------------
1 | #ifndef ONE_HOT_ENCODER_H
2 | #define ONE_HOT_ENCODER_H
3 |
4 | /**
5 |  * @brief Maps characters to unique integer labels for one-hot encoding. Note: an identical CharMap is also defined in label_encoder.h; include only one of the two headers per translation unit.
6 | */
7 | typedef struct
8 | {
9 | char character;
10 | int encodedValue;
11 | } CharMap;
12 |
13 | /**
14 | * @brief Encodes a character array into a one-hot encoded integer array.
15 | *
16 | * @param x The input character array.
17 | * @param size The size of the input array.
18 | * @param map A pointer to the character-to-integer mapping.
19 | * @param mapSize A pointer to store the size of the mapping.
20 | * @return A pointer to the one-hot encoded array, or an error code.
21 | */
22 | int *one_hot_encoding(char *x, int size, CharMap **map, int *mapSize);
23 |
24 | /**
25 | * @brief Decodes a one-hot encoded integer array back into a character array.
26 | *
27 | * @param x The one-hot encoded integer array.
28 | * @param size The size of the input array.
29 | * @param map The character-to-integer mapping.
30 | * @param mapSize The size of the mapping.
31 | * @return A pointer to the decoded character array, or NULL if an error occurs.
32 | */
33 | char *one_hot_decoding(int *x, int size, CharMap *map, int mapSize);
34 |
35 | /**
36 | * @brief Frees the memory allocated for one-hot encoding and decoding.
37 | *
38 | * @param x The one-hot encoded integer array.
39 | * @param y The decoded character array.
40 | * @param map The character-to-integer mapping.
41 | */
42 | void free_one_hot_memory(int *x, char *y, CharMap *map);
43 |
44 | #endif
45 |
--------------------------------------------------------------------------------
/include/Preprocessing/standard_scaler.h:
--------------------------------------------------------------------------------
1 | #ifndef STANDARD_SCALER_H
2 | #define STANDARD_SCALER_H
3 |
4 | /**
5 | * @brief Scales an array of floats to have a mean of 0 and a standard deviation of 1.
6 | *
7 | * @param x The input array of floats.
8 | * @param size The size of the input array.
9 | * @return A pointer to the scaled array, or NULL if an error occurs.
10 | */
11 | float *standard_scaler(float *x, int size);
12 |
13 | #endif
14 |
--------------------------------------------------------------------------------
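Usage note: a minimal sketch of both scalers above (values are illustrative; this assumes the returned arrays are heap-allocated and owned by the caller):

#include <stdio.h>
#include <stdlib.h>
#include "include/Preprocessing/min_max_scaler.h"
#include "include/Preprocessing/standard_scaler.h"

int main(void)
{
    float x[4] = {1.0f, 2.0f, 3.0f, 4.0f};

    float *mm = min_max_scaler(x, 4);    /* maps to [0, 1]: {0, 1/3, 2/3, 1} */
    float *ss = standard_scaler(x, 4);   /* zero mean, unit standard deviation */
    if (!mm || !ss)
        return 1;

    printf("min-max[0] = %f, standard[0] = %f\n", mm[0], ss[0]);
    free(mm);
    free(ss);
    return 0;
}
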
/main.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "include/Core/training.h"
4 | #include "include/Core/dataset.h"
5 | #include "include/Core/logging.h"
6 |
7 | int main()
8 | {
9 | set_log_level(LOG_LEVEL_INFO);
10 |
11 | NeuralNetwork *network = create_neural_network(2);
12 | build_network(network, OPTIMIZER_ADAM, 0.1f, LOSS_MSE, 0.0f, 0.0f);
13 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2, 4, 0.0f, 0, 0);
14 | model_add(network, LAYER_DENSE, ACTIVATION_TANH, 4, 4, 0.0f, 0, 0);
15 | model_add(network, LAYER_DENSE, ACTIVATION_SIGMOID, 4, 1, 0.0f, 0, 0);
16 |
17 | float X_data[4][2] = {
18 | {0.0f, 0.0f},
19 | {0.0f, 1.0f},
20 | {1.0f, 0.0f},
21 | {1.0f, 1.0f}};
22 |
23 | float y_data[4][1] = {
24 | {0.0f},
25 | {1.0f},
26 | {1.0f},
27 | {1.0f}};
28 |
29 | Dataset *dataset = dataset_create();
30 | dataset_load_arrays(dataset, (float *)X_data, (float *)y_data, 4, 2, 1);
31 |
32 | summary(network);
33 |
34 | train_network(network, dataset, 30);
35 | test_network(network, dataset->X, dataset->y, dataset->num_samples, NULL);
36 |
37 | dataset_free(dataset);
38 | free_neural_network(network);
39 |
40 | return 0;
41 | }
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Documentation
2 | site_description: Lightweight Machine Learning Library in C
3 | repo_url: https://github.com/jaywyawhare/C-ML
4 | site_author: Arrry
5 |
6 | theme:
7 | name: material
8 | logo: light-mode.svg
9 | favicon: light-mode.svg
10 | palette:
11 | - media: "(prefers-color-scheme)"
12 | toggle:
13 | icon: material/link
14 | name: Switch to light mode
15 | - media: "(prefers-color-scheme: light)"
16 | scheme: default
17 | primary: indigo
18 | accent: indigo
19 | toggle:
20 | icon: material/toggle-switch
21 | name: Switch to dark mode
22 | - media: "(prefers-color-scheme: dark)"
23 | scheme: slate
24 | primary: black
25 | accent: indigo
26 | toggle:
27 | icon: material/toggle-switch-off
28 | name: Switch to system preference
29 | font:
30 | text: "Roboto"
31 | code: "Source Code Pro"
32 | icon:
33 | logo: logo
34 |
35 | nav:
36 | - Home: index.md
37 | - Features: features.md
38 | - Installation: installation.md
39 | - Usage: usage.md
40 | - Modules:
41 | - Core: modules/core.md
42 | - Layers: modules/layers.md
43 | - Activations: modules/activations.md
44 | - Loss Functions: modules/loss_functions.md
45 | - Optimizers: modules/optimizers.md
46 | - Preprocessing: modules/preprocessing.md
47 | - Regularizers: modules/regularizers.md
48 | - Metrics: modules/metrics.md
49 | - Testing: testing.md
50 | - Development Guidelines: development_guidelines.md
51 | - Design Guide: DesignGuide.md
52 | - Contributing: contributing.md
53 | - License: license.md
54 |
55 | extra:
56 | social:
57 | - icon: fontawesome/brands/github
58 | link: https://github.com/jaywyawhare/C-ML
59 |
60 | markdown_extensions:
61 | - toc
62 | - tables
63 | - admonition
64 | - pymdownx.highlight
65 | - pymdownx.superfences
66 | - pymdownx.inlinehilite
67 | - pymdownx.emoji
68 | - pymdownx.tasklist
69 | - pymdownx.details
70 | - pymdownx.mark
71 | - pymdownx.tilde
72 |
--------------------------------------------------------------------------------
/src/Activations/elu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Activations/elu.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 | #define EPSILON 1e-6f
9 |
10 | /**
11 | * @brief Applies the Exponential Linear Unit (ELU) activation function.
12 | *
13 | * The ELU activation function is defined as:
14 | * - f(x) = x, if x >= 0
15 | * - f(x) = alpha * (exp(x) - 1), if x < 0
16 | *
17 | * @param x The input value.
18 | * @param alpha The scaling factor for negative values.
19 | * @return The result of the ELU activation function, or an error code.
20 | */
21 | float elu(float x, float alpha)
22 | {
23 |     if (isnan(x) || isnan(alpha) || isinf(x) || isinf(alpha))
24 | {
25 | LOG_ERROR("Invalid input (NaN or Inf)");
26 | return CM_INVALID_INPUT_ERROR;
27 | }
28 |
29 | float result = x >= 0 ? x : alpha * (expf(x) - 1);
30 | LOG_DEBUG("Input: x=%f, alpha=%f, Output: %f", x, alpha, result);
31 | return result;
32 | }
33 |
34 | /**
35 | * @brief Computes the derivative of the ELU activation function.
36 | *
37 | * The derivative of ELU is:
38 | * - f'(x) = 1, if x >= 0
39 | * - f'(x) = f(x) + alpha, if x < 0
40 | *
41 | * @param x The input value.
42 | * @param alpha The scaling factor for negative values.
43 | * @return The derivative of the ELU function.
44 | */
45 | float elu_derivative(float x, float alpha)
46 | {
47 | if (isnan(x) || isnan(alpha) || isinf(x) || isinf(alpha))
48 | {
49 | LOG_ERROR("Invalid input (NaN or Inf)");
50 | return CM_INVALID_INPUT_ERROR;
51 | }
52 |
53 | if (x >= 0)
54 | {
55 | return 1.0f;
56 | }
57 | else
58 | {
59 | return alpha * expf(x);
60 | }
61 | }
--------------------------------------------------------------------------------
/src/Activations/gelu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Activations/gelu.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 |
9 |
10 | /**
11 | * @brief Applies the Gaussian Error Linear Unit (GELU) activation function.
12 | *
13 | * The GELU activation function is defined as:
14 | * - f(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
15 | *
16 | * @param x The input value.
17 | * @return The result of the GELU activation function.
18 | */
19 | float gelu(float x)
20 | {
21 |     if (isnan(x) || isinf(x))
22 | {
23 | LOG_ERROR("Invalid input (NaN or Inf)");
24 | return CM_INVALID_INPUT_ERROR;
25 | }
26 |
27 | const float sqrt_2_over_pi = 0.7978845608f;
28 | float result = 0.5f * x * (1.0f + tanhf(sqrt_2_over_pi * (x + 0.044715f * x * x * x)));
29 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
30 | return result;
31 | }
32 |
33 | /**
34 | * @brief Computes the derivative of the GELU activation function.
35 | *
36 | * The derivative of GELU is approximated as:
37 | * - f'(x) = 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
38 | * + 0.5 * x * (1 - tanh^2(sqrt(2/pi) * (x + 0.044715 * x^3)))
39 | * * sqrt(2/pi) * (1 + 3 * 0.044715 * x^2)
40 | *
41 | * @param x The input value.
42 | * @return The derivative of the GELU function.
43 | */
44 | float gelu_derivative(float x)
45 | {
46 | if (isnan(x) || isinf(x))
47 | {
48 | LOG_ERROR("Invalid input (NaN or Inf)");
49 | return CM_INVALID_INPUT_ERROR;
50 | }
51 |
52 | const float sqrt_2_over_pi = 0.7978845608f;
53 | float tanh_arg = sqrt_2_over_pi * (x + 0.044715f * x * x * x);
54 | float tanh_val = tanhf(tanh_arg);
55 | float sech2 = 1.0f - tanh_val * tanh_val;
56 |
57 | return 0.5f * (1.0f + tanh_val) + 0.5f * x * sech2 * sqrt_2_over_pi * (1.0f + 3.0f * 0.044715f * x * x);
58 | }
--------------------------------------------------------------------------------
/src/Activations/leaky_relu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Activations/leaky_relu.h"
6 | #include "../../include/Core/logging.h"
7 |
8 | #define LEAKY_RELU_ALPHA 0.01f
9 |
10 |
11 | /**
12 | * @brief Applies the Leaky Rectified Linear Unit (Leaky ReLU) activation function.
13 | *
14 | * The Leaky ReLU activation function is defined as:
15 | * - f(x) = x, if x > 0
16 | * - f(x) = 0.01 * x, if x <= 0
17 | *
18 | * @param x The input value.
19 | * @return The result of the Leaky ReLU activation function.
20 | */
21 | float leaky_relu(float x)
22 | {
23 |     if (isnan(x) || isinf(x))
24 | {
25 | LOG_ERROR("Invalid input (NaN or Inf)");
26 | return CM_INVALID_INPUT_ERROR;
27 | }
28 |
29 | float result = x > 0 ? x : LEAKY_RELU_ALPHA * x;
30 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
31 | return result;
32 | }
33 |
34 | /**
35 | * @brief Computes the derivative of the Leaky ReLU activation function.
36 | *
37 | * The derivative of Leaky ReLU is:
38 | * - f'(x) = 1, if x > 0
39 | * - f'(x) = alpha, if x <= 0
40 | *
41 | * @param x The input value.
42 | * @return The derivative of the Leaky ReLU function.
43 | */
44 | float leaky_relu_derivative(float x)
45 | {
46 | if (isnan(x) || isinf(x))
47 | {
48 | LOG_ERROR("Invalid input (NaN or Inf)");
49 | return CM_INVALID_INPUT_ERROR;
50 | }
51 |
52 | return x > 0 ? 1.0f : LEAKY_RELU_ALPHA;
53 | }
--------------------------------------------------------------------------------
/src/Activations/linear.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Activations/linear.h"
6 | #include "../../include/Core/logging.h"
7 |
8 |
9 |
10 | /**
11 | * @brief Applies the Linear (identity) activation function.
12 | *
13 | * The Linear activation function is defined as:
14 | * - f(x) = x
15 | *
16 | * @param x The input value.
17 | * @return The result of the Linear activation function.
18 | */
19 | float linear(float x)
20 | {
21 |     if (isnan(x) || isinf(x))
22 | {
23 | LOG_ERROR("Invalid input (NaN or Inf)");
24 | return CM_INVALID_INPUT_ERROR;
25 | }
26 |
27 | float result = x;
28 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
29 | return result;
30 | }
31 |
32 | /**
33 | * @brief Computes the derivative of the Linear activation function.
34 | *
35 | * The derivative of Linear is:
36 | * - f'(x) = 1
37 | *
38 | * @param x The input value.
39 | * @return The derivative of the Linear function.
40 | */
41 | float linear_derivative(float x)
42 | {
43 | if (isnan(x) || isinf(x))
44 | {
45 | LOG_ERROR("Invalid input (NaN or Inf)");
46 | return CM_INVALID_INPUT_ERROR;
47 | }
48 |
49 | return 1.0f;
50 | }
--------------------------------------------------------------------------------
/src/Activations/relu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Activations/relu.h"
6 | #include "../../include/Core/logging.h"
7 |
8 |
9 |
10 | /**
11 | * @brief Applies the Rectified Linear Unit (ReLU) activation function.
12 | *
13 | * The ReLU activation function is defined as:
14 | * - f(x) = x, if x > 0
15 | * - f(x) = 0, if x <= 0
16 | *
17 | * @param x The input value.
18 | * @return The result of the ReLU activation function.
19 | */
20 | float relu(float x)
21 | {
22 |     if (isnan(x) || isinf(x))
23 | {
24 | LOG_ERROR("Invalid input (NaN or Inf)");
25 | return CM_INVALID_INPUT_ERROR;
26 | }
27 |
28 | float result = x > 0 ? x : 0;
29 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
30 | return result;
31 | }
32 |
33 | /**
34 | * @brief Computes the derivative of the ReLU activation function.
35 | *
36 | * The derivative of ReLU is:
37 | * - f'(x) = 1, if x > 0
38 | * - f'(x) = 0, if x <= 0
39 | *
40 | * @param x The input value.
41 | * @return The derivative of the ReLU function.
42 | */
43 | float relu_derivative(float x)
44 | {
45 | if (isnan(x) || isinf(x))
46 | {
47 | LOG_ERROR("Invalid input (NaN or Inf)");
48 | return CM_INVALID_INPUT_ERROR;
49 | }
50 |
51 | return x > 0 ? 1.0f : 0.0f;
52 | }
53 |
--------------------------------------------------------------------------------
/src/Activations/sigmoid.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Activations/sigmoid.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 |
9 |
10 | /**
11 | * @brief Applies the sigmoid activation function.
12 | *
13 | * The sigmoid activation function is defined as:
14 | * - f(x) = 1 / (1 + exp(-x))
15 | *
16 | * @param x The input value.
17 | * @return The result of the sigmoid activation function.
18 | */
19 |
20 | float sigmoid(float x)
21 | {
22 |     if (isnan(x) || isinf(x))
23 | {
24 | LOG_ERROR("Invalid input (NaN or Inf)");
25 | return CM_INVALID_INPUT_ERROR;
26 | }
27 |
28 | float result;
29 | if (x >= 0)
30 | {
31 | float exp_neg_x = expf(-x);
32 | result = 1 / (1 + exp_neg_x);
33 | }
34 | else
35 | {
36 | float exp_pos_x = expf(x);
37 | result = exp_pos_x / (1 + exp_pos_x);
38 | }
39 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
40 | return result;
41 | }
42 |
43 | /**
44 | * @brief Computes the derivative of the sigmoid activation function.
45 | *
46 | * The derivative of sigmoid is:
47 | * - f'(x) = f(x) * (1 - f(x))
48 | *
49 | * @param sigmoid_output The output of the sigmoid function (f(x)).
50 | * @return The derivative of the sigmoid function.
51 | */
52 | float sigmoid_derivative(float sigmoid_output)
53 | {
54 | if (isnan(sigmoid_output) || isinf(sigmoid_output) || sigmoid_output < 0.0f || sigmoid_output > 1.0f)
55 | {
56 | LOG_ERROR("Invalid sigmoid output (NaN, Inf, or out of range)");
57 | return CM_INVALID_INPUT_ERROR;
58 | }
59 |
60 | return sigmoid_output * (1.0f - sigmoid_output);
61 | }
--------------------------------------------------------------------------------
/src/Activations/tanh.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Activations/tanh.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 | #define TANH_THRESHOLD 20.0f
9 |
10 |
11 | /**
12 | * @brief Applies the hyperbolic tangent (tanh) activation function.
13 | *
14 | * The tanh activation function is defined as:
15 | * - f(x) = (e^x - e^(-x)) / (e^x + e^(-x))
16 | *
17 | * For numerical stability:
18 | * - Returns 1.0 if x > 20.0
19 | * - Returns -1.0 if x < -20.0
20 | *
21 | * @param x The input value.
22 | * @return The result of the tanh activation function. Clipped to -1.0 or 1.0
23 | * for extreme inputs, ensuring numerical stability.
24 | */
25 | float tanH(float x)
26 | {
27 |     if (isnan(x) || isinf(x))
28 | {
29 | LOG_ERROR("Invalid input (NaN or Inf)");
30 | return CM_INVALID_INPUT_ERROR;
31 | }
32 | if (x > TANH_THRESHOLD)
33 | {
34 | LOG_DEBUG("Input: x=%f, Output: 1.0 (clipped)", x);
35 | return 1.0f;
36 | }
37 | else if (x < -TANH_THRESHOLD)
38 | {
39 | LOG_DEBUG("Input: x=%f, Output: -1.0 (clipped)", x);
40 | return -1.0f;
41 | }
42 | else
43 | {
44 | float e_pos = expf(x);
45 | float e_neg = expf(-x);
46 | float result = (e_pos - e_neg) / (e_pos + e_neg);
47 | LOG_DEBUG("Input: x=%f, Output: %f", x, result);
48 | return result;
49 | }
50 | }
51 |
52 | /**
53 | * @brief Computes the derivative of the tanh activation function.
54 | *
55 | * The derivative of tanh is:
56 | * - f'(x) = 1 - f(x)^2
57 | *
58 | * @param tanh_output The output of the tanh function (f(x)).
59 | * @return The derivative of the tanh function.
60 | */
61 | float tanh_derivative(float tanh_output)
62 | {
63 | if (isnan(tanh_output) || isinf(tanh_output) || tanh_output < -1.0f || tanh_output > 1.0f)
64 | {
65 | LOG_ERROR("Invalid tanh output (NaN, Inf, or out of range)");
66 | return CM_INVALID_INPUT_ERROR;
67 | }
68 |
69 | return 1.0f - tanh_output * tanh_output;
70 | }
71 |
--------------------------------------------------------------------------------
/src/Core/dataset.c:
--------------------------------------------------------------------------------
1 | #include "../../include/Core/dataset.h"
2 | #include "../../include/Core/memory_management.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | #include <stdio.h>
7 | #include <stdlib.h>
8 | #include <string.h>
9 |
10 | /**
11 | * @brief Create a new dataset.
12 | *
13 | * Allocates memory for a new dataset structure and initializes its fields.
14 | *
15 | * @return Dataset* Pointer to the newly created dataset, or NULL on failure.
16 | */
17 | Dataset *dataset_create(void)
18 | {
19 | Dataset *dataset = (Dataset *)cm_safe_malloc(sizeof(Dataset), __FILE__, __LINE__);
20 | if (dataset)
21 | {
22 | dataset->X = NULL;
23 | dataset->y = NULL;
24 | dataset->num_samples = 0;
25 | dataset->input_dim = 0;
26 | dataset->output_dim = 0;
27 | }
28 | return dataset;
29 | }
30 |
31 | /**
32 | * @brief Load dataset from arrays.
33 | *
34 | * Copies input and output data arrays into the dataset structure.
35 | *
36 | * @param dataset Pointer to the dataset structure.
37 | * @param X_array Pointer to the input data array.
38 | * @param y_array Pointer to the output data array.
39 | * @param num_samples Number of samples.
40 | * @param input_dim Dimension of input features.
41 | * @param output_dim Dimension of output features.
42 | * @return CM_Error Error code indicating success or failure.
43 | */
44 | CM_Error dataset_load_arrays(Dataset *dataset, float *X_array, float *y_array, int num_samples, int input_dim, int output_dim)
45 | {
46 | if (!dataset || !X_array || !y_array)
47 | {
48 | LOG_ERROR("Null pointer argument.");
49 | return CM_NULL_POINTER_ERROR;
50 | }
51 |
52 | dataset->num_samples = num_samples;
53 | dataset->input_dim = input_dim;
54 | dataset->output_dim = output_dim;
55 |
56 | dataset->X = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
57 | dataset->y = (float **)cm_safe_malloc(num_samples * sizeof(float *), __FILE__, __LINE__);
58 |
59 | if (!dataset->X || !dataset->y)
60 | {
61 | LOG_ERROR("Memory allocation failed for X or y.");
62 | return CM_MEMORY_ALLOCATION_ERROR;
63 | }
64 |
65 | for (int i = 0; i < num_samples; i++)
66 | {
67 | dataset->X[i] = (float *)cm_safe_malloc(input_dim * sizeof(float), __FILE__, __LINE__);
68 | dataset->y[i] = (float *)cm_safe_malloc(output_dim * sizeof(float), __FILE__, __LINE__);
69 |
70 | if (!dataset->X[i] || !dataset->y[i])
71 | {
72 | LOG_ERROR("Memory allocation failed at index %d.", i);
73 | return CM_MEMORY_ALLOCATION_ERROR;
74 | }
75 |
76 | memcpy(dataset->X[i], X_array + i * input_dim, input_dim * sizeof(float));
77 | memcpy(dataset->y[i], y_array + i * output_dim, output_dim * sizeof(float));
78 | }
79 | return CM_SUCCESS;
80 | }
81 |
82 | /**
83 | * @brief Free memory allocated for the dataset.
84 | *
85 | * Releases all memory associated with the dataset, including input and output arrays.
86 | *
87 | * @param dataset Pointer to the dataset to free.
88 | */
89 | void dataset_free(Dataset *dataset)
90 | {
91 | if (!dataset)
92 | return;
93 |
94 | if (dataset->X)
95 | {
96 | for (int i = 0; i < dataset->num_samples; i++)
97 | {
98 | cm_safe_free((void **)&dataset->X[i]);
99 | }
100 | cm_safe_free((void **)&dataset->X);
101 | }
102 |
103 | if (dataset->y)
104 | {
105 | for (int i = 0; i < dataset->num_samples; i++)
106 | {
107 | cm_safe_free((void **)&dataset->y[i]);
108 | }
109 | cm_safe_free((void **)&dataset->y);
110 | }
111 |
112 | cm_safe_free((void **)&dataset);
113 | }
114 |
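115 | /*
116 |  * Editor's illustrative sketch (not part of the original file): a minimal
117 |  * round trip through the dataset API for a 2-sample, 2-feature, 1-output
118 |  * problem. The CM_USAGE_SKETCH guard is hypothetical; error handling is
119 |  * abbreviated for brevity.
120 |  */
121 | #ifdef CM_USAGE_SKETCH
122 | static void dataset_usage_example(void)
123 | {
124 |     float X[] = {0.0f, 1.0f,   /* sample 0 */
125 |                  1.0f, 0.0f};  /* sample 1 */
126 |     float y[] = {1.0f, 1.0f};
127 |     Dataset *ds = dataset_create();
128 |     if (ds && dataset_load_arrays(ds, X, y, 2, 2, 1) == CM_SUCCESS)
129 |     {
130 |         /* ds->X[i] and ds->y[i] now hold deep copies of the row data. */
131 |     }
132 |     dataset_free(ds);
133 | }
134 | #endif /* CM_USAGE_SKETCH */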
--------------------------------------------------------------------------------
/src/Core/logging.c:
--------------------------------------------------------------------------------
1 | #include "../../include/Core/logging.h"
2 | #include <stdio.h>
3 | #include <stdarg.h>
4 | #include <time.h>
5 | // Default to INFO level
6 | LogLevel g_log_level = LOG_LEVEL_INFO;
7 |
8 | /**
9 | * @brief Set the global log level
10 | *
11 | * @param level The log level to set
12 | */
13 | void set_log_level(LogLevel level) {
14 | g_log_level = level;
15 | }
16 |
17 | /**
18 | * @brief Core logging function
19 | *
20 | * @param level Log level
21 | * @param file Source file name
22 | * @param line Line number
23 | * @param func Function name
24 | * @param format Format string
25 | * @param ... Variable arguments
26 | */
27 | void log_message(LogLevel level, const char *file, int line, const char *func, const char *format, ...) {
28 | // Skip if below current log level
29 | if (level < g_log_level) {
30 | return;
31 | }
32 |
33 | // Get current time
34 | time_t now = time(NULL);
35 | struct tm *tm_info = localtime(&now);
36 | char time_str[20];
37 | strftime(time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S", tm_info);
38 |
39 | // Level strings
40 | const char *level_str[] = {
41 | "DEBUG",
42 | "INFO",
43 | "WARNING",
44 | "ERROR"
45 | };
46 |
47 | // Print log header
48 | fprintf(stderr, "%s [%s] %s:%d %s(): ",
49 | time_str, level_str[level], file, line, func);
50 |
51 | // Print the actual message with variable arguments
52 | va_list args;
53 | va_start(args, format);
54 | vfprintf(stderr, format, args);
55 | va_end(args);
56 |
57 | // Add newline
58 | fprintf(stderr, "\n");
59 | }
60 |
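61 | /*
62 |  * Editor's illustrative sketch (not part of the original file): raising the
63 |  * global level filters out lower-severity messages. LOG_DEBUG/LOG_ERROR are
64 |  * the wrapper macros used throughout src/, assumed to forward __FILE__,
65 |  * __LINE__, and __func__ to log_message(); LOG_LEVEL_ERROR is assumed to
66 |  * exist alongside LOG_LEVEL_INFO.
67 |  *
68 |  *     set_log_level(LOG_LEVEL_ERROR);
69 |  *     LOG_DEBUG("hidden: below the current level");
70 |  *     LOG_ERROR("shown: value=%d", 42);
71 |  */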
--------------------------------------------------------------------------------
/src/Core/memory_management.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "../../include/Core/memory_management.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 |
8 |
9 | /**
10 | * @brief Allocates memory safely and logs the file and line number in case of failure.
11 | *
12 |  * This function attempts to allocate the requested memory size. If the allocation fails,
13 |  * it logs an error message with the file name and line number and returns NULL.
14 |  *
15 |  * @param size The size of memory to allocate in bytes.
16 |  * @param file The name of the file where the allocation is requested.
17 |  * @param line The line number in the file where the allocation is requested.
18 |  * @return A pointer to the allocated memory, or NULL on failure.
19 | */
20 | void *cm_safe_malloc(size_t size, const char *file, int line)
21 | {
22 | void *ptr = malloc(size);
23 | if (ptr == NULL)
24 | {
25 | LOG_ERROR("Memory allocation failed for %zu bytes in %s at line %d.", size, file, line);
26 |         return NULL;
27 | }
28 |
29 | LOG_DEBUG("Allocated %zu bytes at %p in %s at line %d.", size, ptr, file, line);
30 | return ptr;
31 | }
32 |
33 | /**
34 | * @brief Frees allocated memory safely and sets the pointer to NULL.
35 | *
36 | * This function frees the memory pointed to by the given pointer. If the pointer is NULL,
37 | * the function does nothing. It also logs the memory address being freed if debugging is enabled.
38 | * After freeing the memory, it sets the pointer to NULL to avoid double-free issues.
39 | *
40 | * @param ptr A pointer to the memory to be freed.
41 | */
42 | void cm_safe_free(void **ptr)
43 | {
44 | if (ptr != NULL && *ptr != NULL)
45 | {
46 | LOG_DEBUG("Freeing memory at %p", *ptr);
47 | free(*ptr);
48 | *ptr = NULL;
49 | }
50 | }
51 |
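52 | /*
53 |  * Editor's illustrative sketch (not part of the original file): the
54 |  * double-pointer free both releases the block and NULLs the caller's
55 |  * pointer, so a repeated call is a harmless no-op.
56 |  *
57 |  *     float *buf = cm_safe_malloc(4 * sizeof(float), __FILE__, __LINE__);
58 |  *     if (buf) { buf[0] = 1.0f; }
59 |  *     cm_safe_free((void **)&buf);   // buf == NULL afterwards
60 |  *     cm_safe_free((void **)&buf);   // safe: does nothing
61 |  */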
--------------------------------------------------------------------------------
/src/Layers/dropout.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <time.h>
4 | #include "../../include/Layers/dropout.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 | #include "../../include/Core/memory_management.h"
8 |
9 |
10 |
11 | static int is_seed_initialized = 0;
12 |
13 | /**
14 | * @brief Initializes a Dropout Layer with a given dropout rate.
15 | *
16 | * @param layer Pointer to the DropoutLayer structure.
17 | * @param dropout_rate Dropout rate (0.0 to 1.0).
18 | * @return int Error code (0 for success, non-zero for error).
19 | */
20 | int initialize_dropout(DropoutLayer *layer, float dropout_rate)
21 | {
22 | if (layer == NULL)
23 | {
24 | LOG_ERROR("Layer is NULL.");
25 | return CM_NULL_POINTER_ERROR;
26 | }
27 |
28 | if (dropout_rate < 0.0f || dropout_rate > 1.0f)
29 | {
30 | LOG_ERROR("Invalid dropout rate. Must be between 0.0 and 1.0.");
31 | return CM_INVALID_PARAMETER_ERROR;
32 | }
33 |
34 | layer->dropout_rate = dropout_rate;
35 | if (!is_seed_initialized)
36 | {
37 | srand((unsigned int)time(NULL));
38 | is_seed_initialized = 1;
39 | }
40 | return CM_SUCCESS;
41 | }
42 |
43 | /**
44 | * @brief Performs the forward pass for the Dropout Layer.
45 | *
46 | * @param layer Pointer to the DropoutLayer structure.
47 | * @param input Input data array.
48 | * @param output Output data array.
49 | * @param size Size of the input/output arrays.
50 | * @return int Error code (0 for success, non-zero for error).
51 | */
52 | int forward_dropout(DropoutLayer *layer, float *input, float *output, int size)
53 | {
54 | if (layer == NULL || input == NULL || output == NULL)
55 | {
56 | LOG_ERROR("Layer, input, or output is NULL.");
57 | return CM_NULL_POINTER_ERROR;
58 | }
59 |
60 | for (int i = 0; i < size; i++)
61 | {
62 | if ((float)rand() / RAND_MAX < layer->dropout_rate)
63 | {
64 | output[i] = 0;
65 | }
66 | else
67 | {
68 | output[i] = input[i] / (1 - layer->dropout_rate);
69 | }
70 | LOG_DEBUG("Output[%d]: %f", i, output[i]);
71 | }
72 | return CM_SUCCESS;
73 | }
74 |
75 | /**
76 | * @brief Performs the backward pass for the Dropout Layer.
77 | *
78 | * @param layer Pointer to the DropoutLayer structure.
79 | * @param input Input data array.
80 | * @param output Output data array.
81 | * @param d_output Gradient of the output.
82 | * @param d_input Gradient of the input.
83 | * @param size Size of the input/output arrays.
84 | * @return int Error code (0 for success, non-zero for error).
85 | */
86 | int backward_dropout(DropoutLayer *layer, float *input, float *output, float *d_output, float *d_input, int size)
87 | {
88 | if (layer == NULL || input == NULL || output == NULL || d_output == NULL || d_input == NULL)
89 | {
90 | LOG_ERROR("One or more arguments are NULL.");
91 | return CM_NULL_POINTER_ERROR;
92 | }
93 |
94 | for (int i = 0; i < size; i++)
95 | {
96 | if (output[i] == 0.0f)
97 | {
98 | d_input[i] = 0.0f;
99 | }
100 | else
101 | {
102 | d_input[i] = d_output[i] / (1 - layer->dropout_rate);
103 | }
104 | }
105 | return CM_SUCCESS;
106 | }
107 |
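108 | /*
109 |  * Editor's illustrative sketch (not part of the original file): this is
110 |  * inverted dropout -- surviving units are scaled by 1/(1 - rate) at training
111 |  * time so the expected activation is unchanged and no rescaling is needed at
112 |  * inference (a rate strictly below 1.0 is assumed, keeping the scale finite).
113 |  *
114 |  *     DropoutLayer layer;
115 |  *     float in[4] = {1.0f, 2.0f, 3.0f, 4.0f}, out[4];
116 |  *     if (initialize_dropout(&layer, 0.5f) == CM_SUCCESS)
117 |  *         forward_dropout(&layer, in, out, 4);  // kept units are doubled
118 |  */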
--------------------------------------------------------------------------------
/src/Layers/flatten.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include "../../include/Layers/flatten.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/memory_management.h"
7 | #include "../../include/Core/logging.h"
8 |
9 |
10 |
11 | int initialize_flatten(FlattenLayer *layer, int input_size)
12 | {
13 | if (layer == NULL)
14 | {
15 | LOG_ERROR("Layer is NULL.");
16 | return CM_NULL_LAYER_ERROR;
17 | }
18 |
19 | if (input_size <= 0)
20 | {
21 | LOG_ERROR("Invalid input size (%d).", input_size);
22 | return CM_INVALID_LAYER_DIMENSIONS_ERROR;
23 | }
24 |
25 | layer->input_size = input_size;
26 | layer->output_size = input_size;
27 | return CM_SUCCESS;
28 | }
29 |
30 | int forward_flatten(FlattenLayer *layer, float *input, float *output)
31 | {
32 | if (layer == NULL || input == NULL || output == NULL)
33 | {
34 | LOG_ERROR("Layer, input, or output is NULL.");
35 | return CM_NULL_POINTER_ERROR;
36 | }
37 |
38 | if (layer->input_size <= 0 || layer->output_size <= 0)
39 | {
40 | LOG_ERROR("Invalid layer dimensions.");
41 | return CM_INVALID_LAYER_DIMENSIONS_ERROR;
42 | }
43 |
44 | for (int i = 0; i < layer->input_size; i++)
45 | {
46 | output[i] = input[i];
47 | LOG_DEBUG("Output[%d]: %f", i, output[i]);
48 | }
49 |
50 | return CM_SUCCESS;
51 | }
52 |
53 | int backward_flatten(FlattenLayer *layer, float *input, float *output, float *d_output, float *d_input)
54 | {
55 | if (layer == NULL || input == NULL || output == NULL || d_output == NULL || d_input == NULL)
56 | {
57 | LOG_ERROR("One or more arguments are NULL.");
58 | return CM_NULL_POINTER_ERROR;
59 | }
60 |
61 | if (layer->input_size <= 0 || layer->output_size <= 0)
62 | {
63 | LOG_ERROR("Invalid layer dimensions.");
64 | return CM_INVALID_LAYER_DIMENSIONS_ERROR;
65 | }
66 |
67 | for (int i = 0; i < layer->input_size; i++)
68 | {
69 | d_input[i] = d_output[i];
70 | }
71 |
72 | return CM_SUCCESS;
73 | }
74 |
75 | int free_flatten(FlattenLayer *layer)
76 | {
77 | if (layer == NULL)
78 | {
79 | LOG_ERROR("Layer is NULL.");
80 | return CM_NULL_POINTER_ERROR;
81 | }
82 |
83 | return CM_SUCCESS;
84 | }
85 |
--------------------------------------------------------------------------------
/src/Layers/maxpooling.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "../../include/Layers/maxpooling.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 |
8 |
9 | /**
10 | * @brief Initializes a MaxPooling Layer.
11 | *
12 | * @param layer Pointer to the MaxPoolingLayer structure.
13 | * @param kernel_size Size of the kernel.
14 | * @param stride Stride of the kernel.
15 | * @return int Error code.
16 | */
17 | int initialize_maxpooling(MaxPoolingLayer *layer, int kernel_size, int stride)
18 | {
19 | if (layer == NULL)
20 | {
21 | LOG_ERROR("Layer is NULL.");
22 | return CM_NULL_LAYER_ERROR;
23 | }
24 |
25 | if (kernel_size <= 0)
26 | {
27 | LOG_ERROR("Invalid kernel size.");
28 | return CM_INVALID_KERNEL_SIZE_ERROR;
29 | }
30 |
31 | if (stride <= 0)
32 | {
33 | LOG_ERROR("Invalid stride.");
34 | return CM_INVALID_STRIDE_ERROR;
35 | }
36 |
37 | layer->kernel_size = kernel_size;
38 | layer->stride = stride;
39 | return CM_SUCCESS;
40 | }
41 |
42 | /**
43 | * @brief Computes the output size for the MaxPooling Layer.
44 | *
45 | * @param input_size Size of the input data.
46 | * @param kernel_size Size of the kernel.
47 | * @param stride Stride of the kernel.
48 | * @return int Output size, or an error code on invalid input.
49 | */
50 | int compute_maxpooling_output_size(int input_size, int kernel_size, int stride)
51 | {
52 | if (input_size <= 0)
53 | {
54 | LOG_ERROR("Input size must be greater than 0.");
55 | return CM_INVALID_INPUT_ERROR;
56 | }
57 |
58 | if (kernel_size <= 0)
59 | {
60 | LOG_ERROR("Invalid kernel size.");
61 | return CM_INVALID_KERNEL_SIZE_ERROR;
62 | }
63 |
64 | if (stride <= 0)
65 | {
66 | LOG_ERROR("Invalid stride.");
67 | return CM_INVALID_STRIDE_ERROR;
68 | }
69 |
70 | if (input_size < kernel_size)
71 | {
72 | LOG_ERROR("Input size is smaller than kernel size.");
73 | return CM_INPUT_SIZE_SMALLER_THAN_KERNEL_ERROR;
74 | }
75 |
76 | return (input_size - kernel_size) / stride + 1;
77 | }
78 |
79 | /**
80 | * @brief Performs the forward pass for the MaxPooling Layer.
81 | *
82 | * @param layer Pointer to the MaxPoolingLayer structure.
83 | * @param input Input data array.
84 | * @param output Output data array.
85 | * @param input_size Size of the input data.
86 | * @return int Number of output elements, or an error code on failure.
87 | */
88 | int forward_maxpooling(MaxPoolingLayer *layer, const float *input, float *output, int input_size)
89 | {
90 | if (layer == NULL)
91 | {
92 | LOG_ERROR("Layer is NULL.");
93 | return CM_NULL_LAYER_ERROR;
94 | }
95 |
96 | if (input == NULL || output == NULL)
97 | {
98 | LOG_ERROR("Null input or output pointer.");
99 | return CM_NULL_POINTER_ERROR;
100 | }
101 |
102 | if (layer->kernel_size <= 0)
103 | {
104 | LOG_ERROR("Invalid kernel size.");
105 | return CM_INVALID_KERNEL_SIZE_ERROR;
106 | }
107 |
108 | if (layer->stride <= 0)
109 | {
110 | LOG_ERROR("Invalid stride.");
111 | return CM_INVALID_STRIDE_ERROR;
112 | }
113 |
114 | if (input_size < layer->kernel_size)
115 | {
116 | LOG_ERROR("Input size is smaller than kernel size.");
117 | return CM_INPUT_SIZE_SMALLER_THAN_KERNEL_ERROR;
118 | }
119 |
120 | int output_index = 0;
121 | for (int i = 0; i <= input_size - layer->kernel_size; i += layer->stride)
122 | {
123 | float max_value = input[i];
124 | for (int j = 1; j < layer->kernel_size; ++j)
125 | {
126 | if (input[i + j] > max_value)
127 | {
128 | max_value = input[i + j];
129 | }
130 | }
131 | output[output_index++] = max_value;
132 | LOG_DEBUG("Output[%d]: %f", output_index - 1, max_value);
133 | }
134 |
135 | return output_index;
136 | }
137 |
138 | /**
139 | * @brief Frees the memory allocated for the MaxPooling Layer.
140 | *
141 | * @param layer Pointer to the MaxPoolingLayer structure.
142 | * @return int CM_SUCCESS on success.
143 | */
144 | int free_maxpooling(MaxPoolingLayer *layer)
145 | {
146 | if (layer == NULL)
147 | {
148 | LOG_ERROR("Layer is NULL.");
149 | return CM_NULL_LAYER_ERROR;
150 | }
151 |
152 | if (layer->kernel_size <= 0 || layer->stride <= 0)
153 | {
154 |         LOG_ERROR("Layer has invalid dimensions.");
155 | }
156 |
157 | return CM_SUCCESS;
158 | }
159 |
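160 | /*
161 |  * Editor's illustrative sketch (not part of the original file): worked
162 |  * sizing example for the formula (input_size - kernel_size) / stride + 1.
163 |  * With input_size = 6, kernel_size = 2, stride = 2: (6 - 2) / 2 + 1 = 3.
164 |  *
165 |  *     MaxPoolingLayer layer;
166 |  *     float in[6] = {1, 3, 2, 5, 4, 0}, out[3];
167 |  *     initialize_maxpooling(&layer, 2, 2);
168 |  *     forward_maxpooling(&layer, in, out, 6);  // returns 3, out = {3, 5, 4}
169 |  */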
--------------------------------------------------------------------------------
/src/Layers/pooling.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include "../../include/Layers/pooling.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Initializes a Pooling Layer.
9 | *
10 | * @param layer Pointer to the PoolingLayer structure.
11 | * @param kernel_size Size of the kernel.
12 | * @param stride Stride of the kernel.
13 | * @return int Error code.
14 | */
15 | int initialize_pooling(PoolingLayer *layer, int kernel_size, int stride)
16 | {
17 | if (layer == NULL)
18 | {
19 | LOG_ERROR("Layer is NULL.");
20 | return CM_NULL_POINTER_ERROR;
21 | }
22 |
23 | if (kernel_size <= 0)
24 | {
25 | LOG_ERROR("Invalid kernel size.");
26 | return CM_INVALID_KERNEL_SIZE_ERROR;
27 | }
28 |
29 | if (stride <= 0)
30 | {
31 | LOG_ERROR("Invalid stride.");
32 | return CM_INVALID_STRIDE_ERROR;
33 | }
34 |
35 | layer->kernel_size = kernel_size;
36 | layer->stride = stride;
37 | return CM_SUCCESS;
38 | }
39 |
40 | /**
41 | * @brief Computes the output size for the Pooling Layer.
42 | *
43 | * @param input_size Size of the input data.
44 | * @param kernel_size Size of the kernel.
45 | * @param stride Stride of the kernel.
46 | * @return int Output size, or an error code on invalid input.
47 | */
48 | int compute_pooling_output_size(int input_size, int kernel_size, int stride)
49 | {
50 | if (input_size <= 0)
51 | {
52 | LOG_ERROR("Input size must be greater than 0.");
53 | return CM_INVALID_INPUT_ERROR;
54 | }
55 |
56 | if (kernel_size <= 0)
57 | {
58 | LOG_ERROR("Invalid kernel size.");
59 | return CM_INVALID_KERNEL_SIZE_ERROR;
60 | }
61 |
62 | if (stride <= 0)
63 | {
64 | LOG_ERROR("Invalid stride.");
65 | return CM_INVALID_STRIDE_ERROR;
66 | }
67 |
68 | if (input_size < kernel_size)
69 | {
70 | LOG_ERROR("Input size is smaller than kernel size.");
71 | return CM_INPUT_SIZE_SMALLER_THAN_KERNEL_ERROR;
72 | }
73 |
74 | return (input_size - kernel_size) / stride + 1;
75 | }
76 |
77 | /**
78 | * @brief Performs the forward pass for the Pooling Layer.
79 | *
80 | * @param layer Pointer to the PoolingLayer structure.
81 | * @param input Input data array.
82 | * @param output Output data array.
83 | * @param input_size Size of the input data.
84 | * @return int Number of output elements, or an error code on failure.
85 | */
86 | int forward_pooling(PoolingLayer *layer, const float *input, float *output, int input_size)
87 | {
88 | if (layer == NULL)
89 | {
90 | LOG_ERROR("Layer is NULL.");
91 | return CM_NULL_POINTER_ERROR;
92 | }
93 |
94 | if (input == NULL || output == NULL)
95 | {
96 | LOG_ERROR("Null input or output pointer.");
97 | return CM_NULL_POINTER_ERROR;
98 | }
99 |
100 | if (layer->kernel_size <= 0)
101 | {
102 | LOG_ERROR("Invalid kernel size.");
103 | return CM_INVALID_KERNEL_SIZE_ERROR;
104 | }
105 |
106 | if (layer->stride <= 0)
107 | {
108 | LOG_ERROR("Invalid stride.");
109 | return CM_INVALID_STRIDE_ERROR;
110 | }
111 |
112 | if (input_size < layer->kernel_size)
113 | {
114 | LOG_ERROR("Input size is smaller than kernel size.");
115 | return CM_INPUT_SIZE_SMALLER_THAN_KERNEL_ERROR;
116 | }
117 |
118 | float kernel_reciprocal = 1.0f / layer->kernel_size;
119 | int output_index = 0;
120 | for (int i = 0; i <= input_size - layer->kernel_size; i += layer->stride)
121 | {
122 | float sum = 0.0f;
123 | for (int j = 0; j < layer->kernel_size; ++j)
124 | {
125 | sum += input[i + j];
126 | }
127 | output[output_index++] = sum * kernel_reciprocal;
128 | LOG_DEBUG("Output[%d]: %f", output_index - 1, output[output_index - 1]);
129 | }
130 |
131 | return output_index;
132 | }
133 |
134 | /**
135 | * @brief Frees the memory allocated for the Pooling Layer.
136 | *
137 | * @param layer Pointer to the PoolingLayer structure.
138 | * @return int Error code.
139 | */
140 | int free_pooling(PoolingLayer *layer)
141 | {
142 | if (layer == NULL)
143 | {
144 | LOG_ERROR("Layer is NULL.");
145 | return CM_NULL_POINTER_ERROR;
146 | }
147 | return CM_SUCCESS;
148 | }
149 |
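150 | /*
151 |  * Editor's illustrative sketch (not part of the original file): the same
152 |  * sizing rule as maxpooling, but each window is averaged.
153 |  *
154 |  *     PoolingLayer layer;
155 |  *     float in[4] = {1.0f, 3.0f, 2.0f, 6.0f}, out[2];
156 |  *     initialize_pooling(&layer, 2, 2);
157 |  *     forward_pooling(&layer, in, out, 4);  // returns 2, out = {2.0f, 4.0f}
158 |  */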
--------------------------------------------------------------------------------
/src/Loss_Functions/binary_cross_entropy_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/binary_cross_entropy_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Binary Cross-Entropy Loss.
9 | *
10 | * The Binary Cross-Entropy Loss is defined as:
11 | * - loss = -1/n * Σ [y * log(yHat) + (1 - y) * log(1 - yHat)]
12 | *
13 | * @param yHat Pointer to the predicted probabilities.
14 | * @param y Pointer to the ground truth labels.
15 | * @param size The number of elements in y and yHat.
16 | * @return The computed loss, or an error code if inputs are invalid.
17 | */
18 | float binary_cross_entropy_loss(float *yHat, float *y, int size)
19 | {
20 | if (!yHat || !y || size <= 0)
21 | {
22 | LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 |
26 |     const float epsilon = 1e-15f;
27 |     float loss = 0.0f;
28 |
29 |     for (int i = 0; i < size; ++i)
30 |     {
31 |         float predicted = fmaxf(fminf(yHat[i], 1.0f - epsilon), epsilon);
32 |         loss += -(y[i] * logf(predicted) +
33 |                   (1.0f - y[i]) * logf(1.0f - predicted));
34 |     }
35 |
36 | loss /= size;
37 | return loss;
38 | }
39 |
40 | /**
41 | * @brief Computes the derivative of the Binary Cross-Entropy Loss.
42 | *
43 | * The derivative is defined as:
44 | * - d(loss)/dyHat = -(y/yHat) + ((1 - y)/(1 - yHat))
45 | *
46 | * @param predicted Predicted probability.
47 | * @param actual Ground truth label.
48 | * @return The derivative value.
49 | */
50 | float binary_cross_entropy_loss_derivative(float predicted, float actual)
51 | {
52 | if (predicted <= 0 || predicted >= 1)
53 | {
54 | LOG_ERROR("Predicted value out of bounds.");
55 | return 0.0f;
56 | }
57 | return -(actual / predicted) + ((1 - actual) / (1 - predicted));
58 | }
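59 |
60 | /*
61 |  * Editor's illustrative sketch (not part of the original file): worked
62 |  * example for two samples. With y = {1, 0} and yHat = {0.9, 0.2}:
63 |  * loss = -(log(0.9) + log(0.8)) / 2 ~= (0.1054 + 0.2231) / 2 ~= 0.1643.
64 |  * Note the argument order: predictions first.
65 |  *
66 |  *     float y[]    = {1.0f, 0.0f};
67 |  *     float yHat[] = {0.9f, 0.2f};
68 |  *     float l = binary_cross_entropy_loss(yHat, y, 2);  // ~0.1643
69 |  */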
--------------------------------------------------------------------------------
/src/Loss_Functions/cosine_similarity_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/cosine_similarity_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Cosine Similarity Loss.
9 | *
10 | * The Cosine Similarity Loss is defined as:
11 | * - loss = 1 - (y . yHat) / (||y|| * ||yHat||)
12 | *
13 | * @param y Pointer to the ground truth values.
14 | * @param yHat Pointer to the predicted values.
15 | * @param n The number of elements in y and yHat.
16 | * @return The computed loss, or an error code if inputs are invalid.
17 | */
18 | float cosine_similarity_loss(float *y, float *yHat, int n)
19 | {
20 | if (!y || !yHat || n <= 0)
21 | {
22 | LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 | float dot_product = 0.0f, norm_y = 0.0f, norm_yHat = 0.0f;
26 | for (int i = 0; i < n; i++)
27 | {
28 | dot_product += y[i] * yHat[i];
29 | norm_y += y[i] * y[i];
30 | norm_yHat += yHat[i] * yHat[i];
31 | }
32 | norm_y = sqrtf(norm_y);
33 | norm_yHat = sqrtf(norm_yHat);
34 | if (norm_y == 0 || norm_yHat == 0)
35 | {
36 | LOG_ERROR("Zero vector norm.");
37 | return (float)CM_INVALID_INPUT_ERROR;
38 | }
39 |
40 | return 1.0f - (dot_product / (norm_y * norm_yHat));
41 | }
42 |
43 | /**
44 | * @brief Computes the derivative of the Cosine Similarity Loss.
45 | *
46 |  * The per-component derivative is:
47 |  * - d(loss)/dyHat_i = -y_i / (||y|| * ||yHat||) + (y . yHat) * yHat_i / (||yHat||^3)
48 |  *
49 |  * @param y Pointer to the ground truth values.
50 |  * @param yHat Pointer to the predicted values.
51 |  * @param n The number of elements in y and yHat.
52 |  * @return The sum of the per-component derivatives, or an error code if inputs are invalid.
53 |  */
54 | float cosine_similarity_loss_derivative(float *y, float *yHat, int n)
55 | {
56 | if (!y || !yHat || n <= 0)
57 | {
58 | LOG_ERROR("Invalid input parameters.");
59 | return (float)CM_INVALID_INPUT_ERROR;
60 | }
61 | float dot_product = 0.0f, norm_y = 0.0f, norm_yHat = 0.0f;
62 | for (int i = 0; i < n; i++)
63 | {
64 | dot_product += y[i] * yHat[i];
65 | norm_y += y[i] * y[i];
66 | norm_yHat += yHat[i] * yHat[i];
67 | }
68 | norm_y = sqrtf(norm_y);
69 | norm_yHat = sqrtf(norm_yHat);
70 | if (norm_y == 0 || norm_yHat == 0)
71 | {
72 | LOG_ERROR("Zero vector norm.");
73 | return (float)CM_INVALID_INPUT_ERROR;
74 | }
75 |     float derivative = 0.0f;
76 |     for (int i = 0; i < n; i++)
77 |     {
78 |         derivative += -y[i] / (norm_y * norm_yHat) + (dot_product / (norm_yHat * norm_yHat * norm_yHat)) * yHat[i];
79 |     }
80 | return derivative;
81 | }
82 |
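83 | /*
84 |  * Editor's illustrative sketch (not part of the original file): vectors with
85 |  * identical direction give loss 0, orthogonal vectors give loss 1.
86 |  *
87 |  *     float a[] = {1.0f, 0.0f}, b[] = {2.0f, 0.0f}, c[] = {0.0f, 1.0f};
88 |  *     cosine_similarity_loss(a, b, 2);  // 0.0 (same direction)
89 |  *     cosine_similarity_loss(a, c, 2);  // 1.0 (orthogonal)
90 |  */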
--------------------------------------------------------------------------------
/src/Loss_Functions/focal_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/focal_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define EPSILON 1e-8
8 |
9 | /**
10 | * @brief Computes the Focal Loss.
11 | *
12 | * The Focal Loss is designed to address class imbalance by down-weighting
13 | * well-classified examples. It is defined as:
14 | * - loss = -1/n * Σ [y * (1 - yHat)^gamma * log(yHat) + (1 - y) * yHat^gamma * log(1 - yHat)]
15 | *
16 | * @param y Pointer to the ground truth labels.
17 | * @param yHat Pointer to the predicted probabilities.
18 | * @param n The number of elements in y and yHat.
19 | * @param gamma The focusing parameter to adjust the rate at which easy examples are down-weighted.
20 | * @return The computed loss, or an error code if inputs are invalid.
21 | */
22 | float focal_loss(float *y, float *yHat, int n, float gamma)
23 | {
24 | if (!y || !yHat || n <= 0)
25 | {
26 | LOG_ERROR("Invalid input parameters.");
27 | return (float)CM_INVALID_INPUT_ERROR;
28 | }
29 | float loss = 0.0f;
30 | for (int i = 0; i < n; i++)
31 | {
32 | float yHat_clamped = fmaxf(fminf(yHat[i], 1.0f - EPSILON), EPSILON);
33 | if (y[i] == 1)
34 | {
35 | loss += -powf(1 - yHat_clamped, gamma) * logf(yHat_clamped);
36 | }
37 | else
38 | {
39 | loss += -powf(yHat_clamped, gamma) * logf(1 - yHat_clamped);
40 | }
41 | }
42 | return loss / n;
43 | }
44 |
45 | /**
46 | * @brief Computes the derivative of the Focal Loss.
47 | *
48 | * The derivative is defined as:
49 | * - d(Focal Loss)/dyHat = -gamma * (1 - yHat)^(gamma - 1) * log(yHat) * y
50 | * + (1 - y) * gamma * yHat^(gamma - 1) * log(1 - yHat)
51 | *
52 | * @param y Ground truth value.
53 | * @param yHat Predicted value.
54 | * @param gamma The focusing parameter.
55 | * @return The derivative value.
56 | */
57 | float focal_loss_derivative(float y, float yHat, float gamma)
58 | {
59 | float pt = fmaxf(yHat, 1e-15);
60 | float one_minus_pt = fmaxf(1 - yHat, 1e-15);
61 |
62 | return -y * gamma * powf(one_minus_pt, gamma - 1) * logf(pt) + (1 - y) * gamma * powf(pt, gamma - 1) * logf(one_minus_pt);
63 | }
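64 |
65 | /*
66 |  * Editor's illustrative sketch (not part of the original file): with
67 |  * gamma = 0 the focal loss reduces to binary cross-entropy; larger gamma
68 |  * shrinks the contribution of well-classified samples. For y = 1,
69 |  * yHat = 0.9, gamma = 2: loss = -(0.1)^2 * log(0.9) ~= 0.00105.
70 |  *
71 |  *     float y[] = {1.0f}, yHat[] = {0.9f};
72 |  *     focal_loss(y, yHat, 1, 2.0f);  // ~0.00105
73 |  */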
--------------------------------------------------------------------------------
/src/Loss_Functions/huber_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/huber_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define DELTA 1.0f
8 |
9 | /**
10 | * @brief Computes the Huber Loss.
11 | *
12 | * The Huber Loss is defined as:
13 | * - loss = 1/n * Σ [0.5 * (yHat - y)^2] if |yHat - y| <= delta
14 | * - loss = delta * (|yHat - y| - 0.5 * delta) otherwise
15 | *
16 | * @param y Pointer to the ground truth values.
17 | * @param yHat Pointer to the predicted values.
18 | * @param n The number of elements in y and yHat.
19 | * @return The computed loss, or an error code if inputs are invalid.
20 | */
21 | float huber_loss(float *y, float *yHat, int n)
22 | {
23 | if (!y || !yHat || n <= 0)
24 | {
25 | LOG_ERROR("Invalid input parameters.");
26 | return (float)CM_INVALID_INPUT_ERROR;
27 | }
28 | float loss = 0;
29 | for (int i = 0; i < n; i++)
30 | {
31 | float diff = yHat[i] - y[i];
32 | if (fabsf(diff) <= DELTA)
33 | loss += 0.5f * diff * diff;
34 | else
35 | loss += DELTA * (fabsf(diff) - 0.5f * DELTA);
36 | }
37 | return loss / n;
38 | }
39 |
40 | /**
41 | * @brief Computes the derivative of the Huber Loss.
42 | *
43 | * The derivative is defined as:
44 | * - d(loss)/dyHat = (yHat - y) if |yHat - y| <= delta
45 | * - d(loss)/dyHat = delta * sign(yHat - y) otherwise
46 | *
47 | * @param y Ground truth value.
48 | * @param yHat Predicted value.
49 | * @return The derivative value.
50 | */
51 | float huber_loss_derivative(float y, float yHat)
52 | {
53 | float diff = yHat - y;
54 | if (fabsf(diff) <= DELTA)
55 | return diff;
56 | else
57 | return (diff > 0 ? DELTA : -DELTA);
58 | }
59 |
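60 | /*
61 |  * Editor's illustrative sketch (not part of the original file): worked
62 |  * example at delta = 1. A small residual of 0.5 stays quadratic,
63 |  * 0.5 * 0.5^2 = 0.125; a large residual of 2.0 takes the linear branch,
64 |  * 1.0 * (2.0 - 0.5) = 1.5. Averaged: (0.125 + 1.5) / 2 = 0.8125.
65 |  *
66 |  *     float y[]    = {0.0f, 0.0f};
67 |  *     float yHat[] = {0.5f, 2.0f};
68 |  *     huber_loss(y, yHat, 2);  // 0.8125
69 |  */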
--------------------------------------------------------------------------------
/src/Loss_Functions/kld_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/kld_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define EPSILON 1e-8
8 |
9 | /**
10 | * @brief Computes the Kullback-Leibler Divergence Loss.
11 | *
12 | * The KLD loss is defined as:
13 | * - loss = 1/n * Σ [p * log(p / q)]
14 | *
15 | * @param p Pointer to the true distribution.
16 | * @param q Pointer to the predicted distribution.
17 | * @param n The number of elements in p and q.
18 | * @return The computed loss, or an error code if inputs are invalid.
19 | */
20 | float kld_loss(float *p, float *q, int n)
21 | {
22 | if (!p || !q || n <= 0)
23 | {
24 | LOG_ERROR("Invalid input parameters.");
25 | return (float)CM_INVALID_INPUT_ERROR;
26 | }
27 | float sum = 0.0f;
28 | for (int i = 0; i < n; i++)
29 | {
30 | float p_val = fmaxf(p[i], EPSILON);
31 | float q_val = fmaxf(q[i], EPSILON);
32 | sum += p_val * logf(p_val / q_val);
33 | }
34 | return sum / n;
35 | }
36 |
37 | /**
38 | * @brief Computes the derivative of the Kullback-Leibler Divergence Loss.
39 | *
40 | * The derivative is defined as:
41 | * - d(loss)/dq = -p / q
42 | *
43 | * @param p True distribution value.
44 | * @param q Predicted distribution value.
45 | * @return The derivative value.
46 | */
47 | float kld_loss_derivative(float p, float q)
48 | {
49 | return -p / fmaxf(q, EPSILON);
50 | }
51 |
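52 | /*
53 |  * Editor's illustrative sketch (not part of the original file): the KL
54 |  * divergence of a distribution from itself is 0 and grows as q drifts from
55 |  * p. Note that this implementation averages over elements (1/n) rather than
56 |  * taking the plain sum over the support.
57 |  *
58 |  *     float p[] = {0.5f, 0.5f}, q[] = {0.9f, 0.1f};
59 |  *     kld_loss(p, p, 2);  // 0.0
60 |  *     kld_loss(p, q, 2);  // (0.5*log(0.5/0.9) + 0.5*log(0.5/0.1)) / 2 ~= 0.2554
61 |  */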
--------------------------------------------------------------------------------
/src/Loss_Functions/log_cosh_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/log_cosh_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Log-Cosh Loss.
9 | *
10 | * The Log-Cosh Loss is defined as:
11 | * - loss = 1/n * Σ log(cosh(yHat - y))
12 | *
13 | * @param y Pointer to the ground truth values.
14 | * @param yHat Pointer to the predicted values.
15 | * @param n The number of elements in y and yHat.
16 | * @return The computed loss, or an error code if inputs are invalid.
17 | */
18 | float log_cosh_loss(float *y, float *yHat, int n)
19 | {
20 | if (!y || !yHat || n <= 0)
21 | {
22 | LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 | float sum = 0.0f;
26 | for (int i = 0; i < n; i++)
27 | {
28 | float diff = yHat[i] - y[i];
29 | sum += log(cosh(diff));
30 | }
31 | return sum / n;
32 | }
33 |
34 | /**
35 | * @brief Computes the derivative of the Log-Cosh Loss.
36 | *
37 | * The derivative is defined as:
38 | * - d(loss)/dyHat = tanh(yHat - y)
39 | *
40 | * @param y Ground truth value.
41 | * @param yHat Predicted value.
42 | * @return The derivative value.
43 | */
44 | float log_cosh_loss_derivative(float y, float yHat)
45 | {
46 | return tanh(yHat - y);
47 | }
48 |
--------------------------------------------------------------------------------
/src/Loss_Functions/mean_squared_error.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/mean_squared_error.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Mean Squared Error (MSE).
9 | *
10 | * The MSE is defined as:
11 | * - error = 1/n * Σ (y - yHat)^2
12 | *
13 | * @param y Pointer to the ground truth values.
14 | * @param yHat Pointer to the predicted values.
15 | * @param n The number of elements in y and yHat.
16 | * @return The computed error, or an error code if inputs are invalid.
17 | */
18 | float mean_squared_error(float *y, float *yHat, int n)
19 | {
20 | if (!y || !yHat || n <= 0)
21 | {
22 |         LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 | float sum = 0.0f;
26 | for (int i = 0; i < n; i++)
27 | {
28 | float diff = yHat[i] - y[i];
29 | sum += diff * diff;
30 | }
31 | return sum / n;
32 | }
33 |
34 | /**
35 | * @brief Computes the derivative of the Mean Squared Error (MSE).
36 | *
37 | * The derivative is defined as:
38 | * - d(MSE)/dyHat = 2 * (yHat - y) / n
39 | *
40 | * @param predicted Predicted value.
41 | * @param actual Ground truth value.
42 | * @param n The total number of elements.
43 | * @return The derivative value.
44 | */
45 | float mean_squared_error_derivative(float predicted, float actual, int n)
46 | {
47 | if (n <= 0)
48 | {
49 |         LOG_ERROR("Invalid size parameter.");
50 | return 0.0f;
51 | }
52 | if (predicted < 0 || predicted > 1)
53 | {
54 |         LOG_ERROR("Predicted value out of bounds.");
55 | return 0.0f;
56 | }
57 | if (actual < 0 || actual > 1)
58 | {
59 |         LOG_ERROR("Actual value out of bounds.");
60 | return 0.0f;
61 | }
62 | return (2.0f * (predicted - actual)) / n;
63 | }
64 |
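65 | /*
66 |  * Editor's illustrative sketch (not part of the original file): worked
67 |  * example. For y = {0, 1} and yHat = {0.5, 0.5}: ((0.5)^2 + (0.5)^2) / 2
68 |  * = 0.25, and the per-element gradient is 2 * (0.5 - 1) / 2 = -0.5.
69 |  *
70 |  *     float y[] = {0.0f, 1.0f}, yHat[] = {0.5f, 0.5f};
71 |  *     mean_squared_error(y, yHat, 2);                // 0.25
72 |  *     mean_squared_error_derivative(0.5f, 1.0f, 2);  // -0.5
73 |  */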
--------------------------------------------------------------------------------
/src/Loss_Functions/poisson_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/poisson_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define EPSILON 1e-8
8 |
9 | /**
10 | * @brief Computes the Poisson Loss.
11 | *
12 | * The Poisson Loss is defined as:
13 | * - loss = 1/n * Σ [yHat - y * log(yHat)]
14 | *
15 | * @param y Pointer to the ground truth values.
16 | * @param yHat Pointer to the predicted values.
17 | * @param n The number of elements in y and yHat.
18 | * @return The computed loss, or an error code if inputs are invalid.
19 | */
20 | float poisson_loss(float *y, float *yHat, int n)
21 | {
22 | if (!y || !yHat || n <= 0)
23 | {
24 | LOG_ERROR("Invalid input parameters.");
25 | return (float)CM_INVALID_INPUT_ERROR;
26 | }
27 | float sum = 0.0f;
28 | for (int i = 0; i < n; i++)
29 | {
30 | sum += yHat[i] - y[i] * log(yHat[i] + EPSILON);
31 | }
32 | return sum / n;
33 | }
34 |
35 | /**
36 | * @brief Computes the derivative of the Poisson Loss.
37 | *
38 | * The derivative is defined as:
39 | * - d(loss)/dyHat = 1 - (y / (yHat + EPSILON))
40 | *
41 | * @param y Ground truth value.
42 | * @param yHat Predicted value.
43 | * @return The derivative value.
44 | */
45 | float poisson_loss_derivative(float y, float yHat)
46 | {
47 | return 1 - (y / (yHat + EPSILON));
48 | }
49 |
--------------------------------------------------------------------------------
/src/Loss_Functions/smooth_l1_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/smooth_l1_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define BETA 1.0f
8 |
9 | /**
10 | * @brief Computes the Smooth L1 Loss.
11 | *
12 | * The Smooth L1 Loss is defined as:
13 | * - loss = 1/n * Σ [0.5 * (yHat - y)^2 / beta] if |yHat - y| < beta
14 | * - loss = |yHat - y| - 0.5 * beta otherwise
15 | *
16 | * @param y Pointer to the ground truth values.
17 | * @param yHat Pointer to the predicted values.
18 | * @param n The number of elements in y and yHat.
19 | * @return The computed loss, or an error code if inputs are invalid.
20 | */
21 | float smooth_l1_loss(float *y, float *yHat, int n)
22 | {
23 | if (!y || !yHat || n <= 0)
24 | {
25 | LOG_ERROR("Invalid input parameters.");
26 | return (float)CM_INVALID_INPUT_ERROR;
27 | }
28 | float loss = 0.0f;
29 | for (int i = 0; i < n; i++)
30 | {
31 | float diff = fabsf(yHat[i] - y[i]);
32 | if (diff < BETA)
33 | loss += 0.5f * diff * diff / BETA;
34 | else
35 | loss += diff - 0.5f * BETA;
36 | }
37 | return loss / n;
38 | }
39 |
40 | /**
41 | * @brief Computes the derivative of the Smooth L1 Loss.
42 | *
43 | * The derivative is defined as:
44 | * - d(loss)/dyHat = (yHat - y) / beta if |yHat - y| < beta
45 | * - d(loss)/dyHat = sign(yHat - y) otherwise
46 | *
47 | * @param y Ground truth value.
48 | * @param yHat Predicted value.
49 | * @return The derivative value.
50 | */
51 | float smooth_l1_loss_derivative(float y, float yHat)
52 | {
53 | float diff = yHat - y;
54 | if (fabsf(diff) < BETA)
55 | return diff / BETA;
56 | else
57 | return (diff > 0 ? 1.0f : -1.0f);
58 | }
59 |
--------------------------------------------------------------------------------
/src/Loss_Functions/tversky_loss.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Loss_Functions/tversky_loss.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | #define ALPHA 0.5f
8 | #define BETA 0.5f
9 |
10 | /**
11 | * @brief Computes the Tversky Loss.
12 | *
13 | * The Tversky Loss is a generalization of the Dice Loss and is defined as:
14 | * - loss = 1 - (TP / (TP + alpha * FP + beta * FN))
15 | *
16 | * @param y Pointer to the ground truth values.
17 | * @param yHat Pointer to the predicted values.
18 | * @param n The number of elements in y and yHat.
19 | * @return The computed loss, or an error code if inputs are invalid.
20 | */
21 | float tversky_loss(float *y, float *yHat, int n)
22 | {
23 | if (!y || !yHat || n <= 0)
24 | {
25 | LOG_ERROR("Invalid input parameters.");
26 | return (float)CM_INVALID_INPUT_ERROR;
27 | }
28 | float tp = 0.0f, fp = 0.0f, fn = 0.0f;
29 | for (int i = 0; i < n; i++)
30 | {
31 | tp += y[i] * yHat[i];
32 | fp += (1 - y[i]) * yHat[i];
33 | fn += y[i] * (1 - yHat[i]);
34 | }
35 | float denominator = tp + ALPHA * fp + BETA * fn;
36 | if (denominator == 0)
37 | {
38 | LOG_ERROR("Division by zero.");
39 | return (float)CM_INVALID_INPUT_ERROR;
40 | }
41 | return 1.0f - (tp / denominator);
42 | }
43 |
44 | /**
45 | * @brief Computes the derivative of the Tversky Loss.
46 | *
47 | * The derivative is defined as:
48 | * - d(loss)/dyHat = -TP / (TP + alpha * FP + beta * FN)^2
49 | *
50 | * @param y Pointer to the ground truth values.
51 | * @param yHat Pointer to the predicted values.
52 | * @param n The number of elements in y and yHat.
53 | * @return The derivative value, or an error code if inputs are invalid.
54 | */
55 | float tversky_loss_derivative(float *y, float *yHat, int n)
56 | {
57 | if (!y || !yHat || n <= 0)
58 | {
59 | LOG_ERROR("Invalid input parameters.");
60 | return (float)CM_INVALID_INPUT_ERROR;
61 | }
62 | float tp = 0.0f, fp = 0.0f, fn = 0.0f;
63 | for (int i = 0; i < n; i++)
64 | {
65 | tp += y[i] * yHat[i];
66 | fp += (1 - y[i]) * yHat[i];
67 | fn += y[i] * (1 - yHat[i]);
68 | }
69 | float denominator = tp + ALPHA * fp + BETA * fn;
70 | if (denominator == 0)
71 | {
72 | LOG_ERROR("Division by zero.");
73 | return (float)CM_INVALID_INPUT_ERROR;
74 | }
75 | return -tp / (denominator * denominator);
76 | }
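77 |
78 | /*
79 |  * Editor's illustrative sketch (not part of the original file): with
80 |  * ALPHA = BETA = 0.5 the Tversky loss coincides with the Dice loss. For
81 |  * y = {1, 0} and yHat = {1, 1}: TP = 1, FP = 1, FN = 0, so
82 |  * loss = 1 - 1 / (1 + 0.5 * 1) = 1/3.
83 |  *
84 |  *     float y[] = {1.0f, 0.0f}, yHat[] = {1.0f, 1.0f};
85 |  *     tversky_loss(y, yHat, 2);  // ~0.3333
86 |  */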
--------------------------------------------------------------------------------
/src/Metrics/accuracy.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/accuracy.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the accuracy of predictions.
8 | *
9 | * The accuracy is defined as the ratio of correct predictions to the total number of predictions.
10 | *
11 | * @param y Pointer to the ground truth labels.
12 | * @param yHat Pointer to the predicted labels.
13 | * @param n The number of elements in y and yHat.
14 | * @param threshold The threshold for binary classification.
15 | * @return The computed accuracy, or an error code if inputs are invalid.
16 | */
17 | float accuracy(float *y, float *yHat, int n, float threshold)
18 | {
19 | if (!y || !yHat || n <= 0)
20 | {
21 | LOG_ERROR("Invalid input parameters.");
22 | return CM_INVALID_INPUT_ERROR;
23 | }
24 | int correct = 0;
25 | for (int i = 0; i < n; i++)
26 | {
27 | int pred = yHat[i] > threshold ? 1 : 0;
28 | if (pred == (int)y[i])
29 | correct++;
30 | }
31 | return (float)correct / n;
32 | }
33 |
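34 | /*
35 |  * Editor's illustrative sketch (not part of the original file): predictions
36 |  * are binarized at the threshold before comparison, so the same scores can
37 |  * score differently under different thresholds.
38 |  *
39 |  *     float y[]    = {1.0f, 0.0f, 1.0f, 0.0f};
40 |  *     float yHat[] = {0.8f, 0.4f, 0.3f, 0.1f};
41 |  *     accuracy(y, yHat, 4, 0.5f);   // 0.75 (the 0.3 positive is missed)
42 |  *     accuracy(y, yHat, 4, 0.25f);  // 0.75 (now 0.4 is a false positive)
43 |  */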
--------------------------------------------------------------------------------
/src/Metrics/balanced_accuracy.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/balanced_accuracy.h"
3 | #include "../../include/Metrics/specificity.h"
4 | #include "../../include/Metrics/recall.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 | /**
9 | * @brief Computes the Balanced Accuracy metric.
10 | *
11 | * The Balanced Accuracy is defined as the average of sensitivity (recall) and specificity.
12 | *
13 | * @param y Pointer to the ground truth labels.
14 | * @param yHat Pointer to the predicted labels.
15 | * @param n The number of elements in y and yHat.
16 | * @param threshold The threshold for binary classification.
17 | * @return The computed balanced accuracy, or an error code if inputs are invalid.
18 | */
19 | float balanced_accuracy(float *y, float *yHat, int n, float threshold)
20 | {
21 | if (!y || !yHat || n <= 0)
22 | {
23 | LOG_ERROR("Invalid input parameters.");
24 | return CM_INVALID_INPUT_ERROR;
25 | }
26 |
27 | float sensitivity = recall(y, yHat, n, threshold);
28 | float spec = specificity(y, yHat, n, threshold);
29 |
30 | return (sensitivity + spec) / 2.0f;
31 | }
32 |
--------------------------------------------------------------------------------
/src/Metrics/cohens_kappa.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/cohens_kappa.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes Cohen's Kappa statistic.
8 | *
9 | * Cohen's Kappa is a measure of inter-rater agreement for categorical items.
10 | * It is defined as:
11 | * - kappa = (observed_accuracy - expected_accuracy) / (1 - expected_accuracy)
12 | *
13 | * @param y Pointer to the ground truth labels.
14 | * @param yHat Pointer to the predicted labels.
15 | * @param n The number of elements in y and yHat.
16 | * @param threshold The threshold for binary classification.
17 | * @return The computed Cohen's Kappa, or an error code if inputs are invalid.
18 | */
19 | float cohens_kappa(float *y, float *yHat, int n, float threshold)
20 | {
21 | if (!y || !yHat || n <= 0)
22 | {
23 | LOG_ERROR("Invalid input parameters.");
24 | return CM_INVALID_INPUT_ERROR;
25 | }
26 | int tp = 0, tn = 0, fp = 0, fn = 0;
27 | for (int i = 0; i < n; i++)
28 | {
29 | int actual = (int)y[i];
30 | int pred = yHat[i] > threshold ? 1 : 0;
31 | if (actual == 1 && pred == 1)
32 | tp++;
33 | else if (actual == 0 && pred == 0)
34 | tn++;
35 | else if (actual == 0 && pred == 1)
36 | fp++;
37 | else if (actual == 1 && pred == 0)
38 | fn++;
39 | }
40 |     float total = (float)(tp + tn + fp + fn);
41 |     float observed_accuracy = (tp + tn) / total;
42 |     float expected_accuracy = ((float)(tp + fn) * (tp + fp) + (float)(tn + fp) * (tn + fn)) / (total * total);
43 | if (expected_accuracy == 1.0f)
44 | return 0.0f;
45 | return (observed_accuracy - expected_accuracy) / (1.0f - expected_accuracy);
46 | }
47 |
--------------------------------------------------------------------------------
/src/Metrics/f1_score.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/f1_score.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the F1 Score metric.
8 | *
9 | * The F1 Score is the harmonic mean of precision and recall, defined as:
10 | * - F1 = 2 * (precision * recall) / (precision + recall)
11 | *
12 | * @note Precision is the ratio of true positives to the sum of true positives and false positives.
13 | * @note Recall is the ratio of true positives to the sum of true positives and false negatives.
14 | *
15 | * @param y Pointer to the ground truth labels.
16 | * @param yHat Pointer to the predicted labels.
17 | * @param n The number of elements in y and yHat.
18 | * @param threshold The threshold for binary classification.
19 | * @return The computed F1 Score, or an error code if inputs are invalid.
20 | */
21 | float f1_score(float *y, float *yHat, int n, float threshold)
22 | {
23 | if (!y || !yHat || n <= 0)
24 | {
25 | LOG_ERROR("Invalid input parameters.");
26 | return CM_INVALID_INPUT_ERROR;
27 | }
28 | int true_positive = 0, false_positive = 0, false_negative = 0;
29 | for (int i = 0; i < n; i++)
30 | {
31 | int actual = (int)y[i];
32 | int pred = yHat[i] > threshold ? 1 : 0;
33 | if (actual == 1 && pred == 1)
34 | true_positive++;
35 | else if (actual == 0 && pred == 1)
36 | false_positive++;
37 | else if (actual == 1 && pred == 0)
38 | false_negative++;
39 | }
40 | float precision = true_positive + false_positive > 0 ? (float)true_positive / (true_positive + false_positive) : 0;
41 | float recall = true_positive + false_negative > 0 ? (float)true_positive / (true_positive + false_negative) : 0;
42 | if (precision + recall == 0)
43 | return 0;
44 | return 2 * precision * recall / (precision + recall);
45 | }
46 |
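47 | /*
48 |  * Editor's illustrative sketch (not part of the original file): worked
49 |  * example. With y = {1, 1, 0, 0} and yHat = {0.9, 0.2, 0.8, 0.1} at
50 |  * threshold 0.5: TP = 1, FP = 1, FN = 1, so precision = recall = 0.5 and
51 |  * F1 = 2 * 0.25 / 1.0 = 0.5.
52 |  *
53 |  *     float y[]    = {1.0f, 1.0f, 0.0f, 0.0f};
54 |  *     float yHat[] = {0.9f, 0.2f, 0.8f, 0.1f};
55 |  *     f1_score(y, yHat, 4, 0.5f);  // 0.5
56 |  */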
--------------------------------------------------------------------------------
/src/Metrics/iou.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/iou.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the Intersection over Union (IoU) metric.
8 | *
9 | * The IoU is defined as the ratio of the intersection area to the union area.
10 | *
11 | * @param y Pointer to the ground truth labels.
12 | * @param yHat Pointer to the predicted labels.
13 | * @param n The number of elements in y and yHat.
14 | * @param threshold The threshold for binary classification.
15 | * @return The computed IoU, or an error code if inputs are invalid.
16 | */
17 | float iou(float *y, float *yHat, int n, float threshold)
18 | {
19 | if (!y || !yHat || n <= 0)
20 | {
21 | LOG_ERROR("Invalid input parameters.");
22 | return CM_INVALID_INPUT_ERROR;
23 | }
24 | float intersection = 0.0f, union_area = 0.0f;
25 | for (int i = 0; i < n; i++)
26 | {
27 | int actual = (int)y[i];
28 | int pred = yHat[i] > threshold ? 1 : 0;
29 | intersection += actual * pred;
30 | union_area += actual + pred - (actual * pred);
31 | }
32 | if (union_area == 0)
33 | {
34 | LOG_ERROR("Division by zero.");
35 | return 0.0f;
36 | }
37 | return intersection / union_area;
38 | }
39 |
--------------------------------------------------------------------------------
/src/Metrics/mcc.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Metrics/mcc.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Matthews Correlation Coefficient (MCC).
9 | *
10 | * The MCC is a measure of the quality of binary classifications.
11 | * It is defined as:
12 | * - mcc = (TP * TN - FP * FN) / sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
13 | *
14 | * @param y Pointer to the ground truth labels.
15 | * @param yHat Pointer to the predicted labels.
16 | * @param n The number of elements in y and yHat.
17 | * @param threshold The threshold for binary classification.
18 | * @return The computed MCC, or an error code if inputs are invalid.
19 | */
20 | float mcc(float *y, float *yHat, int n, float threshold)
21 | {
22 | if (!y || !yHat || n <= 0)
23 | {
24 | LOG_ERROR("Invalid input parameters.");
25 | return CM_INVALID_INPUT_ERROR;
26 | }
27 | int tp = 0, tn = 0, fp = 0, fn = 0;
28 | for (int i = 0; i < n; i++)
29 | {
30 | int actual = (int)y[i];
31 | int pred = yHat[i] > threshold ? 1 : 0;
32 | if (actual == 1 && pred == 1)
33 | tp++;
34 | else if (actual == 0 && pred == 0)
35 | tn++;
36 | else if (actual == 0 && pred == 1)
37 | fp++;
38 | else if (actual == 1 && pred == 0)
39 | fn++;
40 | }
41 |     float numerator = (float)tp * tn - (float)fp * fn;
42 |     float denominator = sqrtf((float)(tp + fp) * (tp + fn) * (tn + fp) * (tn + fn));
43 | if (denominator == 0)
44 | return 0.0f;
45 | return numerator / denominator;
46 | }
47 |
--------------------------------------------------------------------------------
/src/Metrics/mean_absolute_error.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Metrics/mean_absolute_error.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Implements the Mean Absolute Error metric.
9 | *
10 | * The Mean Absolute Error (MAE) is defined as:
11 | * - MAE = 1/n * Σ |y - yHat|
12 | *
13 | * @param y Pointer to the ground truth values.
14 | * @param yHat Pointer to the predicted values.
15 | * @param n The number of elements in y and yHat.
16 | * @return The computed MAE, or an error code if inputs are invalid.
17 | */
18 | float mean_absolute_error(float *y, float *yHat, int n)
19 | {
20 | if (!y || !yHat || n <= 0)
21 | {
22 | LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 | float sum = 0;
26 | for (int i = 0; i < n; i++)
27 | {
28 | sum += fabsf(y[i] - yHat[i]);
29 | }
30 | return sum / n;
31 | }
32 |
--------------------------------------------------------------------------------
/src/Metrics/mean_absolute_percentage_error.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Metrics/mean_absolute_percentage_error.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 | /**
8 | * @brief Computes the Mean Absolute Percentage Error (MAPE).
9 | *
10 | * The MAPE is defined as:
11 | * - MAPE = 1/n * Σ |(y - yHat) / y| * 100
12 | *
13 | * @param y Pointer to the ground truth values.
14 | * @param yHat Pointer to the predicted values.
15 | * @param n The number of elements in y and yHat.
16 | * @return The computed MAPE, or an error code if inputs are invalid.
17 | */
18 | float mean_absolute_percentage_error(float *y, float *yHat, int n)
19 | {
20 | if (!y || !yHat || n <= 0)
21 | {
22 | LOG_ERROR("Invalid input parameters.");
23 | return (float)CM_INVALID_INPUT_ERROR;
24 | }
25 | float sum = 0;
26 | int valid_count = 0;
27 | for (int i = 0; i < n; i++)
28 | {
29 | if (fabsf(y[i]) < 1e-15)
30 | {
31 | continue;
32 | }
33 | sum += fabsf((y[i] - yHat[i]) / y[i]);
34 | valid_count++;
35 | }
36 |     if (valid_count == 0)
37 |     {
38 |         LOG_ERROR("All target values are (near) zero; MAPE is undefined.");
39 |         return (float)CM_INVALID_INPUT_ERROR;
40 |     }
41 |     return sum / valid_count * 100.0f;
42 | }
43 |
--------------------------------------------------------------------------------
/src/Metrics/precision.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/precision.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the Precision metric.
8 | *
9 | * The Precision is defined as the ratio of true positives to the sum of true positives and false positives.
10 | *
11 | * @param y Pointer to the ground truth labels.
12 | * @param yHat Pointer to the predicted labels.
13 | * @param n The number of elements in y and yHat.
14 | * @param threshold The threshold for binary classification.
15 | * @return The computed Precision, or an error code if inputs are invalid.
16 | */
17 | float precision(float *y, float *yHat, int n, float threshold)
18 | {
19 | if (!y || !yHat || n <= 0)
20 | {
21 | LOG_ERROR("Invalid input parameters.");
22 | return CM_INVALID_INPUT_ERROR;
23 | }
24 | int true_positive = 0, false_positive = 0;
25 | for (int i = 0; i < n; i++)
26 | {
27 | int actual = (int)y[i];
28 | int pred = yHat[i] > threshold ? 1 : 0;
29 | if (actual == 1 && pred == 1)
30 | true_positive++;
31 | else if (actual == 0 && pred == 1)
32 | false_positive++;
33 | }
34 | return true_positive + false_positive > 0 ? (float)true_positive / (true_positive + false_positive) : 0;
35 | }
36 |
--------------------------------------------------------------------------------
/src/Metrics/r2_score.c:
--------------------------------------------------------------------------------
1 | #include "../../include/Metrics/r2_score.h"
2 | #include "../../include/Core/logging.h"
3 |
4 | #include <math.h>
5 | #include <stdio.h>
6 |
7 | /**
8 | * @brief Calculate the R2 score (coefficient of determination).
9 | *
10 | * @param y_true Array of true values.
11 | * @param y_pred Array of predicted values.
12 | * @param size Number of elements in the arrays.
13 | * @return float The R2 score.
14 | */
15 | float r2_score(const float *y_true, const float *y_pred, int size)
16 | {
17 | if (y_true == NULL || y_pred == NULL || size <= 0)
18 | {
19 | return NAN;
20 | }
21 |
22 | float mean_y_true = 0.0f;
23 | for (int i = 0; i < size; i++)
24 | {
25 | mean_y_true += y_true[i];
26 | }
27 | mean_y_true /= size;
28 |
29 |     float ss_total = 0.0f;
30 |     float ss_residual = 0.0f;
31 |     for (int i = 0; i < size; i++)
32 |     {
33 |         ss_total += powf(y_true[i] - mean_y_true, 2);
34 |         ss_residual += powf(y_true[i] - y_pred[i], 2);
35 |     }
36 |
37 |     if (ss_total == 0.0f)
38 |     {
39 |         /* Constant targets: total variance is zero and R2 is undefined. */
40 |         return NAN;
41 |     }
42 |
43 |     return 1.0f - (ss_residual / ss_total);
44 | }
45 |
--------------------------------------------------------------------------------
/src/Metrics/recall.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/recall.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the Recall metric.
8 | *
9 | * The Recall is defined as the ratio of true positives to the sum of true positives and false negatives.
10 | *
11 | * @param y Pointer to the ground truth labels.
12 | * @param yHat Pointer to the predicted labels.
13 | * @param n The number of elements in y and yHat.
14 | * @param threshold The threshold for binary classification.
15 | * @return The computed Recall, or an error code if inputs are invalid.
16 | */
17 | float recall(float *y, float *yHat, int n, float threshold)
18 | {
19 | if (!y || !yHat || n <= 0)
20 | {
21 | LOG_ERROR("Invalid input parameters.");
22 | return CM_INVALID_INPUT_ERROR;
23 | }
24 | int true_positive = 0, false_negative = 0;
25 | for (int i = 0; i < n; i++)
26 | {
27 | int actual = (int)y[i];
28 | int pred = yHat[i] > threshold ? 1 : 0;
29 | if (actual == 1 && pred == 1)
30 | true_positive++;
31 | else if (actual == 1 && pred == 0)
32 | false_negative++;
33 | }
34 | return (true_positive + false_negative) > 0 ? (float)true_positive / (true_positive + false_negative) : 0;
35 | }
36 |
--------------------------------------------------------------------------------
/src/Metrics/reduce_mean.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/reduce_mean.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the mean of an array of floats.
8 | *
9 | * This function takes an array of floats and computes the mean value.
10 | *
11 | * @param loss Pointer to the array of floats.
12 | * @param size The number of elements in the array.
13 | * @return The computed mean, or an error code if inputs are invalid.
14 | */
15 | float reduce_mean(float *loss, int size)
16 | {
17 | if (!loss || size <= 0)
18 | {
19 | LOG_ERROR("Invalid input parameters.");
20 | return (float)CM_INVALID_INPUT_ERROR;
21 | }
22 | float sum = 0;
23 | for (int i = 0; i < size; i++)
24 | {
25 | sum += loss[i];
26 | }
27 | return sum / size;
28 | }
29 |
--------------------------------------------------------------------------------
/src/Metrics/root_mean_squared_error.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include "../../include/Metrics/root_mean_squared_error.h"
4 | #include "../../include/Loss_Functions/mean_squared_error.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/logging.h"
7 |
8 | /**
9 | * @brief Computes the Root Mean Squared Error (RMSE).
10 | *
11 | * The RMSE is defined as:
12 | * - RMSE = sqrt(1/n * Σ (y - yHat)^2)
13 | *
14 | * @param y Pointer to the ground truth values.
15 | * @param yHat Pointer to the predicted values.
16 | * @param n The number of elements in y and yHat.
17 | * @return The computed RMSE, or an error code if inputs are invalid.
18 | */
19 | float root_mean_squared_error(float *y, float *yHat, int n)
20 | {
21 | if (!y || !yHat || n <= 0)
22 | {
23 | LOG_ERROR("Invalid input parameters.");
24 | return (float)CM_INVALID_INPUT_ERROR;
25 | }
26 |
27 |     float sum = 0.0f;
28 |     for (int i = 0; i < n; i++)
29 |     {
30 |         sum += powf(y[i] - yHat[i], 2);
31 |     }
32 |     return sqrtf(sum / n);
33 | }
--------------------------------------------------------------------------------
/src/Metrics/specificity.c:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include "../../include/Metrics/specificity.h"
3 | #include "../../include/Core/error_codes.h"
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Computes the specificity metric.
8 | *
9 | * Specificity is defined as the ratio of true negatives to the sum of true negatives and false positives.
10 | *
11 | * @param y Pointer to the ground truth labels.
12 | * @param yHat Pointer to the predicted labels.
13 | * @param n The number of elements in y and yHat.
14 | * @param threshold The threshold for binary classification.
15 | * @return The computed specificity, or an error code if inputs are invalid.
16 | */
17 | float specificity(float *y, float *yHat, int n, float threshold)
18 | {
19 | if (!y || !yHat || n <= 0)
20 | {
21 | LOG_ERROR("Invalid input parameters.");
22 |         return (float)CM_INVALID_INPUT_ERROR;
23 | }
24 | int true_negative = 0, false_positive = 0;
25 | for (int i = 0; i < n; i++)
26 | {
27 | int actual = (int)y[i];
28 | int pred = yHat[i] > threshold ? 1 : 0;
29 | if (actual == 0 && pred == 0)
30 | true_negative++;
31 | else if (actual == 0 && pred == 1)
32 | false_positive++;
33 | }
34 | return (true_negative + false_positive) > 0 ? (float)true_negative / (true_negative + false_positive) : 0;
35 | }
36 |
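
A usage sketch for specificity(), mirroring the recall example (include path and main() assumed):

#include <stdio.h>
#include "include/Metrics/specificity.h"

int main(void)
{
    float y[4] = {0.0f, 0.0f, 0.0f, 1.0f};
    float yHat[4] = {0.1f, 0.8f, 0.3f, 0.9f};

    /* Of the three actual negatives, one is predicted positive (0.8 > 0.5),
       so TN = 2, FP = 1 and specificity = 2/3. */
    printf("specificity = %f\n", specificity(y, yHat, 4, 0.5f));
    return 0;
}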
--------------------------------------------------------------------------------
/src/Optimizers/rmsprop.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include "../../include/Optimizers/rmsprop.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 |
8 |
9 | /**
10 | * @brief Performs the RMSProp optimization algorithm.
11 | *
12 | * RMSProp is an adaptive learning rate optimization algorithm that adjusts the learning rate for each parameter.
13 | *
14 | * @param x The input feature value.
15 | * @param y The target value.
16 | * @param lr The learning rate.
17 | * @param w Pointer to the weight parameter.
18 | * @param b Pointer to the bias parameter.
19 | * @param cache_w Pointer to the cache for the weight parameter.
20 | * @param cache_b Pointer to the cache for the bias parameter.
21 | * @param epsilon A small constant to prevent division by zero.
22 | * @param beta The decay rate for the moving average of squared gradients.
23 | * @return The computed loss value, or an error code.
24 | */
25 | float rms_prop(float x, float y, float lr, float *w, float *b, float *cache_w, float *cache_b, float epsilon, float beta)
26 | {
27 | if (!w || !b || !cache_w || !cache_b)
28 | {
29 | LOG_ERROR("Null pointer input.");
30 | return CM_NULL_POINTER_ERROR;
31 | }
32 |
33 | if (isnan(x) || isnan(y))
34 | {
35 | return NAN;
36 | }
37 |
38 | if (isinf(x) || isinf(y))
39 | {
40 | return CM_INVALID_INPUT_ERROR;
41 | }
42 |
43 | if (epsilon <= 0)
44 | {
45 | LOG_ERROR("Epsilon value (%f) is invalid.", epsilon);
46 | return CM_INVALID_INPUT_ERROR;
47 | }
48 |
49 | float y_pred = (*w) * x + (*b);
50 | float loss = pow(y_pred - y, 2);
51 | float dw = 2 * (y_pred - y) * x;
52 | float db = 2 * (y_pred - y);
53 |
54 | *cache_w = beta * (*cache_w) + (1 - beta) * (dw * dw);
55 | *cache_b = beta * (*cache_b) + (1 - beta) * (db * db);
56 |
57 | *w -= lr * (dw / (sqrt(*cache_w) + epsilon));
58 | *b -= lr * (db / (sqrt(*cache_b) + epsilon));
59 | LOG_DEBUG("w: %f, b: %f, loss: %f", *w, *b, loss);
60 |
61 | return loss;
62 | }
63 |
64 | /**
65 | * @brief Update weights and biases using RMSProp optimizer.
66 | *
67 | * Updates the weights and biases of a neural network using the RMSProp optimization algorithm.
68 | *
69 | * @param w Pointer to the weight.
70 | * @param b Pointer to the bias.
71 | * @param cache_w Pointer to the weight cache.
72 | * @param cache_b Pointer to the bias cache.
73 | * @param gradient Gradient value.
74 | * @param input Input value.
75 | * @param learning_rate Learning rate.
76 | * @param beta Decay rate for RMSProp.
77 | * @param epsilon Small value to prevent division by zero.
78 | */
79 | void update_rmsprop(float *w, float *b, float *cache_w, float *cache_b, float gradient, float input, float learning_rate, float beta, float epsilon)
80 | {
81 | *cache_w = beta * (*cache_w) + (1 - beta) * pow(gradient * input, 2);
82 | *cache_b = beta * (*cache_b) + (1 - beta) * pow(gradient, 2);
83 |
84 | *w -= learning_rate * (gradient * input) / (sqrt(*cache_w) + epsilon);
85 | *b -= learning_rate * gradient / (sqrt(*cache_b) + epsilon);
86 | }
87 |
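
A sketch of rms_prop() driving a tiny regression; note that epsilon precedes beta in the argument list. The include path, main() wrapper, and iteration count are illustrative assumptions:

#include <stdio.h>
#include "include/Optimizers/rmsprop.h"

int main(void)
{
    /* Fit y = w*x + b to the single point (x, y) = (3, 7). */
    float w = 0.0f, b = 0.0f;
    float cache_w = 0.0f, cache_b = 0.0f;

    for (int i = 0; i < 2000; i++)
        rms_prop(3.0f, 7.0f, 0.01f, &w, &b, &cache_w, &cache_b, 1e-8f, 0.9f);

    /* The prediction w*3 + b should now be close to 7. */
    printf("w = %f, b = %f, prediction = %f\n", w, b, w * 3.0f + b);
    return 0;
}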
--------------------------------------------------------------------------------
/src/Optimizers/sgd.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include "../../include/Optimizers/sgd.h"
4 | #include "../../include/Core/error_codes.h"
5 | #include "../../include/Core/logging.h"
6 |
7 |
8 |
9 | /**
10 | * @brief Performs the Stochastic Gradient Descent (SGD) optimization algorithm.
11 | *
12 | * SGD is a simple optimization algorithm that updates parameters using the gradient of the loss function.
13 | *
14 | * @param x The input feature value.
15 | * @param y The target value.
16 | * @param lr The learning rate.
17 | * @param w Pointer to the weight parameter.
18 | * @param b Pointer to the bias parameter.
19 | * @return The computed loss value, or an error code.
20 | */
21 | float sgd(float x, float y, float lr, float *w, float *b)
22 | {
23 | if (!w || !b)
24 | {
25 | LOG_ERROR("Null pointer input.");
26 | return CM_NULL_POINTER_ERROR;
27 | }
28 |
29 | if (isnan(x) || isnan(y))
30 | {
31 | return NAN;
32 | }
33 |
34 | if (isinf(x) || isinf(y))
35 | {
36 | LOG_ERROR("Invalid input (inf).");
37 | return CM_INVALID_INPUT_ERROR;
38 | }
39 |
40 | float y_pred = (*w) * x + (*b);
41 | float loss = pow(y_pred - y, 2);
42 | float dw = 2 * (y_pred - y) * x;
43 | float db = 2 * (y_pred - y);
44 |
45 | (*w) -= lr * dw;
46 | (*b) -= lr * db;
47 | LOG_DEBUG("w: %f, b: %f, loss: %f", *w, *b, loss);
48 |
49 | return loss;
50 | }
51 |
52 | /**
53 | * @brief Update weights and biases using SGD optimizer.
54 | *
55 | * Updates the weights and biases of a neural network using the SGD optimization algorithm.
56 | *
57 | * @param w Pointer to the weight.
58 | * @param b Pointer to the bias.
59 | * @param gradient Gradient value.
60 | * @param input Input value.
61 | * @param learning_rate Learning rate.
62 | */
63 | void update_sgd(float *w, float *b, float gradient, float input, float learning_rate)
64 | {
65 | *w -= learning_rate * gradient * input;
66 | *b -= learning_rate * gradient;
67 | }
68 |
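
A single-step sketch of sgd(), with the include path and main() wrapper assumed for illustration:

#include <stdio.h>
#include "include/Optimizers/sgd.h"

int main(void)
{
    /* One gradient step on the squared error of y = w*x + b at (x, y) = (3, 7). */
    float w = 2.0f, b = 1.0f;
    float loss = sgd(3.0f, 7.0f, 0.01f, &w, &b);

    /* Here y_pred = 2*3 + 1 = 7 exactly, so the loss is 0, both gradients
       vanish, and w and b are left unchanged. */
    printf("loss = %f, w = %f, b = %f\n", loss, w, b);
    return 0;
}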
--------------------------------------------------------------------------------
/src/Preprocessing/min_max_scaler.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Preprocessing/min_max_scaler.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/memory_management.h"
7 | #include "../../include/Core/logging.h"
8 |
9 |
10 |
11 | /**
12 | * @brief Scales an array of floats to a range of [0, 1] using min-max scaling.
13 | *
14 | * The function finds the minimum and maximum values in the input array and scales
15 | * the values to the range [0, 1] using the formula:
16 | * scaled_value = (value - min) / (max - min)
17 | *
18 | * @param x The input array of floats.
19 | * @param size The size of the input array.
20 | * @return A pointer to the scaled array, or NULL if an error occurs.
21 | */
22 | float *min_max_scaler(float *x, int size)
23 | {
24 | if (x == NULL)
25 | {
26 | LOG_ERROR("Null pointer argument");
27 | return NULL;
28 | }
29 |
30 | if (size <= 0)
31 | {
32 | LOG_ERROR("Invalid size argument");
33 | return NULL;
34 | }
35 | float *scaled = (float *)cm_safe_malloc(sizeof(float) * size, __FILE__, __LINE__);
36 | if (scaled == NULL)
37 | {
38 | LOG_ERROR("Memory allocation failed\n");
39 | return NULL;
40 | }
41 | float min = x[0];
42 | float max = x[0];
43 | for (int i = 0; i < size; i++)
44 | {
45 | if (x[i] < min)
46 | {
47 | min = x[i];
48 | }
49 | if (x[i] > max)
50 | {
51 | max = x[i];
52 | }
53 | }
54 | if (max == min)
55 | {
56 | LOG_ERROR("Max and min are equal\n");
57 | free(scaled);
58 | return NULL;
59 | }
60 | for (int i = 0; i < size; i++)
61 | {
62 | scaled[i] = (x[i] - min) / (max - min);
63 | LOG_DEBUG("Scaled[%d]: %f", i, scaled[i]);
64 | }
65 | LOG_DEBUG("Scaling complete.");
66 | return scaled;
67 | }
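
A usage sketch for min_max_scaler(); the include path and main() wrapper are assumptions, and freeing with free() follows the pattern used in the project's own tests:

#include <stdio.h>
#include <stdlib.h>
#include "include/Preprocessing/min_max_scaler.h"

int main(void)
{
    float data[5] = {10.0f, 20.0f, 30.0f, 40.0f, 50.0f};

    /* Returns a freshly allocated array; the caller owns and frees it. */
    float *scaled = min_max_scaler(data, 5);
    if (scaled == NULL)
        return 1;

    for (int i = 0; i < 5; i++)
        printf("%f ", scaled[i]); /* 0.00 0.25 0.50 0.75 1.00 */
    printf("\n");

    free(scaled);
    return 0;
}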
--------------------------------------------------------------------------------
/src/Preprocessing/standard_scaler.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include "../../include/Preprocessing/standard_scaler.h"
5 | #include "../../include/Core/error_codes.h"
6 | #include "../../include/Core/memory_management.h"
7 | #include "../../include/Core/logging.h"
8 |
9 |
10 |
11 | /**
12 | * @brief Scales an array of floats to have a mean of 0 and a standard deviation of 1.
13 | *
14 | * The function calculates the mean and standard deviation of the input array and
15 | * scales the values using the formula:
16 | * scaled_value = (value - mean) / std
17 | *
18 | * @param x The input array of floats.
19 | * @param size The size of the input array.
20 | * @return A pointer to the scaled array, or NULL if an error occurs.
21 | */
22 | float *standard_scaler(float *x, int size)
23 | {
24 | if (x == NULL)
25 | {
26 | LOG_ERROR("Null pointer argument");
27 | return NULL;
28 | }
29 |
30 | if (size <= 0)
31 | {
32 | LOG_ERROR("Invalid size argument");
33 | return NULL;
34 | }
35 |
36 | float *scaled = (float *)cm_safe_malloc(sizeof(float) * size, __FILE__, __LINE__);
37 | if (scaled == NULL)
38 | {
39 | LOG_ERROR("Memory allocation failed\n");
40 | return NULL;
41 | }
42 |
43 | float mean = 0;
44 | for (int i = 0; i < size; i++)
45 | {
46 | mean += x[i];
47 | }
48 | mean /= size;
49 |
50 | float std = 0;
51 | for (int i = 0; i < size; i++)
52 | {
53 | std += pow(x[i] - mean, 2);
54 | }
55 | std /= size;
56 | std = sqrt(std);
57 |
58 | if (std == 0)
59 | {
60 | LOG_ERROR("Standard deviation is zero\n");
61 | free(scaled);
62 | return NULL;
63 | }
64 |
65 | for (int i = 0; i < size; i++)
66 | {
67 | scaled[i] = (x[i] - mean) / std;
68 | LOG_DEBUG("Scaled[%d]: %f", i, scaled[i]);
69 | }
70 | LOG_DEBUG("Scaling complete.");
71 | return scaled;
72 | }
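
A companion sketch for standard_scaler(), under the same illustrative assumptions:

#include <stdio.h>
#include <stdlib.h>
#include "include/Preprocessing/standard_scaler.h"

int main(void)
{
    float data[4] = {2.0f, 4.0f, 6.0f, 8.0f};

    /* mean = 5 and population std = sqrt(5) ~ 2.236, so the output is
       approximately -1.342, -0.447, 0.447, 1.342. */
    float *scaled = standard_scaler(data, 4);
    if (scaled == NULL)
        return 1;

    for (int i = 0; i < 4; i++)
        printf("%f ", scaled[i]);
    printf("\n");

    free(scaled);
    return 0;
}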
--------------------------------------------------------------------------------
/test/Activations/test_elu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/elu.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_elu()
9 | {
10 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY};
11 | float alpha_values[6] = {0.0, 1.0, FLT_MAX, -FLT_MAX, NAN, INFINITY};
12 | float tolerance = 1e-6;
13 |
14 | for (int a = 0; a < 6; a++)
15 | {
16 | float alpha = alpha_values[a];
17 |
18 | for (int i = 0; i < 11; i++)
19 | {
20 | if (isnan(input[i]) || isnan(alpha) || isinf(input[i]) || isinf(alpha))
21 | {
22 | assert(CM_INVALID_INPUT_ERROR == elu(input[i], alpha));
23 | }
24 | else
25 | {
26 | float expected_output;
27 | if (input[i] >= 0)
28 | {
29 | expected_output = input[i];
30 | }
31 | else
32 | {
33 | expected_output = alpha * (expf(input[i]) - 1);
34 | }
35 |
36 | float output = elu(input[i], alpha);
37 | assert(fabs(output - expected_output) < tolerance);
38 | }
39 | }
40 | }
41 | printf("ELU activation function test passed\n");
42 | }
43 |
44 | int main()
45 | {
46 | printf("Testing ELU activation function\n");
47 | test_elu();
48 | return CM_SUCCESS;
49 | }
--------------------------------------------------------------------------------
/test/Activations/test_gelu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Activations/gelu.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_gelu()
8 | {
9 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e-6, -1e-6, 1e6, -1e6, NAN, INFINITY};
10 | float expected_output[11] = {0.0f, 0.841192f, -0.158808f, 1.954597f, -0.045402f, 1e-6f, -1e-6f, 1e6f, 0.0f, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR};
11 | float tolerance = 1e-6;
12 |
13 | for (int i = 0; i < 11; i++)
14 | {
15 | if (isnan(input[i]) || isinf(input[i]))
16 | {
17 | assert(CM_INVALID_INPUT_ERROR == gelu(input[i]));
18 | }
19 | else
20 | {
21 | float output = gelu(input[i]);
22 | assert(fabs(output - expected_output[i]) < tolerance);
23 | }
24 | }
25 |
26 | printf("GELU activation function test passed\n");
27 | }
28 |
29 | int main()
30 | {
31 | printf("Testing GELU activation function\n");
32 | test_gelu();
33 | return CM_SUCCESS;
34 | }
35 |
--------------------------------------------------------------------------------
/test/Activations/test_leaky_relu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/leaky_relu.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_leakyRelu()
9 | {
10 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY};
11 | float expected_output[11] = {0.0f, 1.0f, -0.01f, 2.0f, -0.02f, 1e6f, -1e4f, FLT_MAX, -FLT_MAX * 0.01f, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR};
12 | float tolerance = 1e-6;
13 |
14 | for (int i = 0; i < 11; i++)
15 | {
16 | if (isnan(input[i]) || isinf(input[i]))
17 | {
18 | assert(CM_INVALID_INPUT_ERROR == leaky_relu(input[i]));
19 | }
20 | else
21 | {
22 | assert(fabs(leaky_relu(input[i]) - expected_output[i]) < tolerance);
23 | }
24 | }
25 | printf("leakyRelu activation function test passed\n");
26 | }
27 |
28 | int main()
29 | {
30 | printf("Testing leakyRelu activation function\n");
31 | test_leakyRelu();
32 | return CM_SUCCESS;
33 | }
--------------------------------------------------------------------------------
/test/Activations/test_linear.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/linear.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_linear_activation()
9 | {
10 | float input[13] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY, -INFINITY, 0.5};
11 | float expected_output[13] = {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 1e6f, -1e6f, FLT_MAX, -FLT_MAX, NAN, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR, 0.5f};
12 | float tolerance = 1e-6;
13 |
14 | for (int i = 0; i < 13; i++)
15 | {
16 | if (isnan(input[i]) || isinf(input[i]))
17 | {
18 | assert(CM_INVALID_INPUT_ERROR == linear(input[i]));
19 | }
20 | else
21 | {
22 | assert(fabs(linear(input[i]) - expected_output[i]) < tolerance);
23 | }
24 | }
25 | printf("linear activation function test passed\n");
26 | }
27 |
28 | int main()
29 | {
30 | printf("Testing linear activation function\n");
31 | test_linear_activation();
32 | return CM_SUCCESS;
33 | }
--------------------------------------------------------------------------------
/test/Activations/test_relu.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/relu.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_relu()
9 | {
10 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY};
11 | float expected_output[11] = {0.0f, 1.0f, 0.0f, 2.0f, 0.0f, 1e6f, 0.0f, FLT_MAX, 0.0f, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR};
12 | float tolerance = 1e-6;
13 |
14 | for (int i = 0; i < 11; i++)
15 | {
16 | if (isnan(input[i]) || isinf(input[i]))
17 | {
18 | assert(CM_INVALID_INPUT_ERROR == relu(input[i]));
19 | }
20 | else
21 | {
22 | assert(fabs(relu(input[i]) - expected_output[i]) < tolerance);
23 | }
24 | }
25 | printf("relu activation function test passed\n");
26 | }
27 |
28 | int main()
29 | {
30 | printf("Testing relu activation function\n");
31 | test_relu();
32 | return CM_SUCCESS;
33 | }
34 |
--------------------------------------------------------------------------------
/test/Activations/test_sigmoid.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/sigmoid.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_sigmoid()
9 | {
10 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY};
11 | float expected_output[11] = {0.5f, 0.73105858f, 0.26894142f, 0.88079708f, 0.11920292f, 1.0f, 0.0f, 1.0f, 0.0f, NAN, 1.0f};
12 | float tolerance = 1e-6;
13 | for (int i = 0; i < 11; i++)
14 | {
15 | if (isnan(input[i]) || isinf(input[i]))
16 | {
17 | assert(CM_INVALID_INPUT_ERROR == sigmoid(input[i]));
18 | }
19 | else
20 | {
21 | assert(fabs(sigmoid(input[i]) - expected_output[i]) < tolerance);
22 | }
23 | }
24 | printf("sigmoid activation function test passed\n");
25 | }
26 |
27 | int main()
28 | {
29 | printf("Testing sigmoid activation function\n");
30 | test_sigmoid();
31 | return CM_SUCCESS;
32 | }
33 |
--------------------------------------------------------------------------------
/test/Activations/test_softmax.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include <float.h>
6 | #include "../../include/Activations/softmax.h"
7 | #include "../../include/Core/error_codes.h"
8 |
9 | void test_softmax()
10 | {
11 | float input[5][3] = {
12 | {0.0, 0.0, 0.0},
13 | {1.0, 2.0, 3.0},
14 | {-1.0, -2.0, -3.0},
15 | {1000.0, 1001.0, 1002.0},
16 | {INFINITY, 0.0, -INFINITY}};
17 | float expected_output[5][3] = {
18 | {0.33333333f, 0.33333333f, 0.33333333f},
19 | {0.09003057f, 0.24472847f, 0.66524096f},
20 | {0.66524096f, 0.24472847f, 0.09003057f},
21 | {0.09003057f, 0.24472847f, 0.66524096f},
22 | {CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR}};
23 | float tolerance = 1e-6;
24 |
25 | for (int i = 0; i < 5; i++)
26 | {
27 | float *output = softmax(input[i], 3);
28 | if (isinf(input[i][0]) || isinf(input[i][1]) || isinf(input[i][2]) ||
29 | isnan(input[i][0]) || isnan(input[i][1]) || isnan(input[i][2]))
30 | {
31 | assert(output == (float *)CM_INVALID_INPUT_ERROR);
32 | }
33 | else
34 | {
35 | for (int j = 0; j < 3; j++)
36 | {
37 | assert(fabs(output[j] - expected_output[i][j]) < tolerance);
38 | }
39 | free_softmax(&output);
40 | }
41 | }
42 | printf("softmax activation function test passed\n");
43 | }
44 |
45 | int main()
46 | {
47 | printf("Testing softmax activation function\n");
48 | test_softmax();
49 | return CM_SUCCESS;
50 | }
51 |
--------------------------------------------------------------------------------
/test/Activations/test_tanh.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include <float.h>
5 | #include "../../include/Activations/tanh.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_tanh()
9 | {
10 | float input[11] = {0.0, 1.0, -1.0, 2.0, -2.0, 1e6, -1e6, FLT_MAX, -FLT_MAX, NAN, INFINITY};
11 | float expected_output[11] = {0.0f, 0.76159416f, -0.76159416f, 0.96402758f, -0.96402758f, 1.0f, -1.0f, 1.0f, -1.0f, CM_INVALID_INPUT_ERROR, CM_INVALID_INPUT_ERROR};
12 | float tolerance = 1e-6;
13 |
14 | for (int i = 0; i < 11; i++)
15 | {
16 | if (isnan(input[i]) || isinf(input[i]))
17 | {
18 | assert(CM_INVALID_INPUT_ERROR == tanH(input[i]));
19 | }
20 | else
21 | {
22 | assert(fabs(tanH(input[i]) - expected_output[i]) < tolerance);
23 | }
24 | }
25 | printf("tanh activation function test passed\n");
26 | }
27 |
28 | int main()
29 | {
30 | printf("Testing tanh activation function\n");
31 | test_tanh();
32 | return CM_SUCCESS;
33 | }
34 |
--------------------------------------------------------------------------------
/test/Core/test_logging.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include "../../include/Core/logging.h"
5 |
6 | /**
7 | * @brief Test the logging system
8 | *
9 | * @return int 0 if all tests pass, non-zero otherwise
10 | */
11 | int main()
12 | {
13 | printf("Running logging tests...\n");
14 |
15 | set_log_level(LOG_LEVEL_DEBUG);
16 |
17 | LOG_DEBUG("This is a debug message: %d", 42);
18 | LOG_INFO("This is an info message: %s", "hello");
19 | LOG_WARNING("This is a warning message: %.2f", 3.14);
20 | LOG_ERROR("This is an error message: %c", 'X');
21 |
22 | printf("\nChanging log level to INFO (DEBUG messages should not appear):\n");
23 | set_log_level(LOG_LEVEL_INFO);
24 |
25 | LOG_DEBUG("This debug message should NOT appear");
26 | LOG_INFO("This info message should appear");
27 | LOG_WARNING("This warning message should appear");
28 | LOG_ERROR("This error message should appear");
29 |
30 | printf("\nChanging log level to WARNING (DEBUG and INFO messages should not appear):\n");
31 | set_log_level(LOG_LEVEL_WARNING);
32 |
33 | LOG_DEBUG("This debug message should NOT appear");
34 | LOG_INFO("This info message should NOT appear");
35 | LOG_WARNING("This warning message should appear");
36 | LOG_ERROR("This error message should appear");
37 |
38 | printf("\nChanging log level to ERROR (only ERROR messages should appear):\n");
39 | set_log_level(LOG_LEVEL_ERROR);
40 |
41 | LOG_DEBUG("This debug message should NOT appear");
42 | LOG_INFO("This info message should NOT appear");
43 | LOG_WARNING("This warning message should NOT appear");
44 | LOG_ERROR("This error message should appear");
45 |
46 | printf("\nLogging tests completed successfully!\n");
47 | return 0;
48 | }
49 |
--------------------------------------------------------------------------------
/test/Core/test_memory_management.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <assert.h>
4 | #include "../../include/Core/memory_management.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_cm_safe_malloc()
8 | {
9 | int *ptr = (int *)cm_safe_malloc(sizeof(int), __FILE__, __LINE__);
10 | assert(ptr != (void *)CM_MEMORY_ALLOCATION_ERROR);
11 |
12 | *ptr = 42;
13 | assert(*ptr == 42);
14 |
15 | cm_safe_free((void **)&ptr);
16 |
17 | assert(ptr == NULL);
18 |
19 | void *zero_ptr = cm_safe_malloc(0, __FILE__, __LINE__);
20 | assert(zero_ptr != (void *)CM_MEMORY_ALLOCATION_ERROR);
21 | cm_safe_free(&zero_ptr);
22 |
23 | size_t large_size = (size_t)-1;
24 | void *large_ptr = cm_safe_malloc(large_size, __FILE__, __LINE__);
25 | assert(large_ptr == (void *)CM_MEMORY_ALLOCATION_ERROR);
26 |
27 | printf("cm_safe_malloc test passed\n");
28 | }
29 |
30 | void test_cm_safe_free()
31 | {
32 | cm_safe_free(NULL);
33 |
34 | int *ptr = (int *)cm_safe_malloc(sizeof(int), __FILE__, __LINE__);
35 | assert(ptr != (void *)CM_MEMORY_ALLOCATION_ERROR);
36 | *ptr = 99;
37 | cm_safe_free((void **)&ptr);
38 | assert(ptr == NULL);
39 |
40 | cm_safe_free(NULL);
41 |
42 | ptr = (int *)cm_safe_malloc(sizeof(int), __FILE__, __LINE__);
43 | assert(ptr != (void *)CM_MEMORY_ALLOCATION_ERROR);
44 | cm_safe_free((void **)&ptr);
45 | cm_safe_free((void **)&ptr);
46 |
47 | printf("cm_safe_free test passed\n");
48 | }
49 |
50 | int main()
51 | {
52 | printf("Testing Memory Management\n");
53 | test_cm_safe_malloc();
54 | test_cm_safe_free();
55 | return CM_SUCCESS;
56 | }
57 |
--------------------------------------------------------------------------------
/test/Core/test_training.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <assert.h>
4 | #include "../../include/Core/memory_management.h"
5 | #include "../../include/Core/training.h"
6 | #include "../../include/Core/error_codes.h"
7 | #include "../../include/Core/logging.h"
8 |
9 | void test_large_layer_sizes()
10 | {
11 | set_log_level(LOG_LEVEL_DEBUG);
12 |
13 | NeuralNetwork *network = create_neural_network(1000);
14 |
15 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 1000, 2000, 0.0f, 0, 0);
16 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 2000, 500, 0.0f, 0, 0);
17 |
18 | float *input = (float *)cm_safe_malloc(1000 * sizeof(float), __FILE__, __LINE__);
19 | float *output = (float *)cm_safe_malloc(500 * sizeof(float), __FILE__, __LINE__);
20 |
21 | for (int i = 0; i < 1000; i++)
22 | input[i] = 0.1f;
23 |
24 | CM_Error error = forward_pass(network, input, output, 1000, 500, 0);
25 |
26 | assert(error == CM_SUCCESS);
27 |
28 | free_neural_network(network);
29 | cm_safe_free((void **)&input);
30 | cm_safe_free((void **)&output);
31 | }
32 |
33 | void test_mismatched_layer_sizes()
34 | {
35 |
36 | NeuralNetwork *network = create_neural_network(10);
37 |
38 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 10, 20, 0.0f, 0, 0);
39 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 20, 5, 0.0f, 0, 0);
40 |
41 | float input[15] = {0};
42 | float output[5] = {0};
43 |
44 | CM_Error error = forward_pass(network, input, output, 15, 5, 0);
45 |
46 | assert(error == CM_INVALID_LAYER_DIMENSIONS_ERROR);
47 |
48 | free_neural_network(network);
49 | }
50 |
51 | void test_optimizer_memory()
52 | {
53 |
54 | NeuralNetwork *network = create_neural_network(10);
55 |
56 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 10, 100, 0.0f, 0, 0);
57 | model_add(network, LAYER_DENSE, ACTIVATION_RELU, 100, 50, 0.0f, 0, 0);
58 |
59 | CM_Error error = build_network(network, OPTIMIZER_ADAM, 0.01f, LOSS_MSE, 0.0f, 0.0f);
60 | assert(error == CM_SUCCESS);
61 |
62 | error = initialize_optimizer_params(network);
63 | assert(error == CM_SUCCESS);
64 |
65 | free_neural_network(network);
66 | }
67 |
68 | int main()
69 | {
70 | printf("Testing Neural Network Training\n");
71 | test_large_layer_sizes();
72 | test_mismatched_layer_sizes();
73 | test_optimizer_memory();
74 | return CM_SUCCESS;
75 | }
76 |
--------------------------------------------------------------------------------
/test/Layers/test_dense.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Layers/dense.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_dense()
9 | {
10 | {
11 | DenseLayer layer = {NULL, NULL, 0, 0};
12 | int ret = initialize_dense(&layer, 3, 2);
13 | assert(ret == CM_SUCCESS);
14 |
15 | assert(layer.weights != NULL);
16 | assert(layer.biases != NULL);
17 |
18 | float input[] = {1.0, 2.0, 3.0};
19 | float output[2];
20 | ret = forward_dense(&layer, input, output);
21 | assert(ret == CM_SUCCESS);
22 | assert(output[0] != 0.0f);
23 | assert(output[1] != 0.0f);
24 |
25 | float d_output[] = {0.1, 0.2};
26 | float d_input[3] = {0};
27 | float d_weights[6] = {0};
28 | float d_biases[2] = {0};
29 | ret = backward_dense(&layer, input, output, d_output, d_input, d_weights, d_biases);
30 | assert(ret == CM_SUCCESS);
31 | for (int i = 0; i < 6; i++)
32 | assert(d_weights[i] != 0.0f);
33 | for (int i = 0; i < 2; i++)
34 | assert(d_biases[i] != 0.0f);
35 | ret = update_dense(&layer, d_weights, d_biases, 0.01);
36 | assert(ret == CM_SUCCESS);
37 |
38 | free_dense(&layer);
39 | printf("Dense layer normal test passed\n");
40 | }
41 |
42 | {
43 | DenseLayer layer;
44 | int ret;
45 | ret = initialize_dense(NULL, 3, 2);
46 | assert(ret == CM_NULL_POINTER_ERROR);
47 |
48 | layer.weights = NULL;
49 | layer.biases = NULL;
50 |         layer.input_size = 3; // set manually; initialize_dense was never called on this layer
51 | layer.output_size = 2;
52 | ret = forward_dense(&layer, NULL, (float[2]){0});
53 | assert(ret == CM_NULL_POINTER_ERROR);
54 | ret = forward_dense(&layer, (float[3]){1, 2, 3}, NULL);
55 | assert(ret == CM_NULL_POINTER_ERROR);
56 |
57 | ret = backward_dense(&layer, NULL, (float[2]){0}, (float[2]){0},
58 | (float[3]){0}, (float[6]){0}, (float[2]){0});
59 | assert(ret == CM_NULL_POINTER_ERROR);
60 |
61 | ret = update_dense(&layer, NULL, (float[2]){0}, 0.01);
62 | assert(ret == CM_NULL_POINTER_ERROR);
63 |
64 | ret = free_dense(NULL);
65 | assert(ret == CM_NULL_POINTER_ERROR);
66 |
67 | printf("Dense layer edge case tests passed\n");
68 | }
69 | }
70 |
71 | int main()
72 | {
73 | printf("Testing DenseLayer\n");
74 | test_dense();
75 | return CM_SUCCESS;
76 | }
77 |
--------------------------------------------------------------------------------
/test/Layers/test_dropout.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Layers/dropout.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_dropout()
9 | {
10 | {
11 | DropoutLayer layer;
12 | int ret = initialize_dropout(&layer, 0.5);
13 | assert(ret == CM_SUCCESS);
14 | assert(layer.dropout_rate == 0.5);
15 |
16 | float input[] = {1.0, 2.0, 3.0, 4.0, 5.0};
17 | float output[5];
18 | ret = forward_dropout(&layer, input, output, 5);
19 | assert(ret == CM_SUCCESS);
20 | for (int i = 0; i < 5; i++)
21 | {
22 | assert(output[i] == 0.0f || output[i] == input[i] / (1 - layer.dropout_rate));
23 | }
24 |
25 | float d_output[] = {0.1, 0.2, 0.3, 0.4, 0.5};
26 | float d_input[5] = {0};
27 | ret = backward_dropout(&layer, input, output, d_output, d_input, 5);
28 | assert(ret == CM_SUCCESS);
29 | for (int i = 0; i < 5; i++)
30 | {
31 | if (output[i] == 0.0f)
32 | assert(d_input[i] == 0.0f);
33 | else
34 | assert(fabs(d_input[i] - d_output[i] / (1 - layer.dropout_rate)) < 1e-6);
35 | }
36 | printf("Dropout layer normal test passed\n");
37 | }
38 |
39 | {
40 | int ret;
41 | ret = initialize_dropout(NULL, 0.5);
42 | assert(ret == CM_NULL_POINTER_ERROR);
43 |
44 | DropoutLayer layer;
45 | ret = initialize_dropout(&layer, -0.1);
46 | assert(ret == CM_INVALID_PARAMETER_ERROR);
47 | ret = initialize_dropout(&layer, 1.1);
48 | assert(ret == CM_INVALID_PARAMETER_ERROR);
49 |
50 | ret = initialize_dropout(&layer, 0.5);
51 | assert(ret == CM_SUCCESS);
52 |
53 | ret = forward_dropout(&layer, NULL, (float[5]){0}, 5);
54 | assert(ret == CM_NULL_POINTER_ERROR);
55 | ret = forward_dropout(&layer, (float[5]){1, 2, 3, 4, 5}, NULL, 5);
56 | assert(ret == CM_NULL_POINTER_ERROR);
57 |
58 | ret = backward_dropout(&layer, NULL, (float[5]){0}, (float[5]){0}, (float[5]){0}, 5);
59 | assert(ret == CM_NULL_POINTER_ERROR);
60 |
61 | printf("Dropout layer edge case tests passed\n");
62 | }
63 | }
64 |
65 | int main()
66 | {
67 | printf("Testing DropoutLayer\n");
68 | test_dropout();
69 | return CM_SUCCESS;
70 | }
71 |
--------------------------------------------------------------------------------
/test/Layers/test_flatten.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Layers/flatten.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_flatten()
9 | {
10 | {
11 | FlattenLayer layer = {0, 0};
12 | int ret = initialize_flatten(&layer, 3);
13 | assert(ret == CM_SUCCESS);
14 |
15 | float input[] = {1.0, 2.0, 3.0};
16 | float output[3];
17 | ret = forward_flatten(&layer, input, output);
18 | assert(ret == CM_SUCCESS);
19 | for (int i = 0; i < 3; i++)
20 | assert(output[i] == input[i]);
21 |
22 | float d_output[] = {0.5, 0.5, 0.5};
23 | float d_input[3] = {0};
24 | ret = backward_flatten(&layer, input, output, d_output, d_input);
25 | assert(ret == CM_SUCCESS);
26 | for (int i = 0; i < 3; i++)
27 | assert(d_input[i] == d_output[i]);
28 |
29 | ret = free_flatten(&layer);
30 | assert(ret == CM_SUCCESS);
31 | printf("Flatten layer normal test passed\n");
32 | }
33 |
34 | {
35 | int ret;
36 | ret = initialize_flatten(NULL, 3);
37 | assert(ret == CM_NULL_LAYER_ERROR);
38 |
39 | FlattenLayer layer = {0, 0};
40 | ret = initialize_flatten(&layer, 0);
41 | assert(ret == CM_INVALID_LAYER_DIMENSIONS_ERROR);
42 |
43 | ret = initialize_flatten(&layer, 3);
44 | assert(ret == CM_SUCCESS);
45 |
46 | ret = forward_flatten(&layer, NULL, (float[3]){0});
47 | assert(ret == CM_NULL_POINTER_ERROR);
48 | ret = forward_flatten(&layer, (float[3]){1, 2, 3}, NULL);
49 | assert(ret == CM_NULL_POINTER_ERROR);
50 |
51 | ret = backward_flatten(&layer, NULL, (float[3]){0}, (float[3]){0}, (float[3]){0});
52 | assert(ret == CM_NULL_POINTER_ERROR);
53 |
54 | printf("Flatten layer edge case tests passed\n");
55 | }
56 | }
57 |
58 | int main()
59 | {
60 | printf("Testing FlattenLayer\n");
61 | test_flatten();
62 | return CM_SUCCESS;
63 | }
64 |
--------------------------------------------------------------------------------
/test/Layers/test_maxpooling.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Layers/maxpooling.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_maxpooling()
9 | {
10 | {
11 | MaxPoolingLayer layer;
12 | int ret = initialize_maxpooling(&layer, 2, 2);
13 | assert(ret == CM_SUCCESS);
14 |
15 | ret = compute_maxpooling_output_size(8, 2, 2);
16 | assert(ret == 4);
17 |
18 | float input[8] = {1, 3, 2, 5, 0, 1, 4, 2};
19 | float output[4];
20 | ret = forward_maxpooling(&layer, input, output, 8);
21 | assert(ret > 0);
22 | assert(fabs(output[0] - 3) < 1e-6);
23 | assert(fabs(output[1] - 5) < 1e-6);
24 | assert(fabs(output[2] - 1) < 1e-6);
25 | assert(fabs(output[3] - 4) < 1e-6);
26 | printf("MaxPooling layer normal test passed\n");
27 | }
28 |
29 | {
30 | int ret;
31 | MaxPoolingLayer layer;
32 | ret = initialize_maxpooling(&layer, 0, 2);
33 | assert(ret == CM_INVALID_KERNEL_SIZE_ERROR);
34 |
35 | ret = initialize_maxpooling(&layer, 2, 0);
36 | assert(ret == CM_INVALID_STRIDE_ERROR);
37 |
38 | ret = initialize_maxpooling(&layer, 2, 2);
39 | assert(ret == CM_SUCCESS);
40 |
41 | ret = compute_maxpooling_output_size(-1, 2, 2);
42 | assert(ret == CM_INVALID_INPUT_ERROR);
43 |
44 | ret = forward_maxpooling(&layer, NULL, (float[4]){0}, 8);
45 | assert(ret == CM_NULL_POINTER_ERROR);
46 | ret = forward_maxpooling(&layer, (float[8]){1, 2, 3, 4, 5, 6, 7, 8}, NULL, 8);
47 | assert(ret == CM_NULL_POINTER_ERROR);
48 |
49 | ret = free_maxpooling(NULL);
50 | assert(ret == CM_NULL_LAYER_ERROR);
51 | printf("MaxPooling layer edge case tests passed\n");
52 | }
53 | }
54 |
55 | int main()
56 | {
57 | printf("Testing MaxPoolingLayer\n");
58 | test_maxpooling();
59 | return CM_SUCCESS;
60 | }
61 |
--------------------------------------------------------------------------------
/test/Layers/test_pooling.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Layers/pooling.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_pooling()
9 | {
10 | {
11 | PoolingLayer layer;
12 | int ret = initialize_pooling(&layer, 2, 2);
13 | assert(ret == CM_SUCCESS);
14 |
15 | ret = compute_pooling_output_size(8, 2, 2);
16 | assert(ret == 4);
17 |
18 | float input[8] = {1, 3, 2, 5, 0, 1, 4, 2};
19 | float output[4];
20 | ret = forward_pooling(&layer, input, output, 8);
21 | assert(ret == 4);
22 | assert(fabs(output[0] - 2) < 1e-6);
23 | assert(fabs(output[1] - 3.5) < 1e-6);
24 | assert(fabs(output[2] - 0.5) < 1e-6);
25 | assert(fabs(output[3] - 3) < 1e-6);
26 |
27 | ret = free_pooling(&layer);
28 | assert(ret == CM_SUCCESS);
29 | printf("Pooling layer normal test passed\n");
30 | }
31 |
32 | {
33 | int ret;
34 | PoolingLayer layer;
35 | ret = initialize_pooling(&layer, 0, 2);
36 | assert(ret == CM_INVALID_KERNEL_SIZE_ERROR);
37 |
38 | ret = initialize_pooling(&layer, 2, 0);
39 | assert(ret == CM_INVALID_STRIDE_ERROR);
40 |
41 | ret = initialize_pooling(&layer, 2, 2);
42 | assert(ret == CM_SUCCESS);
43 |
44 | ret = compute_pooling_output_size(-1, 2, 2);
45 | assert(ret == CM_INVALID_INPUT_ERROR);
46 |
47 | ret = forward_pooling(&layer, NULL, (float[4]){0}, 8);
48 | assert(ret == CM_NULL_POINTER_ERROR);
49 | ret = forward_pooling(&layer, (float[8]){1, 2, 3, 4, 5, 6, 7, 8}, NULL, 8);
50 | assert(ret == CM_NULL_POINTER_ERROR);
51 |
52 | printf("Pooling layer edge case tests passed\n");
53 | }
54 | }
55 |
56 | int main()
57 | {
58 | printf("Testing PoolingLayer\n");
59 | test_pooling();
60 | return CM_SUCCESS;
61 | }
62 |
--------------------------------------------------------------------------------
/test/Loss_Functions/test_binary_cross_entropy_loss.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Loss_Functions/binary_cross_entropy_loss.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_binary_cross_entropy_loss()
8 | {
9 | float y[3] = {0.0, 1.0, 0.0};
10 | float yHat[3] = {0.1, 0.9, 0.2};
11 | float expected = 0.14462153f;
12 | float tolerance = 1e-6;
13 |
14 | float actual = binary_cross_entropy_loss(yHat, y, 3);
15 | assert(fabs(actual - expected) < tolerance);
16 |
17 | float y2[3] = {1.0, 0.0, 1.0};
18 | float yHat2[3] = {0.9999, 0.0001, 0.9999};
19 | float expected2 = 0.0001f;
20 |
21 | float actual2 = binary_cross_entropy_loss(yHat2, y2, 3);
22 | assert(fabs(actual2 - expected2) < tolerance);
23 |
24 | float y3[3] = {0.0, 1.0, 0.0};
25 | float yHat3[3] = {0.5, 0.5, 0.5};
26 | float expected3 = 0.693147181f;
27 | assert(fabs(binary_cross_entropy_loss(yHat3, y3, 3) - expected3) < tolerance);
28 |
29 | float y_empty[0] = {};
30 | float yHat_empty[0] = {};
31 | float result = binary_cross_entropy_loss(yHat_empty, y_empty, 0);
32 | assert(result == CM_INVALID_INPUT_ERROR);
33 |
34 | assert(binary_cross_entropy_loss(NULL, y, 3) == CM_INVALID_INPUT_ERROR);
35 | assert(binary_cross_entropy_loss(yHat, NULL, 3) == CM_INVALID_INPUT_ERROR);
36 |
37 | assert(binary_cross_entropy_loss(yHat, y, 0) == CM_INVALID_INPUT_ERROR);
38 |
39 | printf("binary_cross_entropy_loss test passed\n");
40 | }
41 |
42 | int main()
43 | {
44 | printf("Testing binary_cross_entropy_loss\n");
45 | test_binary_cross_entropy_loss();
46 | return CM_SUCCESS;
47 | }
48 |
--------------------------------------------------------------------------------
/test/Loss_Functions/test_focal_loss.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Loss_Functions/focal_loss.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_focal_loss()
8 | {
9 | float y[3] = {1.0, 0.0, 1.0};
10 | float yHat[3] = {0.9, 0.1, 0.8};
11 | float gamma = 2.0;
12 |
13 | float expected = (-1.0 * powf(1 - 0.9, 2.0) * logf(0.9) - (1 - 0.0) * powf(0.1, 2.0) * logf(0.9) - 1.0 * powf(1 - 0.8, 2.0) * logf(0.8)) / 3.0f;
14 |
15 | float tolerance = 1e-6;
16 | assert(fabs(focal_loss(y, yHat, 3, gamma) - expected) < tolerance);
17 |
18 | float y4[3] = {1.0, 0.0, 1.0};
19 | float yHat4[3] = {0.9, 0.1, 0.8};
20 | float gamma0 = 0.0;
21 | float expected0 = (-1.0 * logf(0.9) - (1 - 0.0) * logf(0.9) - 1.0 * logf(0.8)) / 3.0f;
22 | assert(fabs(focal_loss(y4, yHat4, 3, gamma0) - expected0) < tolerance);
23 |
24 | assert(focal_loss(NULL, yHat, 3, gamma) == CM_INVALID_INPUT_ERROR);
25 | assert(focal_loss(y, NULL, 3, gamma) == CM_INVALID_INPUT_ERROR);
26 |
27 | assert(focal_loss(y, yHat, 0, gamma) == CM_INVALID_INPUT_ERROR);
28 |
29 | printf("focal_loss test passed\n");
30 | }
31 |
32 | int main()
33 | {
34 | printf("Testing focal_loss\n");
35 | test_focal_loss();
36 | return CM_SUCCESS;
37 | }
38 |
--------------------------------------------------------------------------------
/test/Loss_Functions/test_mean_squared_error.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Loss_Functions/mean_squared_error.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_mean_squared_error()
8 | {
9 | float y[3] = {1.0, 2.0, 3.0};
10 | float yHat[3] = {1.1, 1.9, 3.2};
11 | float expected = 0.02f;
12 | float tolerance = 1e-6;
13 | assert(fabs(mean_squared_error(y, yHat, 3) - expected) < tolerance);
14 |
15 | float y_empty[0] = {};
16 | float yHat_empty[0] = {};
17 | assert(mean_squared_error(y_empty, yHat_empty, 0) == CM_INVALID_INPUT_ERROR);
18 |
19 | float y_identical[3] = {1.0, 2.0, 3.0};
20 | float yHat_identical[3] = {1.0, 2.0, 3.0};
21 | assert(mean_squared_error(y_identical, yHat_identical, 3) == 0);
22 |
23 | float y_large[3] = {1e6, 2e6, 3e6};
24 | float yHat_large[3] = {1e6 + 1, 2e6 - 1, 3e6 + 2};
25 | float expected_large = (1 + 1 + 4) / 3.0f;
26 | assert(fabs(mean_squared_error(y_large, yHat_large, 3) - expected_large) < tolerance);
27 |
28 | assert(mean_squared_error(NULL, yHat, 3) == CM_INVALID_INPUT_ERROR);
29 | assert(mean_squared_error(y, NULL, 3) == CM_INVALID_INPUT_ERROR);
30 |
31 | assert(mean_squared_error(y, yHat, 0) == CM_INVALID_INPUT_ERROR);
32 |
33 | printf("mean_squared_error test passed\n");
34 | }
35 |
36 | int main()
37 | {
38 | printf("Testing mean_squared_error\n");
39 | test_mean_squared_error();
40 | return CM_SUCCESS;
41 | }
42 |
--------------------------------------------------------------------------------
/test/Metrics/test_mean_absolute_error.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Metrics/mean_absolute_error.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_mean_absolute_error()
8 | {
9 | float y[3] = {1.0, 2.0, 3.0};
10 | float yHat[3] = {1.1, 1.9, 3.2};
11 | float expected = 0.13333333f;
12 | float tolerance = 1e-6;
13 | assert(fabs(mean_absolute_error(y, yHat, 3) - expected) < tolerance);
14 |
15 | float y_empty[0] = {};
16 | float yHat_empty[0] = {};
17 | assert(mean_absolute_error(y_empty, yHat_empty, 0) == CM_INVALID_INPUT_ERROR);
18 |
19 | assert(mean_absolute_error(NULL, yHat, 3) == CM_INVALID_INPUT_ERROR);
20 | assert(mean_absolute_error(y, NULL, 3) == CM_INVALID_INPUT_ERROR);
21 |
22 | assert(mean_absolute_error(y, yHat, 0) == CM_INVALID_INPUT_ERROR);
23 |
24 | printf("mean_absolute_error test passed\n");
25 | }
26 |
27 | int main()
28 | {
29 | printf("Testing mean_absolute_error\n");
30 | test_mean_absolute_error();
31 | return CM_SUCCESS;
32 | }
33 |
--------------------------------------------------------------------------------
/test/Metrics/test_mean_absolute_percentage_error.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Metrics/mean_absolute_percentage_error.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_mean_absolute_percentage_error()
8 | {
9 | float y[3] = {1.0, 2.0, 3.0};
10 | float yHat[3] = {1.1, 1.9, 3.2};
11 | float expected = 7.2222222f;
12 | float tolerance = 1e-5;
13 | assert(fabs(mean_absolute_percentage_error(y, yHat, 3) - expected) < tolerance);
14 |
15 | float y_zero[3] = {0.0, 2.0, 3.0};
16 | float yHat_zero[3] = {1.0, 2.0, 3.0};
17 | float expected_zero = 0.0f;
18 | assert(fabs(mean_absolute_percentage_error(y_zero, yHat_zero, 3) - expected_zero) < tolerance);
19 |
20 | float y_large[3] = {1e6, 2e6, 3e6};
21 | float yHat_large[3] = {1e6 + 1, 2e6 - 1, 3e6 + 2};
22 | float expected_large = ((1.0 / 1e6 + 1.0 / 2e6 + 2.0 / 3e6) / 3) * 100;
23 | assert(fabs(mean_absolute_percentage_error(y_large, yHat_large, 3) - expected_large) < tolerance);
24 |
25 | assert(mean_absolute_percentage_error(NULL, yHat, 3) == CM_INVALID_INPUT_ERROR);
26 | assert(mean_absolute_percentage_error(y, NULL, 3) == CM_INVALID_INPUT_ERROR);
27 |
28 | assert(mean_absolute_percentage_error(y, yHat, 0) == CM_INVALID_INPUT_ERROR);
29 |
30 | printf("mean_absolute_percentage_error test passed\n");
31 | }
32 |
33 | int main()
34 | {
35 | printf("Testing mean_absolute_percentage_error\n");
36 | test_mean_absolute_percentage_error();
37 | return CM_SUCCESS;
38 | }
39 |
--------------------------------------------------------------------------------
/test/Metrics/test_reduce_mean.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <assert.h>
3 | #include "../../include/Metrics/reduce_mean.h"
4 | #include "../../include/Core/error_codes.h"
5 |
6 | void test_reduce_mean()
7 | {
8 | float loss[3] = {1.0, 2.0, 3.0};
9 | float expected = 2.0f;
10 | assert(reduce_mean(loss, 3) == expected);
11 |
12 | float loss_empty[0] = {};
13 | assert(reduce_mean(loss_empty, 0) == CM_INVALID_INPUT_ERROR);
14 |
15 | float loss_identical[3] = {5.0, 5.0, 5.0};
16 | assert(reduce_mean(loss_identical, 3) == 5.0f);
17 |
18 | assert(reduce_mean(NULL, 3) == CM_INVALID_INPUT_ERROR);
19 |
20 | assert(reduce_mean(loss, 0) == CM_INVALID_INPUT_ERROR);
21 |
22 | printf("reduce_mean test passed\n");
23 | }
24 |
25 | int main()
26 | {
27 | printf("Testing reduce_mean\n");
28 | test_reduce_mean();
29 | return CM_SUCCESS;
30 | }
31 |
--------------------------------------------------------------------------------
/test/Metrics/test_root_mean_squared_error.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Metrics/root_mean_squared_error.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_root_mean_squared_error()
8 | {
9 | float y[3] = {1.0, 2.0, 3.0};
10 | float yHat[3] = {1.1, 1.9, 3.2};
11 | float expected = 0.14142136f;
12 | float tolerance = 1e-6;
13 | assert(fabs(root_mean_squared_error(y, yHat, 3) - expected) < tolerance);
14 |
15 | float y_empty[0] = {};
16 | float yHat_empty[0] = {};
17 | assert(root_mean_squared_error(y_empty, yHat_empty, 0) == CM_INVALID_INPUT_ERROR);
18 |
19 | float y_identical[3] = {1.0, 2.0, 3.0};
20 | float yHat_identical[3] = {1.0, 2.0, 3.0};
21 | assert(root_mean_squared_error(y_identical, yHat_identical, 3) == 0);
22 |
23 | float y_large[3] = {1e6, 2e6, 3e6};
24 | float yHat_large[3] = {1e6 + 1, 2e6 - 1, 3e6 + 2};
25 | float expected_large = sqrtf((1 + 1 + 4) / 3.0f);
26 | assert(fabs(root_mean_squared_error(y_large, yHat_large, 3) - expected_large) < tolerance);
27 |
28 | assert(root_mean_squared_error(NULL, yHat, 3) == CM_INVALID_INPUT_ERROR);
29 | assert(root_mean_squared_error(y, NULL, 3) == CM_INVALID_INPUT_ERROR);
30 |
31 | assert(root_mean_squared_error(y, yHat, 0) == CM_INVALID_INPUT_ERROR);
32 |
33 | printf("root_mean_squared_error test passed\n");
34 | }
35 |
36 | int main()
37 | {
38 | printf("Testing root_mean_squared_error\n");
39 | test_root_mean_squared_error();
40 | return CM_SUCCESS;
41 | }
42 |
--------------------------------------------------------------------------------
/test/Optimizers/test_adam.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Optimizers/adam.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_adam()
8 | {
9 | float loss_thresh = 1e-6f;
10 | float param_thresh = 1e-4f;
11 | float final_param_thresh = 0.1f;
12 |
13 | float x = 1.0f;
14 | float y = 2.0f;
15 | float lr = 0.01f;
16 | float w = 0.5f;
17 | float b = 0.5f;
18 | float v_w = 0.0f;
19 | float v_b = 0.0f;
20 | float s_w = 0.0f;
21 | float s_b = 0.0f;
22 | float beta1 = 0.9f;
23 | float beta2 = 0.999f;
24 | float epsilon = 1e-8f;
25 |
26 | float expected_loss = 1.0f;
27 | float expected_w = 0.51f;
28 | float expected_b = 0.51f;
29 |
30 | float actual_loss = adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon);
31 |
32 | assert(fabs(actual_loss - expected_loss) < loss_thresh);
33 | assert(fabs(w - expected_w) < param_thresh);
34 | assert(fabs(b - expected_b) < param_thresh);
35 |
36 | float x_vals[5] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
37 | float y_vals[5] = {3.0f, 5.0f, 7.0f, 9.0f, 11.0f};
38 |
39 | float w_train = 0.5f;
40 | float b_train = 0.5f;
41 | v_w = 0.0f;
42 | v_b = 0.0f;
43 | s_w = 0.0f;
44 | s_b = 0.0f;
45 |
46 | int epochs = 1000;
47 | for (int epoch = 0; epoch < epochs; epoch++)
48 | {
49 | for (int i = 0; i < 5; i++)
50 | {
51 | adam(x_vals[i], y_vals[i], lr, &w_train, &b_train, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon);
52 | }
53 | }
54 |
55 | assert(fabs(w_train - 2.0f) < final_param_thresh);
56 | assert(fabs(b_train - 1.0f) < final_param_thresh);
57 |
58 | x = 1.0f;
59 | y = 2.0f;
60 | lr = 0.01f;
61 | w = 0.5f;
62 | b = 0.5f;
63 | v_w = 0.0f;
64 | v_b = 0.0f;
65 | s_w = 0.0f;
66 | s_b = 0.0f;
67 | beta1 = 0.9f;
68 | beta2 = 0.999f;
69 | epsilon = 1e-8f;
70 | assert(adam(x, y, lr, NULL, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
71 | assert(adam(x, y, lr, &w, NULL, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
72 | assert(adam(x, y, lr, &w, &b, NULL, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
73 | assert(adam(x, y, lr, &w, &b, &v_w, NULL, &s_w, &s_b, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
74 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, NULL, &s_b, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
75 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, NULL, beta1, beta2, epsilon) == CM_NULL_POINTER_ERROR);
76 |
77 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, 0.0f) == CM_INVALID_INPUT_ERROR);
78 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, -1.0f) == CM_INVALID_INPUT_ERROR);
79 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, 1.0f, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
80 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, 1.0f, epsilon) == CM_INVALID_INPUT_ERROR);
81 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, -0.1f, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
82 | assert(adam(x, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, -0.1f, epsilon) == CM_INVALID_INPUT_ERROR);
83 | assert(adam(x, y, -0.01f, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
84 |
85 | float nan_val = NAN;
86 | float inf_val = INFINITY;
87 |
88 | assert(adam(nan_val, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
89 | assert(adam(inf_val, y, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
90 | assert(adam(x, nan_val, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
91 | assert(adam(x, inf_val, lr, &w, &b, &v_w, &v_b, &s_w, &s_b, beta1, beta2, epsilon) == CM_INVALID_INPUT_ERROR);
92 | }
93 |
94 | int main()
95 | {
96 | printf("Testing adam\n");
97 | test_adam();
98 | printf("adam test passed\n");
99 | return CM_SUCCESS;
100 | }
101 |
--------------------------------------------------------------------------------
/test/Optimizers/test_rmsprop.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Optimizers/rmsprop.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_rmsprop()
8 | {
9 | float loss_thresh = 1e-6f;
10 | float w = 2.0f;
11 | float b = 1.0f;
12 | float x = 3.0f;
13 | float y = 7.0f;
14 | float lr = 0.01f;
15 | float cache_w = 0.0f;
16 | float cache_b = 0.0f;
17 | float eps = 1e-8f;
18 | float beta = 0.9f;
19 |
20 | float y_pred = w * x + b;
21 | float expected_loss = pow(y_pred - y, 2);
22 | float loss = rms_prop(x, y, lr, &w, &b, &cache_w, &cache_b, eps, beta);
23 |
24 | assert(fabs(loss - expected_loss) < loss_thresh);
25 |
26 | w = 2.0f;
27 | b = 1.0f;
28 | x = 3.0f;
29 | y = 7.0f;
30 | lr = 0.01f;
31 | cache_w = 0.0f;
32 | cache_b = 0.0f;
33 | float eps_invalid = 0.0f;
34 | beta = 0.9f;
35 |
36 | loss = rms_prop(x, y, lr, &w, &b, &cache_w, &cache_b, eps_invalid, beta);
37 | assert(loss == CM_INVALID_INPUT_ERROR);
38 |
39 | eps_invalid = 1e-8f;
40 | loss = rms_prop(x, y, lr, NULL, &b, &cache_w, &cache_b, eps_invalid, beta);
41 | assert(loss == CM_NULL_POINTER_ERROR);
42 |
43 | loss = rms_prop(x, y, lr, &w, NULL, &cache_w, &cache_b, eps_invalid, beta);
44 | assert(loss == CM_NULL_POINTER_ERROR);
45 |
46 | loss = rms_prop(x, y, lr, &w, &b, NULL, &cache_b, eps_invalid, beta);
47 | assert(loss == CM_NULL_POINTER_ERROR);
48 |
49 | loss = rms_prop(x, y, lr, &w, &b, &cache_w, NULL, eps_invalid, beta);
50 | assert(loss == CM_NULL_POINTER_ERROR);
51 |
52 | float nan_val = NAN;
53 | float inf_val = INFINITY;
54 |
55 | loss = rms_prop(nan_val, y, lr, &w, &b, &cache_w, &cache_b, eps_invalid, beta);
56 | assert(isnan(loss));
57 |
58 | loss = rms_prop(inf_val, y, lr, &w, &b, &cache_w, &cache_b, eps_invalid, beta);
59 | assert(loss == CM_INVALID_INPUT_ERROR);
60 |
61 | loss = rms_prop(x, nan_val, lr, &w, &b, &cache_w, &cache_b, eps_invalid, beta);
62 | assert(isnan(loss));
63 |
64 | loss = rms_prop(x, inf_val, lr, &w, &b, &cache_w, &cache_b, eps_invalid, beta);
65 | assert(loss == CM_INVALID_INPUT_ERROR);
66 |
67 | float final_pred_thresh = 1e-3f;
68 | loss_thresh = 1e-6f;
69 | w = 5.0f;
70 | b = 3.0f;
71 | x = 3.0f;
72 | y = 7.0f;
73 | lr = 0.01f;
74 | cache_w = 0.0f;
75 | cache_b = 0.0f;
76 | eps = 1e-8f;
77 | beta = 0.9f;
78 | float prev_loss = INFINITY;
79 | int iterations = 3000;
80 | int i;
81 |
82 | for (i = 0; i < iterations; i++)
83 | {
84 | loss = rms_prop(x, y, lr, &w, &b, &cache_w, &cache_b, eps, beta);
85 | assert(loss <= prev_loss || fabs(loss - prev_loss) < loss_thresh);
86 | prev_loss = loss;
87 | }
88 |
89 | float y_pred_final = w * x + b;
90 | assert(fabs(y_pred_final - y) < final_pred_thresh);
91 |
92 | w = 2.0f;
93 | b = 1.0f;
94 | x = 3.0f;
95 | y = 7.0f;
96 | lr = 0.01f;
97 | cache_w = 0.0f;
98 | cache_b = 0.0f;
99 | eps = 1e-8f;
100 | float beta_invalid = 1.1f;
101 |
102 |     loss = rms_prop(x, y, lr, &w, &b, &cache_w, &cache_b, eps, beta_invalid);
103 |     (void)loss; /* beta > 1 is not validated by rms_prop; this only checks the call returns */
104 | }
105 |
106 | int main()
107 | {
108 | printf("Testing RMSprop\n");
109 | test_rmsprop();
110 | printf("RMSprop test passed\n");
111 | return CM_SUCCESS;
112 | }
113 |
--------------------------------------------------------------------------------
/test/Optimizers/test_sgd.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <math.h>
3 | #include <assert.h>
4 | #include "../../include/Optimizers/sgd.h"
5 | #include "../../include/Core/error_codes.h"
6 |
7 | void test_sgd()
8 | {
9 | float loss_thresh = 1e-6f;
10 | float w = 2.0f;
11 | float b = 1.0f;
12 | float x = 3.0f;
13 | float y = 7.0f;
14 | float lr = 0.01f;
15 |
16 | float y_pred = w * x + b;
17 | float expected_loss = pow(y_pred - y, 2);
18 | float dw_expected = 2 * (y_pred - y) * x;
19 | float db_expected = 2 * (y_pred - y);
20 | float w_expected = w - lr * dw_expected;
21 | float b_expected = b - lr * db_expected;
22 |
23 | float loss = sgd(x, y, lr, &w, &b);
24 |
25 | assert(fabs(loss - expected_loss) < loss_thresh);
26 | assert(fabs(w - w_expected) < loss_thresh);
27 | assert(fabs(b - b_expected) < loss_thresh);
28 |
29 | x = 3.0f;
30 | y = 7.0f;
31 | lr = 0.01f;
32 |
33 | loss = sgd(x, y, lr, NULL, NULL);
34 | assert(loss == CM_NULL_POINTER_ERROR);
35 |
36 | w = 2.0f;
37 | loss = sgd(x, y, lr, &w, NULL);
38 | assert(loss == CM_NULL_POINTER_ERROR);
39 |
40 | float b_val = 1.0f;
41 | loss = sgd(x, y, lr, NULL, &b_val);
42 | assert(loss == CM_NULL_POINTER_ERROR);
43 |
44 | float nan_val = NAN;
45 | float inf_val = INFINITY;
46 |
47 | w = 2.0f;
48 | b_val = 1.0f;
49 |
50 | loss = sgd(nan_val, y, lr, &w, &b_val);
51 | assert(isnan(loss));
52 |
53 | loss = sgd(inf_val, y, lr, &w, &b_val);
54 | assert(loss == CM_INVALID_INPUT_ERROR);
55 |
56 | loss = sgd(x, nan_val, lr, &w, &b_val);
57 | assert(isnan(loss));
58 |
59 | loss = sgd(x, inf_val, lr, &w, &b_val);
60 | assert(loss == CM_INVALID_INPUT_ERROR);
61 |
62 | float pred_thresh = 1e-1f;
63 | loss_thresh = 1e-6f;
64 | w = 5.0f;
65 | b_val = 3.0f;
66 | x = 3.0f;
67 | y = 7.0f;
68 | lr = 0.01f;
69 | float prev_loss = INFINITY;
70 |
71 | for (int i = 0; i < 100; i++)
72 | {
73 | loss = sgd(x, y, lr, &w, &b_val);
74 | assert(loss <= prev_loss || fabs(loss - prev_loss) < loss_thresh);
75 | prev_loss = loss;
76 | }
77 |
78 | y_pred = w * x + b_val;
79 | assert(fabs(y_pred - y) < pred_thresh);
80 |
81 | x = 3.0f;
82 | y = 7.0f;
83 | lr = -0.01f;
84 | w = 2.0f;
85 | b_val = 1.0f;
86 |
87 | sgd(x, y, lr, &w, &b_val);
88 | }
89 |
90 | int main()
91 | {
92 | printf("Testing SGD\n");
93 | test_sgd();
94 | printf("SGD test passed\n");
95 | return CM_SUCCESS;
96 | }
97 |
--------------------------------------------------------------------------------
/test/Preprocessing/test_label_encoder.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <assert.h>
5 | #include "../../include/Preprocessing/label_encoder.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_label_encoder()
9 | {
10 | char input[] = "abca";
11 | int size = strlen(input);
12 | int mapSize = 0;
13 | CharMap *map = NULL;
14 | int *encoded = label_encoder(input, size, &map, &mapSize);
15 | assert(mapSize == 3);
16 | char *decoded = label_decoder(encoded, size, map, mapSize);
17 | assert((int)strlen(decoded) == size);
18 | free_label_memory(map, encoded, decoded);
19 |
20 | mapSize = 0;
21 | map = NULL;
22 | encoded = label_encoder(NULL, 4, &map, &mapSize);
23 | assert(encoded == (int *)CM_NULL_POINTER_ERROR);
24 |
25 | input[0] = 'a';
26 | mapSize = 0;
27 | map = NULL;
28 | encoded = label_encoder(input, 0, &map, &mapSize);
29 | assert(encoded == (int *)CM_INVALID_PARAMETER_ERROR);
30 |
31 | printf("label_encoder test passed\n");
32 | }
33 |
34 | int main()
35 | {
36 | printf("Testing label_encoder\n");
37 | test_label_encoder();
38 | return CM_SUCCESS;
39 | }
40 |
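For orientation: label encoding as tested above assigns each distinct character the next unused integer id in order of first appearance, so "abca" encodes to 0, 1, 2, 0 with a three-entry map. A minimal sketch of that idea, independent of the library's CharMap layout:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *input = "abca";
        char seen[256];                       /* distinct chars, in first-seen order */
        int n_seen = 0;
        for (size_t i = 0; i < strlen(input); i++) {
            char *p = memchr(seen, input[i], (size_t)n_seen);
            if (p == NULL) { seen[n_seen] = input[i]; p = &seen[n_seen++]; }
            printf("%d ", (int)(p - seen));   /* prints: 0 1 2 0 */
        }
        printf("(mapSize = %d)\n", n_seen);   /* prints: (mapSize = 3) */
        return 0;
    }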
--------------------------------------------------------------------------------
/test/Preprocessing/test_min_max_scaler.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Preprocessing/min_max_scaler.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_min_max_scaler()
9 | {
10 | float data[] = {10, 20, 30, 40, 50};
11 | int size = sizeof(data) / sizeof(data[0]);
12 | float *scaled = min_max_scaler(data, size); assert(scaled != NULL);
13 | for (int i = 0; i < size; i++)
14 | {
15 | float expected = (data[i] - 10) / 40.0f;
16 | assert(fabs(scaled[i] - expected) < 1e-6);
17 | }
18 | free(scaled);
19 |
20 | scaled = min_max_scaler(NULL, 5);
21 | assert(scaled == NULL);
22 |
23 | float data2[] = {10, 20, 30, 40, 50};
24 | scaled = min_max_scaler(data2, 0);
25 | assert(scaled == NULL);
26 |
27 | printf("min_max_scaler test passed\n");
28 | }
29 |
30 | int main()
31 | {
32 | printf("Testing min_max_scaler\n");
33 | test_min_max_scaler();
34 | return CM_SUCCESS;
35 | }
36 |
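The expected values in this test come straight from the min-max formula x' = (x - min) / (max - min), which maps the data onto [0, 1]; worked out by hand for the fixture above:

    #include <stdio.h>

    int main(void)
    {
        float data[] = {10, 20, 30, 40, 50};
        int n = sizeof data / sizeof data[0];
        float min = data[0], max = data[0];
        for (int i = 1; i < n; i++) {
            if (data[i] < min) min = data[i];
            if (data[i] > max) max = data[i];
        }
        for (int i = 0; i < n; i++)
            printf("%g ", (data[i] - min) / (max - min));
        printf("\n");                         /* prints: 0 0.25 0.5 0.75 1 */
        return 0;
    }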
--------------------------------------------------------------------------------
/test/Preprocessing/test_one_hot_encoder.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <assert.h>
5 | #include "../../include/Preprocessing/one_hot_encoder.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_one_hot_encoder()
9 | {
10 | char input[] = "abac";
11 | int size = strlen(input);
12 | int mapSize = 0;
13 | CharMap *map = NULL;
14 | int *encoded = one_hot_encoding(input, size, &map, &mapSize);
15 | assert(mapSize == 3);
16 | char *decoded = one_hot_decoding(encoded, size, map, mapSize);
17 | assert(strcmp(decoded, input) == 0);
18 | free_one_hot_memory(encoded, decoded, map);
19 |
20 | mapSize = 0;
21 | map = NULL;
22 | encoded = one_hot_encoding(NULL, 4, &map, &mapSize);
23 | assert(encoded == (int *)CM_NULL_POINTER_ERROR);
24 |
25 | input[0] = 'a';
26 | mapSize = 0;
27 | map = NULL;
28 | encoded = one_hot_encoding(input, 0, &map, &mapSize);
29 | assert(encoded == (int *)CM_INVALID_PARAMETER_ERROR);
30 |
31 | printf("one_hot_encoder test passed\n");
32 | }
33 |
34 | int main()
35 | {
36 | printf("Testing one_hot_encoder\n");
37 | test_one_hot_encoder();
38 | return CM_SUCCESS;
39 | }
40 |
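For orientation: with three distinct characters, one-hot encoding "abac" conventionally produces one 0/1 row per input character, with a single 1 in the column of that character's id; the round trip through one_hot_decoding then recovers the original string. The in-memory layout of encoded here is the library's own; the conventional picture is:

    #include <stdio.h>

    int main(void)
    {
        int ids[] = {0, 1, 0, 2};             /* "abac" with a=0, b=1, c=2 */
        int map_size = 3;
        for (int r = 0; r < 4; r++) {
            for (int c = 0; c < map_size; c++)
                printf("%d ", c == ids[r] ? 1 : 0);
            printf("\n");
        }
        /* prints rows: 1 0 0 / 0 1 0 / 1 0 0 / 0 0 1 */
        return 0;
    }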
--------------------------------------------------------------------------------
/test/Preprocessing/test_standard_scaler.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <math.h>
4 | #include <assert.h>
5 | #include "../../include/Preprocessing/standard_scaler.h"
6 | #include "../../include/Core/error_codes.h"
7 |
8 | void test_standard_scaler()
9 | {
10 | float data[] = {1, 2, 3, 4, 5};
11 | int size = sizeof(data) / sizeof(data[0]);
12 | float *scaled = standard_scaler(data, size); assert(scaled != NULL);
13 | float mean = 0, std = 0;
14 | for (int i = 0; i < size; i++)
15 | {
16 | mean += scaled[i];
17 | }
18 | mean /= size;
19 | for (int i = 0; i < size; i++)
20 | {
21 | std += pow(scaled[i] - mean, 2);
22 | }
23 | std = sqrt(std / size);
24 | assert(fabs(mean) < 1e-6);
25 | assert(fabs(std - 1) < 1e-6);
26 | free(scaled);
27 |
28 | scaled = standard_scaler(NULL, 5);
29 | assert(scaled == NULL);
30 |
31 | float data2[] = {1, 2, 3, 4, 5};
32 | scaled = standard_scaler(data2, 0);
33 | assert(scaled == NULL);
34 |
35 | printf("standard_scaler test passed\n");
36 | }
37 |
38 | int main()
39 | {
40 | printf("Testing standard_scaler\n");
41 | test_standard_scaler();
42 | return CM_SUCCESS;
43 | }
44 |
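The assertions above verify exactly the defining property of standard scaling, z = (x - mean) / std: after scaling, the data has mean ~0 and (population) standard deviation ~1. The same computation by hand:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        float data[] = {1, 2, 3, 4, 5};
        int n = sizeof data / sizeof data[0];
        float mean = 0.0f, var = 0.0f;
        for (int i = 0; i < n; i++) mean += data[i];
        mean /= n;
        for (int i = 0; i < n; i++) var += (data[i] - mean) * (data[i] - mean);
        float std = sqrtf(var / n);           /* population std, as the test computes */
        for (int i = 0; i < n; i++)
            printf("%+.4f ", (data[i] - mean) / std);
        printf("\n");    /* prints: -1.4142 -0.7071 +0.0000 +0.7071 +1.4142 */
        return 0;
    }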
--------------------------------------------------------------------------------