├── .gitattributes
├── README.md
├── TODO
├── docs
│   ├── activation_functions.html
│   ├── cost_functions.html
│   ├── index.html
│   ├── layer_activation.html
│   ├── layer_dense.html
│   ├── network.html
│   ├── style.css
│   ├── training_backprop.html
│   ├── training_minibatched_backprop.html
│   └── webfonts
│       ├── JetBrainsMono-Bold.woff2
│       ├── JetBrainsMono-BoldItalic.woff2
│       ├── JetBrainsMono-ExtraBold.woff2
│       ├── JetBrainsMono-ExtraBoldItalic.woff2
│       ├── JetBrainsMono-ExtraLight.woff2
│       ├── JetBrainsMono-ExtraLightItalic.woff2
│       ├── JetBrainsMono-Italic.woff2
│       ├── JetBrainsMono-Light.woff2
│       ├── JetBrainsMono-LightItalic.woff2
│       ├── JetBrainsMono-Medium.woff2
│       ├── JetBrainsMono-MediumItalic.woff2
│       ├── JetBrainsMono-Regular.woff2
│       ├── JetBrainsMono-SemiBold.woff2
│       ├── JetBrainsMono-SemiBoldItalic.woff2
│       ├── JetBrainsMono-Thin.woff2
│       └── JetBrainsMono-ThinItalic.woff2
├── examples
│   ├── ggui_example.v
│   ├── mnist
│   │   ├── t10k-images-idx3-ubyte
│   │   ├── t10k-labels-idx1-ubyte
│   │   ├── train-images-idx3-ubyte
│   │   └── train-labels-idx1-ubyte
│   ├── mnist_tester.v
│   ├── mnist_tester_acc.v
│   ├── saveMNIST-0014-93
│   ├── saveXOR
│   ├── train_mnist.v
│   ├── train_xor.v
│   ├── train_xor_minibatches.v
│   └── visualise_data-augmented_mnist.v
├── ggui
│   ├── button.v
│   ├── gui.v
│   ├── gui_objects.v
│   ├── rect.v
│   ├── text.v
│   └── utilities.v
├── la
│   ├── blas.v
│   ├── matrix.v
│   ├── matrix_ops.v
│   └── vector.v
├── neural_networks
│   ├── activ_funcs.v
│   ├── classifier_utilities.v
│   ├── cost_functions.v
│   ├── image_processing.v
│   ├── layer_activation.v
│   ├── layer_base.v
│   ├── layer_dense.v
│   ├── neural_network.v
│   ├── training_mode_backprop.v
│   ├── training_mode_base.v
│   ├── training_mode_minibatches_backprop.v
│   ├── utilities.v
│   └── v.mod
├── neural_networks_acc
│   ├── activ_funcs.v
│   ├── classifier_utilities.v
│   ├── cost_functions.v
│   ├── image_processing.v
│   ├── layer_activation.v
│   ├── layer_base.v
│   ├── layer_dense.v
│   ├── neural_network.v
│   ├── training_mode_backprop.v
│   ├── training_mode_base.v
│   ├── training_mode_minibatches_backprop.v
│   ├── utilities.v
│   └── v.mod
├── v.mod
└── vblas
    ├── conversions.v
    ├── dgemm.v
    ├── dgemv.v
    ├── dgemv_test.v
    ├── error.v
    ├── level1f64.v
    ├── level1f64_ddot.v
    ├── level2f64.v
    ├── level3f64.v
    └── util.v

/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This is a V module to create neural networks. (There is also a GUI module named `ggui` that is used for the MNIST example.)
2 |
3 | For the moment there are two examples: the XOR logic gate example and the MNIST example.
4 | Here is a screenshot of the MNIST example tester `examples/mnist_tester.v` (it needs to be launched from the examples folder so it can find the saved neural network):
5 |
6 | ![image](https://github.com/Eliyaan/NeuralNetworks-V-Module/assets/103932369/d4e784a2-b63b-457e-b708-822a8285c727)
7 |
8 | The documentation, which is a work in progress, is here: https://eliyaan.github.io/NeuralNetworks-V-Module/
9 |
10 | I hope you enjoy your time with it. If you have any questions, don't hesitate to ask me here or on Discord! There is still a lot of work to do before the module is entirely complete!
11 |
--------------------------------------------------------------------------------
/TODO:
--------------------------------------------------------------------------------
1 | Needed:
2 |
3 |
4 | Later:
5 | - classifier loss
6 | - pooling layer
7 | - convolution layer
8 | - multithreading / GPU
9 | - option to save the costs for momentum?
10 | - dataset with any type, and f64 cast on forward/backprop
11 |
12 |
--------------------------------------------------------------------------------
/docs/activation_functions.html:
--------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | 6 | Activation functions 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | Different functions 28 |
29 | 30 |
31 |

Activation functions

32 |
33 |

What is it

34 |

35 | Activation functions are non-linear functions used in the Activation layer. 36 |

37 |
38 |
39 |

Different functions

40 |

41 | Leaky relu: ActivationFunctions.leaky_relu 42 |

43 |

44 | Tanh: ActivationFunctions.tanh 45 |
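As a rough sketch, here is the shape of these two functions in plain V. The 0.01 negative slope for leaky relu is an assumption for illustration; the module's actual definitions live in neural_networks/activ_funcs.v.

import math

// leaky relu: identity for positive inputs, a small slope for negative ones,
// which keeps a usable gradient everywhere (the slope value is assumed here)
fn leaky_relu(x f64) f64 {
	return if x > 0 { x } else { 0.01 * x }
}

// tanh: squashes any input into the range (-1, 1)
fn tanh_activ(x f64) f64 {
	return math.tanh(x)
}

fn main() {
	println(leaky_relu(-2.0)) // -0.02
	println(tanh_activ(0.5)) // ~0.46
}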

46 |
47 |
48 | 49 | 50 | -------------------------------------------------------------------------------- /docs/cost_functions.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Cost functions 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | Different functions 28 |
29 | 30 |
31 |

Cost functions

32 |
33 |

What is it

34 |

35 | Cost functions give a metric of how badly the network is performing. 36 | This lets the network know its flaws, for example during backpropagation. 37 |

38 |
39 |
40 |

Different functions

41 |

42 | Mean Squared Error: CostFunctions.mse 43 |
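As an illustration, here is a minimal MSE sketch in plain V, assuming the cost is the average of the squared differences (the module's actual implementation lives in neural_networks/cost_functions.v):

fn mse(outputs []f64, expected []f64) f64 {
	mut sum := 0.0
	for i, o in outputs {
		diff := o - expected[i] // error on one output
		sum += diff * diff
	}
	return sum / f64(outputs.len) // average squared error
}

fn main() {
	println(mse([0.9, 0.1], [1.0, 0.0])) // 0.01
}

A perfect network would score 0; the worse the outputs, the larger the value, which is what the training then tries to reduce.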

44 |
45 |
46 | 47 | 48 | -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Neural Networks V Module Docs 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 | Home Page 16 | XOR Neural Network 17 |

Layers

18 | Dense Layer 19 | Activation Layer 20 |

21 | Activation functions 22 | Cost functions 23 |

Training modes

24 | Backpropagation 25 | Minibatched backprop 26 |
27 | 28 |
29 | Home Page 30 |
31 | 32 |
33 |

Neural Networks V Module Docs

34 |
35 |

Home Page

36 |

37 | This is the documentation for the module. 38 |

39 |

40 | You can learn how to create a neural network here: > Creating a network. 41 |

42 |
43 |
44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /docs/layer_activation.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Activation layer 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | How to create it 28 |
29 | 30 |
31 |

Layers > Activation

32 |
33 |

What is it

34 |

35 | The Activation layer takes all the inputs and applies the activation function to each one. 36 |

37 |

38 | It allows the network to learn non-linear functions: without an Activation layer, the network would only be able to learn linear functions. 39 | That makes it an essential part of the network. 40 |
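Concretely, the layer's forward pass is just an element-wise map. A minimal sketch, here with tanh (the layer applies whichever ActivationFunctions member it was created with):

import math

fn main() {
	inputs := [-1.0, 0.5, 2.0]
	// the Activation layer applies the chosen function to every input
	outputs := inputs.map(math.tanh(it))
	println(outputs)
}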

41 |
42 |
43 |

How to create an Activation layer

44 |

45 | To create a layer we first need a neural network to create the layer in, as seen in the XOR example. 46 |

47 |

48 | We can then call the add_layer method on the neural network, passing the layer that we want as the parameter. 49 | In our case we are going to create an Activation layer. 50 |

51 |

52 | We only need one parameter: the activation function chosen for this layer. 53 | A commonly used one is the leaky relu function, as its derivative is easy to compute and does not fade out the way a sigmoid's does. 54 | Other activation functions can be found here. 55 |

56 |

57 | model.add_layer(nn.Activation.new(.leaky_relu)) 58 |

59 |
60 |
61 | 62 | 63 | -------------------------------------------------------------------------------- /docs/layer_dense.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Dense layer 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | How to create it 28 |
29 | 30 |
31 |

Layers > Dense

32 |
33 |

What is it

34 |

35 | A neural network is made out of layers. The dense layer is also called the fully connected layer, or simply a 'normal' layer. 36 | It is composed of weights connecting each input with every output, and it then adds a bias to each output. 37 |

38 |

39 | The Dense layer is complementary to the Activation layer: the dense layer is the part that trains and improves, while the activation layer 40 | produces non-linearity, or said another way, it allows the network to learn many more things. 41 |

42 |
43 |
44 |

How to create a Dense layer

45 |

46 | To create a layer we first need a neural network to create the layer in, as seen in the XOR example. 47 |

48 |

49 | We can then call the add_layer method on the neural network, passing the layer that we want as the parameter. 50 | In our case we are going to create a Dense layer. 51 |

52 |

53 | The first two arguments are the number of inputs and the number of outputs of the layer, because as said earlier each input is connected with every output and there is a bias for each output. 54 | The third and fourth arguments are the ranges for the initialisation of the weights and biases respectively. In this example the weights can be initialized between -0.7 and 0.7. 55 |

56 |

57 | model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65)) 58 |
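To make the "each input connected with every output, plus one bias per output" idea concrete, here is a naive sketch of a dense forward pass. It is only for intuition; the module's real implementation is in neural_networks/layer_dense.v and presumably works on its matrix types rather than plain loops.

fn dense_forward(input []f64, weights [][]f64, biases []f64) []f64 {
	mut output := []f64{len: biases.len}
	for j in 0 .. output.len {
		mut sum := biases[j] // one bias per output
		for i in 0 .. input.len {
			sum += weights[j][i] * input[i] // every input feeds every output
		}
		output[j] = sum
	}
	return output
}

fn main() {
	// 2 inputs -> 3 outputs, matching nn.Dense.new(2, 3, 0.7, 0.65)
	w := [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
	b := [0.0, 0.1, 0.2]
	println(dense_forward([1.0, 1.0], w, b)) // ~[0.3, 0.8, 1.3]
}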

59 |
60 |
61 | 62 | 63 | -------------------------------------------------------------------------------- /docs/network.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Creating a Network 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | Requirements 27 | Structure of the code 28 |
29 | 30 |
31 |

> Creating a Neural Network

32 |
33 |

Requirements

34 |

35 | You need to have V installed; you can verify that by running v run .. 36 | You also need to have the vsl module installed; you can install it with v install vsl. 37 | Then you need to clone the module's repo and create a V file in it that we are going to use. 38 |

39 |
40 |
41 |

Structure of the code

42 |

43 | We are going to write a program that trains a neural network to perform the XOR logic gate. The result can be found here. 44 |

45 |

46 | First we import the neural network module and create a main function. 47 | Then we create the neural network that we are going to train. 48 | The 0 is the seed for the random weights and biases, so that we get the same neural network at each run. 49 |

50 |

51 | import neural_networks as nn 52 | 53 | fn main() { 54 | mut model := nn.NeuralNetwork.new(0) 55 | } 56 |

57 |

58 | Then we add the layers that we want our network to have. 59 | We need our network to have 2 inputs and 1 output to match the XOR gate. 60 | So we will first add a Dense layer with 2 inputs and 3 outputs; 3 is arbitrary but works well. 61 | The two numbers after the number of inputs/outputs are the ranges for the initialisation of the random weights and biases. 62 |

63 |

64 | Then an Activation layer: the Dense and Activation layers are complementary, so we will add one Activation per Dense layer. 65 | The activation function that we will use for this layer is leaky relu, as it is convenient. 66 | We add a second Dense layer with 3 inputs and 1 output, and the Activation layer that goes with it. 67 |

68 |

69 | model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65)) 70 | model.add_layer(nn.Activation.new(.leaky_relu)) 71 | model.add_layer(nn.Dense.new(3, 1, 0.6, 0.65)) 72 | model.add_layer(nn.Activation.new(.leaky_relu)) 73 |

74 |

75 | Then we need to create the parameters for the training. 76 | The learning rate, momentum and number of epochs are found by trial and error; these values work well. 77 | The cost function that we will use is the Mean Squared Error (MSE). 78 |

79 |

80 | We then add the dataset that the network will use for its training, and likewise for the testing. 81 | In a real example the test data is kept unseen during the training, to measure how well the network does in an unseen situation, 82 | but as XOR has only 4 different possible inputs we cannot show unseen data to the network, so we will use the same data. 83 |

84 |

85 | The neural network will print its performance every print_interval epochs. 86 | For the test parameters: every training_interval epochs it will run the test dataset and print the results from the print_start-th element of the test dataset to the print_end-th one. 87 |

88 |

89 | training_parameters := nn.BackpropTrainingParams{ 90 | learning_rate: 0.37 91 | momentum: 0.9 92 | nb_epochs: 300 93 | print_interval: 25 94 | cost_function: .mse // mean squared error 95 | training: nn.Dataset { 96 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 97 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 98 | } 99 | test: nn.Dataset { 100 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 101 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 102 | } 103 | test_params: nn.TestParams{ 104 | print_start: 0 105 | print_end: 3 106 | training_interval: 100 107 | } 108 | } 109 |

110 |

111 | Now it's time to train the network! 112 |

113 |

114 | model.train(training_parameters) 115 |

116 |

117 | We can also save the model by adding this at the end of the program: 118 |

119 |

120 | model.save_model('saveXOR') 121 |

122 |

123 | And to load a model (to use it or to train it further) you just need to create an empty model like we did at the start and then do: 124 |

125 |

126 | model.load_model('saveXOR') 127 |

128 |

129 | There it is, we can just run the program and it will train! 130 |
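For reference, here is the whole program assembled from the snippets above; it mirrors examples/train_xor.v, minus the optional prompt that loads a saved model.

import neural_networks as nn

fn main() {
	mut model := nn.NeuralNetwork.new(0)
	model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65))
	model.add_layer(nn.Activation.new(.leaky_relu))
	model.add_layer(nn.Dense.new(3, 1, 0.6, 0.65))
	model.add_layer(nn.Activation.new(.leaky_relu))

	training_parameters := nn.BackpropTrainingParams{
		learning_rate: 0.37
		momentum: 0.9
		nb_epochs: 300
		print_interval: 25
		cost_function: .mse // mean squared error
		training: nn.Dataset{
			inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
			expected_outputs: [[0.0], [1.0], [1.0], [0.0]]
		}
		test: nn.Dataset{
			inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
			expected_outputs: [[0.0], [1.0], [1.0], [0.0]]
		}
		test_params: nn.TestParams{
			print_start: 0
			print_end: 3
			training_interval: 100
		}
	}
	model.train(training_parameters)
	model.save_model('saveXOR')
}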

131 |
132 |
133 | 134 | 135 | -------------------------------------------------------------------------------- /docs/style.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: "JetBrainsMono"; 3 | src: url("webfonts/JetBrainsMono-Regular.woff2") format("woff2"); 4 | font-weight: normal; 5 | } 6 | 7 | @font-face { 8 | font-family: "JetBrainsMonoMedium"; 9 | src: url("webfonts/JetBrainsMono-Medium.woff2") format("woff2"); 10 | font-weight: normal; 11 | } 12 | 13 | @font-face { 14 | font-family: "JetBrainsMonoLight"; 15 | src: url("webfonts/JetBrainsMono-Light.woff2") format("woff2"); 16 | font-weight: normal; 17 | } 18 | 19 | @font-face { 20 | font-family: "JetBrainsMonoXLight"; 21 | src: url("webfonts/JetBrainsMono-ExtraLight.woff2") format("woff2"); 22 | font-weight: normal; 23 | } 24 | 25 | body { 26 | font-family: 'JetBrainsMono'; 27 | background-color: #11111b; 28 | color:#cdd6f4; 29 | } 30 | 31 | h1 { 32 | padding-left: 0.5em; 33 | font-family: "JetBrainsMonoMedium"; 34 | color:#89dceb; 35 | } 36 | 37 | h2{ 38 | color:#b4befe; 39 | } 40 | 41 | a { 42 | color:#a6e3a1; 43 | } 44 | 45 | .sidenavbar { 46 | height: 100%; 47 | width: 230px; 48 | position: fixed; 49 | z-index: 1; 50 | top: 0; 51 | left: 0; 52 | background-color: #181825; 53 | overflow-x: hidden; 54 | padding-top: 2em; 55 | padding-left: 1.5em 56 | } 57 | 58 | .sidelist { 59 | width: 220px; 60 | position: fixed; 61 | z-index: 1; 62 | top: 0; 63 | right: 0; 64 | overflow-x: hidden; 65 | margin-top: 5em; 66 | padding-top: 1em; 67 | padding-bottom: 1em; 68 | padding-left: 1em; 69 | border: solid; 70 | border-width: 0.1em; 71 | border-color: #f9e2af; 72 | border-top-left-radius: 1em; 73 | border-bottom-left-radius: 1em; 74 | } 75 | 76 | .sidelist a { 77 | color:#f9e2af; 78 | display: block; 79 | text-decoration: none; 80 | } 81 | 82 | .sidelist a:hover { 83 | text-decoration: underline; 84 | } 85 | 86 | .main { 87 | margin-left: 280px; /* Same as the width of the sidenav */ 88 | font-size: 20px; /* Increased text to enable scrolling */ 89 | padding: 0px 10px; 90 | padding-right:13em; 91 | } 92 | 93 | .sidenavbar a { 94 | padding-left: 0.5em; 95 | text-decoration: none; 96 | font-size: 18px; 97 | color: #a6adc8; 98 | display: block; 99 | } 100 | 101 | .sidenavbar p{ 102 | margin-bottom: 0em; 103 | text-decoration: none; 104 | font-size: 18px; 105 | color: #a6adc8; 106 | } 107 | 108 | .sidenavbar a:hover { 109 | color: #94e2d5; 110 | } 111 | 112 | i { 113 | font-style: normal; 114 | font-family: "JetBrainsMonoXLight"; 115 | background-color:#45475a; 116 | padding: 0em 0.5em 0em 0.5em; 117 | border-radius: 0.99em; 118 | } 119 | 120 | .part { 121 | font-family: "JetBrainsMonoLight"; 122 | font-size: 0.8em; 123 | margin-top: 2em; 124 | padding: 0.01em 1em 0.1em 1em; 125 | border-radius: 1.0em; 126 | background-color: #1e1e2e; 127 | } 128 | 129 | .snippet { 130 | white-space: pre; 131 | font-style: normal; 132 | font-family: "JetBrainsMonoXLight"; 133 | background-color:#313244; 134 | padding: 0em 0em 0em 1em; 135 | border-radius: 0.8em; 136 | } -------------------------------------------------------------------------------- /docs/training_backprop.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Backpropagation 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | Parameters 28 |
29 | 30 |
31 |

Training > Backpropagation

32 |
33 |

What is it

34 |

35 | Backpropagation is a commonly used training algorithm for neural networks: it computes how much each weight and bias contributed to the error, then adjusts them in the direction that reduces the cost. 36 |

37 |
38 |
39 |

Parameters

40 |

41 | We need to specify different parameters to be able to make the neural network train successfully. 42 |

43 |

44 | The learning_rate is a factor that controls the speed of the adjustments of the weights and biases. If it is high, the training can be faster but it can also overshoot. 45 | If it is low, the learning will take more time but can be more precise and approach the solution more safely. 46 |

47 |

48 | The momentum is a factor that controls how much of the previous adjustment is applied along with the new adjustment to the weights and biases. 49 | In many cases it makes the network converge a lot faster, as the updates build momentum towards the solution and are less swayed by noise in the data. 50 |
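To see why this helps, here is a self-contained toy in V that minimises f(w) = (w - 3)^2 with the classic momentum update, update = learning_rate * gradient + momentum * previous_update. Whether the module applies exactly this formulation is an assumption; its implementation is in neural_networks/training_mode_backprop.v.

fn main() {
	learning_rate := 0.1
	momentum := 0.9
	mut w := 0.0
	mut previous_update := 0.0
	for _ in 0 .. 50 {
		gradient := 2.0 * (w - 3.0) // derivative of (w - 3)^2
		// the new step carries over 90% of the previous step
		update := learning_rate * gradient + momentum * previous_update
		w -= update
		previous_update = update
	}
	println(w) // close to 3.0
}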

51 |

52 | The nb_epochs is how many epochs the learning will last. An epoch is finished when the neural network has seen the whole dataset once. 53 | The modifications calculated with backpropagation on each item of the dataset are applied at the end of each epoch. 54 |

55 |

56 | classifier tells the training algorithm if it needs to keep track of the accuracy of the classification and other convenient things for classifiers. 57 |

58 |

59 | The neural network will print its performance every print_interval epochs. 60 |

61 |

62 | The cost_function is the cost function that you choose for this training. 63 |

64 |

65 | training and test are the datasets used respectively for the training and the tests. 66 |

67 |

68 | test_params allows finer control over frequency of the tests and what is displayed during the tests. 69 |

70 |

71 | struct BackpropTrainingParams { 72 | learning_rate f64 73 | momentum f64 74 | nb_epochs int 75 | classifier bool 76 | print_interval int 77 | cost_function CostFunctions 78 | training Dataset 79 | test Dataset 80 | test_params TestParams 81 | } 82 |

83 |
84 |
85 | 86 | 87 | -------------------------------------------------------------------------------- /docs/training_minibatched_backprop.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Minibatched backpropagation 7 | 8 | 9 | 10 | 11 |
12 | Home Page 13 | XOR Neural Network 14 |

Layers

15 | Dense Layer 16 | Activation Layer 17 |

18 | Activation functions 19 | Cost functions 20 |

Training modes

21 | Backpropagation 22 | Minibatched backprop 23 |
24 | 25 |
26 | What is it 27 | Parameters 28 |
29 | 30 |
31 |

Training > Minibatched backpropagation

32 |
33 |

What is it

34 |

35 | Minibatched backpropagation is a variant of backpropagation that accumulates the adjustments over a batch of items and applies them at the end of each batch instead of at the end of the epoch. 36 |

37 |
38 |
39 |

Parameters

40 |

41 | We need to specify different parameters to be able to make the neural network train successfully. 42 |

43 |

44 | The learning_rate is a factor that controls the speed of the adjustments of the weights and biases. If it is high, the training can be faster but it can also overshoot. 45 | If it is low, the learning will take more time but can be more precise and approach the solution more safely. 46 |

47 |

48 | The momentum is a factor that controls how much of the previous adjustment is applied along with the new adjustment to the weights and biases. 49 | In many cases it makes the network converge a lot faster, as the updates build momentum towards the solution and are less swayed by noise in the data. 50 |

51 |

52 | The batch_size is how many items of the dataset are used to create each batch. 53 | A batch is a collection of items that the neural network goes through before applying the changes calculated during those items' backpropagation. 54 | It then goes through the next batch and repeats. 55 |
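As a toy sketch of the slicing (how the module cuts the dataset internally is an assumption; see neural_networks/training_mode_minibatches_backprop.v):

fn main() {
	inputs := [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
	batch_size := 2
	for start := 0; start < inputs.len; start += batch_size {
		end := if start + batch_size < inputs.len { start + batch_size } else { inputs.len }
		batch := inputs[start..end]
		println('batch: ${batch}')
		// the network backpropagates over every item of the batch, then
		// applies the accumulated adjustments before the next batch
	}
}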

56 |

57 | The nb_epochs is how many epochs the learning will last. An epoch is finished when the neural network has seen the whole dataset once. 58 |

59 |

60 | classifier tells the training algorithm if it needs to keep track of the accuracy of the classification and other convenient things for classifiers. 61 |

62 |

63 | The neural network will print its performance every print_batch_interval batches, during every print_interval-th epoch. 64 |

65 |

66 | The cost_function is the cost function that you choose for this training. 67 |

68 |

69 | training and test are the datasets used respectively for the training and the tests. 70 |

71 |

72 | test_params allows finer control over frequency of the tests and what is displayed during the tests. 73 |

74 |

75 | pub struct MinibatchesBackpropTrainingParams { 76 | learning_rate f64 77 | momentum f64 78 | batch_size int = 1 79 | nb_epochs int 80 | classifier bool 81 | print_interval int 82 | print_batch_interval int 83 | cost_function CostFunctions 84 | training Dataset 85 | test Dataset 86 | test_params TestParams 87 | } 88 |
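For a concrete instantiation of these fields, see examples/train_xor_minibatches.v, which uses:

training_parameters := nn.MinibatchesBackpropTrainingParams{
	learning_rate: 1.0
	momentum: 0.5
	batch_size: 2
	nb_epochs: 30
	print_interval: 1
	print_batch_interval: 2
	cost_function: .mse // mean squared error
	training: nn.Dataset{
		inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
		expected_outputs: [[0.0], [1.0], [1.0], [0.0]]
	}
	test: nn.Dataset{
		inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
		expected_outputs: [[0.0], [1.0], [1.0], [0.0]]
	}
	test_params: nn.TestParams{
		print_start: 0
		print_end: 3
		training_interval: 30
		training_batch_interval: 2
	}
}
model.train(training_parameters)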

89 |
90 |
91 | 92 | 93 | -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Bold.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-BoldItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-BoldItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-ExtraBold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-ExtraBold.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-ExtraBoldItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-ExtraBoldItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-ExtraLight.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-ExtraLight.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-ExtraLightItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-ExtraLightItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Italic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Light.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Light.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-LightItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-LightItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Medium.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Medium.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-MediumItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-MediumItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Regular.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-SemiBold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-SemiBold.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-SemiBoldItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-SemiBoldItalic.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-Thin.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-Thin.woff2 -------------------------------------------------------------------------------- /docs/webfonts/JetBrainsMono-ThinItalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/docs/webfonts/JetBrainsMono-ThinItalic.woff2 -------------------------------------------------------------------------------- /examples/ggui_example.v: -------------------------------------------------------------------------------- 1 | import ggui 2 | import gg 3 | import gx 4 | 5 | 6 | const ( 7 | win_width = 601 8 | win_height = 601 9 | theme = ggui.CatppuchinMocha{} 10 | buttons_shape = ggui.RoundedShape{20, 20, 5, .top_left} 11 | ) 12 | 13 | enum Id { 14 | @none 15 | img_nb_text 16 | click_nb_text 17 | } 18 | 19 | fn id(id Id) int { 20 | return int(id) 21 | } 22 | 23 | @[heap] 24 | struct App { 25 | mut: 26 | gg &gg.Context = unsafe { nil } 27 | gui &ggui.Gui = unsafe { nil } 28 | clickables []ggui.Clickable 29 | elements []ggui.Element 30 | actual_image int 31 | actual_click int 32 | } 33 | 34 | fn main() { 35 | mut app := &App{} 36 | app.gui = &ggui.Gui(app) 37 | app.gg = gg.new_context( 38 | width: win_width 39 | height: win_height 40 | create_window: true 41 | window_title: 'ggui example' 42 | bg_color: theme.base 43 | user_data: app 44 | frame_fn: on_frame 45 | event_fn: on_event 46 | sample_count: 4 47 | ui_mode: true 48 | ) 49 | plus_text := ggui.Text{0, 0, 0, "+", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 50 | minus_text := 
ggui.Text{0, 0, 0, "-", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 51 | text_cfg := gx.TextCfg{color:theme.text, size:20, align:.right, vertical_align:.top} 52 | 53 | app.clickables << ggui.Button{0, 300+50, 10, buttons_shape, minus_text, theme.red, prev_img} 54 | app.clickables << ggui.Button{0, 300+75, 10, buttons_shape, plus_text, theme.green, next_img} 55 | app.elements << ggui.Text{id(.img_nb_text), 300+45, 10, "Image n°${app.actual_image}", text_cfg} 56 | 57 | app.elements << ggui.Rect{x:300, y:40, shape:ggui.RoundedShape{160, 30, 5, .top}, color:theme.mantle} 58 | 59 | app.clickables << ggui.Button{0, 320, 45, buttons_shape, minus_text, theme.red, prev_click} 60 | app.clickables << ggui.Button{0, 345, 45, buttons_shape, plus_text, theme.green, next_click} 61 | app.elements << ggui.Text{id(.click_nb_text), 315, 45, "Click n°${app.actual_click}", text_cfg} 62 | 63 | app.gg.run() 64 | } 65 | 66 | fn on_frame(mut app App) { 67 | //Draw 68 | app.gg.begin() 69 | app.gui.render() 70 | app.gg.end() 71 | } 72 | 73 | fn on_event(e &gg.Event, mut app App){ 74 | match e.typ { 75 | .key_down { 76 | match e.key_code { 77 | .escape {app.gg.quit()} 78 | else {} 79 | } 80 | } 81 | .mouse_up { 82 | match e.mouse_button{ 83 | .left{ 84 | app.gui.check_clicks(e.mouse_x, e.mouse_y) 85 | } 86 | else{} 87 | } 88 | } 89 | else {} 90 | } 91 | } 92 | 93 | fn next_img(mut app ggui.Gui) { 94 | if mut app is App { 95 | app.actual_image += 1 96 | app.gui.change_text(id(.img_nb_text), "Image n°${app.actual_image}") 97 | } 98 | } 99 | 100 | fn prev_img(mut app ggui.Gui) { 101 | if mut app is App { 102 | app.actual_image -= 1 103 | app.gui.change_text(id(.img_nb_text), "Image n°${app.actual_image}") 104 | } 105 | } 106 | 107 | fn next_click(mut app ggui.Gui) { 108 | if mut app is App { 109 | app.actual_click += 1 110 | app.gui.change_text(id(.click_nb_text), "Click n°${app.actual_click}") 111 | } 112 | } 113 | 114 | fn prev_click(mut app ggui.Gui) { 115 | if mut app is App { 116 | app.actual_click -= 1 117 | app.gui.change_text(id(.click_nb_text), "Click n°${app.actual_click}") 118 | } 119 | } -------------------------------------------------------------------------------- /examples/mnist/t10k-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/examples/mnist/t10k-images-idx3-ubyte -------------------------------------------------------------------------------- /examples/mnist/t10k-labels-idx1-ubyte: -------------------------------------------------------------------------------- 1 | '                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                             
                                                -------------------------------------------------------------------------------- /examples/mnist/train-images-idx3-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/examples/mnist/train-images-idx3-ubyte -------------------------------------------------------------------------------- /examples/mnist/train-labels-idx1-ubyte: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/examples/mnist/train-labels-idx1-ubyte -------------------------------------------------------------------------------- /examples/mnist_tester.v: -------------------------------------------------------------------------------- 1 | import neural_networks as nn 2 | import gg 3 | import gx 4 | import ggui 5 | import math 6 | 7 | const path = 'saveMNIST-0014-93' 8 | const theme = ggui.CatppuchinLatte{} 9 | const buttons_shape = ggui.RoundedShape{20, 20, 5, .center} 10 | const px_size = 5 11 | 12 | enum Id { 13 | @none 14 | prediction 15 | text_0 16 | text_1 17 | text_2 18 | text_3 19 | text_4 20 | text_5 21 | text_6 22 | text_7 23 | text_8 24 | text_9 25 | } 26 | 27 | fn id(id Id) int { 28 | return int(id) 29 | } 30 | 31 | 32 | struct App { 33 | mut: 34 | gg &gg.Context = unsafe { nil } 35 | gui &ggui.Gui = unsafe { nil } 36 | clickables []ggui.Clickable 37 | elements []ggui.Element 38 | model nn.NeuralNetwork 39 | image []f64 = []f64{len:28*28} 40 | mouse_held bool 41 | } 42 | 43 | 44 | fn main() { 45 | mut app := &App{} 46 | app.gui = &ggui.Gui(app) 47 | app.gg = gg.new_context( 48 | width: 300 49 | height: 280 50 | create_window: true 51 | window_title: 'Mnist Tester' 52 | user_data: app 53 | bg_color: theme.base 54 | frame_fn: on_frame 55 | event_fn: on_event 56 | sample_count: 6 57 | ) 58 | app.model.load_model(path) 59 | 60 | erase_text := ggui.Text{0, 0, 0, "~", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 61 | text_cfg := gx.TextCfg{color:theme.text, size:20, align:.left, vertical_align:.top} 62 | 63 | app.clickables << ggui.Button{0, 280, 30, buttons_shape, erase_text, theme.flamingo, erase} 64 | 65 | app.elements << ggui.Rect{x:150, y:10, shape:ggui.RoundedShape{160, 265, 8, .top_left}, color:theme.mantle} 66 | 67 | app.elements << ggui.Text{id(.text_0), 125+45, 10+10, "0 : 00.00%", text_cfg} 68 | app.elements << ggui.Text{id(.text_1), 125+45, 10+35, "1 : 00.00%", text_cfg} 69 | app.elements << ggui.Text{id(.text_2), 125+45, 10+60, "2 : 00.00%", text_cfg} 70 | app.elements << ggui.Text{id(.text_3), 125+45, 10+85, "3 : 00.00%", text_cfg} 71 | app.elements << ggui.Text{id(.text_4), 125+45, 10+110, "4 : 00.00%", text_cfg} 72 | app.elements << ggui.Text{id(.text_5), 125+45, 10+135, "5 : 00.00%", text_cfg} 73 | app.elements << ggui.Text{id(.text_6), 125+45, 10+160, "6 : 00.00%", text_cfg} 74 | app.elements << ggui.Text{id(.text_7), 125+45, 10+185, "7 : 00.00%", text_cfg} 75 | app.elements << ggui.Text{id(.text_8), 125+45, 10+210, "8 : 00.00%", text_cfg} 76 | app.elements << ggui.Text{id(.text_9), 125+45, 10+235, "9 : 00.00%", text_cfg} 77 | 78 | app.elements << ggui.Text{id(.prediction), 14*px_size, 28*px_size, "-", gx.TextCfg{color:theme.text, size:40, align:.center, vertical_align:.top}} 79 | 80 | app.gg.run() 81 | } 82 | 83 | fn on_frame(mut app App) { 84 | 
centered_image := nn.center_image(app.image, 28, 28) 85 | output := softmax(app.model.forward_propagation(centered_image)) 86 | 87 | 88 | app.gui.change_text(id(.text_0), "0 : ${output[0]*100:.2}%") 89 | app.gui.change_text(id(.text_1), "1 : ${output[1]*100:.2}%") 90 | app.gui.change_text(id(.text_2), "2 : ${output[2]*100:.2}%") 91 | app.gui.change_text(id(.text_3), "3 : ${output[3]*100:.2}%") 92 | app.gui.change_text(id(.text_4), "4 : ${output[4]*100:.2}%") 93 | app.gui.change_text(id(.text_5), "5 : ${output[5]*100:.2}%") 94 | app.gui.change_text(id(.text_6), "6 : ${output[6]*100:.2}%") 95 | app.gui.change_text(id(.text_7), "7 : ${output[7]*100:.2}%") 96 | app.gui.change_text(id(.text_8), "8 : ${output[8]*100:.2}%") 97 | app.gui.change_text(id(.text_9), "9 : ${output[9]*100:.2}%") 98 | guess := nn.match_output_array_to_number(output) 99 | app.gui.change_text(id(.prediction), "$guess") 100 | 101 | 102 | 103 | app.gg.begin() 104 | app.render_image() 105 | app.gui.render() 106 | app.gg.end() 107 | } 108 | 109 | fn (mut app App) render_image() { 110 | img_size := 28 111 | for y in 0..img_size { 112 | for x in 0..img_size { 113 | px := u8(app.image[y*img_size+x]) 114 | app.gg.draw_rect_filled(f32(x*px_size), f32(y*px_size), px_size, px_size, gg.Color{px,px,px,255}) 115 | } 116 | } 117 | } 118 | 119 | fn softmax(a []f64) []f64 { 120 | e := []f64{len:a.len, init:math.exp(a[index]*10)} 121 | mut total := 0.0 122 | for elem in e { 123 | total += elem 124 | } 125 | output := []f64{len:e.len, init:e[index]/total} 126 | return output 127 | } 128 | 129 | fn erase(mut app ggui.Gui) { 130 | if mut app is App { 131 | app.image = []f64{len:28*28} 132 | } 133 | } 134 | 135 | fn on_event(e &gg.Event, mut app App){ 136 | match e.typ { 137 | .key_down { 138 | match e.key_code { 139 | .escape {app.gg.quit()} 140 | .backspace {erase(mut app)} 141 | else {} 142 | } 143 | } 144 | .mouse_down {app.mouse_held = true} 145 | .mouse_up { 146 | match e.mouse_button{ 147 | .left{app.gui.check_clicks(e.mouse_x, e.mouse_y)} 148 | else{} 149 | } 150 | app.mouse_held = false 151 | } 152 | else {} 153 | } 154 | app.check_buttons(e.mouse_x, e.mouse_y) 155 | } 156 | 157 | fn (mut app App) check_buttons(mouse_x f64, mouse_y f64){ 158 | if app.mouse_held{ 159 | if mouse_x < (px_size*28) && mouse_y < (px_size*28) && mouse_x >= 0 && mouse_y >= 0{ 160 | index := int(mouse_y/px_size)*28 + int(mouse_x/px_size) 161 | for l in -1..2{ 162 | for c in -1..2{ 163 | color := 255 - (120*c*c) - (120*l*l) 164 | if index%28+c < 28 && index%28+c >= 0 && index + c + l*28 < 28*28 && index + c + l*28 >= 0 { 165 | if (app.image[index+c+l*28] + color) > 255{ 166 | app.image[index+c+l*28] = 255 167 | }else{ 168 | app.image[index+c+l*28] = app.image[index+c+l*28] + color 169 | } 170 | } 171 | } 172 | } 173 | } 174 | } 175 | } -------------------------------------------------------------------------------- /examples/mnist_tester_acc.v: -------------------------------------------------------------------------------- 1 | import neural_networks_acc as nn 2 | import gg 3 | import gx 4 | import ggui 5 | import math 6 | 7 | const path = 'saveMNIST-0014-93' 8 | const theme = ggui.CatppuchinLatte{} 9 | const buttons_shape = ggui.RoundedShape{20, 20, 5, .center} 10 | const px_size = 5 11 | 12 | enum Id { 13 | @none 14 | prediction 15 | text_0 16 | text_1 17 | text_2 18 | text_3 19 | text_4 20 | text_5 21 | text_6 22 | text_7 23 | text_8 24 | text_9 25 | } 26 | 27 | fn id(id Id) int { 28 | return int(id) 29 | } 30 | 31 | 32 | struct App { 33 | mut: 34 | gg 
&gg.Context = unsafe { nil } 35 | gui &ggui.Gui = unsafe { nil } 36 | clickables []ggui.Clickable 37 | elements []ggui.Element 38 | model nn.NeuralNetwork 39 | image []f64 = []f64{len:28*28} 40 | mouse_held bool 41 | } 42 | 43 | 44 | fn main() { 45 | mut app := &App{} 46 | app.gui = &ggui.Gui(app) 47 | app.gg = gg.new_context( 48 | width: 300 49 | height: 280 50 | create_window: true 51 | window_title: 'Mnist Tester' 52 | user_data: app 53 | bg_color: theme.base 54 | frame_fn: on_frame 55 | event_fn: on_event 56 | sample_count: 6 57 | ) 58 | app.model.load_model(path) 59 | 60 | erase_text := ggui.Text{0, 0, 0, "~", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 61 | text_cfg := gx.TextCfg{color:theme.text, size:20, align:.left, vertical_align:.top} 62 | 63 | app.clickables << ggui.Button{0, 280, 30, buttons_shape, erase_text, theme.flamingo, erase} 64 | 65 | app.elements << ggui.Rect{x:150, y:10, shape:ggui.RoundedShape{160, 265, 8, .top_left}, color:theme.mantle} 66 | 67 | app.elements << ggui.Text{id(.text_0), 125+45, 10+10, "0 : 00.00%", text_cfg} 68 | app.elements << ggui.Text{id(.text_1), 125+45, 10+35, "1 : 00.00%", text_cfg} 69 | app.elements << ggui.Text{id(.text_2), 125+45, 10+60, "2 : 00.00%", text_cfg} 70 | app.elements << ggui.Text{id(.text_3), 125+45, 10+85, "3 : 00.00%", text_cfg} 71 | app.elements << ggui.Text{id(.text_4), 125+45, 10+110, "4 : 00.00%", text_cfg} 72 | app.elements << ggui.Text{id(.text_5), 125+45, 10+135, "5 : 00.00%", text_cfg} 73 | app.elements << ggui.Text{id(.text_6), 125+45, 10+160, "6 : 00.00%", text_cfg} 74 | app.elements << ggui.Text{id(.text_7), 125+45, 10+185, "7 : 00.00%", text_cfg} 75 | app.elements << ggui.Text{id(.text_8), 125+45, 10+210, "8 : 00.00%", text_cfg} 76 | app.elements << ggui.Text{id(.text_9), 125+45, 10+235, "9 : 00.00%", text_cfg} 77 | 78 | app.elements << ggui.Text{id(.prediction), 14*px_size, 28*px_size, "-", gx.TextCfg{color:theme.text, size:40, align:.center, vertical_align:.top}} 79 | 80 | app.gg.run() 81 | } 82 | 83 | fn on_frame(mut app App) { 84 | centered_image := nn.center_image(app.image, 28, 28) 85 | output := softmax(app.model.forward_propagation(centered_image)) 86 | 87 | 88 | app.gui.change_text(id(.text_0), "0 : ${output[0]*100:.2}%") 89 | app.gui.change_text(id(.text_1), "1 : ${output[1]*100:.2}%") 90 | app.gui.change_text(id(.text_2), "2 : ${output[2]*100:.2}%") 91 | app.gui.change_text(id(.text_3), "3 : ${output[3]*100:.2}%") 92 | app.gui.change_text(id(.text_4), "4 : ${output[4]*100:.2}%") 93 | app.gui.change_text(id(.text_5), "5 : ${output[5]*100:.2}%") 94 | app.gui.change_text(id(.text_6), "6 : ${output[6]*100:.2}%") 95 | app.gui.change_text(id(.text_7), "7 : ${output[7]*100:.2}%") 96 | app.gui.change_text(id(.text_8), "8 : ${output[8]*100:.2}%") 97 | app.gui.change_text(id(.text_9), "9 : ${output[9]*100:.2}%") 98 | guess := nn.match_output_array_to_number(output) 99 | app.gui.change_text(id(.prediction), "$guess") 100 | 101 | 102 | 103 | app.gg.begin() 104 | app.render_image() 105 | app.gui.render() 106 | app.gg.end() 107 | } 108 | 109 | fn (mut app App) render_image() { 110 | img_size := 28 111 | for y in 0..img_size { 112 | for x in 0..img_size { 113 | px := u8(app.image[y*img_size+x]) 114 | app.gg.draw_rect_filled(f32(x*px_size), f32(y*px_size), px_size, px_size, gg.Color{px,px,px,255}) 115 | } 116 | } 117 | } 118 | 119 | fn softmax(a []f64) []f64 { 120 | e := []f64{len:a.len, init:math.exp(a[index]*10)} 121 | mut total := 0.0 122 | for elem in e { 123 | total += elem 124 | } 
125 | output := []f64{len:e.len, init:e[index]/total} 126 | return output 127 | } 128 | 129 | fn erase(mut app ggui.Gui) { 130 | if mut app is App { 131 | app.image = []f64{len:28*28} 132 | } 133 | } 134 | 135 | fn on_event(e &gg.Event, mut app App){ 136 | match e.typ { 137 | .key_down { 138 | match e.key_code { 139 | .escape {app.gg.quit()} 140 | .backspace {erase(mut app)} 141 | else {} 142 | } 143 | } 144 | .mouse_down {app.mouse_held = true} 145 | .mouse_up { 146 | match e.mouse_button{ 147 | .left{app.gui.check_clicks(e.mouse_x, e.mouse_y)} 148 | else{} 149 | } 150 | app.mouse_held = false 151 | } 152 | else {} 153 | } 154 | app.check_buttons(e.mouse_x, e.mouse_y) 155 | } 156 | 157 | fn (mut app App) check_buttons(mouse_x f64, mouse_y f64){ 158 | if app.mouse_held{ 159 | if mouse_x < (px_size*28) && mouse_y < (px_size*28) && mouse_x >= 0 && mouse_y >= 0{ 160 | index := int(mouse_y/px_size)*28 + int(mouse_x/px_size) 161 | for l in -1..2{ 162 | for c in -1..2{ 163 | color := 255 - (120*c*c) - (120*l*l) 164 | if index%28+c < 28 && index%28+c >= 0 && index + c + l*28 < 28*28 && index + c + l*28 >= 0 { 165 | if (app.image[index+c+l*28] + color) > 255{ 166 | app.image[index+c+l*28] = 255 167 | }else{ 168 | app.image[index+c+l*28] = app.image[index+c+l*28] + color 169 | } 170 | } 171 | } 172 | } 173 | } 174 | } 175 | } -------------------------------------------------------------------------------- /examples/saveMNIST-0014-93: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/examples/saveMNIST-0014-93 -------------------------------------------------------------------------------- /examples/saveXOR: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Eliyaan/NeuralNetworks-V-Module/ea98f318425dfde5b8688145c6ec58a58abbcdaa/examples/saveXOR -------------------------------------------------------------------------------- /examples/train_mnist.v: -------------------------------------------------------------------------------- 1 | import neural_networks as nn 2 | import math 3 | import os 4 | import rand 5 | import rand.config as rdconfig 6 | 7 | /* 8 | If you get a lot of errors you probably need to run : 9 | 10 | v install vsl 11 | 12 | and then run : 13 | 14 | v run . 
15 | */ 16 | 17 | fn main() { 18 | mut model := nn.NeuralNetwork.new(0) 19 | 20 | println('Creating a new model') 21 | model.add_layer(nn.Dense.new(784, 400, 0.01, 0.01)) 22 | model.add_layer(nn.Activation.new(.leaky_relu)) 23 | model.add_layer(nn.Dense.new(400, 300, 0.01, 0.01)) 24 | model.add_layer(nn.Activation.new(.leaky_relu)) 25 | model.add_layer(nn.Dense.new(300, 200, 0.01, 0.01)) 26 | model.add_layer(nn.Activation.new(.leaky_relu)) 27 | model.add_layer(nn.Dense.new(200, 10, 0.01, 0.01)) 28 | model.add_layer(nn.Activation.new(.leaky_relu)) 29 | // model.load_model('') 30 | 31 | for i in 0..30 { 32 | println("Epoch n°$i") 33 | training_parameters := nn.MinibatchesBackpropTrainingParams{ 34 | learning_rate: 0.02 35 | momentum: 0.9 36 | batch_size: 100 37 | classifier: true 38 | nb_epochs: 1 39 | print_interval: 1 40 | print_batch_interval: 10 41 | cost_function: .mse // mean squared error 42 | training: load_mnist_training(60000) 43 | test: load_mnist_test(10000) 44 | test_params: nn.TestParams{ 45 | print_start: 0 46 | print_end: 3 47 | training_interval: 1 48 | training_batch_interval: 600 49 | } 50 | } 51 | 52 | model.train(training_parameters) 53 | model.save_model('saveMNIST-${model.cost}-${model.accuracy}') 54 | } 55 | } 56 | 57 | @[direct_array_access] 58 | fn load_mnist_training(nb_training int) nn.Dataset { 59 | println('Loading training mnist...') 60 | train_labels := os.open('mnist/train-labels-idx1-ubyte') or { panic(err) } 61 | train_images := os.open('mnist/train-images-idx3-ubyte') or { panic(err) } 62 | mut dataset := nn.Dataset{[][]f64{}, [][]f64{}} 63 | mut order_array := []u64{len:nb_training, init:u64(index)} 64 | rand.shuffle(mut order_array, rdconfig.ShuffleConfigStruct{}) or {panic(err)} 65 | for i in order_array { 66 | dataset.inputs << [train_images.read_bytes_at(784, i * 784 + 16).map(f64(it))] 67 | dataset.expected_outputs << [ 68 | nn.match_number_to_classifier_array(train_labels.read_bytes_at(1, i + 8)[0]) 69 | ] 70 | } 71 | augment_images(mut dataset) 72 | println('Finished loading training mnist!') 73 | return dataset 74 | } 75 | 76 | @[direct_array_access] 77 | fn load_mnist_test(nb_tests int) nn.Dataset { 78 | println('Loading test mnist...') 79 | test_labels := os.open('mnist/t10k-labels-idx1-ubyte') or { panic(err) } 80 | test_images := os.open('mnist/t10k-images-idx3-ubyte') or { panic(err) } 81 | mut dataset := nn.Dataset{[][]f64{}, [][]f64{}} 82 | for i in 0 .. 
nb_tests { 83 | dataset.inputs << [test_images.read_bytes_at(784, i * 784 + 16).map(f64(it))] 84 | dataset.expected_outputs << [ 85 | nn.match_number_to_classifier_array(test_labels.read_bytes_at(1, i + 8)[0]) 86 | ] 87 | } 88 | augment_images(mut dataset) 89 | println('Finished loading test mnist!') 90 | return dataset 91 | } 92 | 93 | fn augment_images(mut d nn.Dataset) { 94 | for mut input in d.inputs { 95 | augment(mut input) 96 | } 97 | } 98 | 99 | fn augment(mut input []f64) { 100 | input = nn.rotate(input, rand.f64_in_range(-45, 45) or {0}, 28, 28) 101 | mut image_side_size := nn.ceil(math.sqrt(input.len)) 102 | input = nn.scale_img(input, rand.f64_in_range(1-0.2, 1+0.2) or {0}, image_side_size, image_side_size) 103 | image_side_size = nn.ceil(math.sqrt(input.len)) 104 | input = nn.rand_noise(input, 15, 255) 105 | input = nn.center_image(input, image_side_size, image_side_size) 106 | input = nn.crop(input, image_side_size, image_side_size, 28, 28) 107 | } -------------------------------------------------------------------------------- /examples/train_xor.v: -------------------------------------------------------------------------------- 1 | import neural_networks as nn 2 | import os 3 | 4 | /* 5 | If you get a lot of errors you certainly need to run : 6 | 7 | v install vsl 8 | 9 | and then run : 10 | 11 | v run . 12 | */ 13 | 14 | fn main() { 15 | mut model := nn.NeuralNetwork.new(0) 16 | 17 | if os.input('Do you want to load a saved model ? [y/n]') != 'y' { 18 | println('Creating a new model') 19 | model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65)) 20 | model.add_layer(nn.Activation.new(.leaky_relu)) 21 | model.add_layer(nn.Dense.new(3, 1, 0.6, 0.65)) 22 | model.add_layer(nn.Activation.new(.leaky_relu)) 23 | } else { 24 | println('Loading the saved model') 25 | model.load_model('saveXOR') 26 | } 27 | 28 | training_parameters := nn.BackpropTrainingParams{ 29 | learning_rate: 0.37 30 | momentum: 0.9 31 | nb_epochs: 300 32 | print_interval: 25 33 | cost_function: .mse // mean squared error 34 | training: nn.Dataset { 35 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 36 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 37 | } 38 | test: nn.Dataset { 39 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 40 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 41 | } 42 | test_params: nn.TestParams{ 43 | print_start: 0 44 | print_end: 3 45 | training_interval: 100 46 | } 47 | } 48 | model.train(training_parameters) 49 | // model.test(training_parameters) this can be use to test the model outside the training 50 | model.save_model('saveXOR') 51 | } 52 | -------------------------------------------------------------------------------- /examples/train_xor_minibatches.v: -------------------------------------------------------------------------------- 1 | import neural_networks as nn 2 | import os 3 | 4 | /* 5 | If you get a lot of errors you probably need to run : 6 | 7 | v install vsl 8 | 9 | and then run : 10 | 11 | v run . 12 | */ 13 | 14 | /* 15 | The minibatches are not very efficient on this too little dataset that 16 | does not require generalisation over the whole data. 17 | It's here to show how to use it. 18 | */ 19 | 20 | fn main() { 21 | mut model := nn.NeuralNetwork.new(0) 22 | 23 | if os.input('Do you want to load a saved model ? 
[y/n]') != 'y' { 24 | println('Creating a new model') 25 | model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65)) 26 | model.add_layer(nn.Activation.new(.leaky_relu)) 27 | model.add_layer(nn.Dense.new(3, 1, 0.6, 0.65)) 28 | model.add_layer(nn.Activation.new(.leaky_relu)) 29 | } else { 30 | println('Loading the saved model') 31 | model.load_model('saveXOR') 32 | } 33 | 34 | training_parameters := nn.MinibatchesBackpropTrainingParams{ 35 | learning_rate: 1.0 36 | momentum: 0.5 37 | batch_size: 2 38 | nb_epochs: 30 39 | print_interval: 1 40 | print_batch_interval: 2 41 | cost_function: .mse // mean squared error 42 | training: nn.Dataset { 43 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 44 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 45 | } 46 | test: nn.Dataset { 47 | inputs: [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]] 48 | expected_outputs: [[0.0], [1.0], [1.0], [0.0]] 49 | } 50 | test_params: nn.TestParams{ 51 | print_start: 0 52 | print_end: 3 53 | training_interval: 30 54 | training_batch_interval: 2 55 | } 56 | } 57 | model.train(training_parameters) 58 | 59 | model.save_model('saveXOR') 60 | } 61 | -------------------------------------------------------------------------------- /examples/visualise_data-augmented_mnist.v: -------------------------------------------------------------------------------- 1 | import neural_networks as nn 2 | import ggui 3 | import os 4 | import math 5 | import gg 6 | import gx 7 | import rand as rd 8 | import rand.config as rdconfig 9 | 10 | 11 | const ( 12 | win_width = 601 13 | win_height = 601 14 | theme = ggui.CatppuchinMocha{} 15 | bg_color = theme.base 16 | px_size = 4 17 | box_offset_x = 300 18 | box_offset_y = 10 19 | buttons_shape = ggui.RoundedShape{20, 20, 5, .top_left} 20 | ) 21 | 22 | enum Id { 23 | @none 24 | img_nb_text 25 | img_label 26 | final_scale_text 27 | noise_range_text 28 | noise_probability_text 29 | scale_range_text 30 | rota_range_text 31 | } 32 | 33 | fn id(id Id) int { 34 | return int(id) 35 | } 36 | 37 | @[heap] 38 | struct App { 39 | mut: 40 | gg &gg.Context = unsafe { nil } 41 | gui &ggui.Gui = unsafe { nil } 42 | clickables []ggui.Clickable 43 | elements []ggui.Element 44 | base_dataset nn.Dataset 45 | dataset nn.Dataset 46 | actual_image int 47 | final_scale f64 = 1 48 | noise_probability int = 15 49 | noise_range int = 255 50 | scale_range f64 = 0.2 51 | rota_range int = 45 52 | final_nb_pixels int 53 | augment_asked bool = true 54 | } 55 | 56 | fn main() { 57 | mut app := &App{} 58 | app.gui = &ggui.Gui(app) 59 | app.gg = gg.new_context( 60 | width: win_width 61 | height: win_height 62 | create_window: true 63 | window_title: 'Mnist Data Augmentation Visualiser' 64 | user_data: app 65 | bg_color: bg_color 66 | frame_fn: on_frame 67 | event_fn: on_event 68 | sample_count: 4 69 | ui_mode: true 70 | ) 71 | app.base_dataset = load_mnist_training(100) 72 | app.dataset = app.base_dataset.clone() 73 | 74 | plus_text := ggui.Text{0, 0, 0, "+", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 75 | minus_text := ggui.Text{0, 0, 0, "-", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 76 | reload_text := ggui.Text{0, 0, 0, "~", gx.TextCfg{color:theme.base, size:20, align:.center, vertical_align:.middle}} 77 | button_description_cfg := gx.TextCfg{color:theme.text, size:20, align:.right, vertical_align:.top} 78 | 79 | app.clickables << ggui.Button{0, box_offset_x+105, box_offset_y+5, buttons_shape, reload_text, theme.flamingo, ask_augment} 80 | 81 | 
app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+5, buttons_shape, minus_text, theme.red, prev_img} 82 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+5, buttons_shape, plus_text, theme.green, next_img} 83 | 84 | app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+30, buttons_shape, minus_text, theme.red, sub_final_scale} 85 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+30, buttons_shape, plus_text, theme.green, add_final_scale} 86 | 87 | app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+55, buttons_shape, minus_text, theme.red, sub_noise_range} 88 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+55, buttons_shape, plus_text, theme.green, add_noise_range} 89 | 90 | app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+80, buttons_shape, minus_text, theme.red, sub_noise_probability} 91 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+80, buttons_shape, plus_text, theme.green, add_noise_probability} 92 | 93 | app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+105, buttons_shape, minus_text, theme.red, sub_scale_range} 94 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+105, buttons_shape, plus_text, theme.green, add_scale_range} 95 | 96 | app.clickables << ggui.Button{0, box_offset_x+50, box_offset_y+130, buttons_shape, minus_text, theme.red, sub_rota_range} 97 | app.clickables << ggui.Button{0, box_offset_x+75, box_offset_y+130, buttons_shape, plus_text, theme.green, add_rota_range} 98 | 99 | app.elements << ggui.Text{id(.img_label), 14*px_size, 28*px_size, nn.match_classifier_array_to_number(app.dataset.expected_outputs[app.actual_image]).str(), gx.TextCfg{color:theme.text, size:20, align:.center, vertical_align:.top}} 100 | 101 | app.elements << ggui.Rect{x:150, y:10, shape:ggui.RoundedShape{280, 160, 5, .top_left}, color:theme.mantle} 102 | 103 | app.elements << ggui.Text{id(.img_nb_text), box_offset_x+45, box_offset_y+5, "Image n°${app.actual_image}", button_description_cfg} 104 | 105 | app.elements << ggui.Text{id(.final_scale_text), box_offset_x+45, box_offset_y+30, "Final scale: ${app.final_scale}", button_description_cfg} 106 | 107 | app.elements << ggui.Text{id(.noise_range_text), box_offset_x+45, box_offset_y+55, "Noise range: ${app.noise_range}", button_description_cfg} 108 | 109 | app.elements << ggui.Text{id(.noise_probability_text), box_offset_x+45, box_offset_y+80, "Noise probability: ${app.noise_probability}", button_description_cfg} 110 | 111 | app.elements << ggui.Text{id(.scale_range_text), box_offset_x+45, box_offset_y+105, "Scale range: ${app.scale_range}", button_description_cfg} 112 | 113 | app.elements << ggui.Text{id(.rota_range_text), box_offset_x+45, box_offset_y+130, "Rotation range: ${app.rota_range}", button_description_cfg} 114 | 115 | app.augment_images() 116 | app.gg.run() 117 | } 118 | 119 | fn on_frame(mut app App) { 120 | if app.augment_asked { 121 | app.augment(app.actual_image) 122 | app.augment_asked = false 123 | mut img_label_text := app.gui.get_element_with_id(id(.img_label)) or {panic(err)} 124 | img_label_text.y = app.final_nb_pixels*px_size 125 | img_label_text.x = img_label_text.y/2 126 | } 127 | //Draw 128 | app.gg.begin() 129 | app.gui.render() 130 | app.render_image() 131 | app.gg.end() 132 | } 133 | 134 | fn on_event(e &gg.Event, mut app App){ 135 | match e.typ { 136 | .key_down { 137 | match e.key_code { 138 | .escape {app.gg.quit()} 139 | else {} 140 | } 141 | } 142 | .mouse_up { 143 | match 
e.mouse_button{ 144 | .left{ 145 | app.gui.check_clicks(int(e.mouse_x), int(e.mouse_y)) 146 | } 147 | else{} 148 | } 149 | } 150 | else {} 151 | } 152 | } 153 | 154 | fn (mut app App) augment_images() { 155 | println("Start data augmentation") 156 | for i, _ in app.dataset.inputs { 157 | app.augment(i) 158 | } 159 | println("Data augmentation finished!") 160 | } 161 | 162 | fn (mut app App) augment(i int) { 163 | app.dataset.inputs[i] = app.base_dataset.inputs[i].clone() 164 | app.dataset.inputs[i] = nn.rotate(app.dataset.inputs[i], rd.f64_in_range(-app.rota_range, app.rota_range) or {0}, 28, 28) 165 | mut image_side_size := nn.ceil(math.sqrt(app.dataset.inputs[i].len)) 166 | app.dataset.inputs[i] = nn.scale_img(app.dataset.inputs[i], rd.f64_in_range(1-app.scale_range, 1+app.scale_range) or {0}, image_side_size, image_side_size) 167 | image_side_size = nn.ceil(math.sqrt(app.dataset.inputs[i].len)) 168 | app.dataset.inputs[i] = nn.center_image(app.dataset.inputs[i], image_side_size, image_side_size) 169 | app.dataset.inputs[i] = nn.rand_noise(app.dataset.inputs[i], app.noise_probability, app.noise_range) 170 | app.dataset.inputs[i] = nn.crop(app.dataset.inputs[i], image_side_size, image_side_size, 28, 28) 171 | image_side_size = nn.ceil(math.sqrt(app.dataset.inputs[i].len)) 172 | app.dataset.inputs[i] = nn.scale_img(app.dataset.inputs[i], app.final_scale, image_side_size, image_side_size) 173 | app.final_nb_pixels = int(math.sqrt(app.dataset.inputs[i].len)) 174 | } 175 | 176 | fn next_img(mut app ggui.Gui) { 177 | if mut app is App { 178 | if app.actual_image == app.dataset.inputs.len - 1 { 179 | app.actual_image = 0 180 | }else{ 181 | app.actual_image += 1 182 | } 183 | app.gui.change_text(id(.img_nb_text), "Image n°${app.actual_image}") 184 | app.gui.change_text(id(.img_label), nn.match_classifier_array_to_number(app.dataset.expected_outputs[app.actual_image]).str()) 185 | 186 | ask_augment(mut app) 187 | } 188 | } 189 | 190 | fn prev_img(mut app ggui.Gui) { 191 | if mut app is App { 192 | if app.actual_image == 0 { 193 | app.actual_image = app.dataset.inputs.len - 1 194 | }else{ 195 | app.actual_image -= 1 196 | } 197 | app.gui.change_text(id(.img_nb_text), "Image n°${app.actual_image}") 198 | app.gui.change_text(id(.img_label), nn.match_classifier_array_to_number(app.dataset.expected_outputs[app.actual_image]).str()) 199 | ask_augment(mut app) 200 | } 201 | } 202 | 203 | fn ask_augment(mut app ggui.Gui) { 204 | if mut app is App { 205 | app.augment_asked = true 206 | } 207 | } 208 | 209 | fn add_final_scale(mut app ggui.Gui) { 210 | if mut app is App { 211 | app.final_scale = math.round_sig(app.final_scale+0.05, 2) 212 | app.gui.change_text(id(.final_scale_text), "Final scale: ${app.final_scale}") 213 | ask_augment(mut app) 214 | } 215 | } 216 | 217 | fn sub_final_scale(mut app ggui.Gui) { 218 | if mut app is App { 219 | app.final_scale = math.round_sig(app.final_scale-0.05, 2) 220 | app.gui.change_text(id(.final_scale_text), "Final scale: ${app.final_scale}") 221 | ask_augment(mut app) 222 | } 223 | } 224 | 225 | fn add_noise_range(mut app ggui.Gui) { 226 | if mut app is App { 227 | app.noise_range += 1 228 | app.gui.change_text(id(.noise_range_text), "Noise range: ${app.noise_range}") 229 | ask_augment(mut app) 230 | } 231 | } 232 | 233 | fn sub_noise_range(mut app ggui.Gui) { 234 | if mut app is App { 235 | app.noise_range -= 1 236 | app.gui.change_text(id(.noise_range_text), "Noise range: ${app.noise_range}") 237 | ask_augment(mut app) 238 | } 239 | } 240 | 241 | fn 
add_noise_probability(mut app ggui.Gui) { 242 | if mut app is App { 243 | app.noise_probability += 1 244 | app.gui.change_text(id(.noise_probability_text), "Noise probability: ${app.noise_probability}") 245 | ask_augment(mut app) 246 | } 247 | } 248 | 249 | fn sub_noise_probability(mut app ggui.Gui) { 250 | if mut app is App { 251 | app.noise_probability -= 1 252 | app.gui.change_text(id(.noise_probability_text), "Noise probability: ${app.noise_probability}") 253 | ask_augment(mut app) 254 | } 255 | } 256 | 257 | fn add_scale_range(mut app ggui.Gui) { 258 | if mut app is App { 259 | app.scale_range = math.round_sig(app.scale_range+0.01, 2) 260 | app.gui.change_text(id(.scale_range_text), "Scale range: ${app.scale_range}") 261 | ask_augment(mut app) 262 | } 263 | } 264 | 265 | fn sub_scale_range(mut app ggui.Gui) { 266 | if mut app is App { 267 | app.scale_range = math.round_sig(app.scale_range-0.01, 2) 268 | app.gui.change_text(id(.scale_range_text), "Scale range: ${app.scale_range}") 269 | ask_augment(mut app) 270 | } 271 | } 272 | 273 | fn add_rota_range(mut app ggui.Gui) { 274 | if mut app is App { 275 | app.rota_range += 1 276 | app.gui.change_text(id(.rota_range_text), "Rotation range: ${app.rota_range}") 277 | ask_augment(mut app) 278 | } 279 | } 280 | 281 | fn sub_rota_range(mut app ggui.Gui) { 282 | if mut app is App { 283 | app.rota_range -= 1 284 | app.gui.change_text(id(.rota_range_text), "Rotation range: ${app.rota_range}") 285 | ask_augment(mut app) 286 | } 287 | } 288 | 289 | fn (mut app App) render_image() { 290 | img_size := int(math.sqrt(app.dataset.inputs[app.actual_image].len)) 291 | for y in 0..img_size { 292 | for x in 0..img_size { 293 | px := u8(app.dataset.inputs[app.actual_image][y*img_size+x]) 294 | app.gg.draw_rect_filled(f32(x*px_size), f32(y*px_size), px_size, px_size, gx.Color{px,px,px,255}) 295 | } 296 | } 297 | } 298 | 299 | @[direct_array_access] 300 | fn load_mnist_training(nb_training int) nn.Dataset { 301 | println('Loading training mnist...') 302 | train_labels := os.open('mnist/train-labels-idx1-ubyte') or { panic(err) } 303 | train_images := os.open('mnist/train-images-idx3-ubyte') or { panic(err) } 304 | mut dataset := nn.Dataset{[][]f64{}, [][]f64{}} 305 | mut order_array := []u64{len:nb_training, init:u64(index)} 306 | rd.shuffle(mut order_array, rdconfig.ShuffleConfigStruct{}) or {panic(err)} 307 | for i in order_array { 308 | dataset.inputs << [train_images.read_bytes_at(784, i * 784 + 16).map(f64(it))] 309 | dataset.expected_outputs << [ 310 | nn.match_number_to_classifier_array(train_labels.read_bytes_at(1, i + 8)[0]) 311 | ] 312 | } 313 | println('Finished loading training mnist!') 314 | return dataset 315 | } 316 | 317 | @[direct_array_access] 318 | fn load_mnist_test(nb_tests int) nn.Dataset { 319 | println('Loading test mnist...') 320 | test_labels := os.open('mnist/t10k-labels-idx1-ubyte') or { panic(err) } 321 | test_images := os.open('mnist/t10k-images-idx3-ubyte') or { panic(err) } 322 | mut dataset := nn.Dataset{[][]f64{}, [][]f64{}} 323 | for i in 0 .. 
nb_tests { 324 | dataset.inputs << [test_images.read_bytes_at(784, i * 784 + 16).map(f64(it))] 325 | dataset.expected_outputs << [ 326 | nn.match_number_to_classifier_array(test_labels.read_bytes_at(1, i + 8)[0]) 327 | ] 328 | } 329 | println('Finished loading test mnist!') 330 | return dataset 331 | } -------------------------------------------------------------------------------- /ggui/button.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | import gx 3 | 4 | @[heap] 5 | pub struct Button { 6 | pub mut: 7 | id int 8 | x f32 9 | y f32 10 | shape Area 11 | text Text 12 | color gx.Color 13 | click_func fn (mut g Gui) @[required] 14 | } 15 | 16 | pub fn (b Button) render(mut g Gui, x_offset f32, y_offset f32) { 17 | x_coo := b.x + x_offset 18 | y_coo := b.y + y_offset 19 | mut text_x_offset := x_coo 20 | mut text_y_offset := y_coo 21 | match b.shape.relative_pos { 22 | .center {} 23 | .left {text_x_offset += b.shape.width/2} 24 | .right {text_x_offset -= b.shape.width/2} 25 | .top {text_y_offset += b.shape.height/2} 26 | .bottom {text_y_offset -= b.shape.height/2} 27 | .top_left { 28 | text_x_offset += b.shape.width/2 29 | text_y_offset += b.shape.height/2 30 | } 31 | .top_right { 32 | text_x_offset -= b.shape.width/2 33 | text_y_offset += b.shape.height/2 34 | } 35 | .bottom_left { 36 | text_x_offset += b.shape.width/2 37 | text_y_offset -= b.shape.height/2 38 | } 39 | .bottom_right { 40 | text_x_offset -= b.shape.width/2 41 | text_y_offset -= b.shape.height/2 42 | } 43 | } 44 | b.shape.render(mut g, x_coo, y_coo, b.color) 45 | b.text.render(mut g, text_x_offset, text_y_offset) 46 | } -------------------------------------------------------------------------------- /ggui/gui.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | import gg 3 | 4 | pub interface Gui { 5 | mut: 6 | gg &gg.Context 7 | clickables []Clickable 8 | elements []Element 9 | } 10 | 11 | pub fn (mut g Gui) get_clickables_with_id(id int) []&Clickable{ 12 | mut result := []&Clickable{} 13 | for obj in g.clickables { 14 | unsafe { 15 | if obj.id == id { 16 | result << &obj 17 | } 18 | } 19 | } 20 | return result 21 | } 22 | 23 | pub fn (mut g Gui) get_clickable_with_id(id int) !&Clickable{ 24 | for obj in g.clickables { 25 | unsafe { 26 | if obj.id == id { 27 | return &obj 28 | } 29 | } 30 | } 31 | return error("No object matching") 32 | } 33 | 34 | pub fn (mut g Gui) get_elements_with_id(id int) []&Element{ 35 | mut result := []&Element{} 36 | for obj in g.elements { 37 | unsafe { 38 | if obj.id == id { 39 | result << &obj 40 | } 41 | } 42 | } 43 | return result 44 | } 45 | 46 | pub fn (mut g Gui) get_element_with_id(id int) !&Element{ 47 | for obj in g.elements { 48 | unsafe { 49 | if obj.id == id { 50 | return &obj 51 | } 52 | } 53 | } 54 | return error("No object matching") 55 | } 56 | 57 | pub fn (mut g Gui) render() { 58 | g.render_elements() 59 | g.render_clickables() 60 | } 61 | 62 | pub fn (mut g Gui) render_clickables() { 63 | for mut obj in g.clickables { 64 | obj.render(mut g, 0, 0) 65 | } 66 | } 67 | 68 | pub fn (mut g Gui) render_elements() { 69 | for mut elem in g.elements { 70 | elem.render(mut g, 0, 0) 71 | } 72 | } 73 | 74 | pub fn (mut g Gui) check_clicks(mouse_x f32, mouse_y f32) { 75 | click_x := mouse_x 76 | click_y := mouse_y 77 | for obj in g.clickables { 78 | x_rel, y_rel := obj.shape.offset() 79 | obj_x := obj.x + x_rel 80 | obj_y := obj.y + y_rel 81 | 82 | if in_range(click_x, click_y, obj_x, 
obj_y, obj_x + obj.shape.width, obj_y + obj.shape.height) { 83 | obj.click_func(mut g) 84 | } 85 | } 86 | } 87 | 88 | pub fn (mut g Gui) change_text(id int, text string) { 89 | mut text_obj := g.get_element_with_id(id) or {panic(err)} 90 | if mut text_obj is Text { 91 | text_obj.text = text 92 | } 93 | } -------------------------------------------------------------------------------- /ggui/gui_objects.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | import gx 3 | 4 | pub struct CatppuchinMocha { 5 | pub: 6 | crust gx.Color = gx.Color{17, 17, 27, 255} 7 | mantle gx.Color = gx.Color{24, 24, 37, 255} 8 | base gx.Color = gx.Color{30, 30, 46, 255} 9 | surface0 gx.Color = gx.Color{49, 50, 68, 255} 10 | surface1 gx.Color = gx.Color{69, 71, 90, 255} 11 | surface2 gx.Color = gx.Color{88, 91, 112, 255} 12 | overlay0 gx.Color = gx.Color{108, 112, 134, 255} 13 | overlay1 gx.Color = gx.Color{127, 132, 156, 255} 14 | overlay2 gx.Color = gx.Color{147, 153, 178, 255} 15 | subtext0 gx.Color = gx.Color{166, 173, 200, 255} 16 | subtext1 gx.Color = gx.Color{186, 194, 222, 255} 17 | text gx.Color = gx.Color{205, 214, 244, 255} 18 | lavender gx.Color = gx.Color{180, 190, 254, 255} 19 | blue gx.Color = gx.Color{137, 180, 250, 255} 20 | sapphire gx.Color = gx.Color{116, 199, 236, 255} 21 | sky gx.Color = gx.Color{137, 220, 235, 255} 22 | teal gx.Color = gx.Color{148, 227, 213, 255} 23 | green gx.Color = gx.Color{166, 214, 161, 255} 24 | yellow gx.Color = gx.Color{249, 226, 175, 255} 25 | peach gx.Color = gx.Color{250, 179, 135, 255} 26 | maroon gx.Color = gx.Color{235, 160, 172, 255} 27 | red gx.Color = gx.Color{243, 139, 168, 255} 28 | mauve gx.Color = gx.Color{203, 166, 247, 255} 29 | pink gx.Color = gx.Color{245, 194, 231, 255} 30 | flamingo gx.Color = gx.Color{242, 205, 205, 255} 31 | rosewater gx.Color = gx.Color{245, 224, 220, 255} 32 | } 33 | 34 | pub struct CatppuchinFrappe { 35 | pub: 36 | crust gx.Color = gx.Color{35, 38, 52, 255} 37 | mantle gx.Color = gx.Color{41, 44, 60, 255} 38 | base gx.Color = gx.Color{48, 52, 70, 255} 39 | surface0 gx.Color = gx.Color{65, 69, 89, 255} 40 | surface1 gx.Color = gx.Color{81, 87, 109, 255} 41 | surface2 gx.Color = gx.Color{98, 104, 128, 255} 42 | overlay0 gx.Color = gx.Color{115, 121, 148, 255} 43 | overlay1 gx.Color = gx.Color{131, 139, 167, 255} 44 | overlay2 gx.Color = gx.Color{148, 156, 187, 255} 45 | subtext0 gx.Color = gx.Color{165, 173, 206, 255} 46 | subtext1 gx.Color = gx.Color{181, 191, 226, 255} 47 | text gx.Color = gx.Color{198, 208, 245, 255} 48 | lavender gx.Color = gx.Color{186, 187, 241, 255} 49 | blue gx.Color = gx.Color{140, 170, 238, 255} 50 | sapphire gx.Color = gx.Color{133, 193, 220, 255} 51 | sky gx.Color = gx.Color{153, 209, 219, 255} 52 | teal gx.Color = gx.Color{129, 200, 190, 255} 53 | green gx.Color = gx.Color{166, 209, 137, 255} 54 | yellow gx.Color = gx.Color{229, 200, 144, 255} 55 | peach gx.Color = gx.Color{239, 159, 118, 255} 56 | maroon gx.Color = gx.Color{234, 153, 156, 255} 57 | red gx.Color = gx.Color{231, 130, 132, 255} 58 | mauve gx.Color = gx.Color{202, 158, 230, 255} 59 | pink gx.Color = gx.Color{244, 184, 228, 255} 60 | flamingo gx.Color = gx.Color{238, 190, 190, 255} 61 | rosewater gx.Color = gx.Color{242, 213, 207, 255} 62 | } 63 | 64 | pub struct CatppuchinMacchiato { 65 | pub: 66 | crust gx.Color = gx.Color{24, 25, 38, 255} 67 | mantle gx.Color = gx.Color{30, 32, 48, 255} 68 | base gx.Color = gx.Color{36, 39, 58, 255} 69 | surface0 gx.Color = gx.Color{54, 58, 79, 255} 
70 | surface1 gx.Color = gx.Color{73, 77, 100, 255} 71 | surface2 gx.Color = gx.Color{91, 96, 120, 255} 72 | overlay0 gx.Color = gx.Color{110, 115, 141, 255} 73 | overlay1 gx.Color = gx.Color{128, 135, 162, 255} 74 | overlay2 gx.Color = gx.Color{147, 154, 183, 255} 75 | subtext0 gx.Color = gx.Color{165, 173, 203, 255} 76 | subtext1 gx.Color = gx.Color{184, 192, 224, 255} 77 | text gx.Color = gx.Color{202, 211, 245, 255} 78 | lavender gx.Color = gx.Color{183, 189, 248, 255} 79 | blue gx.Color = gx.Color{138, 173, 244, 255} 80 | sapphire gx.Color = gx.Color{125, 196, 228, 255} 81 | sky gx.Color = gx.Color{145, 215, 227, 255} 82 | teal gx.Color = gx.Color{139, 213, 202, 255} 83 | green gx.Color = gx.Color{166, 218, 149, 255} 84 | yellow gx.Color = gx.Color{238, 212, 159, 255} 85 | peach gx.Color = gx.Color{245, 169, 127, 255} 86 | maroon gx.Color = gx.Color{238, 153, 160, 255} 87 | red gx.Color = gx.Color{237, 135, 150, 255} 88 | mauve gx.Color = gx.Color{198, 160, 246, 255} 89 | pink gx.Color = gx.Color{245, 189, 230, 255} 90 | flamingo gx.Color = gx.Color{240, 198, 198, 255} 91 | rosewater gx.Color = gx.Color{244, 219, 214, 255} 92 | } 93 | 94 | pub struct CatppuchinLatte { 95 | pub: 96 | crust gx.Color = gx.Color{220, 224, 232, 255} 97 | mantle gx.Color = gx.Color{230, 233, 239, 255} 98 | base gx.Color = gx.Color{239, 241, 245, 255} 99 | surface0 gx.Color = gx.Color{204, 208, 218, 255} 100 | surface1 gx.Color = gx.Color{188, 192, 204, 255} 101 | surface2 gx.Color = gx.Color{172, 176, 190, 255} 102 | overlay0 gx.Color = gx.Color{156, 160, 176, 255} 103 | overlay1 gx.Color = gx.Color{140, 143, 161, 255} 104 | overlay2 gx.Color = gx.Color{124, 127, 147, 255} 105 | subtext0 gx.Color = gx.Color{108, 111, 133, 255} 106 | subtext1 gx.Color = gx.Color{92, 95, 119, 255} 107 | text gx.Color = gx.Color{76, 79, 105, 255} 108 | lavender gx.Color = gx.Color{114, 135, 253, 255} 109 | blue gx.Color = gx.Color{30, 102, 245, 255} 110 | sapphire gx.Color = gx.Color{32, 159, 181, 255} 111 | sky gx.Color = gx.Color{4, 165, 229, 255} 112 | teal gx.Color = gx.Color{23, 146, 153, 255} 113 | green gx.Color = gx.Color{64, 160, 43, 255} 114 | yellow gx.Color = gx.Color{223, 142, 29, 255} 115 | peach gx.Color = gx.Color{254, 100, 11, 255} 116 | maroon gx.Color = gx.Color{230, 69, 83, 255} 117 | red gx.Color = gx.Color{210, 15, 57, 255} 118 | mauve gx.Color = gx.Color{136, 57, 239, 255} 119 | pink gx.Color = gx.Color{234, 118, 203, 255} 120 | flamingo gx.Color = gx.Color{221, 120, 120, 255} 121 | rosewater gx.Color = gx.Color{220, 138, 120, 255} 122 | } 123 | 124 | pub interface Clickable { 125 | mut: 126 | id int 127 | x f32 128 | y f32 129 | shape Area 130 | click_func fn (mut g Gui) 131 | render(mut g Gui, x_offset f32, y_offset f32) 132 | } 133 | 134 | pub interface Area { 135 | mut: 136 | width f32 137 | height f32 138 | relative_pos Pos 139 | } 140 | 141 | fn (a Area) offset() (f32, f32) { 142 | return match a.relative_pos { 143 | .center {-a.width/2, -a.height/2} 144 | .left {0, -a.height/2} 145 | .right {-a.width, -a.height/2} 146 | .top {-a.width/2, 0} 147 | .bottom {-a.width/2, a.height} 148 | .top_left {0, 0} 149 | .top_right {-a.width, 0} 150 | .bottom_left {0, a.height} 151 | .bottom_right {a.width, a.height} // fixed: was {a.height, a.width} (swapped), which misaligned click detection for non-square bottom_right shapes; render() below shifts x by width and y by height 152 | } 153 | } 154 | 155 | pub fn (a Area) render(mut g Gui, x_offset f32, y_offset f32, color gx.Color) { 156 | mut x_coo := x_offset 157 | mut y_coo := y_offset 158 | match a.relative_pos { 159 | .center { 160 | x_coo -= a.width/2 161 | y_coo -= a.height/2 162 | } 163 | .left {y_coo -= a.height/2} 
164 | .right { 165 | x_coo -= a.width 166 | y_coo -= a.height/2 167 | } 168 | .top {x_coo -= a.width/2} 169 | .bottom { 170 | x_coo -= a.width/2 171 | y_coo += a.height 172 | } 173 | .top_left {} 174 | .top_right { 175 | x_coo -= a.width 176 | } 177 | .bottom_left { 178 | y_coo += a.height 179 | } 180 | .bottom_right { 181 | y_coo += a.height 182 | x_coo += a.width 183 | } 184 | } 185 | match a { 186 | RoundedShape {g.gg.draw_rounded_rect_filled(x_coo, y_coo, a.width, a.height, a.rounded, color)} 187 | else {g.gg.draw_rect_filled(x_coo, y_coo, a.width, a.height, color)} 188 | } 189 | } 190 | 191 | pub interface Element { 192 | mut: 193 | id int 194 | x f32 195 | y f32 196 | render(mut g Gui, x_offset f32, y_offset f32) 197 | } 198 | 199 | pub enum Pos { 200 | center 201 | right 202 | left 203 | top 204 | bottom 205 | top_right 206 | top_left 207 | bottom_right 208 | bottom_left 209 | } 210 | 211 | pub struct RoundedShape { 212 | pub mut: 213 | width f32 214 | height f32 215 | rounded int 216 | relative_pos Pos 217 | } 218 | 219 | pub struct Shape { 220 | pub mut: 221 | width f32 222 | height f32 223 | relative_pos Pos 224 | } -------------------------------------------------------------------------------- /ggui/rect.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | import gx 3 | 4 | pub struct Rect { 5 | pub mut: 6 | id int 7 | x f32 8 | y f32 9 | shape Area 10 | color gx.Color 11 | } 12 | 13 | fn (r Rect) render(mut g Gui, x_offset f32, y_offset f32) { 14 | r.shape.render(mut g, r.x, r.y, r.color) 15 | } 16 | -------------------------------------------------------------------------------- /ggui/text.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | import gx 3 | 4 | @[heap] 5 | pub struct Text { 6 | pub mut: 7 | id int 8 | x f32 9 | y f32 10 | text string 11 | cfg gx.TextCfg 12 | } 13 | 14 | pub fn (t Text) render(mut g Gui, x_offset f32, y_offset f32) { 15 | if t.text != "" { 16 | g.gg.draw_text(int(t.x + x_offset), int(t.y + y_offset), t.text, t.cfg) 17 | } 18 | } -------------------------------------------------------------------------------- /ggui/utilities.v: -------------------------------------------------------------------------------- 1 | module ggui 2 | 3 | pub fn in_range[T](x T, y T, x_start T, y_start T, x_end T, y_end T) bool { 4 | return x >= x_start && x < x_end && y >= y_start && y < y_end 5 | } -------------------------------------------------------------------------------- /la/blas.v: -------------------------------------------------------------------------------- 1 | module la 2 | 3 | import vblas 4 | import math 5 | 6 | // TODO: @ulises-jeremias to remove this once https://github.com/vlang/v/issues/14047 is finished 7 | fn arr_to_f64arr[T](arr []T) []f64 { 8 | mut ret := []f64{cap: arr.len} 9 | for v in arr { 10 | ret << f64(v) 11 | } 12 | return ret 13 | } 14 | 15 | /* 16 | * vector_rms_error returns the scaled root-mean-square of the difference between two vectors 17 | * with components normalised by a scaling factor 18 | * __________________________ 19 | * / ———— 2 20 | * / 1 \ / error[i] \ 21 | * rms = \ / ——— / | —————————— | 22 | * \/ N ———— \ scale[i] / 23 | * 24 | * error[i] = |u[i] - v[i]| 25 | * 26 | * scale[i] = a + m*|s[i]| 27 | */ 28 | pub fn vector_rms_error[T](u []T, v []T, a T, m T, s []T) T { 29 | mut rms := T{} 30 | for i in 0 .. 
u.len { 31 | scale := a + m * math.abs(s[i]) 32 | err := math.abs(u[i] - v[i]) 33 | rms += err * err / (scale * scale) 34 | } 35 | return T(math.sqrt(f64(rms) / f64(u.len))) 36 | } 37 | 38 | // vector_dot returns the dot product between two vectors: 39 | // s := u・v 40 | pub fn vector_dot[T](u []T, v []T) T { 41 | $if T is f64 { 42 | mut res := T{} 43 | cutoff := 150 44 | if u.len <= cutoff { 45 | for i in 0 .. u.len { 46 | res += u[i] * v[i] 47 | } 48 | return res 49 | } 50 | return vblas.ddot(u.len, arr_to_f64arr[T](u), 1, arr_to_f64arr[T](v), 1) 51 | } $else { 52 | mut res := T{} 53 | for i in 0 .. u.len { 54 | res += u[i] * v[i] 55 | } 56 | return res 57 | } 58 | } 59 | 60 | // vector_add adds the scaled components of two vectors 61 | // res := alpha⋅u + beta⋅v ⇒ result[i] := alpha⋅u[i] + beta⋅v[i] 62 | pub fn vector_add[T](alpha T, u []T, beta T, v []T) []T { 63 | $if T is f64 { 64 | mut res := []f64{len: v.len} 65 | n := u.len 66 | cutoff := 150 67 | if beta == 1 && n > cutoff { 68 | res = v.clone() 69 | vblas.daxpy(n, alpha, arr_to_f64arr(u), 1, mut res, 1) 70 | return res 71 | } 72 | m := n % 4 73 | for i in 0 .. m { 74 | res[i] = alpha * u[i] + beta * v[i] 75 | } 76 | for i := m; i < n; i += 4 { 77 | res[i + 0] = alpha * u[i + 0] + beta * v[i + 0] 78 | res[i + 1] = alpha * u[i + 1] + beta * v[i + 1] 79 | res[i + 2] = alpha * u[i + 2] + beta * v[i + 2] 80 | res[i + 3] = alpha * u[i + 3] + beta * v[i + 3] 81 | } 82 | return res 83 | } $else { 84 | mut res := []T{len: v.len} 85 | n := u.len 86 | m := n % 4 87 | for i in 0 .. m { 88 | res[i] = alpha * u[i] + beta * v[i] 89 | } 90 | for i := m; i < n; i += 4 { 91 | res[i + 0] = alpha * u[i + 0] + beta * v[i + 0] 92 | res[i + 1] = alpha * u[i + 1] + beta * v[i + 1] 93 | res[i + 2] = alpha * u[i + 2] + beta * v[i + 2] 94 | res[i + 3] = alpha * u[i + 3] + beta * v[i + 3] 95 | } 96 | return res 97 | } 98 | } 99 | 100 | // vector_max_diff returns the maximum absolute difference between two vectors 101 | // maxdiff = max(|u - v|) 102 | pub fn vector_max_diff[T](u []T, v []T) T { 103 | mut maxdiff := math.abs(u[0] - v[0]) 104 | for i := 1; i < u.len; i++ { 105 | diff := math.abs(u[i] - v[i]) 106 | if diff > maxdiff { 107 | maxdiff = diff 108 | } 109 | } 110 | return maxdiff 111 | } 112 | 113 | // vector_scale_abs creates a "scale" vector using the absolute value of another vector 114 | // scale := a + m ⋅ |x| ⇒ scale[i] := a + m ⋅ |x[i]| 115 | pub fn vector_scale_abs[T](a T, m T, x []T) []T { 116 | mut scale := []T{len: x.len} 117 | for i in 0 .. x.len { 118 | scale[i] = a + m * math.abs(x[i]) 119 | } 120 | return scale 121 | } 122 | 123 | // matrix_vector_mul returns the matrix-vector multiplication 124 | // 125 | // v = alpha⋅a⋅u ⇒ vi = alpha * aij * uj 126 | // 127 | pub fn matrix_vector_mul[T](alpha T, a &Matrix[T], u []T) []T { 128 | $if T is f64 { 129 | mut v := []f64{len: a.m} 130 | if a.m < 9 && a.n < 9 { 131 | for i in 0 .. a.m { 132 | v[i] = 0.0 133 | for j := 0; j < a.n; j++ { 134 | v[i] += alpha * a.get(i, j) * u[j] 135 | } 136 | } 137 | return v 138 | } 139 | vblas.dgemv(.no_trans, a.m, a.n, alpha, arr_to_f64arr[T](a.data), a.n, arr_to_f64arr[T](u), 140 | 1, 0.0, mut v, 1) 141 | return v 142 | } $else { 143 | mut v := []T{len: a.m} 144 | for i in 0 .. 
a.m { 145 | v[i] = 0.0 146 | for j := 0; j < a.n; j++ { 147 | v[i] += alpha * a.get(i, j) * u[j] 148 | } 149 | } 150 | return v 151 | } 152 | } 153 | 154 | // matrix_tr_vector_mul returns the transpose(matrix)-vector multiplication 155 | // 156 | // v = alpha⋅aᵀ⋅u ⇒ vi = alpha * aji * uj = alpha * uj * aji 157 | // 158 | pub fn matrix_tr_vector_mul[T](alpha T, a &Matrix[T], u []T) []T { 159 | $if T is f64 { 160 | mut v := []f64{len: a.n} 161 | if a.m < 9 && a.n < 9 { 162 | for i in 0 .. a.n { 163 | v[i] = 0.0 164 | for j := 0; j < a.m; j++ { 165 | v[i] += alpha * a.get(j, i) * u[j] 166 | } 167 | } 168 | return v 169 | } 170 | vblas.dgemv(.trans, a.m, a.n, alpha, arr_to_f64arr[T](a.data), a.n, arr_to_f64arr[T](u), 171 | 1, 0.0, mut v, 1) 172 | return v 173 | } $else { 174 | mut v := []T{len: a.n} 175 | for i in 0 .. a.n { 176 | v[i] = 0.0 177 | for j := 0; j < a.m; j++ { 178 | v[i] += alpha * a.get(j, i) * u[j] 179 | } 180 | } 181 | return v 182 | } 183 | } 184 | 185 | // vector_vector_tr_mul returns the matrix = vector-transpose(vector) multiplication 186 | // (e.g. dyadic product) 187 | // 188 | // a = alpha⋅u⋅vᵀ ⇒ aij = alpha * ui * vj 189 | // 190 | pub fn vector_vector_tr_mul[T](alpha T, u []T, v []T) &Matrix[T] { 191 | $if T is f64 { 192 | mut m := Matrix.new[f64](u.len, v.len) 193 | if m.m < 9 && m.n < 9 { 194 | for i in 0 .. m.m { 195 | for j in 0 .. m.n { 196 | m.set(i, j, alpha * u[i] * v[j]) 197 | } 198 | } 199 | return m 200 | } 201 | mut a := []f64{len: u.len * v.len} 202 | vblas.dger(m.m, m.n, alpha, arr_to_f64arr[T](u), 1, arr_to_f64arr[T](v), 1, mut 203 | a, m.n) 204 | return Matrix.raw(u.len, v.len, a) 205 | } $else { 206 | mut m := Matrix.new[T](u.len, v.len) 207 | 208 | for i in 0 .. m.m { 209 | for j in 0 .. m.n { 210 | m.set(i, j, alpha * u[i] * v[j]) 211 | } 212 | } 213 | return m 214 | } 215 | } 216 | 217 | // matrix_vector_mul_add returns the matrix-vector multiplication with addition 218 | // 219 | // v += alpha⋅a⋅u ⇒ vi += alpha * aij * uj 220 | // 221 | pub fn matrix_vector_mul_add(alpha f64, a &Matrix[f64], u []f64) []f64 { 222 | mut v := []f64{len: a.m} 223 | vblas.dgemv(.no_trans, a.m, a.n, alpha, a.data, a.n, u, 1, 1.0, mut v, 1) // fixed: lda must be a.n (row-major stride, as in matrix_vector_mul above) and incy must be 1, not v.len 224 | return v 225 | } 226 | 227 | // matrix_matrix_mul returns the matrix multiplication (scaled) 228 | // 229 | // c := alpha⋅a⋅b ⇒ cij := alpha * aik * bkj 230 | // 231 | pub fn matrix_matrix_mul(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 232 | if c.m < 6 && c.n < 6 && a.n < 30 { 233 | for i in 0 .. c.m { 234 | for j := 0; j < c.n; j++ { 235 | c.set(i, j, 0.0) 236 | for k := 0; k < a.n; k++ { 237 | c.add(i, j, alpha * a.get(i, k) * b.get(k, j)) 238 | } 239 | } 240 | } 241 | return 242 | } 243 | vblas.dgemm(.no_trans, .no_trans, a.m, b.n, a.n, alpha, a.data, a.n, b.data, b.m, 244 | 0.0, mut c.data, c.m) // fixed: lda was a.m; every other dgemm call in this file passes a.n for a non-transposed a 245 | } 246 | 247 | // matrix_tr_matrix_mul returns the matrix multiplication (scaled) with transposed(a) 248 | // 249 | // c := alpha⋅aᵀ⋅b ⇒ cij := alpha * aki * bkj 250 | // 251 | pub fn matrix_tr_matrix_mul(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 252 | if c.m < 6 && c.n < 6 && a.m < 30 { 253 | for i in 0 .. 
c.m { 254 | for j := 0; j < c.n; j++ { 255 | c.set(i, j, 0.0) 256 | for k := 0; k < a.m; k++ { 257 | c.add(i, j, alpha * a.get(k, i) * b.get(k, j)) 258 | } 259 | } 260 | } 261 | return 262 | } 263 | vblas.dgemm(.trans, .no_trans, a.n, b.n, a.m, alpha, a.data, a.n, b.data, b.m, 0.0, mut 264 | c.data, c.m) 265 | } 266 | 267 | // matrix_matrix_tr_mul returns the matrix multiplication (scaled) with transposed(b) 268 | // 269 | // c := alpha⋅a⋅bᵀ ⇒ cij := alpha * aik * bjk 270 | // 271 | pub fn matrix_matrix_tr_mul(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 272 | vblas.dgemm(.no_trans, .trans, a.m, b.m, a.n, alpha, a.data, a.n, b.data, b.m, 0.0, mut 273 | c.data, c.m) 274 | } 275 | 276 | // matrix_tr_matrix_tr_mul returns the matrix multiplication (scaled) with transposed(a) and transposed(b) 277 | // 278 | // c := alpha⋅aᵀ⋅bᵀ ⇒ cij := alpha * aki * bjk 279 | // 280 | pub fn matrix_tr_matrix_tr_mul(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 281 | vblas.dgemm(.trans, .trans, a.n, b.m, a.m, alpha, a.data, a.n, b.data, b.m, 0.0, mut 282 | c.data, c.m) 283 | } 284 | 285 | // matrix_matrix_muladd returns the matrix multiplication (scaled) 286 | // 287 | // c += alpha⋅a⋅b ⇒ cij += alpha * aik * bkj 288 | // 289 | pub fn matrix_matrix_muladd(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 290 | vblas.dgemm(.no_trans, .no_trans, a.m, b.n, a.n, alpha, a.data, a.n, b.data, b.m, 291 | 1.0, mut c.data, c.m) 292 | } 293 | 294 | // matrix_tr_matrix_muladd returns the matrix multiplication (scaled) with transposed(a) 295 | // 296 | // c += alpha⋅aᵀ⋅b ⇒ cij += alpha * aki * bkj 297 | // 298 | pub fn matrix_tr_matrix_muladd(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 299 | vblas.dgemm(.trans, .no_trans, a.n, b.n, a.m, alpha, a.data, a.n, b.data, b.m, 1.0, mut 300 | c.data, c.m) 301 | } 302 | 303 | // matrix_matrix_tr_muladd returns the matrix multiplication (scaled) with transposed(b) 304 | // 305 | // c += alpha⋅a⋅bᵀ ⇒ cij += alpha * aik * bjk 306 | // 307 | pub fn matrix_matrix_tr_muladd(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 308 | vblas.dgemm(.no_trans, .trans, a.m, b.m, a.n, alpha, a.data, a.n, b.data, b.m, 1.0, mut 309 | c.data, c.m) 310 | } 311 | 312 | // matrix_tr_matrix_tr_mul_add returns the matrix multiplication (scaled) with transposed(a) and transposed(b) 313 | // 314 | // c += alpha⋅aᵀ⋅bᵀ ⇒ cij += alpha * aki * bjk 315 | // 316 | pub fn matrix_tr_matrix_tr_mul_add(mut c Matrix[f64], alpha f64, a &Matrix[f64], b &Matrix[f64]) { 317 | vblas.dgemm(.trans, .trans, a.n, b.m, a.m, alpha, a.data, a.n, b.data, b.m, 1.0, mut 318 | c.data, c.m) 319 | } 320 | 321 | // matrix_add adds the scaled components of two matrices 322 | // res := alpha⋅a + beta⋅b ⇒ result[i][j] := alpha⋅a[i][j] + beta⋅b[i][j] 323 | pub fn matrix_add(mut res Matrix[f64], alpha f64, a &Matrix[f64], beta f64, b &Matrix[f64]) { 324 | n := a.data.len // treating these matrices as vectors 325 | cutoff := 150 326 | if beta == 1 && n > cutoff { 327 | res.data = b.data.clone() 328 | vblas.daxpy(n, alpha, a.data, 1, mut res.data, 1) 329 | return 330 | } 331 | m := n % 4 332 | for i in 0 .. 
m { 333 | res.data[i] = alpha * a.data[i] + beta * b.data[i] 334 | } 335 | for i := m; i < n; i += 4 { 336 | res.data[i + 0] = alpha * a.data[i + 0] + beta * b.data[i + 0] 337 | res.data[i + 1] = alpha * a.data[i + 1] + beta * b.data[i + 1] 338 | res.data[i + 2] = alpha * a.data[i + 2] + beta * b.data[i + 2] 339 | res.data[i + 3] = alpha * a.data[i + 3] + beta * b.data[i + 3] 340 | } 341 | return 342 | } 343 | -------------------------------------------------------------------------------- /la/matrix.v: -------------------------------------------------------------------------------- 1 | module la 2 | 3 | import math 4 | import strconv 5 | import vsl.errors 6 | 7 | @[heap] 8 | pub struct Matrix[T] { 9 | pub mut: 10 | m int 11 | n int 12 | data []T 13 | } 14 | 15 | // Matrix.new allocates a new (empty) Matrix with given (m,n) (row/col sizes) 16 | pub fn Matrix.new[T](m int, n int) &Matrix[T] { 17 | data := []T{len: m * n} 18 | return &Matrix[T]{ 19 | m: m 20 | n: n 21 | data: data 22 | } 23 | } 24 | 25 | // Matrix.deep2 allocates a new Matrix from given (Deep2) nested slice. 26 | // NOTE: make sure to have at least 1x1 item 27 | pub fn Matrix.deep2[T](a [][]T) &Matrix[T] { 28 | mut o := Matrix.new[T](a.len, a[0].len) 29 | o.set_from_deep2(a) 30 | return o 31 | } 32 | 33 | // Matrix.raw creates a new Matrix using given raw data 34 | // Input: 35 | // rawdata -- data organized as row-major; e.g. C format (fixed: this previously said column-major/Fortran, but set/get below index row-major) 36 | // NOTE: 37 | // (1) rawdata is not copied! 38 | // (2) the external slice rawdata should not be changed or deleted 39 | pub fn Matrix.raw[T](m int, n int, rawdata []T) &Matrix[T] { 40 | return &Matrix[T]{ 41 | m: m 42 | n: n 43 | data: rawdata 44 | } 45 | } 46 | 47 | // set_from_deep2 sets matrix with data from a nested slice (Deep2) structure 48 | pub fn (mut o Matrix[T]) set_from_deep2(a [][]T) { 49 | mut k := 0 50 | for i in 0 .. o.m { 51 | for j in 0 .. o.n { 52 | o.data[k] = a[i][j] 53 | k++ 54 | } 55 | } 56 | } 57 | 58 | // set_diag sets diagonal matrix with diagonal components equal to val 59 | pub fn (mut o Matrix[T]) set_diag(val T) { 60 | for i in 0 .. o.m { 61 | for j in 0 .. o.n { 62 | if i == j { 63 | o.data[i * o.n + j] = val 64 | } else { 65 | o.data[i * o.n + j] = 0 66 | } 67 | } 68 | } 69 | } 70 | 71 | // set sets value 72 | pub fn (mut o Matrix[T]) set(i int, j int, val T) { 73 | o.data[i * o.n + j] = val // row-major 74 | } 75 | 76 | // get gets value 77 | pub fn (o &Matrix[T]) get(i int, j int) T { 78 | return o.data[i * o.n + j] // row-major 79 | } 80 | 81 | // get_deep2 returns nested slice representation 82 | pub fn (o &Matrix[T]) get_deep2() [][]T { 83 | mut m := [][]T{len: o.m, init: []T{len: o.n}} 84 | for i in 0 .. o.m { 85 | for j in 0 .. o.n { 86 | m[i][j] = o.data[i * o.n + j] 87 | } 88 | } 89 | return m 90 | } 91 | 92 | // clone returns a copy of this matrix 93 | pub fn (o &Matrix[T]) clone() &Matrix[T] { 94 | mut clone := Matrix.new[T](o.m, o.n) 95 | clone.data = o.data.clone() 96 | return clone 97 | } 98 | 99 | // transpose returns the transpose matrix 100 | pub fn (o &Matrix[T]) transpose() &Matrix[T] { 101 | mut tran := Matrix.new[T](o.n, o.m) 102 | for i := 0; i < o.n; i++ { 103 | for j := 0; j < o.m; j++ { 104 | tran.set(i, j, o.get(j, i)) 105 | } 106 | } 107 | return tran 108 | } 109 | 110 | // copy_into copies the scaled components of this matrix into another one (result) 111 | // result := alpha * this ⇒ result[ij] := alpha * this[ij] 112 | pub fn (o &Matrix[T]) copy_into(mut result Matrix[T], alpha T) { 113 | for k in 0 .. 
o.m * o.n { 114 | result.data[k] = alpha * o.data[k] 115 | } 116 | } 117 | 118 | // add adds value to (i,j) location 119 | pub fn (mut o Matrix[T]) add(i int, j int, val T) { 120 | o.data[i * o.n + j] += val // row-major 121 | } 122 | 123 | // fill fills this matrix with a single number val 124 | // aij = val 125 | pub fn (mut o Matrix[T]) fill(val T) { 126 | for k in 0 .. o.m * o.n { 127 | o.data[k] = val 128 | } 129 | } 130 | 131 | /* 132 | * clear_rc clear rows and columns and set diagonal components 133 | * _ _ _ _ 134 | * Example: | 1 2 3 4 | | 1 2 3 4 | 135 | * A = | 5 6 7 8 | ⇒ clear([1,2], [], 1.0) ⇒ A = | 0 1 0 0 | 136 | * |_ 4 3 2 1 _| |_ 0 0 1 0 _| 137 | */ 138 | pub fn (mut o Matrix[T]) clear_rc(rows []int, cols []int, diag T) { 139 | for r in rows { 140 | for j in 0 .. o.n { 141 | if r == j { 142 | o.set(r, j, diag) 143 | } else { 144 | o.set(r, j, 0.0) 145 | } 146 | } 147 | } 148 | for c in cols { 149 | for i in 0 .. o.m { 150 | if i == c { 151 | o.set(i, c, diag) 152 | } else { 153 | o.set(i, c, 0.0) 154 | } 155 | } 156 | } 157 | } 158 | 159 | /* 160 | * clear_bry clears boundaries 161 | * _ _ _ _ 162 | * Example: | 1 2 3 | | 1 0 0 | 163 | * A = | 4 5 6 | ⇒ clear(1.0) ⇒ A = | 0 5 0 | 164 | * |_ 7 8 9 _| |_ 0 0 1 _| 165 | */ 166 | pub fn (mut o Matrix[T]) clear_bry(diag T) { 167 | o.clear_rc([0, o.m - 1], [0, o.n - 1], diag) 168 | } 169 | 170 | // max_diff returns the maximum difference between the components of this and another matrix 171 | pub fn (o &Matrix[T]) max_diff(another &Matrix[T]) T { 172 | mut maxdiff := math.abs(o.data[0] - another.data[0]) 173 | for k := 1; k < o.m * o.n; k++ { 174 | diff := math.abs(o.data[k] - another.data[k]) 175 | if diff > maxdiff { 176 | maxdiff = diff 177 | } 178 | } 179 | return maxdiff 180 | } 181 | 182 | // largest returns the largest component |a[ij]| of this matrix, normalised by den 183 | // largest := |a[ij]| / den 184 | pub fn (o &Matrix[T]) largest(den T) T { 185 | mut largest := math.abs(o.data[0]) 186 | for k := 1; k < o.m * o.n; k++ { 187 | tmp := math.abs(o.data[k]) 188 | if tmp > largest { 189 | largest = tmp 190 | } 191 | } 192 | return largest / den 193 | } 194 | 195 | // col access column j of this matrix. No copies are made since the internal data are in 196 | // row-major format already. 197 | // NOTE: this method can be used to modify the columns; e.g. with o.col(0)[0] = 123 198 | @[inline] 199 | pub fn (o &Matrix[T]) col(j int) []T { 200 | return o.get_col(j) 201 | } 202 | 203 | // get_row returns row i of this matrix 204 | pub fn (o &Matrix[T]) get_row(i int) []T { 205 | return o.data[(i * o.n)..((i + 1) * o.n)] 206 | } 207 | 208 | // get_col returns column j of this matrix 209 | pub fn (o &Matrix[T]) get_col(j int) []T { 210 | mut col := []T{len: o.m} 211 | for i in 0 .. o.m { 212 | col[i] = o.data[i * o.n + j] 213 | } 214 | return col 215 | } 216 | 217 | // extract_cols returns columns from j=start to j=endp1-1 218 | // start -- first column 219 | // endp1 -- "end-plus-one", the number of the last requested column + 1 220 | pub fn (o &Matrix[T]) extract_cols(start int, endp1 int) !&Matrix[T] { 221 | if endp1 <= start { 222 | return errors.error("endp1 'end-plus-one' must be greater than start. start=${start}, endp1=${endp1} invalid", 223 | .efailed) 224 | } 225 | ncol := endp1 - start 226 | mut reduced := Matrix.new[T](o.m, ncol) 227 | for i in 0 .. 
o.m { 228 | for j := 0; j < ncol; j++ { 229 | reduced.set(i, j, o.get(i, j + start)) 230 | } 231 | } 232 | return reduced 233 | } 234 | 235 | // extract_rows returns rows from i=start to i=endp1-1 236 | // start -- first row 237 | // endp1 -- "end-plus-one", the number of the last requested row + 1 238 | pub fn (o &Matrix[T]) extract_rows(start int, endp1 int) !&Matrix[T] { 239 | if endp1 <= start { 240 | return errors.error("endp1 'end-plus-one' must be greater than start. start=${start}, endp1=${endp1} invalid", 241 | .efailed) 242 | } 243 | nrow := endp1 - start 244 | mut reduced := Matrix.new[T](nrow, o.n) 245 | reduced.data = o.data[start * o.n..endp1 * o.n] // fixed: rows are o.n entries long in row-major storage (cf. get_row), not o.m 246 | return reduced 247 | } 248 | 249 | // set_row sets the values of a row i with a single value 250 | pub fn (mut o Matrix[T]) set_row(i int, value T) { 251 | for k := i * o.n; k < (i + 1) * o.n; k++ { // fixed: row length is o.n, not o.m 252 | o.data[k] = value 253 | } 254 | } 255 | 256 | // set_col sets the values of a column j with a single value 257 | pub fn (mut o Matrix[T]) set_col(j int, value T) { 258 | for i in 0 .. o.m { 259 | o.data[i * o.n + j] = value 260 | } 261 | } 262 | 263 | // split_by_col splits this matrix into two matrices at column j 264 | // j -- column index 265 | pub fn (o &Matrix[T]) split_by_col(j int) !(&Matrix[T], &Matrix[T]) { 266 | if j < 0 || j >= o.n { 267 | return errors.error('j=${j} must be in range [0, ${o.n})', .efailed) 268 | } 269 | mut left := Matrix.new[T](o.m, j) 270 | mut right := Matrix.new[T](o.m, o.n - j) 271 | for i in 0 .. o.m { 272 | for k := 0; k < j; k++ { 273 | left.set(i, k, o.get(i, k)) 274 | } 275 | for k := j; k < o.n; k++ { 276 | right.set(i, k - j, o.get(i, k)) 277 | } 278 | } 279 | return left, right 280 | } 281 | 282 | // split_by_row splits this matrix into two matrices at row i 283 | // i -- row index 284 | pub fn (o &Matrix[T]) split_by_row(i int) !(&Matrix[T], &Matrix[T]) { 285 | if i < 0 || i >= o.m { 286 | return errors.error('i=${i} must be in range [0, ${o.m})', .efailed) 287 | } 288 | mut top := Matrix.new[T](i, o.n) 289 | mut bottom := Matrix.new[T](o.m - i, o.n) 290 | for j in 0 .. o.n { 291 | for k := 0; k < i; k++ { 292 | top.set(k, j, o.get(k, j)) 293 | } 294 | for k := i; k < o.m; k++ { 295 | bottom.set(k - i, j, o.get(k, j)) 296 | } 297 | } 298 | return top, bottom 299 | } 300 | 301 | // norm_frob returns the Frobenius norm of this matrix 302 | // nrm := ‖a‖_F = sqrt(Σ_i Σ_j a[ij]⋅a[ij]) = ‖a‖_2 303 | pub fn (o &Matrix[T]) norm_frob() T { 304 | mut nrm := 0.0 305 | for k in 0 .. o.m * o.n { 306 | nrm += o.data[k] * o.data[k] 307 | } 308 | return math.sqrt(nrm) 309 | } 310 | 311 | // norm_inf returns the infinite norm of this matrix 312 | // nrm := ‖a‖_∞ = max_i ( Σ_j a[ij] ) 313 | pub fn (o &Matrix[T]) norm_inf() T { 314 | mut nrm := 0.0 315 | for i := 0; i < o.n; i++ { // sum first row 316 | nrm += math.abs(o.data[i]) 317 | } 318 | mut sumrow := 0.0 319 | for i := 1; i < o.m; i++ { 320 | sumrow = 0.0 321 | for j in 0 .. o.n { // sum the other rows 322 | sumrow += math.abs(o.data[i * o.n + j]) 323 | if sumrow > nrm { 324 | nrm = sumrow 325 | } 326 | } 327 | } 328 | return nrm 329 | } 330 | 331 | // apply sets this matrix with the scaled components of another matrix 332 | // this := alpha * another ⇒ this[i] := alpha * another[i] 333 | // NOTE: "another" may be "this" 334 | pub fn (mut o Matrix[T]) apply(alpha T, another &Matrix[T]) { 335 | for k in 0 .. 
o.m * o.n { 336 | o.data[k] = alpha * another.data[k] 337 | } 338 | } 339 | 340 | // equals returns true if this matrix is equal to another matrix 341 | // this == another ⇒ this[i] == another[i] 342 | // NOTE: "another" may be "this" 343 | pub fn (o &Matrix[T]) equals(another &Matrix[T]) bool { 344 | for k in 0 .. o.m * o.n { 345 | if o.data[k] != another.data[k] { 346 | return false 347 | } 348 | } 349 | return true 350 | } 351 | 352 | pub fn (o &Matrix[T]) str() string { 353 | return o.print('') 354 | } 355 | 356 | // print prints matrix (without commas or brackets) 357 | pub fn (o &Matrix[T]) print(nfmt_ string) string { 358 | mut nfmt := nfmt_ 359 | if nfmt == '' { 360 | nfmt = '%g ' 361 | } 362 | mut l := '' 363 | for i in 0 .. o.m { 364 | if i > 0 { 365 | l += '\n' 366 | } 367 | for j in 0 .. o.n { 368 | l += safe_print(nfmt, o.get(i, j)) 369 | } 370 | } 371 | return l 372 | } 373 | 374 | // print_v prints matrix in V format 375 | pub fn (o &Matrix[T]) print_v(nfmt_ string) string { 376 | mut nfmt := nfmt_ 377 | if nfmt == '' { 378 | nfmt = '%10g' 379 | } 380 | mut l := '[][]${T.name}{\n' 381 | for i in 0 .. o.m { 382 | l += ' {' 383 | for j in 0 .. o.n { 384 | if j > 0 { 385 | l += ',' 386 | } 387 | l += safe_print(nfmt, o.get(i, j)) 388 | } 389 | l += '},\n' 390 | } 391 | l += '}' 392 | return l 393 | } 394 | 395 | // print_py prints matrix in Python format 396 | pub fn (o &Matrix[T]) print_py(nfmt_ string) string { 397 | mut nfmt := nfmt_ 398 | if nfmt == '' { 399 | nfmt = '%10g' 400 | } 401 | mut l := 'np.matrix([\n' 402 | for i in 0 .. o.m { 403 | l += ' [' 404 | for j in 0 .. o.n { 405 | if j > 0 { 406 | l += ',' 407 | } 408 | l += safe_print(nfmt, o.get(i, j)) 409 | } 410 | l += '],\n' 411 | } 412 | l += '], dtype=float)' 413 | return l 414 | } 415 | 416 | @[inline] 417 | pub fn safe_print[T](format string, message T) string { 418 | return unsafe { strconv.v_sprintf(format, message) } 419 | } 420 | -------------------------------------------------------------------------------- /la/matrix_ops.v: -------------------------------------------------------------------------------- 1 | module la 2 | 3 | import vsl.errors 4 | import math 5 | 6 | // det computes the determinant of matrix using the LU factorization 7 | // NOTE: this method may fail due to overflow... 8 | /* 9 | pub fn matrix_det(o &Matrix[f64]) f64 { 10 | if o.m != o.n { 11 | errors.vsl_panic('matrix must be square to compute determinant. ${o.m} x ${o.n} is invalid\n', 12 | .efailed) 13 | } 14 | mut ai := o.data.clone() 15 | ipiv := []int{len: int(math.min(o.m, o.n))} 16 | vblas.dgetrf(o.m, o.n, mut ai, o.m, ipiv) // NOTE: ipiv are 1-based indices 17 | mut det := 1.0 18 | for i in 0 .. o.m { 19 | if ipiv[i] - 1 == i { // NOTE: ipiv are 1-based indices 20 | det = det * ai[i + i * o.m] 21 | } else { 22 | det = -det * ai[i + i * o.m] 23 | } 24 | } 25 | return det 26 | } 27 | */ 28 | 29 | // matrix_inv_small computes the inverse of small matrices of size 1x1, 2x2, or 3x3. 30 | // It also returns the determinant. 
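// Hedged usage sketch (added for illustration; not in the original source, and
// the 2x2 values below are made up):
//
//	a := Matrix.deep2([[4.0, 7.0], [2.0, 6.0]])
//	mut ai := Matrix.new[f64](2, 2)
//	det := matrix_inv_small(mut ai, a, 1e-12)
//	// det == 10.0 and ai == [[0.6, -0.7], [-0.2, 0.4]]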
31 | // Input: 32 | // a -- the matrix 33 | // tol -- tolerance to assume zero determinant 34 | // Output: 35 | // ai -- the inverse matrix 36 | // det -- determinant of a 37 | pub fn matrix_inv_small(mut ai Matrix[f64], a Matrix[f64], tol f64) f64 { 38 | mut det := 0.0 39 | if a.m == 1 && a.n == 1 { 40 | det = a.get(0, 0) 41 | if math.abs(det) < tol { 42 | errors.vsl_panic('inverse of (${a.m} x ${a.n}) matrix failed with zero determinant: |det(a)| = ${det} < ${tol}', 43 | .efailed) 44 | } 45 | ai.set(0, 0, 1.0 / det) 46 | } else if a.m == 2 && a.n == 2 { 47 | det = a.get(0, 0) * a.get(1, 1) - (a.get(0, 1)) * a.get(1, 0) 48 | if math.abs(det) < tol { 49 | errors.vsl_panic('inverse of (${a.m} x ${a.n}) matrix failed with zero determinant: |det(a)| = ${det} < ${tol}', 50 | .efailed) 51 | } 52 | ai.set(0, 0, a.get(1, 1) / det) 53 | ai.set(0, 1, -(a.get(0, 1)) / det) 54 | ai.set(1, 0, -(a.get(1, 0)) / det) 55 | ai.set(1, 1, a.get(0, 0) / det) 56 | } else if a.m == 3 && a.n == 3 { 57 | det = 58 | a.get(0, 0) * (a.get(1, 1) * a.get(2, 2) - a.get(1, 2) * a.get(2, 1)) - (a.get(0, 1)) * (a.get(1, 0) * a.get(2, 2) - a.get(1, 2) * a.get(2, 0)) + 59 | a.get(0, 2) * (a.get(1, 0) * a.get(2, 1) - a.get(1, 1) * a.get(2, 0)) 60 | if math.abs(det) < tol { 61 | errors.vsl_panic('inverse of (${a.m} x ${a.n}) matrix failed with zero determinant: |det(a)| = ${det} < ${tol}', 62 | .efailed) 63 | } 64 | ai.set(0, 0, (a.get(1, 1) * a.get(2, 2) - a.get(1, 2) * a.get(2, 1)) / det) 65 | ai.set(0, 1, (a.get(0, 2) * a.get(2, 1) - a.get(0, 1) * a.get(2, 2)) / det) 66 | ai.set(0, 2, (a.get(0, 1) * a.get(1, 2) - a.get(0, 2) * a.get(1, 1)) / det) 67 | ai.set(1, 0, (a.get(1, 2) * a.get(2, 0) - a.get(1, 0) * a.get(2, 2)) / det) 68 | ai.set(1, 1, (a.get(0, 0) * a.get(2, 2) - a.get(0, 2) * a.get(2, 0)) / det) 69 | ai.set(1, 2, (a.get(0, 2) * a.get(1, 0) - a.get(0, 0) * a.get(1, 2)) / det) 70 | ai.set(2, 0, (a.get(1, 0) * a.get(2, 1) - a.get(1, 1) * a.get(2, 0)) / det) 71 | ai.set(2, 1, (a.get(0, 1) * a.get(2, 0) - a.get(0, 0) * a.get(2, 1)) / det) 72 | ai.set(2, 2, (a.get(0, 0) * a.get(1, 1) - a.get(0, 1) * a.get(1, 0)) / det) 73 | } else { 74 | errors.vsl_panic('cannot compute inverse of (${a.m} x ${a.n}) matrix with this function', 75 | .efailed) 76 | } 77 | return det 78 | } 79 | 80 | // matrix_svd performs the SVD decomposition 81 | // Input: 82 | // a -- matrix a 83 | // copy_a -- creates a copy of a; otherwise 'a' is modified 84 | // Output: 85 | // s -- diagonal terms [must be pre-allocated] s.len = imin(a.m, a.n) 86 | // u -- left matrix [must be pre-allocated] u is (a.m x a.m) 87 | // vt -- transposed right matrix [must be pre-allocated] vt is (a.n x a.n) 88 | /* 89 | pub fn matrix_svd(mut s []f64, mut u Matrix[f64], mut vt Matrix[f64], mut a Matrix[f64], copy_a bool) { 90 | superb := []f64{len: int(math.min(a.m, a.n))} 91 | mut acpy := unsafe { &Matrix[f64](a) } 92 | if copy_a { 93 | acpy = a.clone() 94 | } 95 | vblas.dgesvd(&char('A'.str), &char('A'.str), a.m, a.n, acpy.data, 1, s, u.data, a.m, 96 | vt.data, a.n, superb) 97 | } 98 | */ 99 | 100 | // matrix_inv computes the inverse of a general matrix (square or not). It also computes the 101 | // pseudo-inverse if the matrix is not square. 
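// Added note: for a square, non-singular input the (commented-out) implementation
// below takes the LU route (dgetrf/dgetri); for a rectangular input it assembles the
// Moore-Penrose pseudo-inverse from the SVD, ai = V ⋅ Σ⁺ ⋅ Uᵀ, skipping singular
// values below tol_s.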
102 | // Input: 103 | // a -- input matrix (M x N) 104 | // Output: 105 | // ai -- inverse matrix (N x M) 106 | // det -- determinant of matrix (ONLY if calc_det == true and the matrix is square) 107 | // NOTE: the dimension of the ai matrix must be N x M for the pseudo-inverse 108 | /* 109 | pub fn matrix_inv(mut ai Matrix[f64], mut a Matrix[f64], calc_det bool) f64 { 110 | mut det := 0.0 111 | // square inverse 112 | if a.m == a.n { 113 | ai.data = a.data.clone() 114 | ipiv := []int{len: int(math.min(a.m, a.n))} 115 | vblas.dgetrf(a.m, a.n, mut ai.data, a.m, ipiv) // NOTE: ipiv are 1-based indices 116 | if calc_det { 117 | det = 1.0 118 | for i := 0; i < a.m; i++ { 119 | if ipiv[i] - 1 == i { // NOTE: ipiv are 1-based indices 120 | det = math.abs(det) * ai.get(i, i) 121 | } else { 122 | det = -det * ai.get(i, i) 123 | } 124 | } 125 | } 126 | vblas.dgetri(a.n, mut ai.data, a.m, ipiv) 127 | return det 128 | } 129 | // singular value decomposition 130 | mut s := []f64{len: int(math.min(a.m, a.n))} 131 | mut u := Matrix.new[f64](a.m, a.m) 132 | mut vt := Matrix.new[f64](a.n, a.n) 133 | matrix_svd(mut s, mut u, mut vt, mut a, true) 134 | // pseudo inverse 135 | tol_s := 1e-8 // TODO: improve this tolerance with a better estimate 136 | for i := 0; i < a.n; i++ { 137 | for j := 0; j < a.m; j++ { 138 | ai.set(i, j, 0) 139 | for k := 0; k < s.len; k++ { 140 | if s[k] > tol_s { 141 | ai.add(i, j, vt.get(k, i) * u.get(j, k) / s[k]) 142 | } 143 | } 144 | } 145 | } 146 | return det 147 | } 148 | */ 149 | 150 | // matrix_cond_num returns the condition number of a square matrix using the inverse of this matrix; 151 | // thus it is not as efficient as it could be, e.g. by using the SV decomposition. 152 | // normtype -- Type of norm to use: 153 | // "I" => Infinite 154 | // "F" or "" (default) => Frobenius 155 | /* 156 | pub fn matrix_cond_num(mut a Matrix[f64], normtype string) f64 { 157 | mut res := 0.0 158 | mut ai := Matrix.new[f64](a.m, a.n) 159 | matrix_inv(mut ai, mut a, false) 160 | if normtype == 'I' { 161 | res = a.norm_inf() * ai.norm_inf() 162 | return res 163 | } 164 | res = a.norm_frob() * ai.norm_frob() 165 | return res 166 | } 167 | */ -------------------------------------------------------------------------------- /la/vector.v: -------------------------------------------------------------------------------- 1 | module la 2 | 3 | import math 4 | 5 | // vector_apply sets this []T with the scaled components of another []T 6 | // this := a * another ⇒ this[i] := a * another[i] 7 | // NOTE: "another" may be "this" 8 | pub fn vector_apply[T](mut o []T, a T, another []T) { 9 | for i in 0 .. o.len { 10 | o[i] = a * another[i] 11 | } 12 | } 13 | 14 | // vector_apply_func runs a function over all components of a []T 15 | // vi = f(i,vi) 16 | pub fn vector_apply_func[T](mut o []T, f fn (i int, x T) T) { 17 | for i in 0 .. o.len { 18 | o[i] = f(i, o[i]) 19 | } 20 | } 21 | 22 | // vector_unit returns the unit []f64 parallel to this []f64 23 | // b := a / norm(a) 24 | pub fn vector_unit(mut o []f64) []f64 { 25 | mut unit := []f64{len: o.len} 26 | s := vector_norm(o) 27 | if s > 0 { 28 | vector_apply(mut unit, 1.0 / s, o) 29 | } 30 | return unit 31 | } 32 | 33 | // vector_accum sum/accumulates all components in a []T 34 | // sum := Σ_i v[i] 35 | pub fn vector_accum[T](o []T) T { 36 | mut sum := T{} 37 | for i in 0 .. 
o.len { 38 | sum += o[i] 39 | } 40 | return sum 41 | } 42 | 43 | // vector_norm returns the Euclidean norm of a []T: 44 | // nrm := ‖v‖ 45 | pub fn vector_norm(o []f64) f64 { 46 | return math.sqrt(vector_dot(o, o)) 47 | } 48 | 49 | // vector_rms returns the root-mean-square of this []T 50 | // 51 | pub fn vector_rms[T](o []T) T { 52 | mut rms := T{} 53 | for i in 0 .. o.len { 54 | rms += o[i] * o[i] 55 | } 56 | rms = T(math.sqrt(f64(rms / T(o.len)))) 57 | return rms 58 | } 59 | 60 | // vector_norm_diff returns the Euclidean norm of the difference: 61 | // nrm := ||u - v|| 62 | pub fn vector_norm_diff[T](o []T, v []T) T { 63 | mut nrm := T{} 64 | for i in 0 .. v.len { 65 | nrm += (o[i] - v[i]) * (o[i] - v[i]) 66 | } 67 | nrm = T(math.sqrt(f64(nrm))) 68 | return nrm 69 | } 70 | 71 | // vector_largest returns the largest component |u[i]| of this []T, normalised by den 72 | // largest := |u[i]| / den 73 | pub fn vector_largest[T](o []T, den T) T { 74 | mut largest := math.abs(o[0]) 75 | for i := 1; i < o.len; i++ { 76 | tmp := math.abs(o[i]) 77 | if tmp > largest { 78 | largest = tmp 79 | } 80 | } 81 | return largest / den 82 | } 83 | -------------------------------------------------------------------------------- /neural_networks/activ_funcs.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import math 4 | 5 | pub enum ActivationFunctions { 6 | tanh 7 | leaky_relu 8 | } 9 | 10 | fn get_activ_function(f ActivationFunctions) (fn (f64) f64, fn (f64) f64) { 11 | match f { 12 | .tanh { return tanh, tanh_prime } 13 | .leaky_relu { return lrelu, lrelu_prime } 14 | } 15 | } 16 | 17 | @[inline] 18 | pub fn tanh(a f64) f64 { 19 | return math.tanh(a) 20 | } 21 | 22 | @[inline] 23 | pub fn tanh_prime(a f64) f64 { 24 | tanha := math.tanh(a) 25 | return 1 - tanha * tanha 26 | } 27 | 28 | @[inline] 29 | pub fn lrelu(value f64) f64 { 30 | return if value < 0 { value * 0.1 } else { value } 31 | } 32 | 33 | @[inline] 34 | pub fn lrelu_prime(value f64) f64 { 35 | return if value < 0 { 0.1 } else { 1.0 } 36 | } 37 | -------------------------------------------------------------------------------- /neural_networks/classifier_utilities.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | pub fn match_number_to_classifier_array(nb u8) []f64 { 4 | return []f64{len:10, init: if index == nb {1} else {0}} 5 | } 6 | 7 | pub fn match_classifier_array_to_number(a []f64) int { 8 | for i, elem in a { 9 | if elem == 1.0 { 10 | return i 11 | } 12 | } 13 | panic("No corresponding number") 14 | } 15 | 16 | pub fn match_output_array_to_number(a []f64) int { 17 | mut highest := 0 18 | for i, elem in a { 19 | if elem > a[highest] { 20 | highest = i 21 | } 22 | } 23 | return highest 24 | } -------------------------------------------------------------------------------- /neural_networks/cost_functions.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import la 4 | 5 | pub enum CostFunctions { 6 | mse 7 | } 8 | 9 | fn get_cost_function(f CostFunctions) (fn ([]f64, []f64) f64, fn ([]f64, []f64) []f64) { 10 | match f { 11 | .mse { return mse, mse_prime } 12 | } 13 | } 14 | 15 | pub fn mse(y_true []f64, y_pred []f64) f64 { // mean squared error 16 | not_squared_error := la.vector_add(1.0, y_true, -1.0, y_pred) 17 | mut mean := 0.0 18 | for elem in not_squared_error { 19 | mean += elem * elem 20 | } 21 | return mean / f64(y_true.len) // fixed: divide by the component count so this really is the *mean* squared error (it returned the sum) 22 | } 23 | 24 | pub 
fn mse_prime(y_true []f64, y_pred []f64) []f64 { 25 | return la.vector_add(1.0 * 2 / f64(y_true.len), y_true, -1.0 * 2 / f64(y_pred.len), 26 | y_pred) 27 | } 28 | -------------------------------------------------------------------------------- /neural_networks/image_processing.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import math 4 | import rand 5 | 6 | @[direct_array_access] 7 | pub fn rand_noise(a []f64, noise_probability int, noise_range int) []f64 { 8 | if noise_probability > 0 && noise_range > 0 { 9 | mut output := a.clone() 10 | for mut elem in output { 11 | if rand.int_in_range(0, noise_probability) or {1} == 0 { 12 | elem += rand.f64_in_range(0, f64(noise_range)-elem) or {0.0} 13 | } 14 | } 15 | return output 16 | } else { 17 | return a 18 | } 19 | } 20 | 21 | @[direct_array_access] 22 | pub fn get_center_of_mass(a []f64, x_size int, y_size int) (int, int) { 23 | mut x := 0.0 24 | mut y := 0.0 25 | mut cpt := 0.0 // to divide everything by the total of values 26 | for l in 0 .. y_size { 27 | for c in 0 .. x_size { 28 | px_value := a[l * x_size + c] // fixed: rows are x_size entries long (cf. a_coords(..., x_size) in center_image); l * y_size only worked for square images 29 | if px_value != 0 { 30 | x += c * px_value 31 | y += l * px_value 32 | cpt += 1 * px_value 33 | } 34 | } 35 | } 36 | if cpt != 0 { 37 | x /= cpt 38 | y /= cpt 39 | } 40 | return int(x - x_size / 2), int(y - y_size / 2) // offset (half goal/crop image size); fixed: the y offset used x_size 41 | } 42 | 43 | @[direct_array_access] 44 | pub fn center_image(a []f64, x_size int, y_size int) []f64 { 45 | offset_x, offset_y := get_center_of_mass(a, x_size, y_size) 46 | mut output := []f64{cap: x_size * y_size} 47 | for l in 0 .. y_size { 48 | for c in 0 .. x_size { 49 | if in_range(offset_x + c, offset_y + l, 0, 0, x_size, y_size) { 50 | output << a[a_coords(offset_y + l, offset_x + c, x_size)] 51 | } else { 52 | output << 0.0 53 | } 54 | } 55 | } 56 | return output 57 | } 58 | 59 | @[direct_array_access] 60 | pub fn scale_img(a []f64, scale_goal f64, x_size int, y_size int) []f64 { 61 | base_side_x := x_size 62 | base_side_y := y_size 63 | scaled_side_x := ceil(f64(base_side_x) * scale_goal) 64 | scaled_side_y := ceil(f64(base_side_y) * scale_goal) 65 | if scaled_side_y != base_side_y && scaled_side_x != base_side_x { 66 | mut new_a := []f64{len: scaled_side_y * scaled_side_x, cap: scaled_side_y * scaled_side_x} 67 | for l in 0 .. scaled_side_y { 68 | for c in 0 .. 
scaled_side_x { 69 | // Index in the new array of the current pixel 70 | new_i := l * scaled_side_x + c // fixed: row length is scaled_side_x; l * scaled_side_y only worked for square images 71 | // needs division (for proportionality) but only if needed : 72 | mut val_l := f64(l * (base_side_y - 1)) 73 | mut val_c := f64(c * (base_side_x - 1)) 74 | 75 | // if the division is an integer (it corresponds to an exact pixel) 76 | l_is_int := int(val_l) % (scaled_side_y - 1) == 0 77 | c_is_int := int(val_c) % (scaled_side_x - 1) == 0 // fixed: both tests used !=, which inverted their meaning and degraded the interpolation below to nearest-neighbour 78 | // divide 79 | val_l /= (scaled_side_y - 1) 80 | val_c /= (scaled_side_x - 1) 81 | int_val_l := int(val_l) 82 | int_val_c := int(val_c) 83 | // Take the right pixel values 84 | if l_is_int && c_is_int { 85 | new_a[new_i] = a[int(val_l) * base_side_x + int_val_c] 86 | } else if !(l_is_int || c_is_int) { // none of them 87 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_c) * float_gap(val_l) + 88 | a[a_coords(int_val_l, ceil(val_c), base_side_x)] * float_offset(val_c) * float_gap(val_l) + 89 | a[a_coords(ceil(val_l), int_val_c, base_side_x)] * float_offset(val_l) * float_gap(val_c) + 90 | a[a_coords(ceil(val_l), ceil(val_c), base_side_x)] * float_offset(val_l) * float_offset(val_c) 91 | } else if l_is_int { // exact line (not useful for squares I think but there if needed) 92 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_c) + 93 | a[a_coords(int_val_l, ceil(val_c), base_side_x)] * float_offset(val_c) 94 | } else { // exact column (not useful for squares I think but there if needed) 95 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_l) + 96 | a[a_coords(ceil(val_l), int_val_c, base_side_x)] * float_offset(val_l) 97 | } 98 | } 99 | } 100 | return new_a // needs to be cropped 101 | } else { 102 | return a 103 | } 104 | } 105 | 106 | @[direct_array_access] 107 | pub fn rotate(a []f64, alpha f64, base_x int, base_y int) []f64 { 108 | if alpha != 0 { 109 | angle := math.radians(alpha) 110 | // different sizes of the sides 111 | full_x := base_x * math.cos(angle) - base_y * math.sin(angle) // x coords of the last pixel (bottom right corner) 112 | full_y := base_x * math.sin(angle) + base_y * math.cos(angle) 113 | only_x_x := base_x * math.cos(angle) // - 0*math.sin(angle) top right corner 114 | only_x_y := base_x * math.sin(angle) // + 0*math.cos(angle) 115 | only_y_x := -base_y * math.sin(angle) // bottom left corner 116 | only_y_y := base_y * math.cos(angle) 117 | max_x := max([full_x, only_x_x, only_y_x, 0]) 118 | min_x := min([full_x, only_x_x, only_y_x, 0]) 119 | max_y := max([full_y, only_x_y, only_y_y, 0]) 120 | min_y := min([full_y, only_x_y, only_y_y, 0]) 121 | size_x := ceil(max_x - min_x + 1) 122 | size_y := ceil(max_y - min_y + 1) 123 | 124 | mut output := []f64{len: size_x * size_y} 125 | for i, _ in output { 126 | x := f64(i % size_x) - (f64(size_x - 1) / 2.0) 127 | y := f64(i / size_x) - (f64(size_y - 1) / 2.0) // fixed: the row index is i / size_x (row length), not i / size_y; they only coincide for square outputs 128 | xn := x * math.cos(angle) - y * math.sin(angle) 129 | yn := x * math.sin(angle) + y * math.cos(angle) 130 | 131 | array_coord_y := yn + f64(base_y-1)/2.0 132 | array_coord_x := xn + f64(base_x-1)/2.0 133 | 134 | if in_range(ceil(array_coord_x), ceil(array_coord_y), 0, 0, base_x, base_y) { 135 | elem := a_coords(int(array_coord_y), int(array_coord_x), base_x) 136 | elem1 := a_coords(int(array_coord_y), ceil(array_coord_x), base_x) 137 | elem2 := a_coords(ceil(array_coord_y), int(array_coord_x), base_x) 138 | elem3 := a_coords(ceil(array_coord_y), ceil(array_coord_x), base_x) 139 | 140 | output[i] = f64(int(a[elem] * 
float_gap(array_coord_y) * float_gap(array_coord_x) + 141 | a[elem1] * float_gap(array_coord_y) * float_offset(array_coord_x) + 142 | a[elem2] * float_offset(array_coord_y) * float_gap(array_coord_x) + 143 | a[elem3] * float_offset(array_coord_y) * float_offset(array_coord_x))) 144 | } 145 | } 146 | return output 147 | } else { 148 | return a 149 | } 150 | } 151 | 152 | @[inline] 153 | pub fn a_coords(y int, x int, size int) int { 154 | return y * size + x 155 | } 156 | 157 | @[inline] 158 | pub fn in_range[T](x T, y T, x_start T, y_start T, x_end T, y_end T) bool { 159 | return x >= x_start && x < x_end && y >= y_start && y < y_end 160 | } 161 | 162 | @[inline] 163 | pub fn ceil(nb f64) int { // integer ceil via math.ceil (the old -int(-nb) trick, still used in neural_networks_acc, was not a true ceil) 164 | return int(math.ceil(nb)) 165 | } 166 | 167 | @[inline] 168 | fn float_offset(f f64) f64 { 169 | return f - int(f) 170 | } 171 | 172 | @[inline] 173 | fn float_gap(f f64) f64 { 174 | return 1 - float_offset(f) 175 | } 176 | 177 | @[direct_array_access; inline] 178 | fn max(a []f64) f64 { 179 | mut highest := 0 180 | for nb, val in a { 181 | if val > a[highest] { 182 | highest = nb 183 | } 184 | } 185 | return a[highest] 186 | } 187 | 188 | @[direct_array_access; inline] 189 | fn min(a []f64) f64 { 190 | mut lowest := 0 // index of the smallest value (was misleadingly named `highest`) 191 | for nb, val in a { 192 | if val < a[lowest] { 193 | lowest = nb 194 | } 195 | } 196 | return a[lowest] 197 | } 198 | 199 | @[direct_array_access] 200 | pub fn crop(a []f64, x_base int, y_base int, x_goal int, y_goal int) []f64 { 201 | mut output := []f64{cap: y_goal * x_goal} 202 | for l in (y_base - y_goal)/2 .. (y_base - y_goal)/2 + y_goal { 203 | for c in (x_base - x_goal)/2 .. (x_base - x_goal)/2 + x_goal { 204 | if in_range(c, l, 0, 0, x_base, y_base) { 205 | output << a[a_coords(l, c, x_base)] 206 | } else { 207 | output << 0.0 208 | } 209 | } 210 | } 211 | return output 212 | } -------------------------------------------------------------------------------- /neural_networks/layer_activation.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | pub struct Activation { 4 | mut: 5 | input []f64 6 | output []f64 7 | activ_type ActivationFunctions 8 | activ fn (n f64) f64 @[required] 9 | activ_prime fn (n f64) f64 @[required] 10 | } 11 | 12 | pub fn Activation.new(activ_type ActivationFunctions) Activation { 13 | activ, activ_prime := get_activ_function(activ_type) 14 | return Activation{[]f64{}, []f64{}, activ_type, activ, activ_prime} 15 | } 16 | 17 | pub fn (mut a Activation) forward(input []f64) []f64 { 18 | a.input = input.clone() 19 | a.output = input.clone() 20 | vector_apply_func(mut a.output, a.activ) 21 | return a.output 22 | } 23 | 24 | pub fn (mut a Activation) backward(output_gradient []f64) []f64 { 25 | mut input_deriv := a.input.clone() 26 | vector_apply_func(mut input_deriv, a.activ_prime) 27 | output := vector_element_wise_mul(output_gradient, input_deriv) 28 | return output 29 | } 30 | 31 | pub fn (mut a Activation) apply_grad(nb_elems_seen int, lr f64, momentum f64) { 32 | } 33 | 34 | pub fn (mut a Activation) reset() { 35 | } 36 | 37 | pub fn vector_apply_func(mut a []f64, f fn (n f64) f64) { 38 | for mut elem in a { 39 | elem = f(elem) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /neural_networks/layer_base.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import la 4 | 5 | pub enum LayerType as i64 { 6 | dense 7 | activ 8 | } 9 | 10 | 
pub fn layer_type(l Layer) LayerType { 11 | match l { 12 | Dense { return .dense } 13 | Activation { return .activ } 14 | else { panic('strange') } 15 | } 16 | } 17 | 18 | pub fn layer_from_type(lt LayerType) Layer { 19 | match lt { 20 | .dense { return Dense{ 21 | weights: la.Matrix.new[f64](0, 0) 22 | weights_gradient: la.Matrix.new[f64](0, 0) 23 | old_weights_gradient: la.Matrix.new[f64](0, 0) 24 | } } 25 | .activ { return Activation{ 26 | activ: lrelu 27 | activ_prime: lrelu_prime 28 | } } 29 | } 30 | panic('Unknown LayerType value : ${lt} ${int(lt)}') 31 | } 32 | 33 | pub interface Layer { 34 | mut: 35 | input []f64 36 | output []f64 37 | forward(input []f64) []f64 38 | backward(output_gradient []f64) []f64 39 | apply_grad(nb_elems_seen int, lr f64, momentum f64) 40 | reset() 41 | } 42 | -------------------------------------------------------------------------------- /neural_networks/layer_dense.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import la 4 | 5 | /* 6 | A Dense layer is also called a fully connected layer. 7 | It connects all the neurones of the previous layer to all the neurones of the next layer. 8 | */ 9 | 10 | pub struct Dense { 11 | pub mut: 12 | input_size i64 13 | output_size i64 14 | input []f64 15 | output []f64 16 | weights &la.Matrix[f64] 17 | weights_gradient &la.Matrix[f64] 18 | old_weights_gradient &la.Matrix[f64] 19 | bias []f64 20 | bias_gradient []f64 21 | old_bias_gradient []f64 22 | } 23 | 24 | pub fn Dense.new(input_size int, output_size int, weights_range f64, biases_range f64) Dense { 25 | return Dense{input_size, output_size, []f64{len: input_size}, []f64{len: output_size}, rand_matrix(output_size, 26 | input_size, weights_range), la.Matrix.new[f64](output_size, input_size), la.Matrix.new[f64](output_size, 27 | input_size), rand_array(output_size, biases_range), []f64{len: output_size}, []f64{len: output_size}} 28 | } 29 | 30 | pub fn (mut d Dense) reset() { // important, call after apply grad 31 | d.old_weights_gradient = d.weights_gradient.clone() 32 | d.weights_gradient = la.Matrix.new[f64](int(d.output_size), int(d.input_size)) 33 | d.old_bias_gradient = d.bias_gradient.clone() 34 | d.bias_gradient = []f64{len: int(d.output_size)} 35 | } 36 | 37 | pub fn (mut d Dense) forward(input []f64) []f64 { 38 | d.input = input.clone() 39 | without_bias := la.matrix_vector_mul(1.0, d.weights, d.input) 40 | d.output = la.vector_add(1.0, without_bias, 1.0, d.bias) 41 | return d.output 42 | } 43 | 44 | pub fn (mut d Dense) backward(output_gradient []f64) []f64 { 45 | la.matrix_add(mut d.weights_gradient, 1.0, la.vector_vector_tr_mul(1.0, output_gradient, 46 | d.input), 1.0, d.weights_gradient) 47 | d.bias_gradient = la.vector_add(1.0, output_gradient, 1.0, d.bias_gradient) 48 | return la.matrix_tr_vector_mul(1.0, d.weights, output_gradient) 49 | } 50 | 51 | pub fn (mut d Dense) apply_grad(nb_elems_seen int, lr f64, momentum f64) { 52 | if momentum > 0 { 53 | la.matrix_add(mut d.weights_gradient, lr / f64(nb_elems_seen), d.weights_gradient, 54 | lr * momentum, d.old_weights_gradient) 55 | la.matrix_add(mut d.weights, 1.0, d.weights_gradient, 1.0, d.weights) 56 | d.bias = la.vector_add(lr / f64(nb_elems_seen), d.bias_gradient, 1.0, d.bias) 57 | } else { 58 | la.matrix_add(mut d.weights, lr / f64(nb_elems_seen), d.weights_gradient, 1.0, 59 | d.weights) 60 | d.bias = la.vector_add(lr / f64(nb_elems_seen), d.bias_gradient, 1.0, d.bias) 61 | } 62 | } 63 | 
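[editor's note] layer_dense.v and layer_activation.v above compose into full networks through the Layer interface and the NeuralNetwork struct defined in neural_network.v just below. A minimal, hedged usage sketch in V: the layer sizes (2-3-1), the 0.7/0.65 weight and bias ranges and the seed are illustrative assumptions, not values mandated by the module.

module main

import neural_networks as nn

fn main() {
	// seed the module's RNG and start from an empty network
	mut model := nn.NeuralNetwork.new(0)
	// 2 inputs -> 3 hidden -> 1 output, with leaky-ReLU activations
	model.add_layer(nn.Dense.new(2, 3, 0.7, 0.65))
	model.add_layer(nn.Activation.new(.leaky_relu))
	model.add_layer(nn.Dense.new(3, 1, 0.7, 0.65))
	model.add_layer(nn.Activation.new(.leaky_relu))
	// one forward pass through every layer in order
	println(model.forward_propagation([0.0, 1.0]))
}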
-------------------------------------------------------------------------------- /neural_networks/neural_network.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import la 4 | import os 5 | import rand 6 | 7 | pub struct NeuralNetwork { 8 | pub mut: 9 | layers []Layer 10 | cost f64 11 | accuracy f64 12 | } 13 | 14 | pub fn NeuralNetwork.new(seed u32) NeuralNetwork { 15 | rand.seed([seed, 0]) 16 | return NeuralNetwork{} 17 | } 18 | 19 | pub fn (mut nn NeuralNetwork) add_layer(layer Layer) { 20 | nn.layers << layer 21 | } 22 | 23 | pub fn (mut nn NeuralNetwork) train(t_m TrainingMode) { 24 | println("\nTraining for $t_m.nb_epochs epochs:") 25 | match t_m { 26 | BackpropTrainingParams { nn.train_backprop(t_m) } 27 | MinibatchesBackpropTrainingParams { nn.train_minibatches_backprop(t_m) } 28 | else { exit_err('The training mode is not implemented') } 29 | } 30 | } 31 | 32 | pub fn (mut nn NeuralNetwork) forward_propagation(input []f64) []f64 { 33 | mut next_layer_input := input.clone() 34 | for mut layer in nn.layers { 35 | next_layer_input = layer.forward(next_layer_input) 36 | } 37 | return next_layer_input 38 | } 39 | 40 | pub fn (mut nn NeuralNetwork) backpropagation(expected_output []f64, output []f64, cost_prime fn ([]f64, []f64) []f64) { 41 | mut gradient := cost_prime(expected_output, output) 42 | for j := nn.layers.len - 1; j >= 0; j -= 1 { 43 | gradient = nn.layers[j].backward(gradient) 44 | } 45 | } 46 | 47 | pub fn (mut nn NeuralNetwork) apply_gradient_descent(nb_elems_seen int, lr f64, momentum f64) { 48 | for mut layer in nn.layers { 49 | layer.apply_grad(nb_elems_seen, lr, momentum) 50 | layer.reset() 51 | } 52 | } 53 | 54 | pub fn (nn NeuralNetwork) mutate(range f64) NeuralNetwork { 55 | mut new_nn := NeuralNetwork{layers: nn.layers.clone()} 56 | for mut l in new_nn.layers { 57 | if mut l is Dense { 58 | for mut b in l.bias { 59 | b += rand.f64_in_range(-range, range) or {panic(err)} 60 | } 61 | la.matrix_add(mut l.weights, 1, rand_matrix(l.weights.m, l.weights.n, range), 1, l.weights) 62 | } 63 | } 64 | return new_nn 65 | } 66 | 67 | pub fn (mut nn NeuralNetwork) save_model(save_name string) { 68 | mut file := os.create(save_name) or { panic(err) } 69 | file.write_raw(i64(nn.layers.len)) or { panic(err) } 70 | for layer in nn.layers { 71 | l_type := layer_type(layer) 72 | file.write_raw(i32(l_type)) or { panic(err) } 73 | match layer { 74 | Dense { 75 | file.write_raw(layer.input_size) or { panic(err) } 76 | file.write_raw(layer.output_size) or { panic(err) } 77 | for elem in layer.weights.data { 78 | file.write_raw(elem) or { panic(err) } 79 | } 80 | for elem in layer.bias { 81 | file.write_raw(elem) or { panic(err) } 82 | } 83 | } 84 | Activation { 85 | file.write_raw(layer.activ_type) or { panic(err) } 86 | } 87 | else {} 88 | } 89 | } 90 | file.close() 91 | } 92 | 93 | @[noreturn] 94 | fn exit_err(message string) { 95 | println(message) 96 | exit(1) 97 | } 98 | 99 | pub fn (mut nn NeuralNetwork) load_model(save_name string) { 100 | mut load := os.open(save_name) or { exit_err("This save doesn't exist") } 101 | nb_layers := load.read_raw[i64]() or { panic(err) } 102 | println("$nb_layers Layers :") 103 | for _ in 0 .. 
nb_layers { 104 | ltype := load.read_raw[i32]() or { panic(err) } 105 | mut layer_base := layer_from_type(unsafe{LayerType(ltype)}) 106 | match mut layer_base { 107 | Dense { 108 | layer_base.input_size = load.read_raw[i64]() or { panic(err) } 109 | layer_base.output_size = load.read_raw[i64]() or { panic(err) } 110 | println("Dense ${layer_base.input_size} - ${layer_base.output_size}") 111 | matrix_size := int(layer_base.input_size * layer_base.output_size) 112 | layer_base.weights = la.Matrix.raw(int(layer_base.output_size), int(layer_base.input_size), 113 | []f64{len: matrix_size, init: index - index + load.read_raw[f64]() or { 114 | panic(err) 115 | }}) 116 | layer_base.weights_gradient = la.Matrix.new[f64](int(layer_base.output_size), 117 | int(layer_base.input_size)) 118 | layer_base.old_weights_gradient = la.Matrix.new[f64](int(layer_base.output_size), 119 | int(layer_base.input_size)) 120 | layer_base.bias = []f64{len: int(layer_base.output_size), init: index - index + load.read_raw[f64]() or { 121 | panic(err) 122 | }} 123 | layer_base.bias_gradient = []f64{len: int(layer_base.output_size)} 124 | layer_base.old_bias_gradient = []f64{len: int(layer_base.output_size)} 125 | } 126 | Activation { 127 | layer_base = Activation.new(load.read_raw[ActivationFunctions]() or { panic(err) }) 128 | println("Activation ${layer_base.activ_type}") 129 | } 130 | else {println("Problem ?")} 131 | } 132 | nn.add_layer(layer_base) 133 | } 134 | println("Finished loading the NN ${save_name}") 135 | } 136 | -------------------------------------------------------------------------------- /neural_networks/training_mode_backprop.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | // An epoch is when the nn has seen the entire dataset 4 | pub struct BackpropTrainingParams { 5 | pub: 6 | learning_rate f64 7 | momentum f64 8 | nb_epochs int 9 | classifier bool 10 | print_interval int 11 | cost_function CostFunctions 12 | training Dataset 13 | test Dataset 14 | test_params TestParams 15 | } 16 | 17 | fn (mut nn NeuralNetwork) train_backprop(t_p BackpropTrainingParams) { 18 | cost_fn, cost_prime := get_cost_function(t_p.cost_function) 19 | for epoch in 0 .. 
t_p.nb_epochs { 20 | mut cost := 0.0 21 | mut accuracy := 0.0 22 | print_epoch := t_p.print_interval != 0 && ((epoch + 1) % t_p.print_interval == 0 || epoch == 0) 23 | test_epoch := t_p.test_params.training_interval != 0 && ((epoch + 1) % t_p.test_params.training_interval == 0 || epoch == 0) 24 | for i, input in t_p.training.inputs { 25 | output := nn.forward_propagation(input) 26 | cost += cost_fn(t_p.training.expected_outputs[i], output) 27 | nn.backpropagation(t_p.training.expected_outputs[i], output, cost_prime) 28 | if t_p.classifier { 29 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_p.training.expected_outputs[i]) { 30 | accuracy += 1 31 | } 32 | } 33 | } 34 | accuracy /= f64(t_p.training.inputs.len)/100.0 // percentage over the training set (was divided by the test set size) 35 | cost /= t_p.training.inputs.len 36 | if print_epoch { 37 | if t_p.classifier { 38 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tCost : ${cost}\t-\tAccuracy : ${accuracy:.2}%') 39 | } else { 40 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tCost : ${cost}') 41 | } 42 | } 43 | nn.apply_gradient_descent(t_p.training.inputs.len, t_p.learning_rate, t_p.momentum) 44 | if test_epoch { 45 | nn.test(t_p) 46 | } 47 | nn.cost = cost 48 | nn.accuracy = accuracy 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /neural_networks/training_mode_base.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | // An epoch is when the nn has seen the entire dataset 4 | interface TrainingMode { 5 | learning_rate f64 6 | nb_epochs int 7 | classifier bool 8 | cost_function CostFunctions 9 | training Dataset 10 | test Dataset 11 | test_params TestParams 12 | } 13 | 14 | pub struct Dataset { 15 | pub mut: 16 | inputs [][]f64 17 | expected_outputs [][]f64 18 | } 19 | 20 | pub fn (dataset Dataset) clone() Dataset { 21 | return Dataset{dataset.inputs.clone(), dataset.expected_outputs.clone()} 22 | } 23 | 24 | // [ start -> end ] 25 | // test_interval in epochs 26 | pub struct TestParams { 27 | pub: 28 | print_start int 29 | print_end int 30 | training_interval int 31 | training_batch_interval int 32 | } 33 | 34 | pub fn (mut nn NeuralNetwork) test(t_m TrainingMode) { 35 | println("\nTest Dataset:") 36 | cost_fn, _ := get_cost_function(t_m.cost_function) 37 | mut cost := 0.0 38 | mut accuracy := 0.0 39 | for i, input in t_m.test.inputs { 40 | output := nn.forward_propagation(input) 41 | cost += cost_fn(t_m.test.expected_outputs[i], output) 42 | if t_m.classifier { 43 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_m.test.expected_outputs[i]) { 44 | accuracy += 1 45 | } 46 | } 47 | if i >= t_m.test_params.print_start && i <= t_m.test_params.print_end { // if there is an interval to print 48 | println("$i -> $output / ${t_m.test.expected_outputs[i]}") 49 | } 50 | } 51 | accuracy /= f64(t_m.test.inputs.len)/100.0 52 | cost /= t_m.test.inputs.len // average over the test set (was divided by the training set size) 53 | if t_m.classifier { 54 | println('Test Cost : ${cost} - Test Accuracy : ${accuracy:.2}%\n') 55 | } else { 56 | println('Test Cost : ${cost}\n') 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /neural_networks/training_mode_minibatches_backprop.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | pub struct MinibatchesBackpropTrainingParams { 4 | pub: 5 | learning_rate f64 6 | momentum f64 7 | batch_size int = 1 8 | nb_epochs int 9 | classifier bool 10 | 
print_interval int 11 | print_batch_interval int 12 | cost_function CostFunctions 13 | training Dataset 14 | test Dataset 15 | test_params TestParams 16 | } 17 | 18 | fn (mut nn NeuralNetwork) train_minibatches_backprop(t_p MinibatchesBackpropTrainingParams) { 19 | cost_fn, cost_prime := get_cost_function(t_p.cost_function) 20 | nb_batches := t_p.training.inputs.len / t_p.batch_size 21 | for epoch in 0 .. t_p.nb_epochs { 22 | mut epoch_error := 0.0 23 | mut epoch_accuracy := 0.0 24 | print_epoch := t_p.print_interval != 0 && ((epoch + 1) % t_p.print_interval == 0 || epoch == 0) 25 | test_epoch := t_p.test_params.training_interval != 0 && ((epoch + 1) % t_p.test_params.training_interval == 0 || epoch == 0) 26 | for batch in 0 .. nb_batches { // note: when batch_size does not divide the dataset size evenly, the remainder samples are dropped by this rounding 27 | mut error := 0.0 28 | mut accuracy := 0.0 29 | print_batch := t_p.print_batch_interval != 0 && ((batch + 1) % t_p.print_batch_interval == 0 || batch == 0) 30 | test_batch := t_p.test_params.training_batch_interval != 0 && ((batch + 1) % t_p.test_params.training_batch_interval == 0) 31 | for i, input in t_p.training.inputs[batch * t_p.batch_size..(batch + 1) * t_p.batch_size] { 32 | nb := batch * t_p.batch_size + i 33 | output := nn.forward_propagation(input) 34 | error += cost_fn(t_p.training.expected_outputs[nb], output) 35 | nn.backpropagation(t_p.training.expected_outputs[nb], output, cost_prime) 36 | if t_p.classifier { 37 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_p.training.expected_outputs[nb]) { 38 | accuracy += 1 39 | } 40 | } 41 | } 42 | epoch_error += error 43 | epoch_accuracy += accuracy 44 | error /= t_p.batch_size 45 | accuracy /= f64(t_p.batch_size)/100.0 46 | nn.apply_gradient_descent(t_p.batch_size, t_p.learning_rate, t_p.momentum) 47 | if print_batch && print_epoch { 48 | if t_p.classifier { 49 | println(' batch ${batch + 1}/${nb_batches}\t-\tCost : ${error}\t-\tAccuracy : ${accuracy:.2}%') 50 | } else { 51 | println(' batch ${batch + 1}/${nb_batches}\t-\tCost : ${error}') 52 | } 53 | } 54 | if test_epoch && test_batch { 55 | nn.test(t_p) 56 | } 57 | } 58 | epoch_error /= (nb_batches*t_p.batch_size) 59 | epoch_accuracy /= f64(nb_batches*t_p.batch_size)/100.0 // f64 division; the old integer /100 truncated, and was 0 for datasets under 100 samples 60 | if print_epoch { 61 | if t_p.classifier { 62 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tAverage Cost : ${epoch_error}\t-\tAverage Accuracy : ${epoch_accuracy:.2}%') 63 | } else { 64 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tAverage Cost : ${epoch_error}') 65 | } 66 | } 67 | nn.cost = epoch_error 68 | nn.accuracy = epoch_accuracy 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /neural_networks/utilities.v: -------------------------------------------------------------------------------- 1 | module neural_networks 2 | 3 | import la 4 | import rand 5 | 6 | fn rand_array(nb int, range f64) []f64 { 7 | mut a := []f64{} 8 | for _ in 0 .. 
nb { 9 | a << rand.f64_in_range(-range, range) or { panic(err) } 10 | } 11 | return a 12 | } 13 | 14 | fn rand_2darray(nb_lines int, nb_cols int, range f64) [][]f64 { 15 | mut a := [][]f64{len: nb_lines, init: []f64{}} 16 | for mut line in a { 17 | line << rand_array(nb_cols, range) 18 | } 19 | return a 20 | } 21 | 22 | fn rand_matrix(nb_lines int, nb_cols int, range f64) &la.Matrix[f64] { 23 | return la.Matrix.deep2(rand_2darray(nb_lines, nb_cols, range)) 24 | } 25 | 26 | fn vector_element_wise_mul(u []f64, v []f64) []f64 { 27 | mut result := []f64{len: u.len} 28 | for i, elem in u { 29 | result[i] = elem * v[i] 30 | } 31 | return result 32 | } 33 | -------------------------------------------------------------------------------- /neural_networks/v.mod: -------------------------------------------------------------------------------- 1 | Module { name: 'neural_networks' version: '0.0.0' description: '' license: 'MIT' dependencies:[vsl] } -------------------------------------------------------------------------------- /neural_networks_acc/activ_funcs.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import math 4 | 5 | pub enum ActivationFunctions { 6 | tanh 7 | leaky_relu 8 | } 9 | 10 | fn get_activ_function(f ActivationFunctions) (fn (f64) f64, fn (f64) f64) { 11 | match f { 12 | .tanh { return tanh, tanh_prime } 13 | .leaky_relu { return lrelu, lrelu_prime } 14 | } 15 | } 16 | 17 | @[inline] 18 | pub fn tanh(a f64) f64 { 19 | return math.tanh(a) 20 | } 21 | 22 | @[inline] 23 | pub fn tanh_prime(a f64) f64 { 24 | tanha := math.tanh(a) 25 | return 1 - tanha * tanha 26 | } 27 | 28 | @[inline] 29 | pub fn lrelu(value f64) f64 { 30 | return if value < 0 { value * 0.1 } else { value } 31 | } 32 | 33 | @[inline] 34 | pub fn lrelu_prime(value f64) f64 { 35 | return if value < 0 { 0.1 } else { 1.0 } 36 | } 37 | -------------------------------------------------------------------------------- /neural_networks_acc/classifier_utilities.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | pub fn match_number_to_classifier_array(nb u8) []f64 { 4 | return []f64{len:10, init: if index == nb {1} else {0}} 5 | } 6 | 7 | pub fn match_classifier_array_to_number(a []f64) int { 8 | for i, elem in a { 9 | if elem == 1.0 { 10 | return i 11 | } 12 | } 13 | panic("No corresponding number") 14 | } 15 | 16 | pub fn match_output_array_to_number(a []f64) int { 17 | mut highest := 0 18 | for i, elem in a { 19 | if elem > a[highest] { 20 | highest = i 21 | } 22 | } 23 | return highest 24 | } -------------------------------------------------------------------------------- /neural_networks_acc/cost_functions.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import vsl.la 4 | 5 | pub enum CostFunctions { 6 | mse 7 | } 8 | 9 | fn get_cost_function(f CostFunctions) (fn ([]f64, []f64) f64, fn ([]f64, []f64) []f64) { 10 | match f { 11 | .mse { return mse, mse_prime } 12 | } 13 | } 14 | 15 | pub fn mse(y_true []f64, y_pred []f64) f64 { // mean squared error 16 | not_squared_error := la.vector_add(1.0, y_true, -1.0, y_pred) 17 | mut mean := 0.0 18 | for elem in not_squared_error { 19 | mean += elem * elem 20 | } 21 | return mean 22 | } 23 | 24 | pub fn mse_prime(y_true []f64, y_pred []f64) []f64 { 25 | return la.vector_add(1.0 * 2 / f64(y_true.len), y_true, -1.0 * 2 / f64(y_pred.len), 26 | y_pred) 27 | } 28 | 
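[editor's note] Despite the "mean squared error" comment, mse above returns the sum of the squared errors (there is no division by the vector length), and mse_prime returns 2/n * (y_true - y_pred), the negative of the usual gradient; this sign matches apply_grad, which adds the accumulated gradient rather than subtracting it. A small hedged check with illustrative values (all exact in f64):

y_true := [1.0, 0.0]
y_pred := [0.75, 0.25]
assert mse(y_true, y_pred) == 0.125 // 0.25*0.25 + (-0.25)*(-0.25), summed, not averaged
assert mse_prime(y_true, y_pred) == [0.25, -0.25] // (2/2) * (y_true - y_pred)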
-------------------------------------------------------------------------------- /neural_networks_acc/image_processing.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import math 4 | import rand 5 | 6 | @[direct_array_access] 7 | pub fn rand_noise(a []f64, noise_probability int, noise_range int) []f64 { 8 | if noise_probability > 0 && noise_range > 0 { 9 | mut output := a.clone() 10 | for mut elem in output { 11 | if rand.int_in_range(0, noise_probability) or {1} == 0 { 12 | elem += rand.f64_in_range(0, f64(noise_range)-elem) or {0.0} 13 | } 14 | } 15 | return output 16 | } else { 17 | return a 18 | } 19 | } 20 | 21 | @[direct_array_access] 22 | pub fn get_center_of_mass(a []f64, x_size int, y_size int) (int, int) { 23 | mut x := 0.0 24 | mut y := 0.0 25 | mut cpt := 0.0 // total weight, to divide the sums by 26 | for l in 0 .. y_size { 27 | for c in 0 .. x_size { 28 | px_value := a[l * x_size + c] // row stride is x_size (was y_size, which only worked for square images) 29 | if px_value != 0 { 30 | x += c * px_value 31 | y += l * px_value 32 | cpt += 1 * px_value 33 | } 34 | } 35 | } 36 | if cpt != 0 { 37 | x /= cpt 38 | y /= cpt 39 | } 40 | return int(x - 28 / 2), int(y - 28 / 2) // offset (half goal/crop image size) 41 | } 42 | 43 | @[direct_array_access] 44 | pub fn center_image(a []f64, x_size int, y_size int) []f64 { 45 | offset_x, offset_y := get_center_of_mass(a, x_size, y_size) 46 | mut output := []f64{cap: x_size * y_size} 47 | for l in 0 .. y_size { 48 | for c in 0 .. x_size { 49 | if in_range(offset_x + c, offset_y + l, 0, 0, x_size, y_size) { 50 | output << a[a_coords(offset_y + l, offset_x + c, x_size)] 51 | } else { 52 | output << 0.0 53 | } 54 | } 55 | } 56 | return output 57 | } 58 | 59 | @[direct_array_access] 60 | pub fn scale_img(a []f64, scale_goal f64, x_size int, y_size int) []f64 { 61 | base_side_x := x_size 62 | base_side_y := y_size 63 | scaled_side_x := ceil(f64(base_side_x) * scale_goal) 64 | scaled_side_y := ceil(f64(base_side_y) * scale_goal) 65 | if scaled_side_y != base_side_y && scaled_side_x != base_side_x { 66 | mut new_a := []f64{len: scaled_side_y * scaled_side_x, cap: scaled_side_y * scaled_side_x} 67 | for l in 0 .. scaled_side_y { 68 | for c in 0 .. 
scaled_side_x { 69 | // Index in the new array of the current pixel 70 | new_i := l * scaled_side_x + c // row stride is the scaled row length (was scaled_side_y, which only worked for square images) 71 | // needs division (for proportionality) but only if needed : 72 | mut val_l := f64(l * (base_side_y - 1)) 73 | mut val_c := f64(c * (base_side_x - 1)) 74 | 75 | // if the division is an integer (it corresponds to an exact pixel) 76 | l_is_int := int(val_l) % (scaled_side_y - 1) == 0 // was != 0, which inverted the meaning stated above and skipped the interpolation 77 | c_is_int := int(val_c) % (scaled_side_x - 1) == 0 78 | // divide 79 | val_l /= (scaled_side_y - 1) 80 | val_c /= (scaled_side_x - 1) 81 | int_val_l := int(val_l) 82 | int_val_c := int(val_c) 83 | // Take the right pixel values 84 | if l_is_int && c_is_int { 85 | new_a[new_i] = a[int(val_l) * base_side_x + int_val_c] 86 | } else if !(l_is_int || c_is_int) { // none of them 87 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_c) * float_gap(val_l) + 88 | a[a_coords(int_val_l, ceil(val_c), base_side_x)] * float_offset(val_c) * float_gap(val_l) + 89 | a[a_coords(ceil(val_l), int_val_c, base_side_x)] * float_offset(val_l) * float_gap(val_c) + 90 | a[a_coords(ceil(val_l), ceil(val_c), base_side_x)] * float_offset(val_l) * float_offset(val_c) 91 | } else if l_is_int { // exact line (not useful for squares I think but there if needed) 92 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_c) + 93 | a[a_coords(int_val_l, ceil(val_c), base_side_x)] * float_offset(val_c) 94 | } else { // exact column (not useful for squares I think but there if needed) 95 | new_a[new_i] = a[a_coords(int_val_l, int_val_c, base_side_x)] * float_gap(val_l) + 96 | a[a_coords(ceil(val_l), int_val_c, base_side_x)] * float_offset(val_l) 97 | } 98 | } 99 | } 100 | return new_a // needs to be cropped 101 | } else { 102 | return a 103 | } 104 | } 105 | 106 | @[direct_array_access] 107 | pub fn rotate(a []f64, alpha f64, base_x int, base_y int) []f64 { 108 | if alpha != 0 { 109 | angle := math.radians(alpha) 110 | // different sizes of the sides 111 | full_x := base_x * math.cos(angle) - base_y * math.sin(angle) // x coords of the last pixel (bottom right corner) 112 | full_y := base_x * math.sin(angle) + base_y * math.cos(angle) 113 | only_x_x := base_x * math.cos(angle) // - 0*math.sin(angle) top right corner 114 | only_x_y := base_x * math.sin(angle) // + 0*math.cos(angle) 115 | only_y_x := -base_y * math.sin(angle) // bottom left corner 116 | only_y_y := base_y * math.cos(angle) 117 | max_x := max([full_x, only_x_x, only_y_x, 0]) 118 | min_x := min([full_x, only_x_x, only_y_x, 0]) 119 | max_y := max([full_y, only_x_y, only_y_y, 0]) 120 | min_y := min([full_y, only_x_y, only_y_y, 0]) 121 | size_x := ceil(max_x - min_x + 1) 122 | size_y := ceil(max_y - min_y + 1) 123 | 124 | mut output := []f64{len: size_x * size_y} 125 | for i, _ in output { 126 | x := f64(i % size_x) - (f64(size_x - 1) / 2.0) 127 | y := f64(i / size_x) - (f64(size_y - 1) / 2.0) // row index is i / size_x (was i / size_y, only valid when the rotated box is square) 128 | xn := x * math.cos(angle) - y * math.sin(angle) 129 | yn := x * math.sin(angle) + y * math.cos(angle) 130 | 131 | array_coord_y := yn + f64(base_y-1)/2.0 132 | array_coord_x := xn + f64(base_x-1)/2.0 133 | 134 | if in_range(ceil(array_coord_x), ceil(array_coord_y), 0, 0, base_x, base_y) { // int arguments, as in the neural_networks copy; mixing f64 and int did not satisfy the generic in_range 135 | elem := a_coords(int(array_coord_y), int(array_coord_x), base_x) 136 | elem1 := a_coords(int(array_coord_y), ceil(array_coord_x), base_x) 137 | elem2 := a_coords(ceil(array_coord_y), int(array_coord_x), base_x) 138 | elem3 := a_coords(ceil(array_coord_y), ceil(array_coord_x), base_x) 139 | 140 | output[i] = f64(int(a[elem] * float_gap(array_coord_y) * 
float_gap(array_coord_x) + 141 | a[elem1] * float_gap(array_coord_y) * float_offset(array_coord_x) + 142 | a[elem2] * float_offset(array_coord_y) * float_gap(array_coord_x) + 143 | a[elem3] * float_offset(array_coord_y) * float_offset(array_coord_x))) 144 | } 145 | } 146 | return output 147 | } else { 148 | return a 149 | } 150 | } 151 | 152 | @[inline] 153 | pub fn a_coords(y int, x int, size int) int { 154 | return y * size + x 155 | } 156 | 157 | @[inline] 158 | pub fn in_range[T](x T, y T, x_start T, y_start T, x_end T, y_end T) bool { 159 | return x >= x_start && x < x_end && y >= y_start && y < y_end 160 | } 161 | 162 | @[inline] 163 | pub fn ceil(nb f64) int { // was `-int(-nb)`, which floors positive inputs; aligned with the neural_networks copy 164 | return int(math.ceil(nb)) 165 | } 166 | 167 | @[inline] 168 | fn float_offset(f f64) f64 { 169 | return f - int(f) 170 | } 171 | 172 | @[inline] 173 | fn float_gap(f f64) f64 { 174 | return 1 - float_offset(f) 175 | } 176 | 177 | @[direct_array_access; inline] 178 | fn max(a []f64) f64 { 179 | mut highest := 0 180 | for nb, val in a { 181 | if val > a[highest] { 182 | highest = nb 183 | } 184 | } 185 | return a[highest] 186 | } 187 | 188 | @[direct_array_access; inline] 189 | fn min(a []f64) f64 { 190 | mut lowest := 0 // index of the smallest value (was misleadingly named `highest`) 191 | for nb, val in a { 192 | if val < a[lowest] { 193 | lowest = nb 194 | } 195 | } 196 | return a[lowest] 197 | } 198 | 199 | @[direct_array_access] 200 | pub fn crop(a []f64, x_base int, y_base int, x_goal int, y_goal int) []f64 { // note: crops from the top-left corner (the neural_networks copy crops centered) 201 | mut output := []f64{cap: y_goal * x_goal} 202 | for l in 0 .. y_goal { 203 | for c in 0 .. x_goal { 204 | if in_range(c, l, 0, 0, x_base, y_base) { 205 | output << a[a_coords(l, c, x_base)] 206 | } else { 207 | output << 0.0 208 | } 209 | } 210 | } 211 | return output 212 | } -------------------------------------------------------------------------------- /neural_networks_acc/layer_activation.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | pub struct Activation { 4 | mut: 5 | input []f64 6 | output []f64 7 | activ_type ActivationFunctions 8 | activ fn (n f64) f64 @[required] 9 | activ_prime fn (n f64) f64 @[required] 10 | } 11 | 12 | pub fn Activation.new(activ_type ActivationFunctions) Activation { 13 | activ, activ_prime := get_activ_function(activ_type) 14 | return Activation{[]f64{}, []f64{}, activ_type, activ, activ_prime} 15 | } 16 | 17 | pub fn (mut a Activation) forward(input []f64) []f64 { 18 | a.input = input.clone() 19 | a.output = input.clone() 20 | vector_apply_func(mut a.output, a.activ) 21 | return a.output 22 | } 23 | 24 | pub fn (mut a Activation) backward(output_gradient []f64) []f64 { 25 | mut input_deriv := a.input.clone() 26 | vector_apply_func(mut input_deriv, a.activ_prime) 27 | output := vector_element_wise_mul(output_gradient, input_deriv) 28 | return output 29 | } 30 | 31 | pub fn (mut a Activation) apply_grad(nb_elems_seen int, lr f64, momentum f64) { 32 | } 33 | 34 | pub fn (mut a Activation) reset() { 35 | } 36 | 37 | pub fn vector_apply_func(mut a []f64, f fn (n f64) f64) { 38 | for mut elem in a { 39 | elem = f(elem) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /neural_networks_acc/layer_base.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import vsl.la 4 | 5 | pub enum LayerType as i64 { 6 | dense 7 | activ 8 | } 9 | 10 | pub fn layer_type(l Layer) LayerType { 11 | match l { 12 | Dense { return .dense } 13 | Activation { return .activ } 14 | 
else { panic('strange') } 15 | } 16 | } 17 | 18 | pub fn layer_from_type(lt LayerType) Layer { 19 | match lt { 20 | .dense { return Dense{ 21 | weights: la.Matrix.new[f64](0, 0) 22 | weights_gradient: la.Matrix.new[f64](0, 0) 23 | old_weights_gradient: la.Matrix.new[f64](0, 0) 24 | } } 25 | .activ { return Activation{ 26 | activ: lrelu 27 | activ_prime: lrelu_prime 28 | } } 29 | } 30 | panic('Unknown LayerType value : ${lt} ${int(lt)}') 31 | } 32 | 33 | pub interface Layer { 34 | mut: 35 | input []f64 36 | output []f64 37 | forward(input []f64) []f64 38 | backward(output_gradient []f64) []f64 39 | apply_grad(nb_elems_seen int, lr f64, momentum f64) 40 | reset() 41 | } 42 | -------------------------------------------------------------------------------- /neural_networks_acc/layer_dense.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import vsl.la 4 | 5 | /* 6 | A Dense layer is also called a fully connected layer. 7 | It connects all the neurones of the previous layer to all the neurones of the next layer. 8 | */ 9 | 10 | pub struct Dense { 11 | pub mut: 12 | input_size i64 13 | output_size i64 14 | input []f64 15 | output []f64 16 | weights &la.Matrix[f64] 17 | weights_gradient &la.Matrix[f64] 18 | old_weights_gradient &la.Matrix[f64] 19 | bias []f64 20 | bias_gradient []f64 21 | old_bias_gradient []f64 22 | } 23 | 24 | pub fn Dense.new(input_size int, output_size int, weights_range f64, biases_range f64) Dense { 25 | return Dense{input_size, output_size, []f64{len: input_size}, []f64{len: output_size}, rand_matrix(output_size, 26 | input_size, weights_range), la.Matrix.new[f64](output_size, input_size), la.Matrix.new[f64](output_size, 27 | input_size), rand_array(output_size, biases_range), []f64{len: output_size}, []f64{len: output_size}} 28 | } 29 | 30 | pub fn (mut d Dense) reset() { // important, call after apply grad 31 | d.old_weights_gradient = d.weights_gradient.clone() 32 | d.weights_gradient = la.Matrix.new[f64](int(d.output_size), int(d.input_size)) 33 | d.old_bias_gradient = d.bias_gradient.clone() 34 | d.bias_gradient = []f64{len: int(d.output_size)} 35 | } 36 | 37 | pub fn (mut d Dense) forward(input []f64) []f64 { 38 | d.input = input.clone() 39 | without_bias := la.matrix_vector_mul(1.0, d.weights, d.input) 40 | d.output = la.vector_add(1.0, without_bias, 1.0, d.bias) 41 | return d.output 42 | } 43 | 44 | pub fn (mut d Dense) backward(output_gradient []f64) []f64 { 45 | la.matrix_add(mut d.weights_gradient, 1.0, la.vector_vector_tr_mul(1.0, output_gradient, 46 | d.input), 1.0, d.weights_gradient) 47 | d.bias_gradient = la.vector_add(1.0, output_gradient, 1.0, d.bias_gradient) 48 | return la.matrix_tr_vector_mul(1.0, d.weights, output_gradient) 49 | } 50 | 51 | pub fn (mut d Dense) apply_grad(nb_elems_seen int, lr f64, momentum f64) { 52 | if momentum > 0 { 53 | la.matrix_add(mut d.weights_gradient, lr / f64(nb_elems_seen), d.weights_gradient, 54 | lr * momentum, d.old_weights_gradient) 55 | la.matrix_add(mut d.weights, 1.0, d.weights_gradient, 1.0, d.weights) 56 | d.bias = la.vector_add(lr / f64(nb_elems_seen), d.bias_gradient, 1.0, d.bias) 57 | } else { 58 | la.matrix_add(mut d.weights, lr / f64(nb_elems_seen), d.weights_gradient, 1.0, 59 | d.weights) 60 | d.bias = la.vector_add(lr / f64(nb_elems_seen), d.bias_gradient, 1.0, d.bias) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /neural_networks_acc/neural_network.v: 
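[editor's note] save_model/load_model in the file below (and their twins in the neural_networks module above) serialize with os write_raw/read_raw, i.e. a raw dump in the host's native byte order (an assumption worth keeping in mind: such saves are not portable across machines with different endianness). The layout, read straight off the code, in order:

  i64  number of layers
  then, per layer:
    i32  LayerType tag (.dense = 0, .activ = 1)
    Dense:      i64 input_size, i64 output_size, then output_size*input_size f64 weights (row-major), then output_size f64 biases
    Activation: the ActivationFunctions enum value, written as-is by write_raw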
-------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import vsl.la 4 | import os 5 | import rand 6 | 7 | pub struct NeuralNetwork { 8 | pub mut: 9 | layers []Layer 10 | cost f64 11 | accuracy f64 12 | } 13 | 14 | pub fn NeuralNetwork.new(seed u32) NeuralNetwork { 15 | rand.seed([seed, 0]) 16 | return NeuralNetwork{} 17 | } 18 | 19 | pub fn (mut nn NeuralNetwork) add_layer(layer Layer) { 20 | nn.layers << layer 21 | } 22 | 23 | pub fn (mut nn NeuralNetwork) train(t_m TrainingMode) { 24 | println("\nTraining for $t_m.nb_epochs epochs:") 25 | match t_m { 26 | BackpropTrainingParams { nn.train_backprop(t_m) } 27 | MinibatchesBackpropTrainingParams { nn.train_minibatches_backprop(t_m) } 28 | else { exit_err('The training mode is not implemented') } 29 | } 30 | } 31 | 32 | pub fn (mut nn NeuralNetwork) forward_propagation(input []f64) []f64 { 33 | mut next_layer_input := input.clone() 34 | for mut layer in nn.layers { 35 | next_layer_input = layer.forward(next_layer_input) 36 | } 37 | return next_layer_input 38 | } 39 | 40 | pub fn (mut nn NeuralNetwork) backpropagation(expected_output []f64, output []f64, cost_prime fn ([]f64, []f64) []f64) { 41 | mut gradient := cost_prime(expected_output, output) 42 | for j := nn.layers.len - 1; j >= 0; j -= 1 { 43 | gradient = nn.layers[j].backward(gradient) 44 | } 45 | } 46 | 47 | pub fn (mut nn NeuralNetwork) apply_gradient_descent(nb_elems_seen int, lr f64, momentum f64) { 48 | for mut layer in nn.layers { 49 | layer.apply_grad(nb_elems_seen, lr, momentum) 50 | layer.reset() 51 | } 52 | } 53 | 54 | pub fn (mut nn NeuralNetwork) save_model(save_name string) { 55 | mut file := os.create(save_name) or { panic(err) } 56 | file.write_raw(i64(nn.layers.len)) or { panic(err) } 57 | for layer in nn.layers { 58 | l_type := layer_type(layer) 59 | file.write_raw(i32(l_type)) or { panic(err) } 60 | match layer { 61 | Dense { 62 | file.write_raw(layer.input_size) or { panic(err) } 63 | file.write_raw(layer.output_size) or { panic(err) } 64 | for elem in layer.weights.data { 65 | file.write_raw(elem) or { panic(err) } 66 | } 67 | for elem in layer.bias { 68 | file.write_raw(elem) or { panic(err) } 69 | } 70 | } 71 | Activation { 72 | file.write_raw(layer.activ_type) or { panic(err) } 73 | } 74 | else {} 75 | } 76 | } 77 | file.close() 78 | } 79 | 80 | @[noreturn] 81 | fn exit_err(message string) { 82 | println(message) 83 | exit(1) 84 | } 85 | 86 | pub fn (mut nn NeuralNetwork) load_model(save_name string) { 87 | mut load := os.open(save_name) or { exit_err("This save doesn't exist") } 88 | nb_layers := load.read_raw[i64]() or { panic(err) } 89 | println("$nb_layers Layers :") 90 | for _ in 0 .. 
nb_layers { 91 | ltype := load.read_raw[i32]() or { panic(err) } 92 | mut layer_base := layer_from_type(unsafe{LayerType(ltype)}) 93 | match mut layer_base { 94 | Dense { 95 | layer_base.input_size = load.read_raw[i64]() or { panic(err) } 96 | layer_base.output_size = load.read_raw[i64]() or { panic(err) } 97 | println("Dense ${layer_base.input_size} - ${layer_base.output_size}") 98 | matrix_size := int(layer_base.input_size * layer_base.output_size) 99 | layer_base.weights = la.Matrix.raw(int(layer_base.output_size), int(layer_base.input_size), 100 | []f64{len: matrix_size, init: index - index + load.read_raw[f64]() or { 101 | panic(err) 102 | }}) 103 | layer_base.weights_gradient = la.Matrix.new[f64](int(layer_base.output_size), 104 | int(layer_base.input_size)) 105 | layer_base.old_weights_gradient = la.Matrix.new[f64](int(layer_base.output_size), 106 | int(layer_base.input_size)) 107 | layer_base.bias = []f64{len: int(layer_base.output_size), init: index - index + load.read_raw[f64]() or { 108 | panic(err) 109 | }} 110 | layer_base.bias_gradient = []f64{len: int(layer_base.output_size)} 111 | layer_base.old_bias_gradient = []f64{len: int(layer_base.output_size)} 112 | } 113 | Activation { 114 | layer_base = Activation.new(load.read_raw[ActivationFunctions]() or { panic(err) }) 115 | println("Activation ${layer_base.activ_type}") 116 | } 117 | else {println("Problem ?")} 118 | } 119 | nn.add_layer(layer_base) 120 | } 121 | println("Finished loading the NN ${save_name}") 122 | } 123 | -------------------------------------------------------------------------------- /neural_networks_acc/training_mode_backprop.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | // An epoch is when the nn has seen the entire dataset 4 | pub struct BackpropTrainingParams { 5 | learning_rate f64 6 | momentum f64 7 | nb_epochs int 8 | classifier bool 9 | print_interval int 10 | cost_function CostFunctions 11 | training Dataset 12 | test Dataset 13 | test_params TestParams 14 | } 15 | 16 | fn (mut nn NeuralNetwork) train_backprop(t_p BackpropTrainingParams) { 17 | cost_fn, cost_prime := get_cost_function(t_p.cost_function) 18 | for epoch in 0 .. 
t_p.nb_epochs { 19 | mut cost := 0.0 20 | mut accuracy := 0.0 21 | print_epoch := t_p.print_interval != 0 && ((epoch + 1) % t_p.print_interval == 0 || epoch == 0) 22 | test_epoch := t_p.test_params.training_interval != 0 && ((epoch + 1) % t_p.test_params.training_interval == 0 || epoch == 0) 23 | for i, input in t_p.training.inputs { 24 | output := nn.forward_propagation(input) 25 | cost += cost_fn(t_p.training.expected_outputs[i], output) 26 | nn.backpropagation(t_p.training.expected_outputs[i], output, cost_prime) 27 | if t_p.classifier { 28 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_p.training.expected_outputs[i]) { 29 | accuracy += 1 30 | } 31 | } 32 | } 33 | accuracy /= f64(t_p.training.inputs.len)/100.0 // percentage over the training set (was divided by the test set size) 34 | cost /= t_p.training.inputs.len 35 | if print_epoch { 36 | if t_p.classifier { 37 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tCost : ${cost}\t-\tAccuracy : ${accuracy:.2}%') 38 | } else { 39 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tCost : ${cost}') 40 | } 41 | } 42 | nn.apply_gradient_descent(t_p.training.inputs.len, t_p.learning_rate, t_p.momentum) 43 | if test_epoch { 44 | nn.test(t_p) 45 | } 46 | nn.cost = cost 47 | nn.accuracy = accuracy 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /neural_networks_acc/training_mode_base.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | // An epoch is when the nn has seen the entire dataset 4 | interface TrainingMode { 5 | learning_rate f64 6 | nb_epochs int 7 | classifier bool 8 | cost_function CostFunctions 9 | training Dataset 10 | test Dataset 11 | test_params TestParams 12 | } 13 | 14 | pub struct Dataset { 15 | pub mut: 16 | inputs [][]f64 17 | expected_outputs [][]f64 18 | } 19 | 20 | pub fn (dataset Dataset) clone() Dataset { 21 | return Dataset{dataset.inputs.clone(), dataset.expected_outputs.clone()} 22 | } 23 | 24 | // [ start -> end ] 25 | // test_interval in epochs 26 | pub struct TestParams { 27 | print_start int 28 | print_end int 29 | training_interval int 30 | training_batch_interval int 31 | } 32 | 33 | pub fn (mut nn NeuralNetwork) test(t_m TrainingMode) { 34 | println("\nTest Dataset:") 35 | cost_fn, _ := get_cost_function(t_m.cost_function) 36 | mut cost := 0.0 37 | mut accuracy := 0.0 38 | for i, input in t_m.test.inputs { 39 | output := nn.forward_propagation(input) 40 | cost += cost_fn(t_m.test.expected_outputs[i], output) 41 | if t_m.classifier { 42 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_m.test.expected_outputs[i]) { 43 | accuracy += 1 44 | } 45 | } 46 | if i >= t_m.test_params.print_start && i <= t_m.test_params.print_end { // if there is an interval to print 47 | println("$i -> $output / ${t_m.test.expected_outputs[i]}") 48 | } 49 | } 50 | accuracy /= f64(t_m.test.inputs.len)/100.0 51 | cost /= t_m.test.inputs.len // average over the test set (was divided by the training set size) 52 | if t_m.classifier { 53 | println('Test Cost : ${cost} - Test Accuracy : ${accuracy:.2}%\n') 54 | } else { 55 | println('Test Cost : ${cost}\n') 56 | } 57 | } -------------------------------------------------------------------------------- /neural_networks_acc/training_mode_minibatches_backprop.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | pub struct MinibatchesBackpropTrainingParams { 4 | learning_rate f64 5 | momentum f64 6 | batch_size int = 1 7 | nb_epochs int 8 | classifier bool 9 | print_interval int 10 
| print_batch_interval int 11 | cost_function CostFunctions 12 | training Dataset 13 | test Dataset 14 | test_params TestParams 15 | } 16 | 17 | fn (mut nn NeuralNetwork) train_minibatches_backprop(t_p MinibatchesBackpropTrainingParams) { 18 | cost_fn, cost_prime := get_cost_function(t_p.cost_function) 19 | nb_batches := t_p.training.inputs.len / t_p.batch_size 20 | for epoch in 0 .. t_p.nb_epochs { 21 | mut epoch_error := 0.0 22 | mut epoch_accuracy := 0.0 23 | print_epoch := t_p.print_interval != 0 && ((epoch + 1) % t_p.print_interval == 0 || epoch == 0) 24 | test_epoch := t_p.test_params.training_interval != 0 && ((epoch + 1) % t_p.test_params.training_interval == 0 || epoch == 0) 25 | for batch in 0 .. nb_batches { // note: when batch_size does not divide the dataset size evenly, the remainder samples are dropped by this rounding 26 | mut error := 0.0 27 | mut accuracy := 0.0 28 | print_batch := t_p.print_batch_interval != 0 && ((batch + 1) % t_p.print_batch_interval == 0 || batch == 0) 29 | test_batch := t_p.test_params.training_batch_interval != 0 && ((batch + 1) % t_p.test_params.training_batch_interval == 0) 30 | for i, input in t_p.training.inputs[batch * t_p.batch_size..(batch + 1) * t_p.batch_size] { 31 | nb := batch * t_p.batch_size + i 32 | output := nn.forward_propagation(input) 33 | error += cost_fn(t_p.training.expected_outputs[nb], output) 34 | nn.backpropagation(t_p.training.expected_outputs[nb], output, cost_prime) 35 | if t_p.classifier { 36 | if match_output_array_to_number(output) == match_classifier_array_to_number(t_p.training.expected_outputs[nb]) { 37 | accuracy += 1 38 | } 39 | } 40 | } 41 | epoch_error += error 42 | epoch_accuracy += accuracy 43 | error /= t_p.batch_size 44 | accuracy /= f64(t_p.batch_size)/100.0 45 | nn.apply_gradient_descent(t_p.batch_size, t_p.learning_rate, t_p.momentum) 46 | if print_batch && print_epoch { 47 | if t_p.classifier { 48 | println(' batch ${batch + 1}/${nb_batches}\t-\tCost : ${error}\t-\tAccuracy : ${accuracy:.2}%') 49 | } else { 50 | println(' batch ${batch + 1}/${nb_batches}\t-\tCost : ${error}') 51 | } 52 | } 53 | if test_epoch && test_batch { 54 | nn.test(t_p) 55 | } 56 | } 57 | epoch_error /= (nb_batches*t_p.batch_size) 58 | epoch_accuracy /= f64(nb_batches*t_p.batch_size)/100.0 // f64 division; the old integer /100 truncated, and was 0 for datasets under 100 samples 59 | if print_epoch { 60 | if t_p.classifier { 61 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tAverage Cost : ${epoch_error}\t-\tAverage Accuracy : ${epoch_accuracy:.2}%') 62 | } else { 63 | println('Epoch ${epoch + 1}/${t_p.nb_epochs}\t-\tAverage Cost : ${epoch_error}') 64 | } 65 | } 66 | nn.cost = epoch_error 67 | nn.accuracy = epoch_accuracy 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /neural_networks_acc/utilities.v: -------------------------------------------------------------------------------- 1 | module neural_networks_acc 2 | 3 | import vsl.la 4 | import rand 5 | 6 | fn rand_array(nb int, range f64) []f64 { 7 | mut a := []f64{} 8 | for _ in 0 .. 
nb { 9 | a << rand.f64_in_range(-range, range) or { panic(err) } 10 | } 11 | return a 12 | } 13 | 14 | fn rand_2darray(nb_lines int, nb_cols int, range f64) [][]f64 { 15 | mut a := [][]f64{len: nb_lines, init: []f64{}} 16 | for mut line in a { 17 | line << rand_array(nb_cols, range) 18 | } 19 | return a 20 | } 21 | 22 | fn rand_matrix(nb_lines int, nb_cols int, range f64) &la.Matrix[f64] { 23 | return la.Matrix.deep2(rand_2darray(nb_lines, nb_cols, range)) 24 | } 25 | 26 | fn vector_element_wise_mul(u []f64, v []f64) []f64 { 27 | mut result := []f64{len: u.len} 28 | for i, elem in u { 29 | result[i] = elem * v[i] 30 | } 31 | return result 32 | } 33 | -------------------------------------------------------------------------------- /neural_networks_acc/v.mod: -------------------------------------------------------------------------------- 1 | Module { name: 'neural_networks' version: '0.0.0' description: '' license: 'MIT' dependencies:[vsl] } -------------------------------------------------------------------------------- /v.mod: -------------------------------------------------------------------------------- 1 | Module { 2 | name: 'neural_networks' 3 | author: 'eliyaan-nopana' 4 | version: '0.0.1' 5 | repo_url: 'https://github.com/Eliyaan/NeuralNetworks-V-Module' 6 | vcs: 'git' 7 | tags: [] 8 | description: '' 9 | license: 'MIT' 10 | } 11 | -------------------------------------------------------------------------------- /vblas/conversions.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | pub enum MemoryLayout { 4 | row_major = 101 5 | col_major = 102 6 | } 7 | 8 | pub enum Transpose { 9 | no_trans = 111 10 | trans = 112 11 | conj_trans = 113 12 | conj_no_trans = 114 13 | } 14 | 15 | pub enum Uplo { 16 | upper = 121 17 | lower = 122 18 | } 19 | 20 | pub enum Diagonal { 21 | non_unit = 131 22 | unit = 132 23 | } 24 | 25 | pub enum Side { 26 | left = 141 27 | right = 142 28 | } 29 | -------------------------------------------------------------------------------- /vblas/dgemm.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | // import runtime 4 | import sync 5 | import vsl.float.float64 6 | import math 7 | 8 | // dgemm performs one of the matrix-matrix operations 9 | // C = alpha * A * B + beta * C 10 | // C = alpha * Aᵀ * B + beta * C 11 | // C = alpha * A * Bᵀ + beta * C 12 | // C = alpha * Aᵀ * Bᵀ + beta * C 13 | // where A is an m×k or k×m dense matrix, B is an n×k or k×n dense matrix, C is 14 | // an m×n matrix, and alpha and beta are scalars. trans_a and trans_b specify whether A or 15 | // B are transposed. 16 | pub fn dgemm(trans_a Transpose, trans_b Transpose, m int, n int, k int, alpha f64, a []f64, lda int, b []f64, ldb int, beta f64, mut c []f64, ldc int) { 17 | if m < 0 { 18 | panic(mlt0) 19 | } 20 | if n < 0 { 21 | panic(nlt0) 22 | } 23 | if k < 0 { 24 | panic(klt0) 25 | } 26 | a_trans := trans_a == .trans || trans_a == .conj_trans 27 | if a_trans { 28 | if lda < math.max(1, m) { 29 | panic(bad_ld_a) 30 | } 31 | } else { 32 | if lda < math.max(1, k) { 33 | panic(bad_ld_a) 34 | } 35 | } 36 | b_trans := trans_b == .trans || trans_b == .conj_trans 37 | if b_trans { 38 | if ldb < math.max(1, k) { 39 | panic(bad_ld_b) 40 | } 41 | } else { 42 | if ldb < math.max(1, n) { 43 | panic(bad_ld_b) 44 | } 45 | } 46 | if ldc < math.max(1, n) { 47 | panic(bad_ld_c) 48 | } 49 | 50 | // Quick return if possible. 
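// [editor's note, hedged] Row-major storage is assumed throughout this file: a
// non-transposed m×k A stores element (i, j) at a[i*lda + j], which is why the
// checks above require lda >= k (the row length) in that case. The early return
// just below covers an empty C; a second early return further down fires when
// alpha == 0 or k == 0 while beta == 1, since C would then be left unchanged.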
51 | if m == 0 || n == 0 { 52 | return 53 | } 54 | 55 | // For zero matrix size the following slice length checks are trivially satisfied. 56 | if a_trans { 57 | if a.len < (k - 1) * lda + m { 58 | panic(short_a) 59 | } 60 | } else { 61 | if a.len < (m - 1) * lda + k { 62 | panic(short_a) 63 | } 64 | } 65 | if b_trans { 66 | if b.len < (n - 1) * ldb + k { 67 | panic(short_b) 68 | } 69 | } else { 70 | if b.len < (k - 1) * ldb + n { 71 | panic(short_b) 72 | } 73 | } 74 | if c.len < (m - 1) * ldc + n { 75 | panic(short_c) 76 | } 77 | 78 | // Quick return if possible. 79 | if (alpha == 0 || k == 0) && beta == 1 { 80 | return 81 | } 82 | 83 | // scale c 84 | if beta != 1 { 85 | if beta == 0 { 86 | for i in 0 .. m { 87 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 88 | for j, _ in ctmp { 89 | ctmp[j] = 0 90 | } 91 | } 92 | } else { 93 | for i in 0 .. m { 94 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 95 | for j, _ in ctmp { 96 | ctmp[j] *= beta 97 | } 98 | } 99 | } 100 | } 101 | 102 | dgemm_parallel(a_trans, b_trans, m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 103 | } 104 | 105 | fn dgemm_parallel(a_trans bool, b_trans bool, m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 106 | // dgemm_parallel computes a parallel matrix multiplication by partitioning 107 | // a and b into sub-blocks, and updating c with the multiplication of the sub-block 108 | // In all cases, 109 | // A = [ A_11 A_12 ... A_1j 110 | // A_21 A_22 ... A_2j 111 | // ... 112 | // A_i1 A_i2 ... A_ij] 113 | // 114 | // and same for B. All of the submatrix sizes are block_size×block_size except 115 | // at the edges. 116 | // 117 | // In all cases, there is one dimension for each matrix along which 118 | // C must be updated sequentially. 119 | // Cij = \sum_k Aik Bki, (A * B) 120 | // Cij = \sum_k Aki Bkj, (Aᵀ * B) 121 | // Cij = \sum_k Aik Bjk, (A * Bᵀ) 122 | // Cij = \sum_k Aki Bjk, (Aᵀ * Bᵀ) 123 | // 124 | // This code computes one {i, j} block sequentially along the k dimension, 125 | // and computes all of the {i, j} blocks concurrently. This 126 | // partitioning allows Cij to be updated in-place without race-conditions. 127 | // Instead of launching a goroutine for each possible concurrent computation, 128 | // a number of worker goroutines are created and channels are used to pass 129 | // available and completed cases. 130 | // 131 | // http://alexkr.com/docs/matrixmult.pdf is a good reference on matrix-matrix 132 | // multiplies, though this code does not copy matrices to attempt to eliminate 133 | // cache misses. 134 | 135 | max_k_len := k 136 | par_blocks := blocks(m, block_size) * blocks(n, block_size) 137 | if par_blocks < min_par_block { 138 | // The matrix multiplication is small in the dimensions where it can be 139 | // computed concurrently. Just do it in serial. 140 | dgemm_serial(a_trans, b_trans, m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 141 | return 142 | } 143 | 144 | // worker_limit acts a number of maximum concurrent workers, 145 | // with the limit set to the number of procs available. 
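// [editor's note, hedged] Concretely: with tile side block_size, the grid has
// blocks(m, block_size) * blocks(n, block_size) independent {i, j} tiles. Each
// spawned goroutine owns exactly one tile of C and walks the k dimension
// serially, so no two goroutines ever write the same element of C; that is
// what makes the in-place update race-free. As written, one goroutine is
// spawned per tile, and the commented-out worker_limit channel below would cap
// the concurrency at runtime.nr_jobs().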
146 | // worker_limit := chan int{cap: runtime.nr_jobs()} 147 | 148 | // wg is used to wait for all workers to finish 149 | mut wg := sync.new_waitgroup() 150 | wg.add(par_blocks) 151 | defer { 152 | wg.wait() 153 | } 154 | 155 | for i := 0; i < m; i += block_size { 156 | for j := 0; j < n; j += block_size { 157 | // worker_limit <- 0 158 | go fn (a_trans bool, b_trans bool, m int, n int, max_k_len int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64, i int, j int, mut wg sync.WaitGroup) { 159 | defer { 160 | wg.done() 161 | // <-worker_limit 162 | } 163 | 164 | mut leni := block_size 165 | if i + leni > m { 166 | leni = m - i 167 | } 168 | mut lenj := block_size 169 | if j + lenj > n { 170 | lenj = n - j 171 | } 172 | 173 | mut c_sub := slice_view_f64(*c, ldc, i, j, leni, lenj) 174 | 175 | // Compute A_ik B_kj for all k 176 | for k := 0; k < max_k_len; k += block_size { 177 | mut lenk := block_size 178 | if k + lenk > max_k_len { 179 | lenk = max_k_len - k 180 | } 181 | mut a_sub := []f64{} 182 | mut b_sub := []f64{} 183 | if a_trans { 184 | a_sub = slice_view_f64(a, lda, k, i, lenk, leni) 185 | } else { 186 | a_sub = slice_view_f64(a, lda, i, k, leni, lenk) 187 | } 188 | if b_trans { 189 | b_sub = slice_view_f64(b, ldb, j, k, lenj, lenk) 190 | } else { 191 | b_sub = slice_view_f64(b, ldb, k, j, lenk, lenj) 192 | } 193 | dgemm_serial(a_trans, b_trans, leni, lenj, lenk, a_sub, lda, b_sub, 194 | ldb, mut c_sub, ldc, alpha) 195 | } 196 | }(a_trans, b_trans, m, n, max_k_len, a, lda, b, ldb, mut c, ldc, alpha, i, 197 | j, mut wg) 198 | } 199 | } 200 | } 201 | 202 | // dgemm_serial is serial matrix multiply 203 | fn dgemm_serial(a_trans bool, b_trans bool, m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 204 | if !a_trans && !b_trans { 205 | dgemm_serial_not_not(m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 206 | return 207 | } 208 | if a_trans && !b_trans { 209 | dgemm_serial_trans_not(m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 210 | return 211 | } 212 | if !a_trans && b_trans { 213 | dgemm_serial_not_trans(m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 214 | return 215 | } 216 | if a_trans && b_trans { 217 | dgemm_serial_trans_trans(m, n, k, a, lda, b, ldb, mut c, ldc, alpha) 218 | return 219 | } 220 | panic('unreachable') 221 | } 222 | 223 | // dgemm_serial where neither a nor b are transposed 224 | fn dgemm_serial_not_not(m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 225 | // This slicing style is used instead of literal a[i*stride + j] indexing 226 | // because it is approximately 5 times faster. 227 | for i in 0 .. m { 228 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 229 | for l, v in a[i * lda..i * lda + k] { 230 | tmp := alpha * v 231 | if tmp != 0 { 232 | float64.axpy_unitary(tmp, b[l * ldb..l * ldb + n], mut ctmp) 233 | } 234 | } 235 | } 236 | } 237 | 238 | // dgemm_serial where a is transposed and b is not 239 | fn dgemm_serial_trans_not(m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 240 | // This slicing style is used instead of literal a[i*stride + j] indexing 241 | // because it is approximately 5 times faster. 
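// [editor's note, hedged] In this Aᵀ*B kernel the k loop (rows of both A and
// B) is outermost: each pass reads one row of B once and axpy-accumulates it
// into the rows of C selected by the entries of the matching row of A; the
// tmp != 0 test skips whole axpy calls for zero entries of alpha * A.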
242 | for l := 0; l < k; l++ { 243 | btmp := b[l * ldb..l * ldb + n] 244 | for i, v in a[l * lda..l * lda + m] { 245 | tmp := alpha * v 246 | if tmp != 0 { 247 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 248 | float64.axpy_unitary(tmp, btmp, mut ctmp) 249 | } 250 | } 251 | } 252 | } 253 | 254 | // dgemm_serial where a is not transposed and b is 255 | fn dgemm_serial_not_trans(m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 256 | // This slicing style is used instead of literal a[i*stride + j] indexing 257 | // because it is approximately 5 times faster. 258 | for i in 0 .. m { 259 | atmp := a[i * lda..i * lda + k] 260 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 261 | for j in 0 .. n { 262 | ctmp[j] += alpha * float64.dot_unitary(atmp, b[j * ldb..j * ldb + k]) 263 | } 264 | } 265 | } 266 | 267 | // dgemm_serial where both are transposed 268 | fn dgemm_serial_trans_trans(m int, n int, k int, a []f64, lda int, b []f64, ldb int, mut c []f64, ldc int, alpha f64) { 269 | // This slicing style is used instead of literal a[i*stride + j] indexing 270 | // because it is approximately 5 times faster. 271 | for l := 0; l < k; l++ { 272 | for i, v in a[l * lda..l * lda + m] { 273 | tmp := alpha * v 274 | if tmp != 0 { 275 | mut ctmp := unsafe { c[i * ldc..i * ldc + n] } 276 | float64.axpy_inc(tmp, b[l..], mut ctmp, u32(n), u32(ldb), 1, 0, 0) 277 | } 278 | } 279 | } 280 | } 281 | 282 | fn slice_view_f64(a []f64, lda int, i int, j int, r int, c int) []f64 { 283 | return a[i * lda + j..(i + r - 1) * lda + j + c] 284 | } 285 | -------------------------------------------------------------------------------- /vblas/dgemv.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | import vsl.float.float64 4 | import math 5 | 6 | // dgemv computes 7 | // y = alpha * A * x + beta * y if trans_a = .no_trans 8 | // y = alpha * Aᵀ * x + beta * y if trans_a = .trans or .conj_trans 9 | // where A is an m×n dense matrix, x and y are vectors, and alpha and beta are scalars. 10 | pub fn dgemv(trans_a Transpose, m int, n int, alpha f64, a []f64, lda int, x []f64, incx int, beta f64, mut y []f64, incy int) { 11 | if m < 0 { 12 | panic(mlt0) 13 | } 14 | if n < 0 { 15 | panic(nlt0) 16 | } 17 | if lda < math.max(1, n) { 18 | panic(bad_ld_a) 19 | } 20 | if incx == 0 { 21 | panic(zero_incx) 22 | } 23 | if incy == 0 { 24 | panic(zero_incy) 25 | } 26 | // Set up indexes 27 | mut len_x := m 28 | mut len_y := n 29 | if trans_a == .no_trans { 30 | len_x = n 31 | len_y = m 32 | } 33 | 34 | // Quick return if possible 35 | if m == 0 || n == 0 { 36 | return 37 | } 38 | 39 | if (incx > 0 && (len_x - 1) * incx >= x.len) || (incx < 0 && (1 - len_x) * incx >= x.len) { 40 | panic(short_x) 41 | } 42 | if (incy > 0 && (len_y - 1) * incy >= y.len) || (incy < 0 && (1 - len_y) * incy >= y.len) { 43 | panic(short_y) 44 | } 45 | if a.len < lda * (m - 1) + n { 46 | panic(short_a) 47 | } 48 | 49 | // Quick return if possible 50 | if alpha == 0.0 && beta == 1 { 51 | return 52 | } 53 | 54 | if alpha == 0.0 { 55 | // First form y = beta * y 56 | if incy > 0 { 57 | dscal(len_y, beta, mut y, incy) 58 | } else { 59 | dscal(len_y, beta, mut y, -incy) 60 | } 61 | return 62 | } 63 | 64 | // Form y = alpha * A * x + y 65 | if trans_a == .no_trans { 66 | float64.gemv_n(u32(m), u32(n), alpha, a, u32(lda), x, u32(incx), beta, mut y, 67 | u32(incy)) 68 | return 69 | } 70 | // Cases where a is transposed. 
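// [editor's note, hedged] gemv_n and gemv_t are the vsl.float.float64 kernels
// for y = alpha*A*x + beta*y and y = alpha*Aᵀ*x + beta*y respectively; they
// take unsigned dimensions and strides, hence the u32 casts around the
// arguments here (a description of an external API, stated as an assumption).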
71 | float64.gemv_t(u32(m), u32(n), alpha, a, u32(lda), x, u32(incx), beta, mut y, u32(incy)) 72 | } 73 | -------------------------------------------------------------------------------- /vblas/dgemv_test.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | fn test_dgemv_no_trans_1() { 4 | expected := [0.0, 0, 0, 0, 0] 5 | a := [ 6 | [4.1, 6.2, 8.1], 7 | [9.6, 3.5, 9.1], 8 | [10.0, 7, 3], 9 | [1.0, 1, 2], 10 | [9.0, 2, 5], 11 | ] 12 | m := a.len 13 | n := a[0].len 14 | alpha := 0.0 15 | beta := 0.0 16 | incx := 1 17 | incy := 1 18 | x := [1.0, 2, 3] 19 | mut y := [7.0, 8, 9, 10, 11] 20 | dgemv(.no_trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 21 | assert expected == y 22 | } 23 | 24 | fn test_dgemv_no_trans_2() { 25 | expected := [7.0, 8, 9, 10, 11] 26 | a := [ 27 | [4.1, 6.2, 8.1], 28 | [9.6, 3.5, 9.1], 29 | [10.0, 7, 3], 30 | [1.0, 1, 2], 31 | [9.0, 2, 5], 32 | ] 33 | m := a.len 34 | n := a[0].len 35 | alpha := 0.0 36 | beta := 1.0 37 | incx := 1 38 | incy := 1 39 | x := [1.0, 2, 3] 40 | mut y := [7.0, 8, 9, 10, 11] 41 | dgemv(.no_trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 42 | assert expected == y 43 | } 44 | 45 | fn test_dgemv_no_trans_3() { 46 | expected := [40.8, 43.9, 33, 9, 28] 47 | a := [ 48 | [4.1, 6.2, 8.1], 49 | [9.6, 3.5, 9.1], 50 | [10.0, 7, 3], 51 | [1.0, 1, 2], 52 | [9.0, 2, 5], 53 | ] 54 | m := a.len 55 | n := a[0].len 56 | alpha := 1.0 57 | beta := 0.0 58 | incx := 1 59 | incy := 1 60 | x := [1.0, 2, 3] 61 | mut y := [7.0, 8, 9, 10, 11] 62 | dgemv(.no_trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 63 | assert expected == y 64 | } 65 | 66 | fn test_dgemv_no_trans_4() { 67 | expected := [284.4, 303.2, 210, 12, 158] 68 | a := [ 69 | [4.1, 6.2, 8.1], 70 | [9.6, 3.5, 9.1], 71 | [10.0, 7, 3], 72 | [1.0, 1, 2], 73 | [9.0, 2, 5], 74 | ] 75 | m := a.len 76 | n := a[0].len 77 | alpha := 8.0 78 | beta := -6.0 79 | incx := 1 80 | incy := 1 81 | x := [1.0, 2, 3] 82 | mut y := [7.0, 8, 9, 10, 11] 83 | dgemv(.no_trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 84 | assert expected == y 85 | } 86 | 87 | fn test_dgemv_trans_1() { 88 | expected := [0.0, 0, 0] 89 | a := [ 90 | [4.1, 6.2, 8.1], 91 | [9.6, 3.5, 9.1], 92 | [10.0, 7, 3], 93 | [1.0, 1, 2], 94 | [9.0, 2, 5], 95 | ] 96 | m := a.len 97 | n := a[0].len 98 | alpha := 0.0 99 | beta := 0.0 100 | incx := 1 101 | incy := 1 102 | x := [1.0, 2, 3, -4, 5] 103 | mut y := [7.0, 8, 9] 104 | dgemv(.trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 105 | assert expected == y 106 | } 107 | 108 | fn test_dgemv_trans_2() { 109 | expected := [7.0, 8, 9] 110 | a := [ 111 | [4.1, 6.2, 8.1], 112 | [9.6, 3.5, 9.1], 113 | [10.0, 7, 3], 114 | [1.0, 1, 2], 115 | [9.0, 2, 5], 116 | ] 117 | m := a.len 118 | n := a[0].len 119 | alpha := 0.0 120 | beta := 1.0 121 | incx := 1 122 | incy := 1 123 | x := [1.0, 2, 3, -4, 5] 124 | mut y := [7.0, 8, 9] 125 | dgemv(.trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 126 | assert expected == y 127 | } 128 | 129 | fn test_dgemv_trans_3() { 130 | expected := [94.3, 40.2, 52.3] 131 | a := [ 132 | [4.1, 6.2, 8.1], 133 | [9.6, 3.5, 9.1], 134 | [10.0, 7, 3], 135 | [1.0, 1, 2], 136 | [9.0, 2, 5], 137 | ] 138 | m := a.len 139 | n := a[0].len 140 | alpha := 1.0 141 | beta := 0.0 142 | incx := 1 143 | incy := 1 144 | x := [1.0, 2, 3, -4, 5] 145 | mut y := [7.0, 8, 9] 146 | dgemv(.trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 147 | 
assert expected == y 148 | } 149 | 150 | fn test_dgemv_trans_4() { 151 | expected := [712.4, 273.6, 364.4] 152 | a := [ 153 | [4.1, 6.2, 8.1], 154 | [9.6, 3.5, 9.1], 155 | [10.0, 7, 3], 156 | [1.0, 1, 2], 157 | [9.0, 2, 5], 158 | ] 159 | m := a.len 160 | n := a[0].len 161 | alpha := 8.0 162 | beta := -6.0 163 | incx := 1 164 | incy := 1 165 | x := [1.0, 2, 3, -4, 5] 166 | mut y := [7.0, 8, 9] 167 | dgemv(.trans, m, n, alpha, flatten(a), a[0].len, x, incx, beta, mut y, incy) 168 | assert expected == y 169 | } 170 | -------------------------------------------------------------------------------- /vblas/error.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | // Panic strings used during parameter checks. 4 | // This list is duplicated in netlib/blas/netlib. Keep in sync. 5 | pub const zero_incx = 'blas: zero x index increment' 6 | pub const zero_incy = 'blas: zero y index increment' 7 | 8 | pub const mlt0 = 'blas: m < 0' 9 | pub const nlt0 = 'blas: n < 0' 10 | pub const klt0 = 'blas: k < 0' 11 | pub const kllt0 = 'blas: kl < 0' 12 | pub const kult0 = 'blas: ku < 0' 13 | 14 | pub const bad_uplo = 'blas: illegal triangle' 15 | pub const bad_transpose = 'blas: illegal transpose' 16 | pub const bad_diag = 'blas: illegal diagonal' 17 | pub const bad_side = 'blas: illegal side' 18 | pub const bad_flag = 'blas: illegal rotm flag' 19 | 20 | pub const bad_ld_a = 'blas: bad leading dimension of A' 21 | pub const bad_ld_b = 'blas: bad leading dimension of B' 22 | pub const bad_ld_c = 'blas: bad leading dimension of C' 23 | 24 | pub const short_x = 'blas: insufficient length of x' 25 | pub const short_y = 'blas: insufficient length of y' 26 | pub const short_ap = 'blas: insufficient length of ap' 27 | pub const short_a = 'blas: insufficient length of a' 28 | pub const short_b = 'blas: insufficient length of b' 29 | pub const short_c = 'blas: insufficient length of c' 30 | -------------------------------------------------------------------------------- /vblas/level1f64.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | import vsl.float.float64 4 | import math 5 | 6 | // dnrm2 computes the Euclidean norm of a vector, 7 | // sqrt(\sum_i x[i] * x[i]). 8 | // This function returns 0 if incx is negative. 9 | pub fn dnrm2(n int, x []f64, incx int) f64 { 10 | if incx < 1 { 11 | if incx == 0 { 12 | panic(zero_incx) 13 | } 14 | return 0.0 15 | } 16 | if x.len <= (n - 1) * incx { 17 | panic(short_x) 18 | } 19 | if n < 2 { 20 | if n == 1 { 21 | return math.abs(x[0]) 22 | } 23 | if n == 0 { 24 | return 0.0 25 | } 26 | panic(nlt0) 27 | } 28 | if incx == 1 { 29 | return float64.l2_norm_unitary(x[..n]) 30 | } 31 | return float64.l2_norm_inc(x, u32(n), u32(incx)) 32 | } 33 | 34 | // dasum computes the sum of the absolute values of the elements of x. 35 | // \sum_i |x[i]| 36 | // dasum returns 0 if incx is negative. 37 | pub fn dasum(n int, x []f64, incx int) f64 { 38 | mut sum := 0.0 39 | if n < 0 { 40 | panic(nlt0) 41 | } 42 | if incx < 1 { 43 | if incx == 0 { 44 | panic(zero_incx) 45 | } 46 | return 0.0 47 | } 48 | if x.len <= (n - 1) * incx { 49 | panic(short_x) 50 | } 51 | if incx == 1 { 52 | for v in x[..n] { 53 | sum += math.abs(v) 54 | } 55 | return sum 56 | } 57 | for i in 0 .. n { 58 | sum += math.abs(x[i * incx]) 59 | } 60 | return sum 61 | } 62 | 63 | // idamax returns the index of an element of x with the largest absolute value. 64 | // If there are multiple such indices the earliest is returned. 
65 | // idamax returns -1 if n == 0. 66 | pub fn idamax(n int, x []f64, incx int) int { 67 | if incx < 1 { 68 | if incx == 0 { 69 | panic(zero_incx) 70 | } 71 | return 0 72 | } 73 | if x.len <= (n - 1) * incx { 74 | panic(short_x) 75 | } 76 | if n < 2 { 77 | if n == 1 { 78 | return 0 79 | } 80 | if n == 0 { 81 | return -1 // Netlib returns invalid index when n == 0. 82 | } 83 | panic(nlt0) 84 | } 85 | mut idx := 0 86 | mut max := math.abs(x[0]) 87 | if incx == 1 { 88 | for i, v in x[..n] { 89 | abs_v := math.abs(v) 90 | if abs_v > max { 91 | max = abs_v 92 | idx = i 93 | } 94 | } 95 | return idx 96 | } 97 | mut ix := incx 98 | for i in 1 .. n { 99 | v := x[ix] 100 | abs_v := math.abs(v) 101 | if abs_v > max { 102 | max = abs_v 103 | idx = i 104 | } 105 | ix += incx 106 | } 107 | return idx 108 | } 109 | 110 | // dswap exchanges the elements of two vectors. 111 | // x[i], y[i] = y[i], x[i] for all i 112 | pub fn dswap(n int, mut x []f64, incx int, mut y []f64, incy int) { 113 | if incx == 0 { 114 | panic(zero_incx) 115 | } 116 | if incy == 0 { 117 | panic(zero_incy) 118 | } 119 | if n < 1 { 120 | if n == 0 { 121 | return 122 | } 123 | panic(nlt0) 124 | } 125 | if (incx > 0 && x.len <= (n - 1) * incx) || (incx < 0 && x.len <= (1 - n) * incx) { 126 | panic(short_x) 127 | } 128 | if (incy > 0 && y.len <= (n - 1) * incy) || (incy < 0 && y.len <= (1 - n) * incy) { 129 | panic(short_y) 130 | } 131 | if incx == 1 && incy == 1 { 132 | for i, v in x[..n] { 133 | x[i], y[i] = y[i], v 134 | } 135 | return 136 | } 137 | mut ix := 0 138 | mut iy := 0 139 | if incx < 0 { 140 | ix = (-n + 1) * incx 141 | } 142 | if incy < 0 { 143 | iy = (-n + 1) * incy 144 | } 145 | for _ in 0 .. n { 146 | tmp := x[ix] 147 | x[ix] = y[iy] 148 | y[iy] = tmp 149 | ix += incx 150 | iy += incy 151 | } 152 | } 153 | 154 | // dcopy copies the elements of x into the elements of y. 155 | // y[i] = x[i] for all i 156 | pub fn dcopy(n int, x []f64, incx int, mut y []f64, incy int) { 157 | if incx == 0 { 158 | panic(zero_incx) 159 | } 160 | if incy == 0 { 161 | panic(zero_incy) 162 | } 163 | if n < 1 { 164 | if n == 0 { 165 | return 166 | } 167 | panic(nlt0) 168 | } 169 | if (incx > 0 && x.len <= (n - 1) * incx) || (incx < 0 && x.len <= (1 - n) * incx) { 170 | panic(short_x) 171 | } 172 | if (incy > 0 && y.len <= (n - 1) * incy) || (incy < 0 && y.len <= (1 - n) * incy) { 173 | panic(short_y) 174 | } 175 | if incx == 1 && incy == 1 { 176 | for i in 0 .. n { 177 | y[i] = x[i] 178 | } 179 | return 180 | } 181 | mut ix := 0 182 | mut iy := 0 183 | if incx < 0 { 184 | ix = (-n + 1) * incx 185 | } 186 | if incy < 0 { 187 | iy = (-n + 1) * incy 188 | } 189 | for _ in 0 .. 
n { 190 | y[iy] = x[ix] 191 | ix += incx 192 | iy += incy 193 | } 194 | } 195 | 196 | // daxpy adds alpha times x to y 197 | // y[i] += alpha * x[i] for all i 198 | pub fn daxpy(n int, alpha f64, x []f64, incx int, mut y []f64, incy int) { 199 | if incx == 0 { 200 | panic(zero_incx) 201 | } 202 | if incy == 0 { 203 | panic(zero_incy) 204 | } 205 | if n < 1 { 206 | if n == 0 { 207 | return 208 | } 209 | panic(nlt0) 210 | } 211 | if (incx > 0 && x.len <= (n - 1) * incx) || (incx < 0 && x.len <= (1 - n) * incx) { 212 | panic(short_x) 213 | } 214 | if (incy > 0 && y.len <= (n - 1) * incy) || (incy < 0 && y.len <= (1 - n) * incy) { 215 | panic(short_y) 216 | } 217 | if alpha == 0 { 218 | return 219 | } 220 | if incx == 1 && incy == 1 { 221 | float64.axpy_unitary(alpha, x[..n], mut y[..n]) 222 | return 223 | } 224 | mut ix := 0 225 | mut iy := 0 226 | if incx < 0 { 227 | ix = (-n + 1) * incx 228 | } 229 | if incy < 0 { 230 | iy = (-n + 1) * incy 231 | } 232 | float64.axpy_inc(alpha, x, mut y, u32(n), u32(incx), u32(incy), u32(ix), u32(iy)) 233 | } 234 | 235 | // drotg computes the plane rotation 236 | // _ _ _ _ _ _ 237 | // | c s | | a | | r | 238 | // | -s c | * | b | = | 0 | 239 | // ‾ ‾ ‾ ‾ ‾ ‾ 240 | // where 241 | // r = ±√(a^2 + b^2) 242 | // c = a/r, the cosine of the plane rotation 243 | // s = b/r, the sine of the plane rotation 244 | // 245 | // NOTE: There is a discrepancy between the reference implementation and the BLAS 246 | // technical manual regarding the sign for r when a or b are zero. 247 | // drotg agrees with the definition in the manual and other 248 | // common BLAS implementations. 249 | pub fn drotg(a f64, b f64) (f64, f64, f64, f64) { 250 | if b == 0 && a == 0 { 251 | return 1.0, 0.0, a, 0.0 252 | } 253 | abs_a := math.abs(a) 254 | abs_b := math.abs(b) 255 | agtb := abs_a > abs_b 256 | mut r := math.hypot(a, b) 257 | if agtb { 258 | r = math.copysign(r, a) 259 | } else { 260 | r = math.copysign(r, b) 261 | } 262 | mut c := a / r 263 | mut s := b / r 264 | mut z := 0.0 265 | if agtb { 266 | z = s 267 | } else if c != 0 { 268 | // r == 0 case handled above 269 | z = 1 / c 270 | } else { 271 | z = 1 272 | } 273 | return c, s, r, z 274 | } 275 | 276 | // drot applies a plane transformation. 277 | // x[i] = c * x[i] + s * y[i] 278 | // y[i] = c * y[i] - s * x[i] 279 | pub fn drot(n int, mut x []f64, incx int, mut y []f64, incy int, c f64, s f64) { 280 | if incx == 0 { 281 | panic(zero_incx) 282 | } 283 | if incy == 0 { 284 | panic(zero_incy) 285 | } 286 | if n < 1 { 287 | if n == 0 { 288 | return 289 | } 290 | panic(nlt0) 291 | } 292 | if (incx > 0 && x.len <= (n - 1) * incx) || (incx < 0 && x.len <= (1 - n) * incx) { 293 | panic(short_x) 294 | } 295 | if (incy > 0 && y.len <= (n - 1) * incy) || (incy < 0 && y.len <= (1 - n) * incy) { 296 | panic(short_y) 297 | } 298 | if incx == 1 && incy == 1 { 299 | for i, vx in x[..n] { 300 | vy := y[i] 301 | x[i] = c * vx + s * vy 302 | y[i] = c * vy - s * vx 303 | } 304 | return 305 | } 306 | mut ix := 0 307 | mut iy := 0 308 | if incx < 0 { 309 | ix = (-n + 1) * incx 310 | } 311 | if incy < 0 { 312 | iy = (-n + 1) * incy 313 | } 314 | for _ in 0 .. n { 315 | vx := x[ix] 316 | vy := y[iy] 317 | x[ix] = c * vx + s * vy 318 | y[iy] = c * vy - s * vx 319 | ix += incx 320 | iy += incy 321 | } 322 | } 323 | 324 | // dscal scales x by alpha. 325 | // x[i] *= alpha 326 | // dscal has no effect if incx < 0. 
327 | pub fn dscal(n int, alpha f64, mut x []f64, incx int) { 328 | if incx < 1 { 329 | if incx == 0 { 330 | panic(zero_incx) 331 | } 332 | return 333 | } 334 | if n < 1 { 335 | if n == 0 { 336 | return 337 | } 338 | panic(nlt0) 339 | } 340 | if (n - 1) * incx >= x.len { 341 | panic(short_x) 342 | } 343 | if alpha == 0 { 344 | if incx == 1 { 345 | for i in 0 .. n { 346 | x[i] = 0 347 | } 348 | return 349 | } 350 | for ix := 0; ix < n * incx; ix += incx { 351 | x[ix] = 0 352 | } 353 | return 354 | } 355 | if incx == 1 { 356 | float64.scal_unitary(alpha, mut x[..n]) 357 | return 358 | } 359 | float64.scal_inc(alpha, mut x, u32(n), u32(incx)) 360 | } 361 | -------------------------------------------------------------------------------- /vblas/level1f64_ddot.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | import vsl.float.float64 4 | 5 | // ddot computes the dot product of the two vectors 6 | // \sum_i x[i]*y[i] 7 | pub fn ddot(n int, x []f64, incx int, y []f64, incy int) f64 { 8 | if incx == 0 { 9 | panic(zero_incx) 10 | } 11 | if incy == 0 { 12 | panic(zero_incy) 13 | } 14 | if n <= 0 { 15 | if n == 0 { 16 | return 0 17 | } 18 | panic(nlt0) 19 | } 20 | if incx == 1 && incy == 1 { 21 | if x.len < n { 22 | panic(short_x) 23 | } 24 | if y.len < n { 25 | panic(short_y) 26 | } 27 | return float64.dot_unitary(x[..n], y[..n]) 28 | } 29 | mut ix := 0 30 | mut iy := 0 31 | if incx < 0 { 32 | ix = (-n + 1) * incx 33 | } 34 | if incy < 0 { 35 | iy = (-n + 1) * incy 36 | } 37 | if ix >= x.len || ix + (n - 1) * incx >= x.len { 38 | panic(short_x) 39 | } 40 | if iy >= y.len || iy + (n - 1) * incy >= y.len { 41 | panic(short_y) 42 | } 43 | return float64.dot_inc(x, y, u32(n), u32(incx), u32(incy), u32(ix), u32(iy)) 44 | } 45 | -------------------------------------------------------------------------------- /vblas/level3f64.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | import vsl.float.float64 4 | import math 5 | 6 | // dsyrk performs one of the symmetric rank-k operations 7 | // C = alpha * A * Aᵀ + beta * C if trans_a == .no_trans 8 | // C = alpha * Aᵀ * A + beta * C if trans_a == .trans or trans_a == .conj_trans 9 | // where A is an n×k or k×n matrix, C is an n×n symmetric matrix, and alpha and 10 | // beta are scalars. 11 | pub fn dsyrk(ul Uplo, trans_a Transpose, n int, k int, alpha f64, a []f64, lda int, beta f64, mut c []f64, ldc int) { 12 | if n < 0 { 13 | panic(nlt0) 14 | } 15 | if k < 0 { 16 | panic(klt0) 17 | } 18 | mut row := k 19 | mut col := n 20 | if trans_a == .no_trans { 21 | row = n 22 | col = k 23 | } 24 | if lda < math.max(1, col) { 25 | panic(bad_ld_a) 26 | } 27 | if ldc < math.max(1, n) { 28 | panic(bad_ld_c) 29 | } 30 | 31 | // Quick return if possible. 32 | if n == 0 { 33 | return 34 | } 35 | 36 | // For zero matrix size the following slice length checks are trivially satisfied. 37 | if a.len < lda * (row - 1) + col { 38 | panic(short_a) 39 | } 40 | if c.len < ldc * (n - 1) + n { 41 | panic(short_c) 42 | } 43 | 44 | if alpha == 0 { 45 | if beta == 0 { 46 | if ul == .upper { 47 | for i in 0 .. n { 48 | mut ctmp := unsafe { c[i * ldc + i..i * ldc + n] } 49 | for j in 0 .. ctmp.len { 50 | ctmp[j] = 0 51 | } 52 | } 53 | return 54 | } 55 | for i in 0 .. n { 56 | mut ctmp := unsafe { c[i * ldc..i * ldc + i + 1] } 57 | for j in 0 .. ctmp.len { 58 | ctmp[j] = 0 59 | } 60 | } 61 | return 62 | } 63 | if ul == .upper { 64 | for i in 0 .. 
n { 65 | mut ctmp := unsafe { c[i * ldc + i..i * ldc + n] } 66 | for j in 0 .. ctmp.len { 67 | ctmp[j] *= beta 68 | } 69 | } 70 | return 71 | } 72 | for i in 0 .. n { 73 | mut ctmp := unsafe { c[i * ldc..i * ldc + i + 1] } 74 | for j in 0 .. ctmp.len { 75 | ctmp[j] *= beta 76 | } 77 | } 78 | return 79 | } 80 | if trans_a == .no_trans { 81 | if ul == .upper { 82 | for i in 0 .. n { 83 | mut ctmp := unsafe { c[i * ldc + i..i * ldc + n] } 84 | atmp := a[i * lda..i * lda + k] 85 | if beta == 0 { 86 | for jc in 0 .. ctmp.len { 87 | j := jc + i 88 | ctmp[int(jc)] = alpha * float64.dot_unitary(atmp, a[j * lda..j * lda + k]) 89 | } 90 | } else { 91 | for jc, vc in ctmp { 92 | j := jc + i 93 | ctmp[int(jc)] = vc * beta + 94 | alpha * float64.dot_unitary(atmp, a[j * lda..j * lda + k]) 95 | } 96 | } 97 | } 98 | return 99 | } 100 | for i in 0 .. n { 101 | mut ctmp := unsafe { c[i * ldc..i * ldc + i + 1] } 102 | atmp := a[i * lda..i * lda + k] 103 | if beta == 0 { 104 | for j in 0 .. ctmp.len { 105 | ctmp[j] = alpha * float64.dot_unitary(a[j * lda..j * lda + k], atmp) 106 | } 107 | } else { 108 | for j, vc in ctmp { 109 | ctmp[j] = vc * beta + alpha * float64.dot_unitary(a[j * lda..j * lda + k], atmp) 110 | } 111 | } 112 | } 113 | return 114 | } 115 | // Cases where a is transposed. 116 | if ul == .upper { 117 | for i in 0 .. n { 118 | mut ctmp := unsafe { c[i * ldc + i..i * ldc + n] } 119 | if beta == 0 { 120 | for j in 0 .. ctmp.len { 121 | ctmp[j] = 0 122 | } 123 | } else if beta != 1 { 124 | for j in 0 .. ctmp.len { 125 | ctmp[j] *= beta 126 | } 127 | } 128 | for l := 0; l < k; l++ { 129 | tmp := alpha * a[l * lda + i] 130 | if tmp != 0 { 131 | float64.axpy_unitary(tmp, a[l * lda + i..l * lda + n], mut ctmp) 132 | } 133 | } 134 | } 135 | return 136 | } 137 | for i in 0 .. n { 138 | mut ctmp := unsafe { c[i * ldc..i * ldc + i + 1] } 139 | if beta != 1 { 140 | for j in 0 .. ctmp.len { 141 | ctmp[j] *= beta 142 | } 143 | } 144 | for l := 0; l < k; l++ { 145 | tmp := alpha * a[l * lda + i] 146 | if tmp != 0 { 147 | float64.axpy_unitary(tmp, a[l * lda..l * lda + i + 1], mut ctmp) 148 | } 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /vblas/util.v: -------------------------------------------------------------------------------- 1 | module vblas 2 | 3 | // [SD]gemm behavior constants. These are kept here to keep them out of the 4 | // way during single precision code generation. 5 | const block_size = 64 // b x b matrix 6 | 7 | const min_par_block = 4 // minimum number of blocks needed to go parallel 8 | 9 | // blocks returns the number of divisions of the dimension length with the given 10 | // block size. 11 | fn blocks(dim int, bsize int) int { 12 | return (dim + bsize - 1) / bsize 13 | } 14 | 15 | fn flatten(a [][]f64) []f64 { 16 | if a.len == 0 { 17 | return [] 18 | } 19 | m := a.len 20 | n := a[0].len 21 | mut s := []f64{len: m * n} 22 | for i in 0 .. m { 23 | for j in 0 .. n { 24 | s[i * n + j] = a[i][j] 25 | } 26 | } 27 | return s 28 | } 29 | --------------------------------------------------------------------------------
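Appendix — usage sketch (not a file from this repository). The following is a minimal, hypothetical example of driving the `vblas` routines shown above. It assumes only the public functions defined in these files (`dgemv`, `dnrm2`, `ddot`) and that the module resolves under the import name `vblas` when placed on a V project's module path; expected results are worked out by hand in the comments.

module main

import vblas

fn main() {
	// a is a 2x3 matrix stored row-major, so its leading dimension lda is 3
	// (the number of columns), matching the `lda < math.max(1, n)` check in dgemv.
	a := [1.0, 2, 3, 4, 5, 6]
	x := [1.0, 0, -1]
	mut y := [0.0, 0]

	// y = 1.0 * A * x + 0.0 * y
	// row 0: 1*1 + 2*0 + 3*(-1) = -2
	// row 1: 4*1 + 5*0 + 6*(-1) = -2
	vblas.dgemv(.no_trans, 2, 3, 1.0, a, 3, x, 1, 0.0, mut y, 1)
	println(y) // [-2.0, -2.0]

	println(vblas.dnrm2(3, x, 1)) // sqrt(1 + 0 + 1) ~= 1.4142
	println(vblas.ddot(2, y, 1, y, 1)) // (-2)*(-2) + (-2)*(-2) = 8
}

Note the BLAS-style contract this illustrates: dimensions and increments are passed explicitly, and the length checks panic with the messages from error.v rather than returning errors, so callers are expected to validate shapes up front.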