├── .gitignore
├── README.md
├── utils.py
└── snn.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
__pycache__
*ubyte
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# SpikingConvNet

Implementation of the paper *STDP-based spiking deep convolutional neural networks for object recognition*, available [here](https://www.sciencedirect.com/science/article/abs/pii/S0893608017302903), for the MNIST classification task.

## Results

The model achieves ~95% accuracy; better performance can be reached with more tuning.

## MNIST Dataset

See https://pypi.org/project/python-mnist/ to download the dataset.
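
## Usage

A minimal way to launch training and evaluation (a sketch; `main` and its parameters are defined in `snn.py`, and reducing `data_prop` is only a convenience for quick experiments):

```python
from snn import main

# Trains both convolutional layers with STDP, then fits the SVM readouts.
# data_prop=0.1 uses 10% of MNIST for a faster run.
main(seed=1, data_prop=0.1, nb_timesteps=15)
```

Running `python snn.py` directly trains on the full dataset with the default parameters.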

## References

[1] Kheradpisheh, S. R., Ganjtabesh, M., Thorpe, S. J., & Masquelier, T. (2018). STDP-based spiking deep convolutional neural networks for object recognition. Neural Networks, 99, 56–67. https://doi.org/10.1016/j.neunet.2017.12.005

[2] Mozafari, M., Ganjtabesh, M., Nowzari-Dalini, A., & Masquelier, T. (2019). SpykeTorch: Efficient simulation of convolutional spiking neural networks with at most one spike per neuron. Frontiers in Neuroscience, 13, 625. https://doi.org/10.3389/fnins.2019.00625

[3] https://github.com/npvoid/SDNN_python
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import numpy as np
from scipy.ndimage import correlate
from mnist import MNIST


# Some functions are adapted from https://github.com/npvoid/SDNN_python


def spike_encoding(img, nb_timesteps):
    """
    Encode an image into spikes using a temporal coding based on pixel intensity:
    the higher the intensity, the earlier the spike.

    Args:
        img (ndarray): input of shape (height, width)
        nb_timesteps (int): number of spike bins
    """
    # Intensity to latency
    with np.errstate(divide='ignore', invalid='ignore'):  # suppress divide-by-zero warnings
        I, lat = np.argsort(1/img.flatten()), np.sort(1/img.flatten())
    # Remove pixels of value 0 (infinite latency, i.e. no spike)
    I = np.delete(I, np.where(lat == np.inf))
    # Convert 1D into 2D coordinates
    II = np.unravel_index(I, img.shape)
    # Compute the time bin of each spike
    t_step = np.ceil(np.arange(I.size) / (I.size / (nb_timesteps-1))).astype(np.uint8)
    # Add the time axis to the index array
    # shape : (timestep, height, width)
    II = (t_step,) + II
    # Create spikes
    spike_times = np.zeros((nb_timesteps, img.shape[0], img.shape[1]), dtype=np.uint8)
    spike_times[II] = 1
    return spike_times


def DoG_filter(img, filt, threshold):
    """
    Apply a DoG filter on the given image.

    Args:
        img (ndarray): input of shape (height, width)
        filt (ndarray): DoG filter
        threshold (int): threshold applied on contrasts
    """
    # Apply the filter on the input image
    img = correlate(img, filt, mode='constant')
    # Set borders to 0
    border = np.zeros(img.shape)
    border[5:-5, 5:-5] = 1.
    img = img * border
    # Keep pixels above the threshold
    img = (img >= threshold).astype(int) * img
    img = np.abs(img)
    return img


def DoG(size, s1, s2):
    """
    Create a DoG (Difference of Gaussians) filter.

    Args:
        size (int): size of the filter
        s1 (float): standard deviation of the first Gaussian
        s2 (float): standard deviation of the second Gaussian
    """
    r = np.arange(size)+1
    x = np.tile(r, [size, 1])
    y = x.T
    d2 = (x-size/2.-0.5)**2 + (y-size/2.-0.5)**2
    filt = 1/np.sqrt(2*np.pi) * (1/s1 * np.exp(-d2/(2*(s1**2))) - 1/s2 * np.exp(-d2/(2*(s2**2))))
    filt -= np.mean(filt[:])
    filt /= np.amax(filt[:])
    return filt


def preprocess_MNIST(dataset, nb_timesteps, filters, threshold):
    """
    Preprocess the MNIST dataset: DoG filtering followed by spike encoding.
    """
    nb_channels = len(filters)
    samples, height, width = dataset.shape
    out = np.zeros((samples, nb_timesteps, nb_channels, height, width), dtype=np.uint8)
    for i, img in enumerate(dataset):
        encoded_img = np.zeros((nb_channels, nb_timesteps, height, width))
        for f, filt in enumerate(filters):
            dog_img = DoG_filter(img, filt, threshold)
            encoded_img[f] = spike_encoding(dog_img, nb_timesteps)
        out[i] = np.swapaxes(encoded_img, 0, 1)
    return out


def load_MNIST(data_prop=1):
    """
    Load the MNIST dataset.
    """
    mndata = MNIST()
    images, labels = mndata.load_training()

    # Training set
    X_train, y_train = np.asarray(images), np.asarray(labels)
    if data_prop < 1:
        samples_ind = np.random.choice(len(X_train), int(len(X_train)*data_prop), replace=False)
        X_train = X_train[samples_ind]
        y_train = y_train[samples_ind]
    X_train = X_train.reshape(-1, 28, 28)
    # Random shuffling
    random_indices = np.random.permutation(len(X_train))
    X_train, y_train = X_train[random_indices], y_train[random_indices]

    # Testing set
    images, labels = mndata.load_testing()
    X_test, y_test = np.asarray(images), np.asarray(labels)
    if data_prop < 1:
        samples_ind = np.random.choice(len(X_test), int(len(X_test)*data_prop), replace=False)
        X_test = X_test[samples_ind]
        y_test = y_test[samples_ind]
    X_test = X_test.reshape(-1, 28, 28)

    input_shape = X_test[0].shape

    return X_train, y_train, X_test, y_test, input_shape


def load_encoded_MNIST(data_prop=1, nb_timesteps=15, threshold=15, filters=[DoG(7,1,2), DoG(7,2,1)]):
    """
    Load and preprocess the MNIST dataset.
    """
    X_train, y_train, X_test, y_test, _ = load_MNIST(data_prop)
    X_train_encoded = preprocess_MNIST(X_train, nb_timesteps, filters, threshold)
    X_test_encoded = preprocess_MNIST(X_test, nb_timesteps, filters, threshold)
    return X_train_encoded, y_train, X_test_encoded, y_test
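
# Quick illustrative self-check (not used by snn.py): encode one random image and
# verify the expected output shape. The random input is only a stand-in for a real
# MNIST digit; the shapes follow from the functions above.
if __name__ == "__main__":
    img = np.random.randint(0, 256, (28, 28)).astype(float)
    spikes = spike_encoding(DoG_filter(img, DoG(7, 1, 2), threshold=15), nb_timesteps=15)
    print(spikes.shape)  # (15, 28, 28): one binary spike map per time bin
    print(spikes.sum())  # each pixel above threshold spikes exactly once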
--------------------------------------------------------------------------------
/snn.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
from tqdm import tqdm
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from torch.nn.functional import conv2d, max_pool2d

from utils import load_encoded_MNIST


"""
Implementation of the paper "STDP-based spiking deep convolutional neural networks
for object recognition" for the MNIST classification task.

References:

[1] Kheradpisheh, S. R., Ganjtabesh, M., Thorpe, S. J., & Masquelier, T. (2018).
    STDP-based spiking deep convolutional neural networks for object recognition.
    Neural Networks, 99, 56–67. https://doi.org/10.1016/j.neunet.2017.12.005

[2] Mozafari, M., Ganjtabesh, M., Nowzari-Dalini, A., & Masquelier, T. (2019).
    SpykeTorch: Efficient simulation of convolutional spiking neural networks with
    at most one spike per neuron.
    Frontiers in Neuroscience, 13, 625. https://doi.org/10.3389/fnins.2019.00625

[3] https://github.com/npvoid/SDNN_python
"""


class SpikingPool:
    """
    Pooling layer with spiking neurons that can fire only once.
    """
    def __init__(self, input_shape, kernel_size, stride, padding=0):
        in_channels, in_height, in_width = input_shape
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        out_height = int(((in_height + 2 * self.padding[0] - self.kernel_size[0]) / self.stride[0]) + 1)
        out_width = int(((in_width + 2 * self.padding[1] - self.kernel_size[1]) / self.stride[1]) + 1)
        self.output_shape = (in_channels, out_height, out_width)
        # Keep track of active neurons, as each neuron can fire only once
        self.active_neurons = np.ones(self.output_shape).astype(bool)

    def reset(self):
        self.active_neurons[:] = True

    def __call__(self, in_spks):
        # Padding
        in_spks = np.pad(in_spks, ((0,), (self.padding[0],), (self.padding[1],)), mode='constant')
        in_spks = torch.Tensor(in_spks).unsqueeze(0)
        # Max pooling (using torch as it is fast and easy, to be changed)
        out_spks = max_pool2d(in_spks, self.kernel_size, stride=self.stride).numpy()[0]
        # Keep spikes of active neurons only
        out_spks = out_spks * self.active_neurons
        # Update active neurons, as each pooling neuron can fire only once
        self.active_neurons[out_spks == 1] = False
        return out_spks
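
# Illustrative shape check for the output-size formula above (the values match the
# SNN defined below):
#   pool = SpikingPool(input_shape=(30, 28, 28), kernel_size=2, stride=2)
#   pool.output_shape  # -> (30, 14, 14), since (28 + 2*0 - 2) // 2 + 1 = 14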

class SpikingConv:
    """
    Convolutional layer with IF (integrate-and-fire) spiking neurons that can fire
    only once. Implements a winner-take-all STDP learning rule.
    """
    def __init__(self, input_shape, out_channels, kernel_size, stride, padding=0,
                 nb_winners=1, firing_threshold=1, stdp_max_iter=None, adaptive_lr=False,
                 stdp_a_plus=0.004, stdp_a_minus=-0.003, stdp_a_max=0.15, inhibition_radius=0,
                 update_lr_cnt=500, weight_init_mean=0.8, weight_init_std=0.05, v_reset=0
                 ):
        in_channels, in_height, in_width = input_shape
        self.out_channels = out_channels
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        self.firing_threshold = firing_threshold
        self.v_reset = v_reset
        self.weights = np.random.normal(
            loc=weight_init_mean, scale=weight_init_std,
            size=(out_channels, in_channels, self.kernel_size[0], self.kernel_size[1]))

        # Output neurons
        out_height = int(((in_height + 2 * self.padding[0] - self.kernel_size[0]) / self.stride[0]) + 1)
        out_width = int(((in_width + 2 * self.padding[1] - self.kernel_size[1]) / self.stride[1]) + 1)
        self.pot = np.zeros((out_channels, out_height, out_width))
        self.active_neurons = np.ones(self.pot.shape).astype(bool)
        self.output_shape = self.pot.shape

        # STDP
        self.recorded_spks = np.zeros((in_channels, in_height+2*self.padding[0], in_width+2*self.padding[1]))
        self.nb_winners = nb_winners
        self.inhibition_radius = inhibition_radius
        self.adaptive_lr = adaptive_lr
        self.a_plus = stdp_a_plus
        self.a_minus = stdp_a_minus
        self.a_max = stdp_a_max
        self.stdp_cnt = 0
        self.update_lr_cnt = update_lr_cnt
        self.stdp_max_iter = stdp_max_iter
        self.plasticity = True
        self.stdp_neurons = np.ones(self.pot.shape).astype(bool)

    def get_learning_convergence(self):
        return (self.weights * (1-self.weights)).sum() / np.prod(self.weights.shape)

    def reset(self):
        self.pot[:] = self.v_reset
        self.active_neurons[:] = True
        self.stdp_neurons[:] = True
        self.recorded_spks[:] = 0

    def get_winners(self):
        winners = []
        channels = np.arange(self.pot.shape[0])
        # Copy potentials and keep neurons that can do STDP
        pots_tmp = np.copy(self.pot) * self.stdp_neurons
        # Find at most nb_winners
        while len(winners) < self.nb_winners:
            # Find a new winner
            winner = np.argmax(pots_tmp)  # 1D index
            winner = np.unravel_index(winner, pots_tmp.shape)  # 3D index
            # Stop the selection if the winner potential does not exceed the firing threshold
            if pots_tmp[winner] <= self.firing_threshold:
                break
            # Add winner
            winners.append(winner)
            # Disable winner selection for neurons of other channels in the neighborhood
            pots_tmp[channels != winner[0],
                     max(0, winner[1]-self.inhibition_radius):winner[1]+self.inhibition_radius+1,
                     max(0, winner[2]-self.inhibition_radius):winner[2]+self.inhibition_radius+1
                     ] = self.v_reset
            # Disable winner selection for neurons in the same channel
            pots_tmp[winner[0]] = self.v_reset
        return winners
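
    # Illustrative trace of get_winners (hypothetical values): with nb_winners=2 and
    # inhibition_radius=1, once a first winner is found at (c, h, w), other channels
    # are excluded from the 3x3 neighborhood around (h, w) and channel c is excluded
    # everywhere, so a second winner must come from another channel outside that
    # neighborhood.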

    def lateral_inhibition(self, spks):
        # Get the index of spikes
        spks_c, spks_h, spks_w = np.where(spks)
        # Get the associated potentials
        spks_pot = np.array([self.pot[spks_c[i], spks_h[i], spks_w[i]] for i in range(len(spks_c))])
        # Sort the indices by potential in descending order
        spks_sorted_ind = np.argsort(spks_pot)[::-1]
        # Sequentially inhibit neurons at the same location in other channels
        # Neurons with the highest potential inhibit neurons with lower ones, even if both spike
        for ind in spks_sorted_ind:
            # Check that the neuron has not been inhibited by another one
            if spks[spks_c[ind], spks_h[ind], spks_w[ind]] == 1:
                # Compute the index
                inhib_channels = np.arange(spks.shape[0]) != spks_c[ind]
                # Inhibit neurons
                spks[inhib_channels, spks_h[ind], spks_w[ind]] = 0
                self.pot[inhib_channels, spks_h[ind], spks_w[ind]] = self.v_reset
                self.active_neurons[inhib_channels, spks_h[ind], spks_w[ind]] = False
        return spks

    def get_conv_of(self, input, output_neuron):
        # Neuron index
        n_c, n_h, n_w = output_neuron
        # Get the list of convolution windows over the input neurons
        # shape : (in_neuron_values, nb_convs)
        input = torch.Tensor(input).unsqueeze(0)  # batch axis
        convs = torch.nn.functional.unfold(input, kernel_size=self.kernel_size, stride=self.stride)[0].numpy()
        # Get the convolution window of the spiking neuron
        conv_ind = (n_h * self.pot.shape[2]) + n_w  # 2D to 1D index
        return convs[:, conv_ind]

    def stdp(self, winner):
        assert self.stdp_neurons[winner], "STDP called on an inhibited neuron"
        if not self.plasticity:
            return
        # Count the call
        self.stdp_cnt += 1
        # Winner 3D coordinates
        winner_c, winner_h, winner_w = winner
        # Get the convolution window used to compute the output neuron potential
        conv = self.get_conv_of(self.recorded_spks, winner).flatten()
        # Compute dW
        w = self.weights[winner_c].flatten() * (1 - self.weights[winner_c]).flatten()
        w_plus = conv > 0  # Pre-then-post
        w_minus = conv == 0  # Post-then-pre (a pre-synaptic spike not seen before the post-synaptic one is assumed to come after)
        dW = (w_plus * w * self.a_plus) + (w_minus * w * self.a_minus)
        self.weights[winner_c] += dW.reshape(self.weights[winner_c].shape)
        # Lateral inhibition between channels (local inter-channel competition)
        channels = np.arange(self.pot.shape[0])
        self.stdp_neurons[channels != winner_c,
                          max(0, winner_h-self.inhibition_radius):winner_h+self.inhibition_radius+1,
                          max(0, winner_w-self.inhibition_radius):winner_w+self.inhibition_radius+1
                          ] = False
        # Lateral inhibition in the same channel (global intra-channel competition)
        self.stdp_neurons[winner_c] = False
        # Adaptive learning rate
        if self.adaptive_lr and self.stdp_cnt % self.update_lr_cnt == 0:
            self.a_plus = min(2 * self.a_plus, self.a_max)
            self.a_minus = - 0.75 * self.a_plus
        # Stop STDP after a given number of updates
        if self.stdp_max_iter is not None and self.stdp_cnt > self.stdp_max_iter:
            self.plasticity = False
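
    # Worked example of the STDP update above (illustrative numbers): for a weight
    # of 0.8 whose pre-synaptic input spiked (conv > 0), the change is
    #   dW = a_plus * w * (1 - w) = 0.004 * 0.8 * 0.2 = +0.00064,
    # and for a silent input (conv == 0), dW = -0.003 * 0.8 * 0.2 = -0.00048.
    # The w * (1 - w) factor vanishes as weights saturate toward 0 or 1, which is
    # exactly what get_learning_convergence measures.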

    def __call__(self, spk_in, train=False):
        # Padding
        spk_in = np.pad(spk_in, ((0,), (self.padding[0],), (self.padding[1],)), mode='constant')
        # Keep a record of the input spikes for STDP
        self.recorded_spks += spk_in
        # Output spikes
        spk_out = np.zeros(self.pot.shape)
        # Convert to torch tensors
        x = torch.Tensor(spk_in).unsqueeze(0)  # Add batch axis for torch conv2d
        weights = torch.Tensor(self.weights)  # converted on the fly (inefficient, could be cached)
        # Convolve (using torch as it is fast and easy, to be changed)
        out_conv = conv2d(x, weights, stride=self.stride).numpy()[0]  # Converted back to numpy
        # Update potentials
        self.pot[self.active_neurons] += out_conv[self.active_neurons]
        # Check for neurons that can spike
        output_spikes = self.pot > self.firing_threshold
        if np.any(output_spikes):
            # Generate spikes
            spk_out[output_spikes] = 1
            # Lateral inhibition for neurons at the same location in other channels
            # Inhibit and disable firing neurons with lower potentials
            spk_out = self.lateral_inhibition(spk_out)
            # STDP plasticity
            if train and self.plasticity:
                # Find winners (based on potential)
                winners = self.get_winners()
                # Apply STDP for each winner neuron
                for winner in winners:
                    self.stdp(winner)
            # Reset potentials and disable neurons that fire
            self.pot[spk_out == 1] = self.v_reset
            self.active_neurons[spk_out == 1] = False
        return spk_out


class SNN:
    """
    Spiking convolutional neural network model.
    """
    def __init__(self, input_shape):

        conv1 = SpikingConv(input_shape,
            out_channels=30, kernel_size=5, stride=1, padding=2,
            nb_winners=1, firing_threshold=10, stdp_max_iter=None,
            adaptive_lr=True, inhibition_radius=2, v_reset=0,
        )

        pool1 = SpikingPool(conv1.output_shape, kernel_size=2, stride=2, padding=0)

        conv2 = SpikingConv(pool1.output_shape,
            out_channels=100, kernel_size=5, stride=1, padding=2,
            nb_winners=1, firing_threshold=1, stdp_max_iter=None,
            adaptive_lr=True, inhibition_radius=1, v_reset=0,
        )

        pool2 = SpikingPool(conv2.output_shape, kernel_size=2, stride=2, padding=0)

        self.conv_layers = [conv1, conv2]
        self.pool_layers = [pool1, pool2]
        self.output_shape = pool2.output_shape
        self.nb_trainable_layers = len(self.conv_layers)
        self.recorded_sum_spks = []

    def reset(self):
        for layer in self.conv_layers:
            layer.reset()
        for layer in self.pool_layers:
            layer.reset()

    def __call__(self, x, train_layer=None):
        self.reset()
        nb_timesteps = x.shape[0]
        output_spikes = np.zeros((nb_timesteps,) + self.output_shape)
        sum_spks = 0
        for t in range(nb_timesteps):
            spk_in = x[t].astype(np.float64)
            sum_spks += spk_in.sum()
            spk = self.conv_layers[0](spk_in, train=(train_layer == 0))
            sum_spks += spk.sum()
            spk_in = self.pool_layers[0](spk)
            sum_spks += spk_in.sum()
            spk = self.conv_layers[1](spk_in, train=(train_layer == 1))
            sum_spks += spk.sum()
            spk_out = self.pool_layers[1](spk)
            sum_spks += spk_out.sum()
            output_spikes[t] = spk_out
        if train_layer is None:
            self.recorded_sum_spks.append(sum_spks)
            if output_spikes.sum() == 0:
                print("[WARNING] No output spike recorded.")
        return output_spikes
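
# Shape walkthrough for the architecture above (the DoG encoding in utils.py
# produces 2 input channels, so input_shape is (2, 28, 28) for MNIST):
#   conv1 (30 maps, 5x5, stride 1, pad 2)  -> (30, 28, 28)
#   pool1 (2x2, stride 2)                  -> (30, 14, 14)
#   conv2 (100 maps, 5x5, stride 1, pad 2) -> (100, 14, 14)
#   pool2 (2x2, stride 2)                  -> (100, 7, 7)
# Each sample thus yields nb_timesteps binary maps of 100*7*7 = 4900 values,
# which main() below flattens for the SVM readout.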

def main(
    seed=1,
    data_prop=1,            # Proportion of the data to load
    nb_timesteps=15,        # Number of spike bins
    epochs=[2, 2],          # Number of epochs per layer
    convergence_rate=0.01,  # Stop training a layer when its learning convergence falls below this rate
):
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load the encoded dataset
    X_train, y_train, X_test, y_test = load_encoded_MNIST(data_prop=data_prop, nb_timesteps=nb_timesteps)

    # Init the SNN
    input_shape = X_train[0][0].shape
    snn = SNN(input_shape)

    print(f"Input shape : {X_train[0].shape} ({np.prod(X_train[0].shape)} values)")
    print(f"Output shape : {snn.output_shape} ({np.prod(snn.output_shape)} values)")
    print(f"Mean spike count per input : {X_train.mean(0).sum()}")

    ### TRAINING ###
    print("\n### TRAINING ###")

    for layer in range(snn.nb_trainable_layers):
        print(f"Layer {layer+1}...")
        for epoch in range(epochs[layer]):
            print(f"\t epoch {epoch+1}")
            for x in tqdm(X_train):
                snn(x, train_layer=layer)
                if snn.conv_layers[layer].get_learning_convergence() < convergence_rate:
                    break

    ### TESTING ###
    print("\n### TESTING ###")

    output_train_max = np.zeros((len(X_train), np.prod(snn.output_shape)))
    output_train_sum = np.zeros((len(X_train), np.prod(snn.output_shape)))
    for i, x in enumerate(tqdm(X_train)):
        spk = snn(x)
        output_train_max[i] = spk.max(0).flatten()
        output_train_sum[i] = spk.sum(0).flatten()

    output_test_max = np.zeros((len(X_test), np.prod(snn.output_shape)))
    output_test_sum = np.zeros((len(X_test), np.prod(snn.output_shape)))
    for i, x in enumerate(tqdm(X_test)):
        spk = snn(x)
        output_test_max[i] = spk.max(0).flatten()
        output_test_sum[i] = spk.sum(0).flatten()

    print(f"Mean total number of spikes per sample : {np.mean(snn.recorded_sum_spks)}")

    ### READOUT ###

    clf = LinearSVC(max_iter=3000, random_state=seed)
    clf.fit(output_train_max, y_train)
    y_pred = clf.predict(output_test_max)
    acc = accuracy_score(y_test, y_pred)
    print(f"Accuracy with method 1 (max) : {acc}")

    clf = LinearSVC(max_iter=3000, random_state=seed)
    clf.fit(output_train_sum, y_train)
    y_pred = clf.predict(output_test_sum)
    acc = accuracy_score(y_test, y_pred)
    print(f"Accuracy with method 2 (sum) : {acc}")


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------