├── flare ├── __init__.py └── trainer.py ├── setup.py ├── LICENSE ├── examples └── mnist.py ├── README.md └── tests └── test_trainer.py /flare/__init__.py: -------------------------------------------------------------------------------- 1 | from .trainer import * -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from setuptools import find_packages 3 | 4 | setup( 5 | name = 'Flare', 6 | version = '0.0.1', 7 | description = 'PyTorch training for humans', 8 | author = 'Abhai Kollara Dilip', 9 | author_email = 'abhai.dilip@gmail.com', 10 | packages = find_packages() 11 | ) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Abhai Kollara Dilip 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/mnist.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | import torchvision 6 | from torchvision import transforms 7 | from torchvision.datasets import MNIST 8 | 9 | import flare 10 | from flare import Trainer 11 | 12 | # Data loading and transformation 13 | 14 | train_data = MNIST('/Users/Abhai/datasets/', train=True, download=True) 15 | test_data = MNIST('/Users/Abhai/datasets/', train=False, download=True) 16 | 17 | train_X, train_Y = train_data.train_data, train_data.train_labels 18 | test_X, test_Y = test_data.test_data, test_data.test_labels 19 | 20 | # Conv layers require 4D inputs 21 | train_X = torch.unsqueeze(train_X, 1).float() 22 | test_X = torch.unsqueeze(test_X, 1).float() 23 | 24 | class Net(nn.Module): 25 | def __init__(self): 26 | super(Net, self).__init__() 27 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 28 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 29 | self.conv2_drop = nn.Dropout2d() 30 | self.fc1 = nn.Linear(320, 50) 31 | self.fc2 = nn.Linear(50, 10) 32 | 33 | def forward(self, x): 34 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 35 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 36 | x = x.view(-1, 320) 37 | x = F.relu(self.fc1(x)) 38 | x = F.dropout(x, training=self.training) 39 | x = self.fc2(x) 40 | return F.log_softmax(x, dim=1) 41 | 42 | 43 | model = Net() 44 | 45 | t = Trainer(model, nn.NLLLoss(), torch.optim.Adam(model.parameters())) 46 | t.train(train_X, train_Y, validation_split=0.2, batch_size=128) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Flare: PyTorch for everyday use 2 | 3 | ## Beware, Flare is still in developmental stages ! 4 | 5 | Flare is a utility library that enables users to train their networks on PyTorch instantly. The API was inspired from [Keras](https://github.com/keras-team/keras) deep learning framework. 6 | 7 | Currently Flare is designed to run on PyTorch >= 0.4.0 8 | 9 | ## Guiding principles 10 | - **"Everything should be made as simple as possible, but not simpler"** 11 | - **Intuitiveness** Do not support complex usecases at the cost of intuitiveness of simpler tasks. 12 | 13 | 14 | ## Installation 15 | First, clone the repo using 16 | `https://github.com/abhaikollara/flare.git` 17 | 18 | then `cd` to the **flare** folder and run the install command 19 | ` 20 | cd flare 21 | sudo python setup.py install 22 | ` 23 | 24 | ## Example 25 | ```python 26 | import torch 27 | from torch import nn 28 | 29 | import flare 30 | from flare import Trainer 31 | 32 | input_1 = np.random.rand(10000,5) 33 | input_2 = np.random.rand(10000,5) 34 | targets = np.random.randint(0, 10, size=[10000,]) 35 | 36 | class linear_two_input(nn.Module): 37 | 38 | def __init__(self): 39 | super(linear_two_input, self).__init__() 40 | self.dense1 = nn.Linear(10, 64) 41 | self.dense2 = nn.Linear(64, 10) 42 | 43 | def forward(self, inputs): 44 | y = torch.cat(inputs, dim=-1) 45 | y = self.dense1(y) 46 | y = self.dense2(y) 47 | return y 48 | 49 | model = linear_two_input() 50 | 51 | t = Trainer(model, nn.CrossEntropyLoss(), torch.optim.Adam(model.parameters())) 52 | t.train([input_1, input_2], targets, validation_split=0.2, batch_size=128) 53 | ``` 54 | 55 | 56 | [See MNIST example here](https://github.com/abhaikollara/flare/blob/master/examples/mnist.py) 57 | 58 | ## Why this name, Flare 59 | 60 | ¯\\_(ツ)_/¯ 61 | -------------------------------------------------------------------------------- /tests/test_trainer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import numpy as np 4 | import flare 5 | from flare import Trainer, TensorDataset 6 | from torch.utils.data import DataLoader 7 | import pytest 8 | 9 | np_input_1 = np.random.rand(1000,5) 10 | np_input_2 = np.random.rand(1000,5) 11 | np_target = np.random.randint(0, 10, size=[1000,]) 12 | 13 | tensor_input_1 = torch.from_numpy(np_input_1).float() 14 | tensor_input_2 = torch.from_numpy(np_input_2).float() 15 | tensor_target = torch.FloatTensor(np_target).long() 16 | 17 | 18 | train_dataset = TensorDataset([tensor_input_1, tensor_input_2], tensor_target) 19 | predict_dataset = TensorDataset([tensor_input_1, tensor_input_2]) 20 | 21 | class linear_1(nn.Module): 22 | 23 | def __init__(self): 24 | super(linear_1, self).__init__() 25 | self.dense1 = nn.Linear(5, 64) 26 | self.dense2 = nn.Linear(64, 10) 27 | 28 | def forward(self, x): 29 | y = self.dense1(x) 30 | y = self.dense2(y) 31 | return y 32 | 33 | class linear_2(nn.Module): 34 | 35 | def __init__(self): 36 | super(linear_2, self).__init__() 37 | self.dense1 = nn.Linear(10, 64) 38 | self.dense2 = nn.Linear(64, 10) 39 | 40 | def forward(self, x): 41 | y = torch.cat(x,dim=-1) 42 | y = self.dense1(y) 43 | y = self.dense2(y) 44 | return y 45 | 46 | def generator(targets=True): 47 | while True: 48 | i = 0 49 | bs = 32 50 | for i in range(0, tensor_input_1.shape[0], bs): 51 | if targets: 52 | yield [[np_input_1[i:i+bs], np_input_2[i:i+bs]], np_target[i:i+bs]] 53 | else: 54 | yield [np_input_1[i:i+bs], np_input_2[i:i+bs]] 55 | 56 | single_input_model = linear_1() 57 | multi_input_model = linear_2() 58 | 59 | train_generator = generator() 60 | predict_generator = generator(targets=False) 61 | val_data_loader = DataLoader(train_dataset, batch_size=128) 62 | predict_data_loader = DataLoader(predict_dataset, batch_size=128) 63 | 64 | 65 | def _get_optim(model): 66 | return torch.optim.Adam(model.parameters()) 67 | 68 | class TestDataset(object): 69 | 70 | @pytest.mark.parametrize("input_data, target_data", [ 71 | ([np_input_1, np_input_2[:300]], np_target), 72 | ([np_input_1, np_input_2], np_target[:300]) 73 | ]) 74 | def test_unequal_samples(self, input_data, target_data): 75 | with pytest.raises(ValueError): 76 | t = Trainer(multi_input_model, nn.CrossEntropyLoss(), _get_optim(multi_input_model)) 77 | t.train(input_data, target_data) 78 | 79 | 80 | class TestTrainer(object): 81 | 82 | @pytest.mark.parametrize("model, data", [ 83 | (multi_input_model, [np_input_1, np_input_2]), 84 | (single_input_model, np_input_1,), 85 | ]) 86 | @pytest.mark.parametrize("validation_split", 87 | [0.0, 0.1] 88 | ) 89 | def test_train_validation_split(self, model, data, validation_split): 90 | t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model)) 91 | t.train(data, np_target, validation_split=validation_split, batch_size=128) 92 | 93 | @pytest.mark.parametrize("model, data, validation_data", [ 94 | (multi_input_model, [np_input_1, np_input_2], [(np_input_1, np_input_2), np_target]), 95 | (single_input_model, np_input_1, (np_input_1, np_target)), 96 | ]) 97 | def test_train_validation_data(self, model, data, validation_data): 98 | t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model)) 99 | t.train(data, np_target, validation_data=validation_data, batch_size=128) 100 | 101 | 102 | @pytest.mark.parametrize("model, data", [ 103 | (multi_input_model, [np_input_1, np_input_2]), 104 | (single_input_model, np_input_1), 105 | ]) 106 | def test_evaluate(self, model, data): 107 | t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model)) 108 | t.evaluate(data, np_target, batch_size=128) 109 | 110 | @pytest.mark.parametrize("model, data", [ 111 | (multi_input_model, [np_input_1, np_input_2]), 112 | (single_input_model, np_input_1) 113 | ]) 114 | @pytest.mark.parametrize("classes", 115 | [True, False] 116 | ) 117 | def test_predict(self, model, data, classes): 118 | t = Trainer(model, nn.CrossEntropyLoss(), _get_optim(model)) 119 | t.predict(data, classes=classes, batch_size=128) 120 | 121 | @pytest.mark.parametrize("generator", 122 | [train_generator, val_data_loader] 123 | ) 124 | @pytest.mark.parametrize("validation_data", 125 | [None, [(np_input_1, np_input_2), np_target], train_generator, val_data_loader] 126 | ) 127 | def test_train_on_generator(self, generator, validation_data): 128 | t = Trainer(multi_input_model, nn.CrossEntropyLoss(), _get_optim(multi_input_model)) 129 | t.train_on_generator(generator, steps_per_epoch=31, validation_data=validation_data, validation_steps=31) 130 | 131 | @pytest.mark.parametrize("generator", 132 | [train_generator, val_data_loader] 133 | ) 134 | def test_evaluate_on_generator(self, generator): 135 | t = Trainer(multi_input_model, nn.CrossEntropyLoss(), _get_optim(multi_input_model)) 136 | t.evaluate_on_generator(generator, steps_per_epoch=31) 137 | 138 | @pytest.mark.parametrize("generator", 139 | [predict_generator, predict_data_loader] 140 | ) 141 | def test_predict_on_generator(self, generator): 142 | t = Trainer(multi_input_model, nn.CrossEntropyLoss(), _get_optim(multi_input_model)) 143 | t.predict_on_generator(generator, steps_per_epoch=31) -------------------------------------------------------------------------------- /flare/trainer.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | 3 | import torch 4 | from torch import optim 5 | from torch.utils.data import Dataset, DataLoader 6 | import numpy as np 7 | from tqdm import tqdm 8 | 9 | 10 | def _to_list(x): 11 | ''' 12 | Used to ensure that model input is 13 | always a list, even if single input is required 14 | ''' 15 | if isinstance(x, (list, tuple)): 16 | return x 17 | else: 18 | return [x] 19 | 20 | 21 | def _wrap_in_tensor(x, requires_grad=True): 22 | if torch.is_tensor(x): 23 | return x 24 | if issubclass(x.dtype.type, np.floating): 25 | return torch.tensor(x, requires_grad=requires_grad, dtype=torch.float32) 26 | elif issubclass(x.dtype.type, np.integer): 27 | return torch.tensor(x, requires_grad=requires_grad, dtype=torch.int64) 28 | else: 29 | raise TypeError( 30 | 'Input array must be valid numpy arrays or torch tensors') 31 | 32 | 33 | class TensorDataset(Dataset): 34 | 35 | def __init__(self, inputs, targets=None): 36 | super(TensorDataset, self).__init__() 37 | self.inputs = inputs 38 | self.targets = targets 39 | if len(set(len(x) for x in self.inputs)) != 1: 40 | raise ValueError('Inputs must have equal n_samples dimension.') 41 | 42 | if targets is not None: 43 | if len(self.inputs[0]) != len(self.targets): 44 | raise ValueError( 45 | 'Inputs and targets must have equal n_samples dimension') 46 | 47 | def __len__(self): 48 | return self.inputs[0].shape[0] 49 | 50 | def __getitem__(self, idx): 51 | if self.targets is not None: 52 | return [x[idx] for x in self.inputs], self.targets[idx] 53 | else: 54 | return [x[idx] for x in self.inputs] 55 | 56 | 57 | class Trainer(object): 58 | 59 | def __init__(self, model, loss, optimizer): 60 | """ A trainer utility for PyTorch modules 61 | 62 | # Arguments: 63 | model: An instance of torch.nn.Module 64 | loss: A PyTorch loss function 65 | optimizer: An instance of torch.optim object 66 | """ 67 | self.model = model 68 | self.optimizer = optimizer 69 | self.loss_func = loss 70 | 71 | def train(self, inputs, targets, batch_size=1, epochs=1, 72 | validation_split=0.0, validation_data=None, shuffle=True, disable_progbar=False): 73 | """Trains the model for a fixed number of epochs 74 | 75 | # Arguments 76 | inputs: A single input or a list of inputs which can be either 77 | numpy arrays or torch tensors 78 | targets: Target values/classes. A numpy array or a torch tensor. 79 | batch_size: int. Number of samples per gradient update. 80 | epochs: int. Number of epochs to train the model. 81 | validation_split: float (0. < x < 1.) 82 | Fraction of data to use as validation data. This 83 | takes precedence of validation_data 84 | validation_data: tuple(input_data, target_data) 85 | shuffle: boolean. Whether to shuffle data at each epoch 86 | 87 | #Raises 88 | ValueError: If the number of samples in inputs and 89 | targets are not equal 90 | """ 91 | 92 | inputs = [_wrap_in_tensor(x) for x in _to_list(inputs)] 93 | targets = _wrap_in_tensor(targets, requires_grad=False) 94 | 95 | if validation_split > 0.0: 96 | split_size = int(len(inputs[0]) * validation_split) 97 | train_dataset = TensorDataset([x[:-split_size] 98 | for x in inputs], targets[:-split_size]) 99 | validation_data = ([x[-split_size:] 100 | for x in inputs], targets[-split_size:]) 101 | else: 102 | train_dataset = TensorDataset(inputs, targets) 103 | 104 | train_data_loader = DataLoader( 105 | train_dataset, batch_size=batch_size, shuffle=shuffle) 106 | 107 | self.train_on_generator( 108 | train_data_loader, epochs=epochs, validation_data=validation_data) 109 | 110 | def train_on_generator(self, generator, steps_per_epoch=None, epochs=1, validation_data=None, validation_steps=None): 111 | """Trains the model on data generator 112 | 113 | # Arguments 114 | generator : A user created data generator or 115 | torch.utils.data.DataLoader. The generator should 116 | yield and iterable of (input_data, target_data) 117 | steps_per_epoch: Total number of steps (batches of samples) 118 | to yield from generator before declaring one epoch 119 | finished and starting the next epoch. Must be specified 120 | for user created generator 121 | epochs: int. Number of epochs to train the model. 122 | validation_data: It can be any of the following 123 | tuple(input_data, target_data), 124 | generator yielding a tuple(input_data, target_data), 125 | torch.utils.DataLoader, 126 | validation_steps: Total number of steps to yield from 127 | validation generator. Required only if 128 | you are using a generator for validation data. 129 | 130 | #Raises 131 | ValueError: If the number of samples in inputs and 132 | targets are not equal 133 | 134 | AssertionError: If generator is a user defined generator and 135 | steps_per_epoch is not specified 136 | """ 137 | if isinstance(generator, DataLoader): 138 | batch_gen = (batch for batch in generator) 139 | steps_per_epoch = int(len(generator.dataset) / generator.batch_size) 140 | else: 141 | assert steps_per_epoch is not None 142 | batch_gen = generator 143 | 144 | for epoch in range(epochs): 145 | print('Epoch', str(epoch), 'of', str(epochs)) 146 | for bnum in tqdm(range(steps_per_epoch)): 147 | batch_inputs, batch_targets = next(batch_gen) 148 | _ = self.train_batch(batch_inputs, batch_targets) 149 | 150 | if validation_data is not None: 151 | if isinstance(validation_data, DataLoader) or inspect.isgenerator(validation_data): 152 | self.evaluate_on_generator( 153 | validation_data, validation_steps) 154 | else: 155 | self.evaluate(validation_data[0], validation_data[1]) 156 | 157 | def train_batch(self, inputs, targets): 158 | """ Single gradient update over one batch of samples 159 | 160 | # Arguments 161 | inputs: A single input or a list of inputs which can be either 162 | numpy arrays or torch tensors 163 | targets: Target values/classes. A numpy array or a torch tensor. 164 | 165 | # Returns 166 | Scalar training loss as torch tensor 167 | """ 168 | input_batch = [_wrap_in_tensor(x) for x in _to_list(inputs)] 169 | target_batch = _wrap_in_tensor(targets, requires_grad=False) 170 | 171 | self.optimizer.zero_grad() 172 | self.model.train() 173 | 174 | if len(input_batch) == 1: 175 | y = self.model(input_batch[0]) 176 | else: 177 | y = self.model(input_batch) 178 | 179 | loss = self.loss_func(y, target_batch) 180 | loss.backward() 181 | self.optimizer.step() 182 | return loss.item() 183 | 184 | def evaluate(self, inputs, targets, batch_size=1): 185 | """Computes and prints the loss on data 186 | batch by batch without optimizing 187 | 188 | # Arguments 189 | inputs: A single input or a list of inputs which can be either 190 | numpy arrays or torch tensors 191 | targets: Target values/classes. A numpy array or a torch tensor. 192 | batch_size: int. Number of samples per gradient update. 193 | 194 | #Raises 195 | ValueError: If the number of samples in inputs and 196 | targets are not equal 197 | """ 198 | 199 | inputs = [_wrap_in_tensor(x) for x in _to_list(inputs)] 200 | targets = _wrap_in_tensor(targets, requires_grad=False) 201 | 202 | valid_dataset = TensorDataset(inputs, targets) 203 | valid_data_loader = DataLoader( 204 | valid_dataset, batch_size=batch_size) 205 | 206 | self.evaluate_on_generator(valid_data_loader) 207 | 208 | def evaluate_on_generator(self, generator, steps_per_epoch=None): 209 | """Evaluates the model on data generator 210 | 211 | # Arguments 212 | generator : A user created data generator or 213 | torch.utils.data.DataLoader. The generator should 214 | yield and iterable of (input_data, target_data) 215 | steps_per_epoch: Total number of steps (batches of samples) 216 | to yield from generator before declaring one epoch 217 | finished and starting the next epoch. Must be specified 218 | for user created generator 219 | 220 | #Raises 221 | AssertionError: If generator is a user defined generator and 222 | steps_per_epoch is not specified 223 | """ 224 | if isinstance(generator, DataLoader): 225 | batch_gen = (batch for batch in generator) 226 | steps_per_epoch = int(len(generator.dataset) / generator.batch_size) 227 | else: 228 | assert steps_per_epoch is not None 229 | batch_gen = generator 230 | 231 | for batch in tqdm(range(steps_per_epoch)): 232 | batch_inputs, batch_targets = next(batch_gen) 233 | _ = self.evaluate_batch(batch_inputs, batch_targets) 234 | 235 | 236 | def evaluate_batch(self, inputs, targets): 237 | """Evaluates the model over a single batch of samples. 238 | 239 | # Arguments 240 | inputs: A single input or a list of inputs which can be either 241 | numpy arrays or torch tensors 242 | targets: Target values/classes. A numpy array or a torch tensor. 243 | 244 | # Returns 245 | Scalar test loss as torch tensor 246 | 247 | """ 248 | input_batch = [_wrap_in_tensor(x) for x in _to_list(inputs)] 249 | target_batch = _wrap_in_tensor(targets, requires_grad=False) 250 | 251 | self.model.eval() 252 | 253 | if len(input_batch) == 1: 254 | y = self.model(input_batch[0]) 255 | else: 256 | y = self.model(input_batch) 257 | 258 | loss = self.loss_func(y, target_batch) 259 | return loss.item() 260 | 261 | def predict(self, inputs, batch_size=1, classes=False, disable_progbar=False): 262 | """Generates output predictions batch 263 | by batch for the input samples. 264 | 265 | # Arguments 266 | inputs: A single input or a list of inputs which can be either 267 | numpy arrays or torch tensors 268 | batch_size: integer. Number of samples per batch 269 | classes: boolean. Whether to return class predictions 270 | 271 | # Returns 272 | A 1D torch tensor of predictions 273 | 274 | #Raises 275 | ValueError: If the number of samples in inputs are 276 | not equal 277 | """ 278 | inputs = [_wrap_in_tensor(x, requires_grad=False) 279 | for x in _to_list(inputs)] 280 | 281 | predict_dataset = TensorDataset(inputs) 282 | predict_data_loader = DataLoader( 283 | predict_dataset, batch_size=batch_size, shuffle=False) 284 | 285 | return self.predict_on_generator(predict_data_loader, classes=classes) 286 | 287 | def predict_on_generator(self, generator, steps_per_epoch=None, classes=False): 288 | """Predicts the model on data generator 289 | 290 | # Arguments 291 | generator : A user created data generator or 292 | torch.utils.data.DataLoader. The generator should 293 | yield and iterable of (input_data) 294 | steps_per_epoch: Total number of steps (batches of samples) 295 | to yield from generator before declaring one epoch 296 | finished and starting the next epoch. Must be specified 297 | for user created generator 298 | 299 | #Raises 300 | AssertionError: If generator is a user defined generator and 301 | steps_per_epoch is not specified 302 | """ 303 | preds = [] 304 | if isinstance(generator, DataLoader): 305 | batch_gen = (batch for batch in generator) 306 | steps_per_epoch = int(len(generator.dataset) / generator.batch_size) 307 | else: 308 | assert steps_per_epoch is not None 309 | batch_gen = generator 310 | 311 | for bno in tqdm(range(steps_per_epoch)): 312 | batch_inputs = next(batch_gen) 313 | pred = self.predict_batch(batch_inputs, classes=classes) 314 | preds.append(pred) 315 | 316 | return preds 317 | 318 | def predict_batch(self, inputs, classes=False): 319 | """Returns predictions for a single batch of samples. 320 | 321 | # Arguments 322 | inputs: A single input or a list of inputs which can be either 323 | numpy arrays or torch tensors 324 | classes: boolean. Whether to return class predictions 325 | # Returns 326 | A torch tensor of predictions 327 | """ 328 | input_batch = [_wrap_in_tensor(x, requires_grad=False) 329 | for x in _to_list(inputs)] 330 | 331 | if len(input_batch) == 1: 332 | y = self.model(input_batch[0]) 333 | else: 334 | y = self.model(input_batch) 335 | 336 | if classes: 337 | return torch.max(y, -1)[1] 338 | else: 339 | return y 340 | --------------------------------------------------------------------------------