├── CNN_VAE.ipynb
├── LICENSE
└── README.md

/CNN_VAE.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-07-08T01:08:48.639152Z",
     "start_time": "2017-07-08T01:08:33.434405Z"
    }
   },
   "outputs": [],
   "source": [
    "# import bcolz\n",
    "import importlib\n",
    "import numpy as np\n",
    "import torch.utils.data\n",
    "import matplotlib.pyplot as plt\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "\n",
    "from torch.autograd import Variable\n",
    "import torch"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load Dataset\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load your favorite dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-07-08T01:08:48.889834Z",
     "start_time": "2017-07-08T01:08:48.643672Z"
    }
   },
   "outputs": [],
   "source": [
    "# you're supposed to come up with\n",
    "# x_val (VALIDATION_SIZE, TIME_STEPS), y_val (VALIDATION_SIZE, N_CLASSES) => validation dataset\n",
    "# x_train (TRAIN_SIZE, TIME_STEPS), y_train (TRAIN_SIZE, N_CLASSES) => train dataset\n",
    "# a synthetic stand-in is sketched in the next cell"
   ]
  },
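  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To exercise the notebook end to end without real data, the sketch below builds a hypothetical synthetic dataset with the expected shapes: noisy sinusoids whose frequency depends on the class. All sizes here are assumptions; replace this cell with your own data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# hypothetical sizes; TIME_STEPS must be divisible by 8 because the model halves the length three times\n",
    "TRAIN_SIZE, VALIDATION_SIZE, TIME_STEPS, N_CLASSES = 2048, 512, 1024, 4\n",
    "\n",
    "def make_dataset(n):\n",
    "    y = np.eye(N_CLASSES)[np.random.randint(0, N_CLASSES, n)]     # one-hot labels\n",
    "    freqs = 1 + np.argmax(y, axis=-1)[:, None]                    # class-dependent frequency\n",
    "    t = np.linspace(0, 2 * np.pi, TIME_STEPS)[None, :]\n",
    "    x = np.sin(freqs * t) + 0.1 * np.random.randn(n, TIME_STEPS)  # noisy sinusoids\n",
    "    return x.astype('float32'), y.astype('float32')\n",
    "\n",
    "x_train, y_train = make_dataset(TRAIN_SIZE)\n",
    "x_val, y_val = make_dataset(VALIDATION_SIZE)"
   ]
  },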
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Size params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "LENGTH = x_train.shape[-1]\n",
    "NCLASSES = y_train.shape[-1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Normalize by max"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-06-22T20:33:30.787459Z",
     "start_time": "2017-06-22T20:33:30.785255Z"
    },
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "x_val = x_val / x_val.max(axis=-1, keepdims=True)\n",
    "x_train = x_train / x_train.max(axis=-1, keepdims=True)\n",
    "y_train_oh = np.argmax(y_train, axis=-1)  # class indices recovered from the one-hot labels\n",
    "y_val_oh = np.argmax(y_val, axis=-1)      # class indices recovered from the one-hot labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Compute Class Weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-07-08T01:09:06.313789Z",
     "start_time": "2017-07-08T01:09:06.301761Z"
    }
   },
   "outputs": [],
   "source": [
    "# inverse-frequency class weights, normalized so the rarest class gets weight 1\n",
    "weights = 1.0 / np.sum(y_train, axis=0)\n",
    "normalized_weights = weights / np.max(weights)\n",
    "class_weight = {i: normalized_weights[i] for i in range(len(normalized_weights))}\n",
    "weights = torch.from_numpy(np.array(list(class_weight.values()))).float().cuda()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Construct Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gc; gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## (De)Convolution block\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Conv_block(torch.nn.Module):\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, padding, is_conv=True):\n",
    "        super(Conv_block, self).__init__()\n",
    "        self.in_channels = in_channels\n",
    "        self.out_channels = out_channels\n",
    "        self.kernel_size = kernel_size\n",
    "        self.padding = padding\n",
    "        # conv blocks halve the length by average pooling; deconv blocks double it by upsampling\n",
    "        self.pool_op = torch.nn.AvgPool1d(2) if is_conv \\\n",
    "            else torch.nn.Upsample(scale_factor=2, mode='linear')\n",
    "        self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n",
    "        self.bn = torch.nn.BatchNorm1d(out_channels, eps=0.001, momentum=0.99)\n",
    "        self.relu = torch.nn.ReLU()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = self.bn(x)\n",
    "        x = self.relu(x)\n",
    "        return self.pool_op(x)"
   ]
  },
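  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick shape check of the block on random data (a minimal sketch): with kernel size 201 and padding 100 the convolution preserves the length, so only the pooling halves it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "block = Conv_block(in_channels=1, out_channels=32, kernel_size=201, padding=100)\n",
    "dummy = Variable(torch.randn(2, 1, 1024))\n",
    "print(block(dummy).size())  # expected: (2, 32, 512)"
   ]
  },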
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Encoder/Classifier block\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Encoder(torch.nn.Module):\n",
    "    def __init__(self, in_channels, in_length, nclasses, latent_size, encoder_out_channels):\n",
    "        super(Encoder, self).__init__()\n",
    "\n",
    "        self.in_channels = in_channels\n",
    "        self.in_length = in_length\n",
    "        self.nclasses = nclasses\n",
    "        self.latent_size = latent_size\n",
    "        self.encoder_out_channels = encoder_out_channels\n",
    "        length = self.in_length\n",
    "        self.bn0 = torch.nn.BatchNorm1d(self.in_channels, eps=0.001, momentum=0.99)\n",
    "        # Layer 1\n",
    "        in_channels = self.in_channels\n",
    "        out_channels = 32\n",
    "        kernel_size = 201\n",
    "        padding = kernel_size // 2\n",
    "        self.conv_block_1 = Conv_block(in_channels, out_channels, kernel_size, padding)\n",
    "        length = length // 2\n",
    "        # Layer 2\n",
    "        in_channels = out_channels\n",
    "        out_channels = 32\n",
    "        kernel_size = 201\n",
    "        padding = kernel_size // 2\n",
    "        self.conv_block_2 = Conv_block(in_channels, out_channels, kernel_size, padding)\n",
    "        length = length // 2\n",
    "\n",
    "        # Layer 3\n",
    "        in_channels = out_channels\n",
    "        last_featuremaps_channels = 64\n",
    "        kernel_size = 201\n",
    "        padding = kernel_size // 2\n",
    "        self.conv_block_3 = Conv_block(in_channels, last_featuremaps_channels, kernel_size, padding)\n",
    "        length = length // 2\n",
    "\n",
    "        # classification head: one more convolution, then global average pooling over time\n",
    "        in_channels = last_featuremaps_channels\n",
    "        out_channels = self.nclasses\n",
    "        kernel_size = 30\n",
    "        padding = kernel_size // 2\n",
    "        self.conv_final = torch.nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n",
    "        self.gp_final = torch.nn.AvgPool1d(length)\n",
    "\n",
    "        # variational head: project the feature maps to the latent mean and log-variance\n",
    "        in_channels = last_featuremaps_channels\n",
    "        out_channels = self.encoder_out_channels\n",
    "        kernel_size = 51\n",
    "        padding = kernel_size // 2\n",
    "        self.adapt_pool = torch.nn.AvgPool1d(2); length = length // 2\n",
    "        self.adapt_conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n",
    "        self.encode_mean = torch.nn.Linear(length * out_channels, self.latent_size)\n",
    "        self.encode_logvar = torch.nn.Linear(length * out_channels, self.latent_size)\n",
    "        self.relu = torch.nn.ReLU()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x.view(-1, self.in_channels, self.in_length)\n",
    "        x = self.bn0(x)\n",
    "        x = self.conv_block_1(x)\n",
    "        x = self.conv_block_2(x)\n",
    "        x = self.conv_block_3(x)\n",
    "        cv_final = self.conv_final(x)\n",
    "        oh_class = self.gp_final(cv_final)\n",
    "        x = self.adapt_pool(x)\n",
    "        x = self.adapt_conv(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        # note: the ReLU constrains mean and logvar to be non-negative\n",
    "        mean = self.relu(self.encode_mean(x))\n",
    "        logvar = self.relu(self.encode_logvar(x))\n",
    "        return [oh_class.view(oh_class.size(0), self.nclasses),\n",
    "                mean, logvar,\n",
    "                self._sample_latent(mean, logvar)]\n",
    "\n",
    "    def _sample_latent(self, mean, logvar):  # reparameterization trick: z ~ N(mean, sigma^2)\n",
    "        z_std = torch.from_numpy(np.random.normal(0, 1, size=mean.size())).float()\n",
    "        sigma = torch.exp(0.5 * logvar)  # logvar is log(sigma^2), so sigma = exp(logvar / 2)\n",
    "        return mean + sigma * Variable(z_std, requires_grad=False).cuda()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Decoder block"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Decoder(torch.nn.Module):\n",
    "    def __init__(self, length, in_channels, nclasses, latent_size):\n",
    "        super(Decoder, self).__init__()\n",
    "\n",
    "        self.in_channels = in_channels\n",
    "        self.length = length\n",
    "        self.latent_size = latent_size\n",
    "        length = self.length\n",
    "        length = length // 2 // 2 // 2\n",
    "        # Adapt Layer: expand the latent code to the smallest feature-map size\n",
    "        self.relu = torch.nn.ReLU()\n",
    "        self.tanh = torch.nn.Tanh()\n",
    "        self.adapt_nn = torch.nn.Linear(latent_size, self.in_channels * length)\n",
    "        # note: the even kernel size (200) with padding 100 grows every convolution output\n",
    "        # by one sample, so the decoded series ends up slightly longer than `length`;\n",
    "        # the loss below trims it back to LENGTH\n",
    "        # Layer 1\n",
    "        in_channels = self.in_channels\n",
    "        out_channels = 64\n",
    "        kernel_size = 200\n",
    "        padding = kernel_size // 2\n",
    "        self.deconv_block_1 = Conv_block(in_channels, out_channels, kernel_size, padding, is_conv=False)\n",
    "        length = length * 2\n",
    "        # Layer 2\n",
    "        in_channels = out_channels\n",
    "        out_channels = 32\n",
    "        kernel_size = 200\n",
    "        padding = kernel_size // 2\n",
    "        self.deconv_block_2 = Conv_block(in_channels, out_channels, kernel_size, padding, is_conv=False)\n",
    "        length = length * 2\n",
    "\n",
    "        # Layer 3\n",
    "        in_channels = out_channels\n",
    "        out_channels = 32\n",
    "        kernel_size = 200\n",
    "        padding = kernel_size // 2\n",
    "        self.deconv_block_3 = Conv_block(in_channels, out_channels, kernel_size, padding, is_conv=False)\n",
    "        length = length * 2\n",
    "\n",
    "        in_channels = out_channels\n",
    "        out_channels = 1\n",
    "        kernel_size = 200\n",
    "        padding = kernel_size // 2\n",
    "        self.decode_conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n",
    "\n",
    "    def forward(self, z):\n",
    "        x = self.relu(self.adapt_nn(z))\n",
    "        x = x.view(x.size(0), self.in_channels, self.length // 2 // 2 // 2)\n",
    "        x = self.deconv_block_1(x)\n",
    "        x = self.deconv_block_2(x)\n",
    "        x = self.deconv_block_3(x)\n",
    "        x = self.decode_conv(x)\n",
    "        out = self.tanh(x)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Variational autoencoder"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class VAE(torch.nn.Module):\n",
    "    def __init__(self, length, nclasses, latent_size, transition_channels):\n",
    "        super(VAE, self).__init__()\n",
    "        self.encoder = Encoder(1, length, nclasses, latent_size, transition_channels)\n",
    "        self.decoder = Decoder(length, transition_channels, nclasses, latent_size)\n",
    "\n",
    "    def count_parameters(self):\n",
    "        return np.sum([np.prod(x.size()) for x in self.parameters()])\n",
    "\n",
    "    def forward(self, x):\n",
    "        oh_class, mean, logvar, z = self.encoder(x)\n",
    "        x_decoded = self.decoder(z)\n",
    "        return oh_class, mean, logvar, z, x_decoded"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = VAE(LENGTH, NCLASSES, 10, 4).cuda()\n",
    "model.count_parameters()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# x = Variable(torch.from_numpy(x_train[:2])).cuda().float()\n",
    "# a = model.encoder(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Losses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SSD(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(SSD, self).__init__()\n",
    "\n",
    "    def forward(self, x_decoded, x):\n",
    "        # sum of squared differences, averaged over the batch\n",
    "        loss = torch.sum(torch.pow(x - x_decoded, 2))\n",
    "        return loss / x_decoded.size(0)\n",
    "\n",
    "\n",
    "class Variational_loss(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Variational_loss, self).__init__()\n",
    "\n",
    "    def forward(self, x_decoded, x, mu, logvar):\n",
    "        # reconstruction term + KL(N(mu, sigma^2) || N(0, 1))\n",
    "        return SSD()(x_decoded, x) + torch.sum(0.5 * (mu ** 2 + torch.exp(logvar) - logvar - 1))\n",
    "\n",
    "\n",
    "class VAE_loss(torch.nn.Module):\n",
    "    def __init__(self, weights):\n",
    "        super(VAE_loss, self).__init__()\n",
    "        self.classification_loss = torch.nn.CrossEntropyLoss(weights)\n",
    "        self.variational_loss = Variational_loss()\n",
    "        self.c = 0.001  # weight of the variational term relative to the classification term\n",
    "\n",
    "    def forward(self, x_decoded, x, mu, logvar, oh_class, y):\n",
    "        a = self.classification_loss(oh_class, y)\n",
    "        # the decoder output is slightly longer than LENGTH, so trim both series before comparing\n",
    "        b = self.variational_loss(\n",
    "            x_decoded.squeeze()[:, :LENGTH],\n",
    "            x.squeeze()[:, :LENGTH],\n",
    "            mu, logvar) * self.c\n",
    "        return a + b, a, b"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dataset(torch.utils.data.Dataset):\n",
    "    def __init__(self, x, y):\n",
    "        self.x = x\n",
    "        self.y = y\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.x[idx], self.y[idx]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.x)\n",
    "\n",
    "\n",
    "batch_size = 256\n",
    "\n",
    "train_loader = torch.utils.data.DataLoader(\n",
    "    dataset=Dataset(x_train, y_train_oh),\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True)\n",
    "val_loader = torch.utils.data.DataLoader(\n",
    "    dataset=Dataset(x_val, y_val_oh),\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tester"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, loader):\n",
    "    acc = []\n",
    "    for batch_id, (x, y) in tqdm(enumerate(loader), total=len(loader)):\n",
    "        x = Variable(x).float().cuda()\n",
    "        y = Variable(y).cuda()\n",
    "        out = model(x)\n",
    "        y_pred = out[0]  # the first output is always the class score tensor\n",
    "        _, index = torch.max(y_pred, -1)\n",
    "        acc.append((index == y).cpu().data.numpy())\n",
    "    acc = np.concatenate(acc).mean()\n",
    "    return acc"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train Classifier/Encoder first"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Freeze variational encoder layers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "classifier = model.encoder\n",
    "parameters = []\n",
    "# freeze the variational head (the pooling layer has no parameters, so only these three matter)\n",
    "layers = (classifier.adapt_conv, classifier.encode_mean, classifier.encode_logvar)\n",
    "for layer in layers:\n",
    "    for param in layer.parameters():\n",
    "        param.requires_grad = False\n",
    "for param in classifier.parameters():\n",
    "    if param.requires_grad:\n",
    "        parameters.append(param)\n",
    "classifier_parameters = parameters  # the optimizer accepts any iterable of tensors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create optimizer and loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "optim_classifier = torch.optim.Adam(classifier_parameters)\n",
    "Loss = torch.nn.CrossEntropyLoss(weights)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "learning_rates = [0.01] * 4 + [0.001] * 4 + [0.0001] * 3 + [0.00001] * 3\n",
    "for lr in tqdm(learning_rates, total=len(learning_rates)):\n",
    "    optim_classifier.param_groups[0]['lr'] = lr\n",
    "    for i, (x, y) in tqdm(enumerate(train_loader), total=len(train_loader)):\n",
    "        x = Variable(x).float().cuda()\n",
    "        y = Variable(y.long()).cuda()\n",
    "\n",
    "        oh_class, mu, logvar, z = model.encoder(x)\n",
    "        loss = Loss(oh_class, y)\n",
    "        optim_classifier.zero_grad()\n",
    "        loss.backward()\n",
    "        optim_classifier.step()\n",
    "    print('Loss:', loss.data)\n",
    "    print('Train Accuracy: ', test(model.encoder, train_loader))\n",
    "    print('Validation Accuracy:', test(model.encoder, val_loader))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train Decoder"
   ]
  },
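  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before training the full model, two quick sanity checks on random inputs (a sketch, assuming a CUDA device as everywhere else in this notebook): the decoded series comes back a few samples longer than LENGTH because of the even decoder kernels, and the variational loss vanishes for a perfect reconstruction with mu = 0 and logvar = 0, since KL(N(0, 1) || N(0, 1)) = 0."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dummy = Variable(torch.randn(2, 1, LENGTH)).cuda()\n",
    "oh_class, mean, logvar, z, decoded = model(dummy)\n",
    "print(oh_class.size(), z.size(), decoded.size())  # (2, NCLASSES), (2, 10), (2, 1, LENGTH + 15)\n",
    "\n",
    "flat = Variable(torch.randn(2, LENGTH))\n",
    "zero = Variable(torch.zeros(2, 10))\n",
    "print(Variational_loss()(flat, flat, zero, zero))  # expected: 0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The loop below needs an optimizer `optim` over the whole model and the combined `VAE_loss`, neither of which has been created yet. A minimal setup (assuming Adam and the 0.001 starting rate implied by the decay comment in the loop):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for param in model.parameters():\n",
    "    param.requires_grad = True  # unfreeze the variational layers frozen earlier\n",
    "optim = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "Loss = VAE_loss(weights)"
   ]
  },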
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "nepochs = 50\n",
    "for epoch in tqdm(range(nepochs), total=nepochs):\n",
    "    optim.param_groups[0]['lr'] *= 0.912011  # 0.001 * a**50 = 0.00001 => a = 0.912011\n",
    "\n",
    "    for i, (x, y) in tqdm(enumerate(train_loader), total=len(train_loader)):\n",
    "        x = Variable(x).float().cuda()\n",
    "        y = Variable(y.long()).cuda()\n",
    "\n",
    "        oh_class, mu, logvar, z, x_decoded = model(x)\n",
    "        loss, class_loss, var_loss = Loss(x_decoded, x, mu, logvar, oh_class, y)\n",
    "        optim.zero_grad()\n",
    "        loss.backward()\n",
    "        optim.step()\n",
    "        if not i % 50:\n",
    "            # every 50 batches, take one extra classification-only step on the encoder\n",
    "            print('training encoder only\\n')\n",
    "            oh_class, _, _, _ = model.encoder(x)\n",
    "            enc_aux_loss = torch.nn.CrossEntropyLoss(weights)(oh_class, y)\n",
    "            optim.zero_grad()\n",
    "            enc_aux_loss.backward()\n",
    "            optim.step()\n",
    "    print('Loss:', loss.data)\n",
    "    print('Class loss:', class_loss.data)\n",
    "    print('Var loss:', var_loss.data)\n",
    "\n",
    "    print('Train Accuracy: ', test(model, train_loader))\n",
    "    print('Validation Accuracy:', test(model, val_loader))"
   ]
  }
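  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Generate new series\n",
    "\n",
    "Once the model is trained, new time-series can be generated by sampling latent codes from the standard-normal prior and decoding them (a sketch; 10 is the latent size chosen above, and the output is trimmed to LENGTH exactly as in the loss):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.eval()  # use the running BatchNorm statistics for generation\n",
    "z = Variable(torch.randn(8, 10)).cuda()\n",
    "generated = model.decoder(z).squeeze().cpu().data.numpy()[:, :LENGTH]\n",
    "for series in generated:\n",
    "    plt.plot(series, alpha=0.7)\n",
    "plt.title('Series decoded from prior samples')\n",
    "plt.show()\n",
    "model.train()"
   ]
  }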
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {
    "height": "419px",
    "width": "252px"
   },
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "512px",
    "left": "0px",
    "right": "1246px",
    "top": "133px",
    "width": "228px"
   },
   "toc_section_display": "block",
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Leoni Mota Loris

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 1D-Convolutional-Variational-Autoencoder
Convolutional variational autoencoder for classification and generation of time-series.
It has been made using [PyTorch](https://pytorch.org).

It does not load a dataset: you are expected to load your own at the cell where it is requested.
It is under construction.
--------------------------------------------------------------------------------