├── 04_Extra
├── Super_Resolution
│ ├── EDSR
│ │ ├── PyTorch.py
│ │ └── TensorFlow
│ │ │ └── model.py
│ ├── README.md
│ ├── download.py
│ ├── SRCNN
│ │ ├── TensorFlow
│ │ │ ├── model.py
│ │ │ └── main.py
│ │ └── PyTorch
│ │ │ └── PyTorch.py
│ ├── VDSR
│ │ ├── TensorFlow
│ │ │ └── model.py
│ │ └── PyTorch
│ │ │ └── model.py
│ └── SubPixel
│ │ └── TensorFlow
│ │ ├── model.py
│ │ └── main.py
├── XAI
│ ├── CAM
│ │ └── PyTorch
│ │ │ └── PyTorch.py
│ └── Grad_CAM
│ │ ├── cat_dog.jpg
│ │ ├── PyTorch
│ │ ├── tabby.jpg
│ │ ├── german_shepherd.jpg
│ │ └── PyTorch.py
│ │ └── TensorFlow
│ │ ├── tabby.jpg
│ │ ├── german_shepherd.jpg
│ │ └── TensorFlow.py
├── Image_Translation
│ ├── Neural_Style_Transfer
│ │ └── PyTorch
│ │ │ ├── Tom_Jerry.jpg
│ │ │ ├── starry_night.jpg
│ │ │ ├── Result
│ │ │ ├── new_style_image.jpg
│ │ │ ├── new_style_image_000500.jpg
│ │ │ ├── new_style_image_001000.jpg
│ │ │ ├── new_style_image_001500.jpg
│ │ │ ├── new_style_image_002000.jpg
│ │ │ ├── new_style_image_002500.jpg
│ │ │ ├── new_style_image_003000.jpg
│ │ │ ├── new_style_image_003500.jpg
│ │ │ ├── new_style_image_004000.jpg
│ │ │ ├── new_style_image_004500.jpg
│ │ │ └── new_style_image_005000.jpg
│ │ │ ├── README.md
│ │ │ └── Neural_Style_Transfer.py
│ ├── cyclegan
│ │ ├── pytorch
│ │ │ ├── helper.py
│ │ │ ├── dataloader.py
│ │ │ └── models.py
│ │ └── tf_keras
│ │ │ └── models.py
│ ├── README.md
│ ├── download.py
│ └── pix2pix
│ │ └── PyTorch
│ │ └── main.py
├── DataLoading
│ ├── TensorFlow
│ │ ├── ver_util.py
│ │ └── ver_generator.py
│ ├── README.md
│ ├── PyTorch
│ │ ├── ver_torchvision.py
│ │ └── ver_custom.py
│ └── flower_download.py
├── Attention_Module
│ └── BAM
│ │ └── PyTorch.py
└── ViT
│ └── PyTorch.py
├── .gitignore
├── 01_Basic
├── Linear_Regression
│ ├── tf_keras.py
│ ├── tf_nn.py
│ ├── tf_subclassing.py
│ ├── PyTorch.py
│ ├── ver_mlx.py
│ ├── MXNet_Gluon.py
│ └── ver_jax.py
└── Logistic_Regression
│ ├── tf_keras.py
│ ├── ver_mlx.py
│ ├── MXNet_Gluon.py
│ ├── tf_nn.py
│ ├── PyTorch.py
│ ├── tf_subclassing.py
│ └── ver_jax.py
├── 02_Intermediate
├── Multi_Layer_Perceptron
│ ├── tf_keras.py
│ ├── ver_mlx.py
│ ├── tf_subclassing.py
│ ├── tf_nn.py
│ ├── ver_jax.py
│ ├── PyTorch.py
│ └── MXNet_Gluon.py
├── Simple_Convolutional_Neural_Network
│ ├── tf_keras.py
│ ├── tf_subclassing.py
│ ├── ver_mlx.py
│ ├── ver_jax.py
│ ├── tf_nn.py
│ ├── MXNet_Gluon.py
│ └── PyTorch.py
└── Simple_Recurrent_Neural_Network
│ ├── ver_mlx.py
│ └── PyTorch.py
├── 03_Advance
├── CNN
│ ├── prepare_data.py
│ ├── VGGNet
│ │ ├── tf_keras.py
│ │ ├── tf_subclassing.py
│ │ └── ver_mlx.py
│ ├── SqueezeNet
│ │ └── tf_keras.py
│ ├── MobileNetV1
│ │ └── tf_keras.py
│ └── ResNet
│ │ └── tf_keras.py
├── Segmentation
│ └── prepare_data.py
├── GAN
│ ├── LSGAN
│ │ ├── tf_keras.py
│ │ └── PyTorch.py
│ ├── Vanilla_GAN
│ │ ├── tf_keras.py
│ │ └── PyTorch.py
│ └── DCGAN
│ │ └── tf_keras.py
└── AutoEncoder
│ ├── Vanilla
│ └── PyTorch.py
│ └── CAE
│ └── PyTorch.py
└── utils
├── mlx_dataset.py
└── jax_dataset.py
/04_Extra/Super_Resolution/EDSR/PyTorch.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/04_Extra/XAI/CAM/PyTorch/PyTorch.py:
--------------------------------------------------------------------------------
1 | # %%
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.DS_Store
2 | *.ipynb_checkpoints
3 | *.vscode
4 | *__pycache__
5 | *data
6 | datasets
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/cat_dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/XAI/Grad_CAM/cat_dog.jpg
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/README.md:
--------------------------------------------------------------------------------
1 | # Super Resolution
2 |
3 | ## How to Run
4 |
5 |
6 | ### To-Do List
7 | - [x] Data Downloader
8 |
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/PyTorch/tabby.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/XAI/Grad_CAM/PyTorch/tabby.jpg
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/TensorFlow/tabby.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/XAI/Grad_CAM/TensorFlow/tabby.jpg
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/PyTorch/german_shepherd.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/XAI/Grad_CAM/PyTorch/german_shepherd.jpg
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/TensorFlow/german_shepherd.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/XAI/Grad_CAM/TensorFlow/german_shepherd.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Tom_Jerry.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Tom_Jerry.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/starry_night.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/starry_night.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_000500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_000500.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_001000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_001000.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_001500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_001500.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_002000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_002000.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_002500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_002500.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_003000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_003000.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_003500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_003500.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_004000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_004000.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_004500.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_004500.jpg
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_005000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jjerry-k/learning_framework/HEAD/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Result/new_style_image_005000.jpg
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/download.py:
--------------------------------------------------------------------------------
import os
import cv2 as cv
from tqdm import tqdm
from tensorflow.keras import utils

# Fetch the BSDS500 archive through Keras' cached downloader and untar it;
# root_dir then points at the extracted image directories.
dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
data_dir = utils.get_file(fname="BSR", origin=dataset_url, untar=True, cache_dir='./')
root_dir = os.path.join(data_dir, "BSDS500/data")
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/cyclegan/pytorch/helper.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
def set_requires_grad(models:list, requires_grad:bool) -> None:
    """Enable or disable gradient tracking on every parameter of each model.

    ``None`` entries in *models* are silently skipped.
    """
    for net in filter(lambda m: m is not None, models):
        for p in net.parameters():
            p.requires_grad = requires_grad
9 |
10 |
11 | # def train_generator(models:list, ):
12 |
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/SRCNN/TensorFlow/model.py:
--------------------------------------------------------------------------------
1 | from tensorflow.keras import models, layers
2 |
def SRCNN(img_channel = 1, name="SRCNN"):
    """Build the 3-layer SRCNN: feature extraction (9x9) -> non-linear
    mapping (1x1) -> reconstruction (5x5); accepts any spatial size."""
    inputs = layers.Input(shape=(None, None, img_channel))

    feat = layers.Conv2D(64, 9, activation='relu', name=name+"_Conv_1")(inputs)
    feat = layers.Conv2D(32, 1, activation='relu', name=name+"_Conv_2")(feat)
    # Final layer is linear (no activation): it regresses pixel values.
    feat = layers.Conv2D(img_channel, 5, name=name+"_Output")(feat)

    return models.Model(inputs, feat, name=name)
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/README.md:
--------------------------------------------------------------------------------
1 | # Image Translation
2 |
3 | ## How to Run
4 |
5 | 1. Download dataset
6 |
7 | ``` bash
8 | # In Image_Translation
9 | python download.py --dataset {dataset} # datasets: ['cityscapes', 'facades', 'maps']
10 | ```
11 |
12 | 2. Run what you want (except Neural_Style_Transfer)
13 | ``` bash
14 | cd ./{model}/{framework}
15 | python main.py --DATASET {dataset} --IMG_SIZE {img_size} --EPOCHS {epochs} --BATCH_SIZE {batch_size}
16 | ```
17 |
18 |
19 | ### To-Do List
20 | - [x] download.py 수정
21 | - [ ] Pix2Pix model ver.PyTorch
22 | - [ ] CycleGAN model ver.PyTorch
23 |
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/VDSR/TensorFlow/model.py:
--------------------------------------------------------------------------------
1 | from tensorflow.keras import layers, models
2 |
def VDSR(img_channel=1, name="VDSR"):
    """Build the 20-layer VDSR network with a global residual connection:
    the input image is added back to the predicted residual."""
    inp = layers.Input(shape=(None, None, img_channel), name=name+"_Input")

    out = layers.Conv2D(64, 3, padding='same', activation='relu', name=name+"_Conv_1")(inp)

    # 18 identical 3x3 conv+ReLU layers (Conv_2 .. Conv_19).
    for idx in range(2, 20):
        out = layers.Conv2D(64, 3, padding='same', activation='relu', name=name+f"_Conv_{idx}")(out)

    out = layers.Conv2D(img_channel, 3, padding='same', activation='relu', name=name+"_Conv_20")(out)

    # Global residual: output = input + learned residual.
    output = layers.Add(name=name+"_Output")([inp, out])
    return models.Model(inputs=inp, outputs=output, name=name)
17 |
18 |
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/tf_keras.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, losses, optimizers

# Hyper-parameters.
EPOCHS = 500
LEARNING_RATE = 0.05

# Ground-truth line: y = W*x + B (plus Gaussian noise).
W = 0.1
B = 0.3

x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * W + B + np.random.normal(0.0, 0.03, (10000,1))

# A single Dense(1) unit is exactly the linear model w*x + b.
model = models.Sequential([layers.Dense(1)])

model.compile(optimizer=optimizers.SGD(learning_rate=LEARNING_RATE),
              loss=losses.MeanSquaredError())

# One batch per epoch (batch_size == dataset size); silent training.
history = model.fit(x, y, epochs=EPOCHS, batch_size=10000, verbose=0)

# Compare the learned kernel/bias against the generating parameters.
param = model.weights
print(f"Real W: {W}, Predict W: {param[0].numpy().item():.3f}")
print(f"Real B: {B}, Predict B: {param[1].numpy().item():.3f}")
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/SRCNN/PyTorch/PyTorch.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
def basic_conv(in_ch, out_ch, ksize=3, pad='same'):
    """Return a stride-1 Conv2d whose padding either preserves the spatial
    size ('same', requires odd ksize) or applies none ('valid')."""
    assert ksize%2 == 1, "Please use ksize of odd number."
    # Any other pad value is forwarded to Conv2d unchanged.
    padding = {'same': (ksize - 1) // 2, 'valid': 0}.get(pad, pad)
    return nn.Conv2d(in_ch, out_ch, kernel_size=ksize, stride=1, padding=padding)
13 |
14 |
class SRCNN(nn.Module):
    """SRCNN: three 'valid' (unpadded) convolutions, 9x9 -> 1x1 -> 5x5,
    over 3-channel images; outputs are spatially smaller than inputs."""
    def __init__(self):
        super(SRCNN, self).__init__()
        self.net = nn.Sequential(
            basic_conv(3, 64, 9, 'valid'),
            nn.ReLU(inplace=True),
            basic_conv(64, 32, 1, 'valid'),
            nn.ReLU(inplace=True),
            basic_conv(32, 3, 5, 'valid'),
        )

    def forward(self, x):
        return self.net(x)
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/SubPixel/TensorFlow/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.keras import layers, models
3 |
def SubPixel(img_channel=1, upscale_factor=4, name="SubPixel"):
    """ESPCN-style sub-pixel upscaler: a conv stack followed by
    depth_to_space, which rearranges channels into an image that is
    upscale_factor times larger in each spatial dimension."""
    inputs = layers.Input(shape=(None, None, img_channel))

    # (filters, kernel size, name suffix) for each conv layer; the last
    # layer emits img_channel * r^2 maps for the pixel shuffle.
    conv_specs = [
        (64, 5, "_Conv1"),
        (64, 3, "_Conv2"),
        (32, 3, "_Conv3"),
        (img_channel * (upscale_factor ** 2), 3, "_Conv4"),
    ]
    x = inputs
    for filters, ksize, suffix in conv_specs:
        x = layers.Conv2D(filters, ksize, padding='same', activation='relu',
                          kernel_initializer="Orthogonal", name=name+suffix)(x)

    outputs = tf.nn.depth_to_space(x, upscale_factor, name=name+"_Output")
    return models.Model(inputs=inputs, outputs=outputs, name=name)
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/VDSR/PyTorch/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
def basic_conv(in_ch, out_ch, ksize=3, pad='same'):
    """Return a stride-1 Conv2d; pad='same' preserves the spatial size
    (requires odd ksize), pad='valid' uses no padding."""
    assert ksize%2 == 1, "Please use ksize of odd number."

    if pad=='same':
        pad = (ksize-1)//2
    elif pad=='valid':
        pad = 0

    return nn.Conv2d(in_ch, out_ch, kernel_size=ksize, stride=1, padding=pad)


class VDSR(nn.Module):
    """20-layer VDSR: 19 conv+ReLU layers plus a final linear conv, with a
    global residual connection (forward returns net(x) + x), so spatial
    size and channel count (3) are preserved."""
    def __init__(self):
        super(VDSR, self).__init__()

        layers = [
            basic_conv(3, 64, 3, 'same'),
            nn.ReLU(inplace=True)]

        for _ in range(1, 19):
            layers.append(basic_conv(64, 64, 3, 'same'))
            layers.append(nn.ReLU(inplace=True))

        # BUG FIX: the original did layers.append(layers.append(...)), which
        # also appended None (list.append returns None) and made
        # nn.Sequential(*layers) raise a TypeError.
        layers.append(basic_conv(64, 3, 3, 'same'))

        self.net = nn.Sequential(*layers)

    def forward(self, x):
        out = self.net(x)
        return out + x
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/README.md:
--------------------------------------------------------------------------------
1 | # Style Transfer Using PyTorch
2 | ---
3 |
4 | - Content Image & Style Image
5 |
6 |
7 |
8 |
9 |
10 | - 변화 과정
11 |
12 | **500 Step**
13 |
14 |
15 | **1000 Step**
16 |
17 |
18 | **1500 Step**
19 |
20 |
21 | **2000 Step**
22 |
23 |
24 | **2500 Step**
25 |
26 |
27 | **3000 Step**
28 |
29 |
30 | **3500 Step**
31 |
32 |
33 | **4000 Step**
34 |
35 |
36 | **4500 Step**
37 |
38 |
39 | **5000 Step**
40 |
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/tf_nn.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt

# Synthetic data: y = 0.1*x + 0.3 with Gaussian noise.
x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * 0.1 + 0.3 + np.random.normal(0.0, 0.03, (10000,1))

plt.plot(x, y, 'r.')
plt.show()

# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (tf.placeholder, tf.Session); it will not run under TF 2.x as-is.
X = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.zeros([1]))

# Linear hypothesis.
h = X*W+b

Y = tf.placeholder(tf.float32, shape = [None, 1])
Loss = tf.reduce_mean(tf.square(h - Y))
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(Loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Training loop: full-batch gradient descent.
for epoch in range(500):
    _, t_loss = sess.run([optimizer, Loss], feed_dict={X:x, Y:y})

    print("Epoch : ", epoch, " Loss : ", t_loss)

    # Plot the current fit at the first epoch and every 100th one
    # (the two original branches were identical, so they are merged).
    if epoch == 0 or (epoch+1) % 100 == 0:
        y_pred = sess.run(h, feed_dict={X:x})
        plt.plot(x, y, 'r.')
        plt.plot(x, y_pred, 'b.')
        plt.show()
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/tf_subclassing.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, losses, optimizers

# Hyper-parameters.
EPOCHS = 500
LEARNING_RATE = 0.05

# Ground-truth parameters of the line the model should recover.
W = 0.1
B = 0.3

x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * W + B + np.random.normal(0.0, 0.03, (10000,1))

class LinearRegression(models.Model):
    """Single-unit dense network: equivalent to y = w*x + b."""
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.d = layers.Dense(1)

    def call(self, x):
        return self.d(x)

# Create an instance of the model
model = LinearRegression()

loss_object = losses.MeanSquaredError()

optimizer = optimizers.SGD(learning_rate=LEARNING_RATE)

# FIX: the loop previously hard-coded range(500); use the EPOCHS constant so
# the hyper-parameter above actually controls training length (consistent
# with the sibling PyTorch/MLX scripts).
for epoch in range(EPOCHS):
    with tf.GradientTape() as tape:
        predictions = model(x, training=True)
        loss = loss_object(y, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Log the first epoch and every 100th one.
    if (epoch == 0) or ((epoch+1) % 100 == 0):
        print(f"Epoch: {epoch+1} Loss: {loss}")

# Compare the learned kernel/bias against the generating parameters.
param = model.weights
print(f"Real W: {W}, Predict W: {param[0].numpy().item():.3f}")
print(f"Real B: {B}, Predict B: {param[1].numpy().item():.3f}")
--------------------------------------------------------------------------------
/04_Extra/DataLoading/TensorFlow/ver_util.py:
--------------------------------------------------------------------------------
import os
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm

# Loader settings.
img_size = 128
batch_size = 32

data_dir = "../data/flower_photos"

print("Training Dataset")
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    os.path.join(data_dir, "train"),
    seed=123,
    image_size=(img_size, img_size),
    batch_size=batch_size)

print("Validation Dataset")
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    os.path.join(data_dir, "validation"),
    seed=123,
    image_size=(img_size, img_size),
    batch_size=batch_size)

# Iterate each dataset once with a progress bar; the sleep stands in for
# real per-batch work.
with tqdm(total=len(train_ds)) as pbar:
    pbar.set_description('Train Loader')
    for batch_img, batch_lab in train_ds:
        time.sleep(0.1)
        # Add feedforward & Optimization code
        pbar.set_postfix({"Train data shape": f"{batch_img.shape} {batch_lab.shape}"})
        pbar.update()

with tqdm(total=len(val_ds)) as pbar:
    pbar.set_description('Validation Loader')
    for batch_img, batch_lab in val_ds:
        time.sleep(0.1)
        # Add evaluation code
        pbar.set_postfix({"Validation data shape": f"{batch_img.shape} {batch_lab.shape}"})
        pbar.update()
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/PyTorch.py:
--------------------------------------------------------------------------------
import torch
from torch import nn
from torch import optim
import numpy as np

# Hyper-parameters.
EPOCHS = 500
LEARNING_RATE = 0.05

# Parameters of the generating line y = W*x + B.
W = 0.1
B = 0.3

x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * W + B + np.random.normal(0.0, 0.03, (10000,1))

# Wrap the numpy data as float32 tensors.
x_data = torch.Tensor(x)
y_data = torch.Tensor(y)

class Model(nn.Module):
    """A single 1-in/1-out linear layer: exactly y = w*x + b."""
    def __init__(self):
        super(Model, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, X):
        return self.linear(X)

model = Model()
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)

# Full-batch gradient descent.
for epoch in range(EPOCHS):
    loss = criterion(model(x_data), y_data)

    # Zero gradients, backpropagate, then take an SGD step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Log the first epoch and every 100th one.
    if (epoch == 0) or ((epoch+1) % 100 == 0):
        print(f"Epoch: {epoch+1} Loss: {loss.data.numpy()}")

# After Training, check parameters
param = list(model.parameters())
print(f"Real W: {W}, Predict W: {param[0].item():.3f}")
print(f"Real B: {B}, Predict B: {param[1].item():.3f}")
--------------------------------------------------------------------------------
/04_Extra/DataLoading/README.md:
--------------------------------------------------------------------------------
1 | # DataLoading
2 |
3 | ---
4 |
5 | Framework 별로 Classification을 수행할 때 Data를 Load 하는 방법에 대해서 알아봅니다.
6 |
7 | ## Data
8 | - `flower_photos` dataset을 이용합니다.
9 | - `daisy`, `dandelion`, `roses`, `sunflowers`, `tulips` 와 같이 5개의 class로 구성된 dataset입니다.
10 | - `flower_download.py` script를 실행하면 dataset setting은 자동으로 됩니다.
11 | - 다른 데이터로 직접 setting을 하고 싶으시다면 data download 후 다음과 같이 Directory tree를 구성합니다.
12 | - `train` 과 `validation` 내의 class 수는 동일해야합니다.
13 | ```
14 | dataset name
15 | │
16 | ├─── train
17 | │ ├─── class1
18 | │ │ │ ~~~~.jpg
19 | │ │ └───...
20 | │ │
21 | │ ├─── class2
22 | │ │ │ ~~~~..jpg
23 | │ │ └───...
24 | │ │
25 | │ ├─── class3
26 | │ │ │ ~~~~..jpg
27 | │ │ └───...
28 | │ │
29 | │ ├─── class4
30 | │ │ │ ~~~~..jpg
31 | │ │ └───...
32 | │ │
33 | │ └─── class5
34 | │ │ ~~~~..jpg
35 | │ └───...
36 | │
37 | └─── validation
38 | ├─── class1
39 | │ │ ~~~~..jpg
40 | │ └───...
41 | │
42 | ├─── class2
43 | │ │ ~~~~..jpg
44 | │ └───...
45 | │
46 | ├─── class3
47 | │ │ ~~~~..jpg
48 | │ └───...
49 | │
50 | ├─── class4
51 | │ │ ~~~~..jpg
52 | │ └───...
53 | │
54 | └─── class5
55 | │ ~~~~..jpg
56 | └───...
57 | ```
58 |
59 | ## PyTorch
60 | - Using `torchvision.datasets.ImageFolder`
61 |
62 | - Using `Custom dataset class`
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/ver_mlx.py:
--------------------------------------------------------------------------------
from mlx import nn
from mlx import core as mx
from mlx import optimizers as optim
import numpy as np

# Hyper-parameters.
EPOCHS = 500
LEARNING_RATE = 0.05

# Parameters of the generating line y = W*x + B.
W = 0.1
B = 0.3

x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * W + B + np.random.normal(0.0, 0.03, (10000, 1))

# Move the numpy data into MLX arrays.
x_mx_arr = mx.array(x)
y_mx_arr = mx.array(y)

class Model(nn.Module):
    """Single linear unit: y = w*x + b."""
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)

    def __call__(self, x):
        return self.linear(x)

def loss_fn(model, X, y):
    """Training objective: mean squared error."""
    return mx.mean(nn.losses.mse_loss(model(X), y))

def eval_fn(model, X, y):
    """Evaluation metric: mean squared error, computed directly."""
    return mx.mean((model(X) - y)**2)

model = Model()

loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
optimizer = optim.SGD(learning_rate=LEARNING_RATE)

for epoch in range(EPOCHS):
    loss, grads = loss_and_grad_fn(model, x_mx_arr, y_mx_arr)
    optimizer.update(model, grads)
    # Force MLX's lazy graphs to execute before the next step.
    mx.eval(model.parameters(), optimizer.state)
    mse = eval_fn(model, x_mx_arr, y_mx_arr)  # computed but never reported
    if (epoch == 0) or ((epoch+1) % 100 == 0):
        print(f"Epoch: {epoch+1}: Loss: {loss.item()}")

# Compare the learned weight/bias against the generating parameters.
param = (model.linear.weight, model.linear.bias)
print(f"Real W: {W}, Predict W: {param[0].item():.3f}")
print(f"Real B: {B}, Predict B: {param[1].item():.3f}")
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/tf_keras.py:
--------------------------------------------------------------------------------
import os
# For Mac User...
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np

import tensorflow as tf
from tensorflow.keras import models, layers, optimizers, losses, utils, datasets

tf.random.set_seed(777)

# FIX: corrected the "Packge" typo in the status message.
print("Package Loaded!")

# Hyper-parameters.
EPOCHS = 10
BATCH_SIZE = 100
LEARNING_RATE = 0.01

# Data Loading: MNIST digits, flattened to 784-dim vectors scaled to [0, 1].
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])
# Binarize the labels:
# 0 : digit < 5
# 1 : digit >= 5
train_y, test_y = np.greater_equal(train_y, 5)[..., np.newaxis], np.greater_equal(test_y, 5)[..., np.newaxis]


print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)


# Network Building
## Using Sequential: a single sigmoid unit == logistic regression.
mlp = models.Sequential()
mlp.add(layers.Dense(1, activation='sigmoid'))

## Using Functional
# _input = layers.Input(shape=(784, ))
# layer = layers.Dense(10, activation='sigmoid')(_input)
# mlp = models.Model(inputs=_input, outputs=layer)

print("Network Built!")

mlp.compile(optimizer=optimizers.Adam(learning_rate=LEARNING_RATE), loss=losses.binary_crossentropy, metrics=['accuracy'])

history = mlp.fit(train_x, train_y, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(test_x, test_y))
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/MXNet_Gluon.py:
--------------------------------------------------------------------------------
# %%
import os
import numpy as np
from mxnet import nd, gluon, init, autograd
from mxnet.gluon import nn
from matplotlib import pyplot as plt
print("Load Package!")

# %%
# Synthetic data: y = 0.1*x + 0.3 with Gaussian noise.
x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * 0.1 + 0.3 + np.random.normal(0.0, 0.03, (10000,1))

# %%
# A single Dense(1) unit == linear regression.
net = nn.Sequential()
net.add(nn.Dense(1))

net.initialize(init=init.Xavier())

# Squared-error loss and plain SGD (loss_fn was misleadingly named
# cross_entropy in the original; it is gluon's L2Loss).
loss_fn = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})

# %%
print("Setting Done!")

batch_size = 100
tot_iter = len(x) // batch_size
print("Start Training!")
for epoch in range(10):
    train_loss = 0.
    # Mini-batch SGD: draw a random batch without replacement each step.
    for step in range(tot_iter):
        idx = np.random.choice(len(x), batch_size, replace=False)
        with autograd.record():
            output = net(nd.array(x[idx]))
            loss = loss_fn(output, nd.array(y[idx]))
        loss.backward()
        # Update parameters.
        trainer.step(batch_size)
        train_loss += loss.mean().asscalar()
    test_y = net.forward(nd.array(x)).asnumpy()
    print("Epoch : %d, loss : "%(epoch+1), train_loss/batch_size)
    # plt.plot(x, y, 'r.')
    # plt.plot(x, test_y, 'b-')
    # plt.show()

# Final fit plotted against the data.
test_y = net.forward(nd.array(x)).asnumpy()
plt.plot(x, y, 'r.')
plt.plot(x, test_y, 'b-')
plt.show()
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/tf_keras.py:
--------------------------------------------------------------------------------
import os
# For Mac User...
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np
from matplotlib import pyplot as plt

import tensorflow as tf
from tensorflow.keras import models, layers, optimizers, losses, utils, datasets

tf.compat.v1.set_random_seed(777)

# FIX: corrected the "Packge" typo in the status message.
print("Package Loaded!")


# Data Loading: MNIST, flattened to 784-dim float vectors scaled to [0, 1].
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])

print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Network Building
## Using Sequential: 784 -> 256 -> 128 -> 10 MLP.
mlp = models.Sequential()
mlp.add(layers.Dense(256, activation='relu', input_shape=(784,)))
mlp.add(layers.Dense(128, activation='relu'))
mlp.add(layers.Dense(10, activation='softmax'))

## Using Functional
# _input = layers.Input(shape=(784, ))
# layer = layers.Dense(256, activation='relu')(_input)
# layer = layers.Dense(128, activation='relu')(layer)
# layer = layers.Dense(10, activation='softmax')(layer)
# mlp = models.Model(inputs=_input, outputs=layer)

print("Network Built!")

# Integer labels -> sparse categorical cross-entropy.
mlp.compile(optimizer=optimizers.Adam(), loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])

history = mlp.fit(train_x, train_y, epochs=10, batch_size=16, validation_data=(test_x, test_y))

# Training curves.
plt.plot(history.history['loss'], '.-')
plt.plot(history.history['val_loss'], '.-')
plt.legend(['train_loss', 'val_loss'], loc=0)
plt.show()
--------------------------------------------------------------------------------
/04_Extra/Attention_Module/BAM/PyTorch.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os, torch
3 | import cv2 as cv
4 | import numpy as np
5 | from torch import nn, optim
6 | from torch.nn import functional as F
7 | from torch.utils.data import Dataset, DataLoader
8 | from torchvision import transforms, datasets, utils
9 |
10 | # Device Configuration
11 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
12 |
13 | # %%
14 |
15 | class BAM(nn.Module):
16 | def __init__(self, input_feature, reduction_ratio=16, dilation=4):
17 | super(BAM, self).__init__()
18 |
19 | inter_feature = input_feature//reduction_ratio
20 |
21 | self.attention_ch = nn.Sequential(
22 | nn.AdaptiveAvgPool2d((1, 1)),
23 | nn.Conv2d(input_feature, inter_feature, ksize=(1, 1)),
24 | nn.ReLU(True),
25 | nn.Conv2d(inter_feature, input_feature, ksize=(1, 1)),
26 | nn.BatchNorm2d(input_feature)
27 | )
28 |
29 | self.attention_sp = nn.Sequential(
30 | nn.Conv2d(input_feature, inter_feature, kernel_size=1),
31 | nn.ReLU(True),
32 | nn.Conv2d(inter_feature, inter_feature, kernel_size=3, stride=1, padding=1, dilation=dilation),
33 | nn.ReLU(True),
34 | nn.Conv2d(inter_feature, inter_feature, kernel_size=3, stride=1, padding=1, dilation=dilation),
35 | nn.ReLU(True),
36 | nn.Conv2d(inter_feature, 1, kernel_size=1),
37 | nn.ReLU(True),
38 | nn.BatchNorm2d(1)
39 |
40 | )
41 |
42 | self.act = nn.Sigmoid()
43 |
44 | def forward(self, x):
45 |
46 | att_ch = self.attention_ch(x)
47 |
48 | att_sp = self.attention_sp(x)
49 |
50 | att = self.act(att_ch + att_sp)
51 |
52 | return x + (x*att)
--------------------------------------------------------------------------------
/04_Extra/DataLoading/PyTorch/ver_torchvision.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import cv2 as cv
4 | import numpy as np
5 | from tqdm import tqdm
6 | from PIL import Image
7 |
8 | import torch
9 | from torchvision import transforms, datasets, utils
10 | from torch.utils.data import Dataset, DataLoader
11 |
# Root of the flower_photos dataset and the extensions treated as images
PATH = "../data/flower_photos"
IMG_FORMAT = ["jpg", "jpeg", "tif", "tiff", "bmp", "png"]

# Class names = the sub-directories directly under PATH
category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 128
batch_size = 32

# Resize every image to img_size x img_size and convert to a CHW tensor
transform = transforms.Compose([
    transforms.Resize([img_size, img_size]),
    transforms.ToTensor()
])

# ImageFolder infers integer labels from the sub-directory names
train_dataset = datasets.ImageFolder(os.path.join(PATH, "train"), transform)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)

validation_dataset = datasets.ImageFolder(os.path.join(PATH, "validation"), transform)
validation_loader = DataLoader(dataset=validation_dataset, batch_size=batch_size, shuffle=True, num_workers=2)

# Smoke-test iteration over both loaders with a progress bar;
# the sleep stands in for real training/evaluation work.
with tqdm(total=len(train_loader)) as t:
    t.set_description(f'Train Loader')
    for i, (batch_img, batch_lab) in enumerate(train_loader):
        time.sleep(0.1)
        # Add feedforward & Optimization code
        t.set_postfix({"Train data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()

with tqdm(total=len(validation_loader)) as t:
    t.set_description(f'Validation Loader')
    for i, (batch_img, batch_lab) in enumerate(validation_loader):
        time.sleep(0.1)
        # Add evaluation code
        t.set_postfix({"Validation data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/cyclegan/pytorch/dataloader.py:
--------------------------------------------------------------------------------
1 | import random
2 | import os
3 | import torch
4 | from PIL import Image
5 | from torch.utils.data import Dataset, DataLoader
6 | import torchvision.transforms as transforms
7 |
class BaseDataset(Dataset):
    """Unpaired two-domain image dataset for CycleGAN-style training.

    Each item is a random (A, B) image pair drawn independently from the
    `<type>A` and `<type>B` sub-directories, so the domains stay unaligned.
    """

    def __init__(self, data_dir, transform, type):
        self.data_dir = data_dir

        self.A_path = os.path.join(self.data_dir, f"{type}A")
        self.A_list = os.listdir(self.A_path)

        self.B_path = os.path.join(self.data_dir, f"{type}B")
        self.B_list = os.listdir(self.B_path)

        self.transform = transform

    def __len__(self):
        # The epoch length is governed by the larger of the two domains.
        return max(len(self.A_list), len(self.B_list))

    def __getitem__(self, idx):
        # `idx` is intentionally ignored: sampling is random and unaligned.
        pick_A = random.randint(0, len(self.A_list) - 1)
        pick_B = random.randint(0, len(self.B_list) - 1)

        A_image = self._open_rgb(os.path.join(self.A_path, self.A_list[pick_A]))
        B_image = self._open_rgb(os.path.join(self.B_path, self.B_list[pick_B]))

        if self.transform:
            A_image = self.transform(A_image)
            B_image = self.transform(B_image)

        return A_image, B_image

    @staticmethod
    def _open_rgb(path):
        """Open one image file, forcing 3-channel RGB when needed."""
        image = Image.open(path)
        if len(image.getbands()) != 3:
            image = image.convert("RGB")
        return image
41 |
def CustomDataloader(transform=None, **kwargs):
    """Build train/test DataLoaders over a CycleGAN-style directory layout.

    transform: optional torchvision transform applied to both domains;
        when None (the default), images are resized to
        (input_size, input_size) and converted to tensors.
    kwargs: expects 'path', 'input_size', 'batch_size', 'num_workers'.

    Returns {'train': DataLoader, 'test': DataLoader}.
    """
    input_size = (kwargs['input_size'], kwargs['input_size'])
    dataloaders = {}
    for split in ['train', 'test']:
        # Fix: the `transform` argument was previously accepted but ignored;
        # honor it when provided, otherwise fall back to the default pipeline.
        if transform is None:
            split_transform = transforms.Compose(
                [transforms.Resize(input_size), transforms.ToTensor()])
        else:
            split_transform = transform
        dl = DataLoader(BaseDataset(kwargs['path'], split_transform, split),
                        batch_size=kwargs['batch_size'],
                        num_workers=kwargs['num_workers'],
                        drop_last=True)
        dataloaders[split] = dl
    return dataloaders
--------------------------------------------------------------------------------
/01_Basic/Linear_Regression/ver_jax.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | from jax import jit, grad
4 | from jax.scipy.special import logsumexp
5 | import jax.numpy as jnp
6 |
EPOCHS = 500
LEARNING_RATE = 0.05


# Ground-truth parameters of the synthetic linear model y = W*x + B
W = 0.1
B = 0.3

# 10k samples: x ~ N(0, 0.55); y = W*x + B plus N(0, 0.03) observation noise
x = np.random.normal(0.0, 0.55, (10000, 1))
y = x * W + B + np.random.normal(0.0, 0.03, (10000,1))
16 |
def init_random_params(scale, layer_sizes, rng=None):
    """Initialize a (weights, biases) pair for each consecutive layer pair.

    scale: multiplier applied to the standard-normal draws
    layer_sizes: e.g. [1, 1] -> one (1x1 weight, 1 bias) layer
    rng: optional numpy RandomState. Fix: the original default
        `np.random.RandomState(0)` was created once at definition time, so
        its state silently advanced across calls (mutable-default pitfall);
        a fresh RandomState(0) is now created per call.
    """
    if rng is None:
        rng = np.random.RandomState(0)
    return [(scale * rng.randn(m, n), scale * rng.randn(n))
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
20 |
def predict(params, inputs):
    """Affine forward pass of the single linear layer: inputs @ W + b."""
    weight, bias = params[0]
    return jnp.dot(inputs, weight) + bias
24 |
def loss(params, batch):
    """Mean squared error of the model's predictions against the targets."""
    inputs, targets = batch
    residual = predict(params, inputs) - targets
    return jnp.mean(jnp.sum(residual ** 2, axis=1))
30 |
def accuracy(params, batch):
    """Fraction of samples whose argmax prediction matches the target class."""
    inputs, targets = batch
    hits = jnp.argmax(predict(params, inputs), axis=1) == jnp.argmax(targets, axis=1)
    return jnp.mean(hits)
36 |
# Single linear layer: 1 input feature -> 1 output
layer_sizes = [1, 1]
param_scale= 1
39 |
@jit
def update(params, batch):
    """One gradient-descent step: params <- params - LEARNING_RATE * grad."""
    grads = grad(loss)(params, batch)
    stepped = []
    for (w, b), (dw, db) in zip(params, grads):
        stepped.append((w - LEARNING_RATE * dw, b - LEARNING_RATE * db))
    return stepped
45 |
params = init_random_params(param_scale, layer_sizes)
# Full-batch gradient descent; one update per epoch.
# Fix: removed a dead per-epoch prediction (`y_`) and unused epoch timing
# that were computed but never used.
for epoch in range(EPOCHS):
    params = update(params, (x, y))
    if (epoch == 0) or ((epoch+1) % 100 == 0):
        print(f"Epoch: {epoch+1} Loss: {loss(params, (x, y))}")

# After Training, check parameters
print(f"Real W: {W}, Predict W: {params[0][0].item():.3f}")
print(f"Real B: {B}, Predict B: {params[0][1].item():.3f}")
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/tf_keras.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 |
7 | import tensorflow as tf
8 | from tensorflow.keras import models, layers, optimizers, losses, utils, datasets
9 |
tf.compat.v1.set_random_seed(777)

print("Packge Loaded!")


# Data Loading: MNIST scaled to [0, 1] with an explicit channel axis
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x = np.expand_dims(train_x/255., -1)
test_x = np.expand_dims(test_x/255., -1)

print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Network Building
## Sequential model built from a layer list instead of repeated add() calls
cnn = models.Sequential([
    layers.Conv2D(16, 3, activation='relu', input_shape=(28, 28, 1,)),
    layers.MaxPool2D(),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPool2D(),
    layers.Flatten(),
    layers.Dense(10, activation='softmax'),
])

print("Network Built!")

# Compiling: Adam + sparse categorical CE (integer labels)
cnn.compile(optimizer=optimizers.Adam(),
            loss=losses.sparse_categorical_crossentropy,
            metrics=['accuracy'])

# Training
history = cnn.fit(train_x, train_y, epochs=10, batch_size=16,
                  validation_data=(test_x, test_y))

# Plotting Result
plt.plot(history.history['loss'], '.-')
plt.plot(history.history['val_loss'], '.-')
plt.legend(['train_loss', 'val_loss'], loc=0)
plt.show()
57 |
58 |
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/ver_mlx.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 |
4 | from mlx import nn
5 | from mlx import core as mx
6 | from mlx import optimizers as optim
7 | import numpy as np
8 | np.random.seed(777)
9 | mx.random.seed(777)
10 |
11 | from utils import mlx_dataset
12 |
13 | train_images, train_labels, test_images, test_labels = mlx_dataset.mnist()
14 |
class Model(nn.Module):
    """Two-layer MLP for MNIST: 784 -> 256 -> 10.

    Fix: the original chained two Linear layers with no activation in
    between, which collapses to a single linear map; a ReLU is now applied
    after the hidden layer, matching the sibling Keras/PyTorch MLP scripts.
    """
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(784, 256)
        self.linear2 = nn.Linear(256, 10)

    def __call__(self, x):
        # Hidden layer with ReLU nonlinearity, then the logit head.
        x = nn.relu(self.linear1(x))
        x = self.linear2(x)
        return x
25 |
def loss_fn(model, x, y):
    # Mean cross-entropy between the model's logits and integer labels
    x = mx.array(x)
    y = mx.array(y)
    return mx.mean(nn.losses.cross_entropy(model(x), y))

def eval_fn(x, y):
    # Top-1 accuracy of the module-level `model` on (x, y)
    return mx.mean(mx.argmax(model(x), axis=1) == y)

def batch_iterate(batch_size, x, y):
    # Yield shuffled mini-batches; the final batch may be smaller
    perm = mx.array(np.random.permutation(y.size))
    for s in range(0, y.size, batch_size):
        ids = perm[s : s + batch_size]
        yield x[ids], y[ids]
39 |
num_epochs = 10
batch_size = 100

model = Model()
# Materialize the lazily-initialized parameters
mx.eval(model.parameters())

learning_rate = 0.01

# value_and_grad returns (loss, grads) in a single call
loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
optimizer = optim.Adam(learning_rate=learning_rate)

for epoch in range(num_epochs):
    avg_loss = 0
    for i, (batch_x, batch_y) in enumerate(batch_iterate(batch_size, train_images, train_labels)):


        loss, grads = loss_and_grad_fn(model, batch_x, batch_y)
        optimizer.update(model, grads)
        # Force evaluation so the parameter update actually executes
        mx.eval(model.parameters(), optimizer.state)
        avg_loss += loss

        # Periodic progress report: running mean of the loss so far
        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.item()/(i+1))
    accuracy = eval_fn(mx.array(test_images), mx.array(test_labels))
    print(f"Epoch: {epoch+1}, Loss: {avg_loss.item()/(i+1):.3f}, Accuracy: {accuracy.item():.3f}")
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/tf_subclassing.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 | import numpy as np
5 |
6 | import tensorflow as tf
7 | from tensorflow.keras import models, layers, optimizers, losses, utils, datasets
8 |
9 | tf.random.set_seed(777)
10 |
11 | print("Packge Loaded!")
12 | # %%
EPOCHS = 500
BATCH_SIZE = 128

# Data Loading: flatten each 28x28 image to a 784 vector in [0, 1]
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])

print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Wrap the arrays as shuffled, batched tf.data pipelines
train_ds = tf.data.Dataset.from_tensor_slices(
    (train_x, train_y)).shuffle(10000).batch(BATCH_SIZE)

test_ds = tf.data.Dataset.from_tensor_slices(
    (test_x, test_y)).shuffle(10000).batch(BATCH_SIZE)

print("Data Prepared!")
32 |
33 | # %%
class MultiLayerNeuralNetwork(models.Model):
    """Subclassed Keras MLP: 784 -> 256 -> 128 -> 10 (softmax)."""

    def __init__(self):
        super(MultiLayerNeuralNetwork, self).__init__()
        self.d1 = layers.Dense(256, input_shape=(784, ), activation='relu')
        self.d2 = layers.Dense(128, activation='relu')
        self.d3 = layers.Dense(10, activation='softmax')

    def call(self, x):
        # Chain the two hidden layers, then the softmax head.
        hidden = self.d2(self.d1(x))
        return self.d3(hidden)
45 |
# Create an instance of the model
model = MultiLayerNeuralNetwork()

loss_object = losses.SparseCategoricalCrossentropy()

optimizer = optimizers.Adam()

# %%
# Custom training loop: one GradientTape step per mini-batch.
for epoch in range(EPOCHS):
    epoch_loss = 0
    for batch_x, batch_y in train_ds:
        with tf.GradientTape() as tape:
            predictions = model(batch_x, training=True)
            loss = loss_object(batch_y, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        epoch_loss += loss

    # Fix: report the mean loss over the whole epoch instead of only the
    # final mini-batch's loss (matches the sibling CNN script's intent).
    print("{:5}|{:10.6f}".format(epoch+1, epoch_loss/(len(train_x)/BATCH_SIZE + 1)))
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/ver_mlx.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 | from mlx import nn
4 | from mlx import core as mx
5 | from mlx import optimizers as optim
6 | import numpy as np
7 |
8 | from utils import mlx_dataset
9 |
EPOCHS = 10
BATCH_SIZE = 100
LEARNING_RATE = 0.01

train_images, train_labels, test_images, test_labels = mlx_dataset.mnist()
# Binarize the task: label 1.0 if the digit is >= 5, else 0.0 (column vector)
train_labels = np.greater_equal(train_labels, 5).astype(float)[:, np.newaxis]
test_labels = np.greater_equal(test_labels, 5).astype(float)[:, np.newaxis]
17 |
class Model(nn.Module):
    """Logistic regression: one 784 -> 1 linear layer emitting a raw logit."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(784, 1)

    def __call__(self, x):
        return self.linear(x)
26 |
def loss_fn(model, x, y):
    # Mean binary cross-entropy of the model output against 0/1 targets.
    # NOTE(review): assumes mlx's binary_cross_entropy treats `output` as a
    # logit (with_logits default) — verify against the installed mlx version.
    output = model(mx.array(x))
    tgt = mx.array(y)
    # print(output.shape, tgt.shape)
    return mx.mean(nn.losses.binary_cross_entropy(output, tgt))

def eval_fn(x, y):
    # Accuracy: sigmoid(logit) thresholded at 0.5, compared to 0/1 targets
    return mx.mean(mx.greater_equal(mx.sigmoid(model(x)), 0.5) == y)

def batch_iterate(batch_size, x, y):
    # Yield shuffled mini-batches; the final batch may be smaller
    perm = mx.array(np.random.permutation(y.size))
    for s in range(0, y.size, batch_size):
        ids = perm[s : s + batch_size]
        yield x[ids], y[ids]
41 |
model = Model()
# Materialize the lazily-initialized parameters
mx.eval(model.parameters())

# value_and_grad returns (loss, grads) in a single call
loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
optimizer = optim.SGD(learning_rate=LEARNING_RATE)

for epoch in range(EPOCHS):
    avg_loss = 0
    for i, (batch_x, batch_y) in enumerate(batch_iterate(BATCH_SIZE, train_images, train_labels)):

        loss, grads = loss_and_grad_fn(model, batch_x, batch_y)
        optimizer.update(model, grads)
        # Force evaluation so the parameter update actually executes
        mx.eval(model.parameters(), optimizer.state)
        avg_loss += loss

        # Periodic progress report: running mean of the loss so far
        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.item()/(i+1))
    accuracy = eval_fn(mx.array(test_images), mx.array(test_labels))
    print(f"Epoch: {epoch+1}, Loss: {avg_loss.item()/(i+1):.3f}, Accuracy: {accuracy.item():.3f}")
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Recurrent_Neural_Network/ver_mlx.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 |
4 | from mlx import nn
5 | from mlx import core as mx
6 | from mlx import optimizers as optim
7 | import numpy as np
8 | np.random.seed(777)
9 | mx.random.seed(777)
10 |
11 | from utils import mlx_dataset
12 |
EPOCHS = 5
BATCH_SIZE = 256
LEARNING_RATE = 0.001

train_images, train_labels, test_images, test_labels = mlx_dataset.mnist()
# Treat each 28x28 image as a length-28 sequence of 28-dim rows
train_images = train_images.reshape([-1, 28, 28])
test_images = test_images.reshape([-1, 28, 28])
20 |
class Model(nn.Module):
    # Recurrent classifier: RNN over the 28 image rows, then a linear head
    # on the last time step's hidden state.
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(Model, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # NOTE(review): this mirrors PyTorch's nn.RNN signature; verify that
        # mlx's nn.RNN actually accepts a layer count as the third argument.
        self.rnn = nn.RNN(input_size, hidden_size, num_layers)
        self.fc = nn.Linear(hidden_size, num_classes)
    def __call__(self, x):
        # Zeroed initial hidden state, one per layer per batch element
        h0 = mx.zeros((self.num_layers, x.shape[0], self.hidden_size))
        # NOTE(review): tuple unpacking assumes an (output, hidden) return
        # as in PyTorch — confirm against the mlx RNN API.
        x, _ = self.rnn(x, h0)
        # Classify from the final time step's hidden state
        x = self.fc(x[:, -1, :])
        return x
33 |
def loss_fn(model, x, y):
    # Mean cross-entropy between the model's logits and integer labels
    x = mx.array(x)
    y = mx.array(y)
    return mx.mean(nn.losses.cross_entropy(model(x), y))

def eval_fn(x, y):
    # Top-1 accuracy of the module-level `model` on (x, y)
    return mx.mean(mx.argmax(model(x), axis=1) == y)

def batch_iterate(batch_size, x, y):
    # Yield shuffled mini-batches; the final batch may be smaller
    perm = mx.array(np.random.permutation(y.size))
    for s in range(0, y.size, batch_size):
        ids = perm[s : s + batch_size]
        yield x[ids], y[ids]


# 28-dim inputs, 128 hidden units, 2 layers, 10 classes
model = Model(28, 128, 2, 10)
# Materialize the lazily-initialized parameters
mx.eval(model.parameters())

loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
optimizer = optim.Adam(learning_rate=LEARNING_RATE)

for epoch in range(EPOCHS):
    avg_loss = 0
    for i, (batch_x, batch_y) in enumerate(batch_iterate(BATCH_SIZE, train_images, train_labels)):
        loss, grads = loss_and_grad_fn(model, batch_x, batch_y)
        optimizer.update(model, grads)
        # Force evaluation so the parameter update actually executes
        mx.eval(model.parameters(), optimizer.state)
        avg_loss += loss

        # Periodic progress report: running mean of the loss so far
        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.item()/(i+1))
    accuracy = eval_fn(mx.array(test_images), mx.array(test_labels))
    print(f"Epoch: {epoch+1}, Loss: {avg_loss.item()/(i+1):.3f}, Accuracy: {accuracy.item():.3f}")
--------------------------------------------------------------------------------
/04_Extra/DataLoading/TensorFlow/ver_generator.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | from PIL import Image
5 | import tensorflow as tf
6 | from tqdm import tqdm
7 |
# Target side length for every image and the tf.data batch size
img_size = 128
batch_size = 32

data_dir = "../data/flower_photos"
# Extensions treated as images when scanning the directory tree
IMG_FORMAT = ["jpg", "jpeg", "tif", "tiff", "bmp", "png"]
13 |
class DataGenerator():
    """Yield (image, label) pairs for every image file under `data_dir`.

    Sub-directory names directly under `data_dir` are the class names;
    a sample's label is the index of its folder in the sorted listing.
    """

    # Recognized image extensions, kept as a class attribute so the
    # generator no longer depends on a module-level global.
    IMG_FORMAT = ["jpg", "jpeg", "tif", "tiff", "bmp", "png"]

    def __init__(self, data_dir, img_size):
        self.filelist = []
        self.classes = sorted(os.listdir(data_dir))
        # Fix: store img_size — it was previously accepted but ignored,
        # and __call__ silently used the module-level `img_size` global.
        self.img_size = img_size
        for root, sub_dir, files in os.walk(data_dir):
            if not len(files): continue
            files = [os.path.join(root, file) for file in files
                     if file.split(".")[-1].lower() in self.IMG_FORMAT]
            self.filelist += files
        self.filelist.sort()

    def __len__(self):
        return len(self.filelist)

    def __call__(self):
        for file in self.filelist:
            image = Image.open(file)
            image = image.resize((self.img_size, self.img_size))
            image = np.array(image)
            # Fix: derive the class folder with os.path so the parsing is
            # path-separator independent (the old "/"-split broke on Windows).
            label = os.path.basename(os.path.dirname(file))
            label = self.classes.index(label)
            yield image, label
35 |
train_dataset = DataGenerator(os.path.join(data_dir, "train"), img_size)
val_dataset = DataGenerator(os.path.join(data_dir, "validation"), img_size)

# Wrap the generators as tf.data pipelines (float32 image, int16 label)
train_ds = tf.data.Dataset.from_generator(
    train_dataset, (tf.float32, tf.int16))
train_ds = train_ds.batch(batch_size).prefetch(2)
# NOTE(review): this sets a plain int attribute named __len__ only for the
# tqdm totals below; it does not make len(train_ds) work.
train_ds.__len__ = int(np.ceil(len(train_dataset)/batch_size))

val_ds = tf.data.Dataset.from_generator(
    val_dataset, (tf.float32, tf.int16))
val_ds = val_ds.batch(batch_size).prefetch(2)
val_ds.__len__ = int(np.ceil(len(val_dataset)/batch_size))

# Smoke-test iteration with a progress bar; sleep stands in for real work
with tqdm(total=train_ds.__len__) as t:
    t.set_description(f'Train Loader')
    for i, (batch_img, batch_lab) in enumerate(train_ds):
        time.sleep(0.1)
        # Add feedforward & Optimization code
        t.set_postfix({"Train data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()

with tqdm(total=val_ds.__len__) as t:
    t.set_description(f'Validation Loader')
    for i, (batch_img, batch_lab) in enumerate(val_ds):
        time.sleep(0.1)
        # Add evaluation code
        t.set_postfix({"Validation data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/MXNet_Gluon.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from mxnet import nd, gluon, init, autograd
4 | from mxnet.gluon.data.vision import datasets
5 | from mxnet.gluon import nn
6 | from matplotlib import pyplot as plt
7 | print("Load Package!")
8 |
train_raw_data = datasets.MNIST(train=True)
val_raw_data = datasets.MNIST(train=False)

# Unpack the gluon datasets into plain numpy arrays
train_data = {}
train_data['data'] = np.array([i[0].asnumpy() for i in train_raw_data])
train_data['label'] = np.array([i[1] for i in train_raw_data])

print(train_data['data'].shape)
print(train_data['label'].shape)

val_data = {}
val_data['data'] = np.array([i[0].asnumpy() for i in val_raw_data])
val_data['label'] = np.array([i[1] for i in val_raw_data])

print(val_data['data'].shape)
print(val_data['label'].shape)

# %%
# NOTE(review): SoftmaxCELoss applies softmax internally, so the final layer
# would normally emit raw logits; the 'sigmoid' activation squashes them
# first. Kept as-is pending confirmation of intent.
net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'), nn.Dense(10, activation='sigmoid'))

net.initialize(init=init.Xavier())

cross_entropy = gluon.loss.SoftmaxCELoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
#%%
print("Setting Done!")

batch_size = 100
tot_iter = len(train_data['data']) // batch_size
# Size of the random validation subset drawn each epoch
val_batch = 100
print("Start Training!")
for epoch in range(10):
    train_loss = 0.
    # forward + backward over randomly sampled mini-batches
    for iter in range(tot_iter):
        idx = np.random.choice(len(train_data['data']), batch_size, replace=False)
        with autograd.record():
            output = net(nd.array(np.reshape(train_data['data'][idx], (batch_size, -1))))
            loss = cross_entropy(output, nd.array(train_data['label'][idx]))
        loss.backward()
        # update parameters
        trainer.step(batch_size)
        train_loss += loss.mean().asscalar()

    # Validation on a random subset. Fix: reshape by the subset's own size
    # instead of reusing `batch_size`, so the two can differ safely.
    val_idx = np.random.choice(len(val_data['data']), val_batch, replace=False)
    output = nd.argmax(net(nd.array(np.reshape(val_data['data'][val_idx], (val_batch, -1)))), axis = 1).asnumpy()
    acc = np.mean(output == val_data['label'][val_idx])

    # Fix: average the accumulated loss over the number of iterations,
    # not the batch size, so the reported value is the mean per-batch loss.
    print("Epoch : %d, loss : %f, val_acc : %f"%(epoch+1, train_loss/tot_iter, acc))
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/TensorFlow/TensorFlow.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Load package
3 | import cv2 as cv
4 | import numpy as np
5 | from matplotlib import pyplot as plt
6 | import tensorflow as tf
7 | from tensorflow.keras import models
8 | from tensorflow.keras.applications import VGG19, MobileNet, Xception
9 |
def grad_cam(model, layer_name, label_index, img):
    """
    ========= Input =========
    model: Model instance
    layer_name: Name of the conv layer whose activations are visualized
    label_index: Index of the target class in the model's output
    img: Batched input image, shape (1, H, W, 3), values assumed in [0, 1]

    ========= Output =========
    output_image: Activation map + real image (RGB)
    cam: Activation map rendered as an RGB heatmap
    """
    H, W = img.shape[1:3]
    # Model exposing both the target layer's activations and the predictions
    grad_model = models.Model([model.inputs], [model.get_layer(layer_name).output, model.output])

    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img)
        loss = predictions[:, label_index]

    # First (only) element of the batch
    output = conv_outputs[0]
    grads = tape.gradient(loss, conv_outputs)[0]

    # Guided masking: keep gradient only where both the activation and the
    # gradient are positive
    guided_grads = tf.cast(output > 0, 'float32') * tf.cast(grads > 0, 'float32') * grads

    # Per-channel weights = spatially averaged guided gradients
    weights = tf.reduce_mean(guided_grads, axis=(0, 1))

    cam = np.ones(output.shape[0: 2], dtype = np.float32)

    # Weighted sum of the feature maps
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # Upsample to the input size, clamp negatives, normalize to [0, 1]
    cam = cv.resize(cam.numpy(), (W, H))
    cam = np.maximum(cam, 0)
    heatmap = (cam - cam.min()) / (cam.max() - cam.min())

    cam = cv.applyColorMap(np.uint8(255*heatmap), cv.COLORMAP_JET)

    # Blend heatmap with the original image (both in BGR at this point)
    output_image = cv.addWeighted(cv.cvtColor((img[0]*255).astype('uint8'), cv.COLOR_RGB2BGR), 0.5, cam, 0.5, 0)

    output_image = cv.cvtColor(output_image, cv.COLOR_BGR2RGB)

    cam = cv.cvtColor(cam, cv.COLOR_BGR2RGB)
    return output_image, cam
53 |
54 | # %%
55 | # Read image
56 | img = cv.imread('../cat_dog.jpg')
57 | img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
58 | img = cv.resize(img, (224, 224))
59 |
60 | # %%
61 | # Load model
62 |
63 | model = VGG19(weights='imagenet')
64 |
65 | model.summary()
66 |
67 | # After prinintg, copy layer's name
68 | # %%
69 | # 174 tabby
70 | # 211 german_shepherd
71 |
72 | overlaped, cam = grad_cam(model, 'block5_pool', 174, img[np.newaxis]/255.)
73 | cv.imwrite(f'./tabby.jpg', overlaped)
74 |
75 | overlaped, cam = grad_cam(model, 'block5_pool', 211, img[np.newaxis]/255.)
76 | cv.imwrite(f'./german_shepherd.jpg', overlaped)
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/tf_subclassing.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 | import numpy as np
5 |
6 | import tensorflow as tf
7 | from tensorflow.keras import models, layers, optimizers, losses, utils, datasets
8 |
9 | tf.random.set_seed(777)
10 |
11 | print("Packge Loaded!")
12 | # %%
EPOCHS = 500
BATCH_SIZE = 128

# Data Loading: scale to [0, 1] and add an explicit channel axis
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.expand_dims(train_x/255., -1), np.expand_dims(test_x/255., -1)


print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Wrap the arrays as shuffled, batched tf.data pipelines
train_ds = tf.data.Dataset.from_tensor_slices(
    (train_x, train_y)).shuffle(10000).batch(BATCH_SIZE)

test_ds = tf.data.Dataset.from_tensor_slices(
    (test_x, test_y)).shuffle(10000).batch(BATCH_SIZE)

print("Data Prepared!")
33 |
34 | # %%
class SimpleConvolutionalNeuralNetwork(models.Model):
    """Subclassed Keras CNN: two conv/pool stages, then a softmax head."""

    def __init__(self):
        super(SimpleConvolutionalNeuralNetwork, self).__init__()
        self.conv1 = layers.Conv2D(16, 3, activation='relu', input_shape=(28, 28, 1,))
        self.pool1 = layers.MaxPool2D()
        self.conv2 = layers.Conv2D(32, 3, activation='relu')
        self.pool2 = layers.MaxPool2D()
        self.flat = layers.Flatten()
        self.dense = layers.Dense(10, activation='softmax')

    def call(self, x):
        # conv -> pool -> conv -> pool -> flatten, then classify
        for stage in (self.conv1, self.pool1, self.conv2, self.pool2, self.flat):
            x = stage(x)
        return self.dense(x)
51 |
# Create an instance of the model
model = SimpleConvolutionalNeuralNetwork()

loss_object = losses.SparseCategoricalCrossentropy()

optimizer = optimizers.Adam()

# %%
# Custom training loop: one GradientTape step per mini-batch.
for epoch in range(EPOCHS):
    epoch_loss = 0
    for batch_x, batch_y in train_ds:
        with tf.GradientTape() as tape:
            predictions = model(batch_x, training=True)
            loss = loss_object(batch_y, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        epoch_loss += loss
    # Fix: report the accumulated epoch loss — the original summed
    # `epoch_loss` but then printed only the final mini-batch's `loss`.
    print("{:5}|{:10.6f}".format(epoch+1, epoch_loss/(len(train_x)/BATCH_SIZE + 1)))
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/tf_nn.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import utils, datasets
9 |
print("Packge Loaded!")

# Data Loading: flatten to 784-dim vectors and one-hot encode the labels
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])
train_y, test_y = utils.to_categorical(train_y, 10), utils.to_categorical(test_y, 10)

print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Set Network (TF1-style graph: placeholders + explicit variables)
hidden_node = 256
num_classes = 10

X = tf.placeholder(tf.float32, shape=[None, 784])

W1 = tf.Variable(tf.random.normal([784, hidden_node]))
b1 = tf.Variable(tf.zeros([hidden_node]))

W2 = tf.Variable(tf.random.normal([hidden_node, num_classes]))
b2 = tf.Variable(tf.zeros([num_classes]))

first_hidden = tf.nn.relu(tf.matmul(X, W1)+b1)
output = tf.matmul(first_hidden, W2)+b2

Y = tf.placeholder(tf.float32, shape = [None, 10])
Loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=Y))
Optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(Loss)
Corr = tf.equal(tf.argmax(output,1), tf.argmax(Y,1))
Acc = tf.reduce_mean(tf.cast(Corr, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())


# Training loop
print("Start Training !")
epochs = 500
batch_size = 100
steps = np.ceil(len(train_x)/batch_size)
for epoch in range(epochs):
    epoch_loss = 0
    epoch_acc = 0
    for step in range(0, len(train_x), batch_size):
        _, step_loss, step_acc = sess.run([Optimizer, Loss, Acc],
                feed_dict={X:train_x[step:step+batch_size], Y:train_y[step:step+batch_size]})
        epoch_loss += step_loss
        epoch_acc += step_acc
    # Fix: removed an unused random `val_idx` subset — validation below
    # already runs on the full test set.
    val_loss, val_acc = sess.run([Loss, Acc], feed_dict={X:test_x, Y:test_y})

    print("\nEpoch : ", epoch)
    print("Train Loss : ", epoch_loss/steps, " Train Accuracy : ", epoch_acc/steps)
    print("Validation Loss : ", val_loss, "Validation Accuracy : ", val_acc)
--------------------------------------------------------------------------------
/03_Advance/CNN/prepare_data.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import sys
4 | import time
5 | import shutil
6 | import tarfile
7 | import urllib.request
8 |
9 | # For progressbar
def report(url):
    """Return a reporthook for urllib.request.urlretrieve that prints progress.

    Args:
        url: Download URL; only its final path component appears in the message.

    Returns:
        A callable ``progbar(blocknr, blocksize, size)`` matching the
        urlretrieve reporthook signature.
    """
    file_name = url.split("/")[-1]

    def progbar(blocknr, blocksize, size):
        current = blocknr*blocksize
        # FIX: urlretrieve passes size=-1 when the server sends no
        # Content-Length (the original divided by it), and the final block can
        # overshoot the total, so clamp the percentage to 100.
        if size > 0:
            percent = min(100.0, 100.0*current/size)
            sys.stdout.write(f"\rDownloading {file_name} ...... {percent:.2f}%")
        else:
            sys.stdout.write(f"\rDownloading {file_name} ...... {current} bytes")
    return progbar
16 |
SAVE_PATH = "../../data"
URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
file_name = URL.split("/")[-1]
file_path = os.path.join(SAVE_PATH, file_name)

# Data Download
# Skipped when the archive already exists locally; report() prints progress.
if not os.path.exists(file_path):
    print("In progress to download data ....")
    urllib.request.urlretrieve(URL, file_path, report(URL))
    print()
else:
    print("Already downloaded !")

# Data Extract
# Extract member-by-member so a running percentage can be printed.
if not os.path.exists(os.path.join(SAVE_PATH, "flower_photos")):
    print("In progress to extract data ....")
    tar = tarfile.open(file_path)
    members = tar.getmembers()
    for idx, member in enumerate(members):
        tar.extract(member=member, path=SAVE_PATH)
        sys.stdout.write(f"\rExtracting {file_name} ...... {100.0*(idx+1)/len(members):.2f}%")
    print()
else:
    print("Already extracted !")
41 |
# Data Split
# Move ~90% of each class into train/ and the rest into validation/, then
# delete the original class directory.
print("In progress to split data ....")

flower_photos_path = os.path.join(SAVE_PATH, "flower_photos")
for i, (root, subdir, files) in enumerate(os.walk(flower_photos_path)):

    # Skip the top-level flower_photos directory itself.
    if not i: continue

    # FIX: use os.path.basename instead of splitting on '/' so the script
    # also works with Windows path separators.
    flower = os.path.basename(root)

    print(f"{flower} ......")

    split_ratio = int(0.9 * len(files))

    # Move to train directory
    dst_root = os.path.join(SAVE_PATH, "flower_photos", "train", flower)
    os.makedirs(dst_root, exist_ok=True)
    for file in files[:split_ratio]:
        src_path = os.path.join(root, file)
        dst_path = os.path.join(dst_root, file)
        shutil.move(src_path, dst_path)

    # Move to validation directory
    dst_root = os.path.join(SAVE_PATH, "flower_photos", "validation", flower)
    os.makedirs(dst_root, exist_ok=True)
    for file in files[split_ratio:]:
        src_path = os.path.join(root, file)
        dst_path = os.path.join(dst_root, file)
        shutil.move(src_path, dst_path)

    shutil.rmtree(root)

print("Data preparation done !")
--------------------------------------------------------------------------------
/04_Extra/DataLoading/flower_download.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | import sys
4 | import time
5 | import shutil
6 | import tarfile
7 | import urllib.request
8 |
9 | # For progressbar
def report(url):
    """Return a reporthook for urllib.request.urlretrieve that prints progress.

    Args:
        url: Download URL; only its final path component appears in the message.

    Returns:
        A callable ``progbar(blocknr, blocksize, size)`` matching the
        urlretrieve reporthook signature.
    """
    file_name = url.split("/")[-1]

    def progbar(blocknr, blocksize, size):
        current = blocknr*blocksize
        # FIX: urlretrieve passes size=-1 when the server sends no
        # Content-Length (the original divided by it), and the final block can
        # overshoot the total, so clamp the percentage to 100.
        if size > 0:
            percent = min(100.0, 100.0*current/size)
            sys.stdout.write(f"\rDownloading {file_name} ...... {percent:.2f}%")
        else:
            sys.stdout.write(f"\rDownloading {file_name} ...... {current} bytes")
    return progbar
16 |
SAVE_PATH = "./data"
URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
file_name = URL.split("/")[-1]
file_path = os.path.join(SAVE_PATH, file_name)

# Data Download
# Skipped when the archive already exists locally; report() prints progress.
if not os.path.exists(file_path):
    print("In progress to download data ....")
    urllib.request.urlretrieve(URL, file_path, report(URL))
    print()
else:
    print("Already downloaded !")

# Data Extract
# Extract member-by-member so a running percentage can be printed.
if not os.path.exists(os.path.join(SAVE_PATH, "flower_photos")):
    print("In progress to extract data ....")
    tar = tarfile.open(file_path)
    members = tar.getmembers()
    for idx, member in enumerate(members):
        tar.extract(member=member, path=SAVE_PATH)
        sys.stdout.write(f"\rExtracting {file_name} ...... {100.0*(idx+1)/len(members):.2f}%")
    print()
else:
    print("Already extracted !")

# Data Split
# Move ~90% of each class into train/ and the rest into validation/, then
# delete the original class directory.
print("In progress to split data ....")

flower_photos_path = os.path.join(SAVE_PATH, "flower_photos")
for i, (root, subdir, files) in enumerate(os.walk(flower_photos_path)):

    # Skip the top-level flower_photos directory itself.
    if not i: continue

    # NOTE(review): splitting on '/' assumes POSIX paths — breaks on Windows.
    flower = root.split("/")[-1]

    print(f"{flower} ......")

    # 90/10 train/validation split, in os.walk's file order.
    split_ratio = int(0.9 * len(files))

    # Move to train directory
    dst_root = os.path.join(SAVE_PATH, "flower_photos", "train", flower)
    os.makedirs(dst_root, exist_ok=True)
    for file in files[:split_ratio]:
        src_path = os.path.join(root, file)
        dst_path = os.path.join(dst_root, file)
        shutil.move(src_path, dst_path)

    # Move to validation directory
    dst_root = os.path.join(SAVE_PATH, "flower_photos", "validation", flower)
    os.makedirs(dst_root, exist_ok=True)
    for file in files[split_ratio:]:
        src_path = os.path.join(root, file)
        dst_path = os.path.join(dst_root, file)
        shutil.move(src_path, dst_path)

    shutil.rmtree(root)

print("Data preparation done !")
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/tf_nn.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import utils, datasets
9 |
print("Packge Loaded!")

# Data Loading
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
# Flatten the 28x28 images to 784-vectors and scale pixels into [0, 1].
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])
# 0 : digit < 5
# 1 : digit >= 5
# Trailing axis added so the label shape (N, 1) matches the network output.
train_y, test_y = np.greater_equal(train_y, 5)[..., np.newaxis], np.greater_equal(test_y, 5)[..., np.newaxis]


print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)
22 |
# Set Network
# Single-hidden-layer MLP in the TF1 graph API: 784 -> 256 (ReLU) -> 1 logit.
hidden_node = 256
num_classes = 1

X = tf.placeholder(tf.float32, shape=[None, 784])

W1 = tf.Variable(tf.random.normal([784, hidden_node]))
b1 = tf.Variable(tf.zeros([hidden_node]))

W2 = tf.Variable(tf.random.normal([hidden_node, num_classes]))
b2 = tf.Variable(tf.zeros([num_classes]))

first_hidden = tf.nn.relu(tf.matmul(X, W1)+b1)
# `output` is a raw logit; the sigmoid is applied inside the loss op.
output = tf.matmul(first_hidden, W2)+b2

Y = tf.placeholder(tf.float32, shape = [None, num_classes])
Loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
Optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(Loss)
# FIX: `output` is a logit, so the decision boundary is 0.0 (sigmoid(0)=0.5).
# The original thresholded the logit at 0.5, skewing the reported accuracy.
Corr = tf.equal(tf.greater_equal(output, 0.0), tf.greater_equal(Y, 0.5))
Acc = tf.reduce_mean(tf.cast(Corr, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())
46 |
47 |
# Training loop
# Sequential minibatches each epoch, then loss/accuracy on a random validation batch.
print("Start Training !")
epochs = 500
batch_size = 100
steps = np.ceil(len(train_x)/batch_size)
for epoch in range(epochs):
    epoch_loss = 0
    epoch_acc = 0
    for step in range(0, len(train_x), batch_size):
        _, step_loss, step_acc = sess.run([Optimizer, Loss, Acc],
                            feed_dict={X:train_x[step:step+batch_size], Y:train_y[step:step+batch_size]})
        epoch_loss += step_loss
        epoch_acc += step_acc
    # FIX: `val_idx` was computed but never used — validation always ran on the
    # full test set. Evaluate on the sampled random batch as intended.
    val_idx = np.random.choice(len(test_x), batch_size, replace=False)
    val_loss, val_acc = sess.run([Loss, Acc], feed_dict={X:test_x[val_idx], Y:test_y[val_idx]})

    print("\nEpoch : ", epoch)
    print("Train Loss : ", epoch_loss/steps, " Train Accuracy : ", epoch_acc/steps)
    print("Validation Loss : ", val_loss, "Validation Accuracy : ", val_acc)
67 |
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/PyTorch.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch import optim
4 | from torch.utils.data import DataLoader
5 |
6 | from torchvision import datasets
7 | from torchvision import transforms
8 |
9 | import numpy as np
10 |
EPOCHS = 10
BATCH_SIZE = 100
LEARNING_RATE = 0.01

# MNIST dataset
# Downloaded into ../../data on first run; ToTensor scales pixels into [0, 1].
mnist_train = datasets.MNIST(root="../../data",
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root="../../data",
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
print("Downloading Test Data Done ! ")

# Use the GPU when available, otherwise the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
29 |
30 | # our model
class Model(nn.Module):
    """Logistic-regression head: one 784 -> 1 linear layer producing a raw logit."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear = nn.Linear(784, 1)

    def forward(self, X):
        # No sigmoid here — BCEWithLogitsLoss expects raw logits.
        return self.linear(X)
39 |
model = Model().to(device)

criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)

data_iter = DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True)

for epoch in range(EPOCHS):
    avg_loss = 0.0
    total_batch = len(mnist_train)//BATCH_SIZE
    for i, (batch_img, batch_lab) in enumerate(data_iter):

        # 0 : digit < 5
        # 1 : digit >= 5
        X = batch_img.view(-1, 28*28).to(device)

        # To use BCEWithLogitsLoss
        # 1. Target tensor must be same as predict result's size
        # 2. Target tensor's type must be Float
        Y = batch_lab.unsqueeze(dim=1)
        Y = Y.type(torch.FloatTensor).to(device)
        Y[Y>=5] = 1
        Y[Y<5] = 0

        # Call the module (not model.forward) so registered hooks run.
        y_pred = model(X)
        loss = criterion(y_pred, Y)
        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # FIX: accumulate a plain float. The original summed loss tensors,
        # which keeps their autograd graphs alive and grows memory every step.
        avg_loss += loss.item()

        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss/(i+1))
    print("Epoch : ", epoch+1, " Loss : ", avg_loss/(i+1))
print("Training Done !")
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/ver_mlx.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 |
4 | from mlx import nn
5 | from mlx import core as mx
6 | from mlx import optimizers as optim
7 | import numpy as np
8 | np.random.seed(777)
9 | mx.random.seed(777)
10 |
11 | from utils import mlx_dataset
12 |
EPOCHS = 5
BATCH_SIZE = 256
LEARNING_RATE = 0.01

# MNIST arrives as flat float32 vectors; add spatial dims plus a trailing
# channel axis for the conv layers below.
train_images, train_labels, test_images, test_labels = mlx_dataset.mnist()
train_images = train_images.reshape([-1, 28, 28, 1])
test_images = test_images.reshape([-1, 28, 28, 1])
20 |
class Model(nn.Module):
    """CNN classifier: two conv->batchnorm->ReLU->maxpool blocks followed by a
    linear layer over the flattened 7x7x32 feature map (10 digit classes)."""

    def __init__(self):
        super().__init__()
        # 28x28x1 -> 14x14x16 (3x3 conv keeps spatial size; 2x2 pool halves it).
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        # 14x14x16 -> 7x7x32.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.fc = nn.Linear(7*7*32, 10)

    def __call__(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        # Flatten per-example features before the classifier.
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x
42 |
def loss_fn(model, x, y):
    # Mean cross-entropy over the batch; inputs arrive as numpy arrays and are
    # converted to mx.array here.
    x = mx.array(x)
    y = mx.array(y)
    return mx.mean(nn.losses.cross_entropy(model(x), y))
47 |
def eval_fn(x, y):
    # Classification accuracy. NOTE(review): reads the module-level `model`
    # defined below rather than taking it as a parameter.
    return mx.mean(mx.argmax(model(x), axis=1) == y)
50 |
def batch_iterate(batch_size, x, y):
    """Yield (x, y) minibatches in a fresh random order on each call."""
    order = mx.array(np.random.permutation(y.size))
    for start in range(0, y.size, batch_size):
        batch_ids = order[start : start + batch_size]
        yield x[batch_ids], y[batch_ids]
56 |
57 |
58 | model = Model()
59 | mx.eval(model.parameters())
60 |
61 | loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
62 | optimizer = optim.Adam(learning_rate=LEARNING_RATE)
63 |
64 | for epoch in range(EPOCHS):
65 | avg_loss = 0
66 | for i, (batch_x, batch_y) in enumerate(batch_iterate(BATCH_SIZE, train_images, train_labels)):
67 | loss, grads = loss_and_grad_fn(model, batch_x, batch_y)
68 | optimizer.update(model, grads)
69 | mx.eval(model.parameters(), optimizer.state)
70 | avg_loss += loss
71 |
72 | if (i+1)%100 == 0 :
73 | print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.item()/(i+1))
74 | accuracy = eval_fn(mx.array(test_images), mx.array(test_labels))
75 | print(f"Epoch: {epoch+1}, Loss: {avg_loss.item()/(i+1):.3f}, Accuracy: {accuracy.item():.3f}")
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/tf_subclassing.py:
--------------------------------------------------------------------------------
1 | import os
2 | # For Mac User...
3 | os.environ['KMP_DUPLICATE_LIB_OK']='True'
4 | import numpy as np
5 |
6 | import tensorflow as tf
7 | from tensorflow.keras import models, layers, optimizers, losses, metrics, utils, datasets
8 |
9 | tf.random.set_seed(777)
10 |
11 | print("Packge Loaded!")
12 |
EPOCHS = 10
BATCH_SIZE = 100
LEARNING_RATE = 0.01

# Data Loading
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
# Flatten the 28x28 images to 784-vectors and scale pixels into [0, 1].
train_x, test_x = np.reshape(train_x/255., [-1, 784]), np.reshape(test_x/255., [-1, 784])
# 0 : digit < 5
# 1 : digit >= 5
# float32 with a trailing axis so labels match the (N, 1) sigmoid output.
train_y, test_y = np.greater_equal(train_y, 5)[..., np.newaxis].astype(np.float32), np.greater_equal(test_y, 5)[..., np.newaxis].astype(np.float32)


print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Shuffled, batched tf.data pipelines for training and evaluation.
train_ds = tf.data.Dataset.from_tensor_slices(
    (train_x, train_y)).shuffle(10000).batch(BATCH_SIZE)

test_ds = tf.data.Dataset.from_tensor_slices(
    (test_x, test_y)).shuffle(10000).batch(BATCH_SIZE)

# plt.plot(x, y, 'r.')
# plt.show()
print("Data Prepared!")
37 |
38 | # %%
class LogisticRegression(models.Model):
    """Logistic regression: a single Dense(1) layer with a sigmoid activation,
    so call() returns P(label == 1) for each input row."""

    def __init__(self):
        super(LogisticRegression, self).__init__()
        self.d = layers.Dense(1, activation='sigmoid')

    def call(self, x):
        return self.d(x)
46 |
# Create an instance of the model
model = LogisticRegression()

loss_object = losses.BinaryCrossentropy()
metric = metrics.Accuracy()
optimizer = optimizers.Adam(learning_rate=LEARNING_RATE)

# %%
for epoch in range(EPOCHS):
    avg_loss, val_loss = 0, 0
    for i, (batch_x, batch_y) in enumerate(train_ds):
        with tf.GradientTape() as tape:
            predictions = model(batch_x, training=True)
            loss = loss_object(batch_y, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        avg_loss += loss
        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.numpy().item()/(i+1))

    # FIX: the original summed the stateful metric's running results with `+=`
    # and then discarded both validation numbers entirely. Reset the metric
    # each epoch, let it accumulate via update_state, and report the results.
    metric.reset_state()
    for n_val, (batch_x, batch_y) in enumerate(test_ds):
        predictions = model(batch_x, training=False)
        val_loss += loss_object(batch_y, predictions)
        metric.update_state(batch_y, tf.greater_equal(predictions, 0.5))

    print("Epoch : ", epoch+1, " Loss : ", avg_loss.numpy().item()/(i+1),
          " Val Loss : ", val_loss.numpy().item()/(n_val+1),
          " Val Accuracy : ", metric.result().numpy().item())
--------------------------------------------------------------------------------
/04_Extra/DataLoading/PyTorch/ver_custom.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import cv2 as cv
4 | import numpy as np
5 | from tqdm import tqdm
6 | from PIL import Image
7 |
8 | import torch
9 | from torchvision import transforms, datasets, utils
10 | from torch.utils.data import Dataset, DataLoader
11 |
PATH = "../data/flower_photos"
# File extensions accepted as images (compared lowercase).
IMG_FORMAT = ["jpg", "jpeg", "tif", "tiff", "bmp", "png"]

# Sub-directories of PATH (expected: train/ and validation/ at this level).
category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 128
batch_size = 32

# Resize every image to a fixed square and convert to a float tensor in [0, 1].
transform = transforms.Compose([
    transforms.Resize([img_size, img_size]),
    transforms.ToTensor()
])
26 |
class CustomDataset(Dataset):
    """Image-folder dataset over data_dir/<class_name>/<image files>.

    Labels are the sorted index of the image's parent directory name.

    Args:
        data_dir: Root directory containing one sub-directory per class.
        transform: Callable applied to each PIL image in __getitem__.
    """

    def __init__(self, data_dir, transform):

        self.filelist = []
        # FIX: only directories define classes; stray files directly under
        # data_dir would otherwise shift every label index.
        self.classes = sorted(d for d in os.listdir(data_dir)
                              if os.path.isdir(os.path.join(data_dir, d)))
        for root, sub_dir, files in os.walk(data_dir):
            if not len(files): continue
            # Keep only recognized image extensions.
            files = [os.path.join(root, file) for file in files if file.split(".")[-1].lower() in IMG_FORMAT]
            self.filelist += files
        self.transform = transform

    def __len__(self):
        # return size of dataset
        return len(self.filelist)

    def __getitem__(self, idx):

        path = self.filelist[idx]
        # Normalize to 3-channel RGB so grayscale/RGBA files don't break the
        # downstream transform or batching.
        image = Image.open(path).convert("RGB")
        image = self.transform(image)
        # FIX: derive the class from the parent directory portably — the
        # original split on '/', which breaks on Windows path separators.
        label = self.classes.index(os.path.basename(os.path.dirname(path)))
        return image, label
49 |
train_dataset = CustomDataset(os.path.join(PATH, "train"), transform)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)

validation_dataset = CustomDataset(os.path.join(PATH, "validation"), transform)
validation_loader = DataLoader(dataset=validation_dataset, batch_size=batch_size, shuffle=True, num_workers=2)

# Smoke-test both loaders: iterate once, displaying the batch shapes in the
# progress bar (time.sleep only slows the bar down so it is visible).
with tqdm(total=len(train_loader)) as t:
    t.set_description(f'Train Loader')
    for i, (batch_img, batch_lab) in enumerate(train_loader):
        time.sleep(0.1)
        t.set_postfix({"Train data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()

with tqdm(total=len(validation_loader)) as t:
    t.set_description(f'Validation Loader')
    for i, (batch_img, batch_lab) in enumerate(validation_loader):
        time.sleep(0.1)
        t.set_postfix({"Validation data shape": f"{batch_img.shape} {batch_lab.shape}"})
        t.update()
--------------------------------------------------------------------------------
/03_Advance/Segmentation/prepare_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import shutil
4 | import tarfile
5 | import urllib.request
6 |
7 | import random
8 |
9 | # For progressbar
def report(url):
    """Return a reporthook for urllib.request.urlretrieve that prints progress.

    Args:
        url: Download URL; only its final path component appears in the message.

    Returns:
        A callable ``progbar(blocknr, blocksize, size)`` matching the
        urlretrieve reporthook signature.
    """
    file_name = url.split("/")[-1]

    def progbar(blocknr, blocksize, size):
        current = blocknr*blocksize
        # FIX: urlretrieve passes size=-1 when the server sends no
        # Content-Length (the original divided by it), and the final block can
        # overshoot the total, so clamp the percentage to 100.
        if size > 0:
            percent = min(100.0, 100.0*current/size)
            sys.stdout.write(f"\rDownloading {file_name} ...... {percent:.2f}%")
        else:
            sys.stdout.write(f"\rDownloading {file_name} ...... {current} bytes")
    return progbar
16 |
SAVE_PATH = "../../data"
URL = 'https://www.robots.ox.ac.uk/~vgg/data/bicos/data/horses.tar'
file_name = URL.split("/")[-1]
file_path = os.path.join(SAVE_PATH, file_name)

# Data Download
# Skipped when the archive already exists locally; report() prints progress.
if not os.path.exists(file_path):
    print("In progress to download data ....")
    urllib.request.urlretrieve(URL, file_path, report(URL))
    print()
else:
    print("Already downloaded !")

# Data Extract
# Extract member-by-member so a running percentage can be printed.
if not os.path.exists(os.path.join(SAVE_PATH, "horses")):
    print("In progress to extract data ....")
    tar = tarfile.open(file_path)
    members = tar.getmembers()
    for idx, member in enumerate(members):
        tar.extract(member=member, path=SAVE_PATH)
        sys.stdout.write(f"\rExtracting {file_name} ...... {100.0*(idx+1)/len(members):.2f}%")
    print()
else:
    print("Already extracted !")
41 |
# Data Split
# Move ~90% of each sub-directory into train/ and the rest into validation/,
# using ONE shared shuffled index list so images and their masks stay paired.
print("In progress to split data ....")

horses_path = os.path.join(SAVE_PATH, "horses")

indices = None
for i, (root, subdir, files) in enumerate(os.walk(horses_path)):

    # Skip the top-level horses directory itself.
    if not i: continue

    # Shuffle once on the first class directory and reuse for the others.
    if not indices:
        indices = list(range(len(files)))
        random.shuffle(indices)

    # Sort so the shared indices select matching files across directories.
    files = sorted(files)

    # FIX: use os.path.basename instead of splitting on '/' so the script
    # also works with Windows path separators.
    dir_name = os.path.basename(root)

    print(f"{dir_name} ......")

    split_ratio = int(0.9 * len(files))

    # Move to train directory
    dst_root = os.path.join(SAVE_PATH, "horses", "train", dir_name)
    os.makedirs(dst_root, exist_ok=True)
    for idx in indices[:split_ratio]:
        src_path = os.path.join(root, files[idx])
        dst_path = os.path.join(dst_root, files[idx])
        shutil.move(src_path, dst_path)

    # Move to validation directory
    dst_root = os.path.join(SAVE_PATH, "horses", "validation", dir_name)
    os.makedirs(dst_root, exist_ok=True)
    for idx in indices[split_ratio:]:
        src_path = os.path.join(root, files[idx])
        dst_path = os.path.join(dst_root, files[idx])
        shutil.move(src_path, dst_path)

    shutil.rmtree(root)

print("Data preparation done !")
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/ver_jax.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 | import time
4 | import numpy.random as npr
5 | from jax import jit, grad
6 | import jax.nn as nn
7 | from jax.scipy.special import logsumexp
8 | import jax.numpy as jnp
9 | from utils import jax_dataset
10 | # %%
11 |
def init_random_params(scale, layer_sizes, rng=None):
    """Initialize a (weights, biases) pair per layer.

    Args:
        scale: Std-dev multiplier applied to the N(0, 1) draws.
        layer_sizes: Layer widths, e.g. [784, 256, 128, 10].
        rng: Optional numpy RandomState. FIX: the original used a RandomState
            *instance* as the default argument — a single shared object mutated
            by every call, so repeated calls silently produced different params.
            A fresh seeded generator is now created per call.

    Returns:
        List of (W, b) tuples with W of shape (m, n) and b of shape (n,).
    """
    if rng is None:
        rng = npr.RandomState(0)
    return [(scale * rng.randn(m, n), scale * rng.randn(n))
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
15 |
def predict(params, inputs):
    """Forward pass: ReLU hidden layers, then a log-softmax output layer."""
    hidden = inputs
    for weights, bias in params[:-1]:
        hidden = nn.relu(jnp.dot(hidden, weights) + bias)

    last_w, last_b = params[-1]
    logits = jnp.dot(hidden, last_w) + last_b
    # Normalize the logits into log-probabilities.
    return logits - logsumexp(logits, axis=1, keepdims=True)
25 |
def loss(params, batch):
    """Mean cross-entropy between log-prob predictions and one-hot targets."""
    inputs, targets = batch
    log_probs = predict(params, inputs)
    per_example = jnp.sum(log_probs * targets, axis=1)
    return -jnp.mean(per_example)
30 |
def accuracy(params, batch):
    """Fraction of examples whose argmax prediction equals the target class."""
    inputs, targets = batch
    true_cls = jnp.argmax(targets, axis=1)
    pred_cls = jnp.argmax(predict(params, inputs), axis=1)
    return jnp.mean(pred_cls == true_cls)
36 | # %%
37 |
38 |
# Hyperparameters: MLP widths 784 -> 256 -> 128 -> 10 and plain-SGD settings.
layer_sizes = [784, 256, 128, 10]
param_scale = 0.1
step_size = 0.001
num_epochs = 10
batch_size = 128

# Flattened MNIST with one-hot labels (project-local utils package).
train_images, train_labels, test_images, test_labels = jax_dataset.mnist()
num_train = train_images.shape[0]
# Count a final partial batch when the dataset size is not divisible.
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
49 |
50 | # %%
def data_stream():
    """Endless generator of shuffled (images, labels) training minibatches."""
    shuffler = npr.RandomState(0)
    while True:
        order = shuffler.permutation(num_train)
        for b in range(num_batches):
            chosen = order[b * batch_size:(b + 1) * batch_size]
            yield train_images[chosen], train_labels[chosen]

batches = data_stream()
60 | # %%
@jit
def update(params, batch):
    """One SGD step: each (w, b) moves against its gradient by step_size."""
    gradients = grad(loss)(params, batch)
    return [(w - step_size * gw, b - step_size * gb)
            for (w, b), (gw, gb) in zip(params, gradients)]
66 |
67 | # %%
68 | params = init_random_params(param_scale, layer_sizes)
69 | for epoch in range(num_epochs):
70 | start_time = time.time()
71 | loss_val = 0
72 | for _ in range(num_batches):
73 | loss_val += loss(params, batch_data)
74 | params = update(params, next(batches))
75 | epoch_time = time.time() - start_time
76 |
77 | train_acc = accuracy(params, (train_images, train_labels))
78 | test_acc = accuracy(params, (test_images, test_labels))
79 | print(f"Epoch: {epoch+1}, Loss: {loss_val/num_batches}, Elapsed time: {epoch_time:0.2f} sec")
80 | print("Training set accuracy {}".format(train_acc))
81 | print("Test set accuracy {}".format(test_acc))
--------------------------------------------------------------------------------
/01_Basic/Logistic_Regression/ver_jax.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.append('../../')
3 | import time
4 | import numpy as np
5 | from jax import jit, grad
6 | from jax.scipy.special import logsumexp
7 | import jax.numpy as jnp
8 | import jax.nn as nn
9 | from matplotlib import pyplot as plt
10 | from utils import jax_dataset
11 |
12 | EPOCHS = 10
13 | BATCH_SIZE = 100
14 | LEARNING_RATE = 0.01
15 |
def init_random_params(scale, layer_sizes, rng=None):
    """Initialize a (weights, biases) pair per layer.

    Args:
        scale: Std-dev multiplier applied to the N(0, 1) draws.
        layer_sizes: Layer widths, e.g. [784, 1].
        rng: Optional numpy RandomState. FIX: the original used a RandomState
            *instance* as the default argument — a single shared object mutated
            by every call, so repeated calls silently produced different params.
            A fresh seeded generator is now created per call.

    Returns:
        List of (W, b) tuples with W of shape (m, n) and b of shape (n,).
    """
    if rng is None:
        rng = np.random.RandomState(0)
    return [(scale * rng.randn(m, n), scale * rng.randn(n))
            for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
19 |
def predict(params, inputs):
    """Single linear layer followed by a sigmoid: returns P(label == 1)."""
    weights, bias = params[0]
    return nn.sigmoid(jnp.dot(inputs, weights) + bias)
23 |
def loss(params, batch):
    """Mean binary cross-entropy over the batch.

    FIX: the original computed only ``-mean(targets * log(preds))``, which
    ignores the negative class entirely — a batch of all-zero targets yields
    zero loss no matter what the model predicts. Include the
    ``(1 - t) * log(1 - p)`` term for a proper BCE; a small epsilon keeps the
    logs finite when a prediction saturates at 0 or 1.
    """
    inputs, targets = batch
    preds = predict(params, inputs)
    eps = 1e-7
    return -jnp.mean(targets * jnp.log(preds + eps)
                     + (1.0 - targets) * jnp.log(1.0 - preds + eps))
28 |
def accuracy(params, batch):
    """Fraction of examples where the 0.5-thresholded sigmoid matches the target."""
    inputs, targets = batch
    decisions = jnp.greater_equal(predict(params, inputs), 0.5)
    return jnp.mean(decisions == targets)
33 |
layer_sizes = [784, 1]
param_scale = 1


train_images, train_labels, test_images, test_labels = jax_dataset.mnist()
# Binarize the task the same way as the other Logistic_Regression examples:
# 0 : digit < 5, 1 : digit >= 5.
# BUG FIX: the original thresholded the digit index at 0.5, which labels every
# digit except 0 as positive; the intended threshold is 5.
train_labels = np.greater_equal(np.argmax(train_labels, axis=1)[..., np.newaxis], 5).astype(np.float32)
test_labels = np.greater_equal(np.argmax(test_labels, axis=1)[..., np.newaxis], 5).astype(np.float32)

num_train = train_images.shape[0]
# Count a final partial batch when the dataset size is not divisible.
num_complete_batches, leftover = divmod(num_train, BATCH_SIZE)
num_batches = num_complete_batches + bool(leftover)
# %%
def data_stream():
    """Endless generator of shuffled (images, labels) training minibatches."""
    rng = np.random.RandomState(0)
    while True:
        perm = rng.permutation(num_train)
        for i in range(num_batches):
            batch_idx = perm[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
            yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
54 |
@jit
def update(params, batch):
    """One SGD step: each (w, b) moves against its gradient by LEARNING_RATE."""
    gradients = grad(loss)(params, batch)
    return [(w - LEARNING_RATE * gw, b - LEARNING_RATE * gb)
            for (w, b), (gw, gb) in zip(params, gradients)]
60 |
params = init_random_params(param_scale, layer_sizes)
for epoch in range(EPOCHS):
    start_time = time.time()
    loss_val = 0
    for _ in range(num_batches):
        # Score and update on the same minibatch.
        batch_data = next(batches)
        loss_val += loss(params, batch_data)
        params = update(params, batch_data)
    epoch_time = time.time() - start_time

    # Full-dataset accuracy after every epoch.
    train_acc = accuracy(params, (train_images, train_labels))
    test_acc = accuracy(params, (test_images, test_labels))
    print(f"Epoch: {epoch+1}, Loss: {loss_val/num_batches}, Elapsed time: {epoch_time:0.2f} sec")
    print("Training set accuracy {}".format(train_acc))
    print("Test set accuracy {}".format(test_acc))
--------------------------------------------------------------------------------
/utils/mlx_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | import gzip
4 | import os
5 | import pickle
6 | from urllib import request
7 |
8 | import numpy as np
9 |
10 |
def mnist(
    save_dir="/tmp", base_url="http://yann.lecun.com/exdb/mnist/", filename="mnist.pkl"
):
    """
    Load the MNIST dataset in 4 tensors: train images, train labels,
    test images, and test labels.

    Checks `save_dir` for already downloaded data otherwise downloads.

    Download code modified from:
    https://github.com/hsjeong5/MNIST-for-Numpy

    NOTE(review): yann.lecun.com frequently rejects automated downloads these
    days — consider switching base_url to a mirror if the fetch fails.
    """

    def download_and_save(save_file):
        # (key in the pickle, archive name on the server) pairs.
        # FIX: renamed from `filename`, which shadowed the outer parameter.
        archives = [
            ["training_images", "train-images-idx3-ubyte.gz"],
            ["test_images", "t10k-images-idx3-ubyte.gz"],
            ["training_labels", "train-labels-idx1-ubyte.gz"],
            ["test_labels", "t10k-labels-idx1-ubyte.gz"],
        ]

        mnist = {}
        for name in archives:
            # FIX: download into `save_dir`; the original hard-coded "/tmp"
            # here, silently ignoring the caller's save_dir.
            out_file = os.path.join(save_dir, name[1])
            request.urlretrieve(base_url + name[1], out_file)
        for name in archives[:2]:
            out_file = os.path.join(save_dir, name[1])
            with gzip.open(out_file, "rb") as f:
                # Image files: 16-byte IDX header, then uint8 pixels.
                mnist[name[0]] = np.frombuffer(f.read(), np.uint8, offset=16).reshape(
                    -1, 28 * 28
                )
        for name in archives[-2:]:
            out_file = os.path.join(save_dir, name[1])
            with gzip.open(out_file, "rb") as f:
                # Label files: 8-byte IDX header, then uint8 labels.
                mnist[name[0]] = np.frombuffer(f.read(), np.uint8, offset=8)
        with open(save_file, "wb") as f:
            pickle.dump(mnist, f)

    save_file = os.path.join(save_dir, filename)
    if not os.path.exists(save_file):
        download_and_save(save_file)
    with open(save_file, "rb") as f:
        mnist = pickle.load(f)

    def preproc(x):
        # Scale uint8 pixels to float32 in [0, 1].
        return x.astype(np.float32) / 255.0

    mnist["training_images"] = preproc(mnist["training_images"])
    mnist["test_images"] = preproc(mnist["test_images"])
    return (
        mnist["training_images"],
        mnist["training_labels"].astype(np.uint32),
        mnist["test_images"],
        mnist["test_labels"].astype(np.uint32),
    )
66 |
67 |
def fashion_mnist(save_dir="/tmp"):
    """Load Fashion-MNIST via mnist(), with its mirror URL and cache name."""
    fashion_source = {
        "base_url": "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/",
        "filename": "fashion_mnist.pkl",
    }
    return mnist(save_dir, **fashion_source)
74 |
75 |
if __name__ == "__main__":
    # Quick self-check of the downloaded data's shapes when run as a script.
    train_x, train_y, test_x, test_y = mnist()
    assert train_x.shape == (60000, 28 * 28), "Wrong training set size"
    assert train_y.shape == (60000,), "Wrong training set size"
    assert test_x.shape == (10000, 28 * 28), "Wrong test set size"
    assert test_y.shape == (10000,), "Wrong test set size"
--------------------------------------------------------------------------------
/04_Extra/XAI/Grad_CAM/PyTorch/PyTorch.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from torchvision import models
5 | from torchvision import transforms
6 |
7 | from PIL import Image
8 | import numpy as np
9 |
class build_model(nn.Module):
    """Wrap a torchvision classifier so Grad-CAM can read the last conv-block
    activations and the gradients flowing back into them.

    Args:
        base_model: Name of a torchvision.models constructor whose instance
            exposes `.features`, `.avgpool` and `.classifier`
            (VGG/EfficientNet-style layouts).
    """

    def __init__(self, base_model="efficientnet_b0"):
        super(build_model, self).__init__()

        assert base_model in dir(models), "Please Check 'base_model' in https://pytorch.org/vision/stable/models.html"
        # FIX: resolve the constructor with getattr instead of eval() — same
        # behavior, without executing an arbitrary code string.
        self.net = getattr(models, base_model)(pretrained=True)

        # disect the network to access its last convolutional layer
        self.features = self.net.features

        # the fully-connected head of the chosen base model
        self.classifier = self.net.classifier

        # placeholder for the gradients captured by the backward hook
        self.gradients = None

    # hook for the gradients of the activations
    def activations_hook(self, grad):
        self.gradients = grad

    def forward(self, x):
        x = self.features(x)
        # register the hook so backward() stores d(score)/d(activations)
        h = x.register_hook(self.activations_hook)
        x = self.net.avgpool(x)
        x = nn.Flatten()(x)
        # apply the classifier head
        x = self.classifier(x)
        return x

    # method for the gradient extraction
    def get_activations_gradient(self):
        return self.gradients

    # method for the activation extraction
    def get_activations(self, x):
        return self.features(x)
48 |
net = build_model(base_model="vgg19")
# Inference mode (affects e.g. dropout/batch-norm layers in the base model).
net.eval()

# use the ImageNet transformation
transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

raw_img = Image.open("../cat_dog.jpg")
# Add a batch dimension: (1, C, H, W).
img = transform(raw_img).unsqueeze(0)

pred = net(img)

# class index -> label, used below to name the output file.
idx2cls = {
    174: "tabby",
    211: "german_shepherd"
}

idx = 174

# Backprop the chosen class score so the hook captures the conv gradients.
pred[:, idx].backward()
gradients = net.get_activations_gradient()
# Grad-CAM channel weights: gradients averaged over batch and spatial dims.
pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
activations = net.get_activations(img).detach()

# Weight each feature-map channel by its pooled gradient.
for i in range(activations.shape[1]):
    activations[:, i, :, :] *= pooled_gradients[i]

# Average the channels, clip negatives (ReLU), and normalize to [0, 1].
heatmap = torch.mean(activations, dim=1).squeeze().numpy()
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)

# TDL
# Colormap draw
import cv2
img = cv2.imread('../cat_dog.jpg')
# Resize the heatmap to the image, colorize it, and blend it over the photo.
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = heatmap * 0.4 + img
cv2.imwrite(f'./{idx2cls[idx]}.jpg', superimposed_img)
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/ver_jax.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import sys
3 | sys.path.append('../../')
4 | import time
5 | import numpy as np
6 | import numpy.random as npr
7 | import jax
8 | import jax.numpy as jnp
9 | from jax import grad, jit, value_and_grad
10 | from jax import nn, lax
11 | from jax import random
12 |
13 | from jax.experimental import stax, optimizers
14 |
15 | from utils import jax_dataset
key = random.PRNGKey(1)
# %%
# Hyperparameters (param_scale is unused below — kept for parity with the
# original example).
param_scale = 0.1
step_size = 0.001
num_epochs = 10
batch_size = 128

# MNIST as flat arrays; reshape to NCHW (N, 1, 28, 28) for the conv stack.
train_images, train_labels, test_images, test_labels = jax_dataset.mnist()
train_images = np.reshape(train_images, [-1, 1, 28, 28])
test_images = np.reshape(test_images, [-1, 1, 28, 28])
num_train = train_images.shape[0]
# Count batches, including a final partial batch when batch_size does not
# divide the dataset evenly.
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
29 |
30 | # %%
def data_stream():
    """Endlessly yield shuffled (images, labels) minibatches.

    Reshuffles with a fixed-seed RandomState each full pass; the final
    slice of a pass may be shorter than batch_size.
    """
    shuffler = npr.RandomState(0)
    while True:
        order = shuffler.permutation(num_train)
        for start in range(0, num_batches * batch_size, batch_size):
            chosen = order[start:start + batch_size]
            yield train_images[chosen], train_labels[chosen]

batches = data_stream()
40 |
# Two conv/pool stages followed by a linear read-out with log-softmax.
# stax.serial returns (init_fun, apply_fun); `net` is the apply function.
init_fun, net = stax.serial(
    stax.Conv(16, (3, 3), (1, 1), padding="SAME"),
    stax.Relu,
    stax.MaxPool((2, 2), (2, 2), padding="SAME"),
    stax.Conv(32, (3, 3), (1, 1), padding="SAME"),
    stax.Relu,
    stax.MaxPool((2, 2), (2, 2), padding="SAME"),
    stax.Flatten,
    stax.Dense(10),
    stax.LogSoftmax
)

# Initialize parameters from a sample input shape; the leading 64 is only
# a template batch size and does not constrain later batches.
_, params = init_fun(key, (64, 1, 28, 28))
54 |
def loss(params, batch):
    """Mean cross-entropy of the net's log-softmax outputs vs one-hot targets."""
    images, labels = batch
    log_probs = net(params, images)
    per_example = jnp.sum(log_probs * labels, axis=1)
    return -jnp.mean(per_example)
59 |
def accuracy(params, batch):
    """Fraction of examples whose arg-max prediction matches the target.

    Fix: the original called an undefined name `predict`; the apply
    function returned by stax.serial is bound to `net` in this file.
    """
    inputs, targets = batch
    target_class = jnp.argmax(targets, axis=1)
    predicted_class = jnp.argmax(net(params, inputs), axis=1)
    return jnp.mean(predicted_class == target_class)
65 |
66 | # %%
# Adam optimizer triple: init, per-step update, and param extraction.
opt_init, opt_update, get_params = optimizers.adam(step_size)
opt_state = opt_init(params)

@jit
def update(params, batch, opt_state):
    """One optimizer step; returns (new_params, new_opt_state, batch_loss)."""
    value, grads = value_and_grad(loss)(params, batch)
    # First argument is the step counter; hard-coded 0 here — presumably
    # acceptable because Adam's schedule is constant, but verify if a
    # decaying step size is ever used (TODO confirm).
    opt_state = opt_update(0, grads, opt_state)
    return get_params(opt_state), opt_state, value
75 |
76 | # %%
# Training loop.
# Fixes vs. the original:
#  * `params = update(...)` bound the whole (params, opt_state, value)
#    tuple to `params`, so the model was never actually trained;
#  * `opt_state` was never advanced between steps;
#  * the loss was recomputed separately even though update() returns it.
for epoch in range(num_epochs):
    start_time = time.time()
    loss_val = 0
    for _ in range(num_batches):
        batch_data = next(batches)
        params, opt_state, batch_loss = update(params, batch_data, opt_state)
        loss_val += batch_loss
    epoch_time = time.time() - start_time

    train_acc = accuracy(params, (train_images, train_labels))
    test_acc = accuracy(params, (test_images, test_labels))
    print(f"Epoch: {epoch+1}, Loss: {loss_val/num_batches}, Elapsed time: {epoch_time:0.2f} sec")
    print("Training set accuracy {}".format(train_acc))
    print("Test set accuracy {}".format(test_acc))
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/download.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import argparse
3 |
def main(args):
    """Download a pix2pix/CycleGAN dataset archive and extract it under ./datasets.

    Paired (pix2pix) datasets ship as .tar.gz archives; unpaired (CycleGAN)
    datasets ship as .zip, so each is extracted with the matching stdlib
    module (the original always used tarfile, which cannot read zip files).

    Args:
        args: argparse Namespace with `datatype` ('paired'/'unpaired')
              and `dataset` (dataset name) attributes.
    """
    import os
    import sys
    import tarfile
    import zipfile
    import urllib.request

    datatype = args.datatype
    dataset = args.dataset
    SAVE_PATH = "./datasets"
    os.makedirs(SAVE_PATH, exist_ok=True)
    if datatype == 'paired':
        URL = f"http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{dataset}.tar.gz"
    else :
        URL = f"http://efrosgans.eecs.berkeley.edu/cyclegan/datasets/{dataset}.zip"

    file_name = URL.split("/")[-1]
    file_path = os.path.join(SAVE_PATH, file_name)

    # Data Download
    ## For progressbar
    def report(url):
        file_name = url.split("/")[-1]
        def progbar(blocknr, blocksize, size):
            current = blocknr*blocksize
            sys.stdout.write(f"\rDownloading {file_name} ...... {100.0*current/size:.2f}%")
        return progbar

    if not os.path.exists(file_path):
        print(f"In progress to download '{dataset}' data ....")
        urllib.request.urlretrieve(URL, file_path, report(URL))
        print()
    else:
        print("Already downloaded !")

    print(f"Downloading Done!")

    # Data Extract — skip if this dataset's directory already exists (the
    # original checked "flower_photos", copied from an unrelated script,
    # so extraction re-ran on every invocation).
    if not os.path.exists(os.path.join(SAVE_PATH, dataset)):
        print("In progress to extract data ....")
        if file_name.endswith(".zip"):
            with zipfile.ZipFile(file_path) as zf:
                members = zf.namelist()
                for idx, member in enumerate(members):
                    zf.extract(member=member, path=SAVE_PATH)
                    sys.stdout.write(f"\rExtracting {file_name} ...... {100.0*(idx+1)/len(members):.2f}%")
        else:
            with tarfile.open(file_path) as tar:
                members = tar.getmembers()
                for idx, member in enumerate(members):
                    tar.extract(member=member, path=SAVE_PATH)
                    sys.stdout.write(f"\rExtracting {file_name} ...... {100.0*(idx+1)/len(members):.2f}%")
        print()
    else:
        print("Already extracted !")
55 |
if __name__=="__main__":
    # CLI: choose paired (pix2pix) or unpaired (CycleGAN) data, then a
    # dataset name valid for that type.
    parser = argparse.ArgumentParser()
    parser.add_argument("--datatype", default='paired', type=str, help="")
    parser.add_argument("--dataset", default='facades', type=str, help="")

    args = parser.parse_args()

    datatype_list = ['paired', 'unpaired']
    assert args.datatype in datatype_list, f"Please use dataset in {datatype_list}"

    # Valid dataset names depend on the chosen datatype.
    if args.datatype == 'paired':
        data_list = ['cityscapes', 'edges2handbags', 'edges2shoes', 'facades', 'maps', 'night2day']
    else:
        data_list = ["apple2orange", "summer2winter_yosemite", "horse2zebra", "monet2photo", \
                     "cezanne2photo", "ukiyoe2photo", "vangogh2photo", "iphone2dslr_flower", "ae_photos"]

    assert args.dataset in data_list, f"Please use dataset in {data_list}"

    # Sanity check: no option may be left as None.
    dict_args = vars(args)
    for i in dict_args.keys():
        assert dict_args[i]!=None, '"%s" key is None Value!'%i
    print("\n================ Options ================")
    print(f"Dataset : {args.dataset}")
    print("===========================================\n")


    main(args)
# %%
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/tf_nn.py:
--------------------------------------------------------------------------------
import os
# For Mac User...
os.environ['KMP_DUPLICATE_LIB_OK']='True'

import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow.keras import utils, datasets

print("Packge Loaded!")

# Data Loading: MNIST scaled to [0, 1], trailing channel axis added,
# labels one-hot encoded.
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()
train_x, test_x = np.expand_dims(train_x/255., -1), np.expand_dims(test_x/255., -1)
train_y, test_y = utils.to_categorical(train_y, 10), utils.to_categorical(test_y, 10)

print("Train Data's Shape : ", train_x.shape, train_y.shape)
print("Test Data's Shape : ", test_x.shape, test_y.shape)

# Set Network — TF1-style graph: conv -> pool -> conv -> pool -> GAP -> dense
img_size = train_x.shape[1]
input_channel = 1
ksize = 3
num_filters_1 = 16
num_filters_2 = 32
num_classes = 10


X = tf.placeholder(tf.float32, shape=[None, img_size, img_size, input_channel])

W1 = tf.Variable(tf.random.normal([ksize, ksize, input_channel, num_filters_1]))
b1 = tf.Variable(tf.zeros([num_filters_1]))
W2 = tf.Variable(tf.random.normal([ksize, ksize, num_filters_1, num_filters_2]))
b2 = tf.Variable(tf.zeros([num_filters_2]))
W3 = tf.Variable(tf.random.normal([num_filters_2, num_classes]))
b3 = tf.Variable(tf.zeros([num_classes]))

first_hidden = tf.nn.relu(tf.nn.conv2d(X, W1, strides=[1,1,1,1], padding='SAME')+b1)
first_pool = tf.nn.max_pool(first_hidden, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
second_hidden = tf.nn.relu(tf.nn.conv2d(first_pool, W2, strides=[1,1,1,1], padding='SAME')+b2)
second_pool = tf.nn.max_pool(second_hidden, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Global average pooling over the spatial dimensions.
gap = tf.reduce_mean(second_pool, axis=(1, 2))
output = tf.matmul(gap, W3)+b3

Y = tf.placeholder(tf.float32, shape = [None, 10])
Loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=Y))
Optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(Loss)
Corr = tf.equal(tf.argmax(output,1), tf.argmax(Y,1))
Acc = tf.reduce_mean(tf.cast(Corr, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())


# Training loop
print("Start Training !")
epochs = 500
batch_size = 100
steps = np.ceil(len(train_x)/batch_size)
for epoch in range(epochs):
    epoch_loss = 0
    epoch_acc = 0
    for step in range(0, len(train_x), batch_size):
        _, step_loss, step_acc = sess.run([Optimizer, Loss, Acc],
                                          feed_dict={X:train_x[step:step+batch_size], Y:train_y[step:step+batch_size]})
        epoch_loss += step_loss
        epoch_acc += step_acc
    # Validate on a random test batch. Fix: the original sampled `val_idx`
    # but then fed the full test set, leaving the sample unused.
    val_idx = np.random.choice(len(test_x), batch_size, replace=False)
    val_loss, val_acc = sess.run([Loss, Acc], feed_dict={X:test_x[val_idx], Y:test_y[val_idx]})

    print("\nEpoch : ", epoch)
    print("Train Loss : ", epoch_loss/steps, " Train Accuracy : ", epoch_acc/steps)
    print("Validation Loss : ", val_loss, "Validation Accuracy : ", val_acc)
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 |
3 | import torch
4 | import torch.nn.functional as F
5 | from torch import nn
6 | from torch import optim
7 | from torch.autograd import Variable
8 | from torchvision import utils
9 | from torchvision import datasets
10 | from torchvision import transforms
11 | import numpy as np
12 | from matplotlib import pyplot as plt
13 |
# Loading Data

# MNIST dataset — downloaded to ../../data/ on first run; images are
# converted to [0, 1] float tensors by ToTensor().
mnist_train = datasets.MNIST(root='../../data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root='../../data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Test Data Done ! ")
28 |
29 | # Defining Model
30 |
31 | # our model
class Model(nn.Module):
    """Two-layer MLP for MNIST: 784 -> 256 (ReLU) -> 10 logits."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(784, 256)
        self.linear2 = nn.Linear(256, 10)

    def forward(self, X):
        hidden = F.relu(self.linear1(X))
        logits = self.linear2(hidden)
        return logits
42 |
model = Model()
# CrossEntropyLoss applies log-softmax internally, so the model emits raw logits.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training Phase

batch_size = 100

data_iter = torch.utils.data.DataLoader(mnist_train, batch_size=100, shuffle=True, num_workers=1)

print("Iteration maker Done !")

# Training loop: flatten each 28x28 image to a 784 vector before the MLP.
# NOTE(review): Variable is a legacy wrapper (no-op on modern PyTorch);
# model.forward(X) bypasses hooks vs. calling model(X) directly.
for epoch in range(10):
    avg_loss = 0
    total_batch = len(mnist_train) // batch_size
    for i, (batch_img, batch_lab) in enumerate(data_iter):
        X = Variable(batch_img.view(-1, 28*28))
        Y = Variable(batch_lab)

        y_pred = model.forward(X)

        loss = criterion(y_pred, Y)
        # Zero gradients, perform a backward pass, and update the weights.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        avg_loss += loss
        if (i+1)%100 == 0 :
            print("Epoch : ", epoch+1, "Iteration : ", i+1, " Loss : ", avg_loss.data.numpy()/(i+1))
    print("Epoch : ", epoch+1, " Loss : ", avg_loss.data.numpy()/total_batch)
print("Training Done !")

# Evaluation over the whole test set in one forward pass.
# NOTE(review): test_data/test_labels are deprecated aliases of data/targets;
# this path skips the ToTensor() transform, so pixels stay in [0, 255].
test_img = mnist_test.test_data.view(-1, 28*28).type(torch.FloatTensor)
test_lab = mnist_test.test_labels
outputs = model.forward(test_img)
pred_val, pred_idx = torch.max(outputs.data, 1)
correct = (pred_idx == test_lab).sum()
print('Accuracy : ', correct.data.numpy()/len(test_img)*100)

# Testing: visualize one random test digit with its label and prediction.

r = np.random.randint(0, len(mnist_test)-1)
X_single_data = mnist_test.test_data[r:r + 1].view(-1,28*28).float()
Y_single_data = mnist_test.test_labels[r:r + 1]

single_prediction = model(X_single_data)
plt.imshow(X_single_data.data.view(28,28).numpy(), cmap='gray')

print('Label : ', Y_single_data.data.view(1).numpy())
print('Prediction : ', torch.max(single_prediction.data, 1)[1].numpy())
--------------------------------------------------------------------------------
/utils/jax_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google LLC
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # https://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """Datasets used in examples."""
16 |
17 |
18 | import array
19 | import gzip
20 | import os
21 | from os import path
22 | import struct
23 | import urllib.request
24 |
25 | import numpy as np
26 |
27 |
28 | _DATA = "/tmp/jax_example_data/"
29 |
30 |
def _download(url, filename):
    """Download a url to a file in the JAX data temp directory."""
    os.makedirs(_DATA, exist_ok=True)
    out_file = path.join(_DATA, filename)
    if path.isfile(out_file):
        return
    urllib.request.urlretrieve(url, out_file)
    print("downloaded {} to {}".format(url, _DATA))
39 |
40 |
41 | def _partial_flatten(x):
42 | """Flatten all but the first dimension of an ndarray."""
43 | return np.reshape(x, (x.shape[0], -1))
44 |
45 |
46 | def _one_hot(x, k, dtype=np.float32):
47 | """Create a one-hot encoding of x of size k."""
48 | return np.array(x[:, None] == np.arange(k), dtype)
49 |
50 |
def mnist_raw():
    """Download and parse the raw MNIST dataset.

    Returns (train_images, train_labels, test_images, test_labels) as
    uint8 ndarrays; images are (N, 28, 28), labels are (N,).
    """
    # CVDF mirror of http://yann.lecun.com/exdb/mnist/
    base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"

    def parse_labels(filename):
        # IDX label file: 8-byte big-endian header (magic, count), then
        # one unsigned byte per label.
        with gzip.open(filename, "rb") as fh:
            _ = struct.unpack(">II", fh.read(8))
            return np.array(array.array("B", fh.read()), dtype=np.uint8)

    def parse_images(filename):
        # IDX image file: 16-byte big-endian header (magic, count, rows,
        # cols), then row-major unsigned bytes.
        with gzip.open(filename, "rb") as fh:
            _, num_data, rows, cols = struct.unpack(">IIII", fh.read(16))
            return np.array(array.array("B", fh.read()),
                            dtype=np.uint8).reshape(num_data, rows, cols)

    for filename in ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
                     "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]:
        _download(base_url + filename, filename)

    train_images = parse_images(path.join(_DATA, "train-images-idx3-ubyte.gz"))
    train_labels = parse_labels(path.join(_DATA, "train-labels-idx1-ubyte.gz"))
    test_images = parse_images(path.join(_DATA, "t10k-images-idx3-ubyte.gz"))
    test_labels = parse_labels(path.join(_DATA, "t10k-labels-idx1-ubyte.gz"))

    return train_images, train_labels, test_images, test_labels
77 |
78 |
def mnist(permute_train=False):
    """Download, parse and process MNIST data to unit scale and one-hot labels."""
    train_images, train_labels, test_images, test_labels = mnist_raw()

    # Flatten to (N, 784) and scale pixel values into [0, 1].
    scale = np.float32(255.)
    train_images = _partial_flatten(train_images) / scale
    test_images = _partial_flatten(test_images) / scale
    train_labels = _one_hot(train_labels, 10)
    test_labels = _one_hot(test_labels, 10)

    if permute_train:
        # Deterministic shuffle of the training split (fixed seed 0).
        order = np.random.RandomState(0).permutation(train_images.shape[0])
        train_images, train_labels = train_images[order], train_labels[order]

    return train_images, train_labels, test_images, test_labels
--------------------------------------------------------------------------------
/02_Intermediate/Multi_Layer_Perceptron/MXNet_Gluon.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os
3 | import numpy as np
4 | import mxnet as mx
5 | from mxnet import io, nd, gluon, init, autograd
6 | from mxnet.gluon.data.vision import datasets
7 | from mxnet.gluon import nn, data
8 | from matplotlib import pyplot as plt
9 | from multiprocessing import cpu_count
10 | CPU_COUNT = cpu_count()
11 | print("Package Loaded!")
12 |
13 | # %%
# Load MNIST via Gluon, then materialize both splits as numpy arrays
# so they can be fed through NDArrayIter below.
train_raw_data = datasets.MNIST(train=True)
val_raw_data = datasets.MNIST(train=False)

train_data = {}
train_data['data'] = np.array([i[0].asnumpy() for i in train_raw_data])
train_data['label'] = np.array([i[1] for i in train_raw_data])

print(train_data['data'].shape)
print(train_data['label'].shape)

val_data = {}
val_data['data'] = np.array([i[0].asnumpy() for i in val_raw_data])
val_data['label'] = np.array([i[1] for i in val_raw_data])

print(val_data['data'].shape)
print(val_data['label'].shape)

# %%
# Three-layer MLP head.
# NOTE(review): a sigmoid on the output layer before SoftmaxCELoss is
# unusual (the loss applies softmax to what it treats as logits) — confirm
# this is intended.
net = nn.Sequential()
net.add(
    nn.Dense(256, activation='relu'),
    nn.Dense(128, activation='relu'),
    nn.Dense(10, activation='sigmoid')
)

# Use the first GPU when available, otherwise CPU.
gpus = mx.test_utils.list_gpus()
ctx = [mx.gpu()] if gpus else [mx.cpu()]

net.initialize(init=init.Xavier(), ctx=ctx)

cross_entropy = gluon.loss.SoftmaxCELoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
print("Setting Done!")

# %%

epochs=100
batch_size=16
52 |
class DataIterLoader():
    """Adapter exposing io.NDArrayIter as an iterator of (data, label) pairs."""

    def __init__(self, X, Y, batch_size=1, shuffle=True, ctx=mx.cpu()):
        # NDArrayIter expects NCHW, so move the channel axis forward first.
        nchw = np.transpose(X, [0, 3, 1, 2])
        self.data_iter = io.NDArrayIter(
            data=gluon.utils.split_and_load(nchw, ctx_list=ctx, batch_axis=0),
            label=gluon.utils.split_and_load(Y, ctx_list=ctx, batch_axis=0),
            batch_size=batch_size, shuffle=shuffle)
        self.len = len(X)

    def __iter__(self):
        # Rewind the wrapped iterator so the loader is reusable per epoch.
        self.data_iter.reset()
        return self

    def __next__(self):
        batch = self.data_iter.__next__()
        assert len(batch.data) == len(batch.label) == 1
        return batch.data[0], batch.label[0]
70 |
train_loader = DataIterLoader(train_data['data'], train_data['label'], batch_size, ctx=ctx)
validation_loader = DataIterLoader(val_data['data'], val_data['label'], batch_size, ctx=ctx)

print("Start Training!")
for epoch in range(epochs):
    train_loss, train_acc, valid_loss, valid_acc = 0., 0., 0., 0.
    #tic = time.time()
    # forward + backward
    for step, (batch_img, batch_lab) in enumerate(train_loader):
        with autograd.record():
            output = net(batch_img)
            loss = cross_entropy(output, batch_lab)
        loss.backward()
        # update parameters (normalized by batch_size)
        trainer.step(batch_size)
        # running metrics for this epoch
        correct = nd.argmax(output, axis = 1).asnumpy()
        acc = np.mean(correct == batch_lab.asnumpy())
        train_loss += loss.mean().asnumpy()
        train_acc += acc

    # Validation pass: forward only, no gradient recording.
    for idx, (val_img, val_lab) in enumerate(validation_loader):
        output = net(val_img)
        loss = cross_entropy(output, val_lab)
        correct = nd.argmax(output, axis = 1).asnumpy()
        acc = np.mean(correct == val_lab.asnumpy())
        valid_loss += loss.asnumpy().mean()
        valid_acc += acc

    print("Epoch : %d, loss : %f, acc : %f, val_loss : %f, val_acc : %f"%(epoch+1, train_loss/(step+1), train_acc/(step+1), valid_loss/(idx+1), valid_acc/(idx+1)))
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/pix2pix/PyTorch/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import torch
4 | from tqdm import tqdm
5 | from PIL import Image
6 | from torch import nn
7 | from torch.utils.data import Dataset, DataLoader
8 | from torchvision import transforms
9 | from models import *
10 |
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Root of the paired dataset; each image file contains domain A and B side by side.
PATH = "../../datasets/night2day"
IMG_FORMAT = ["jpg", "jpeg", "tif", "tiff", "bmp", "png"]

img_size = 256
batch_size = 32

# Resize each crop to img_size x img_size and convert to a [0, 1] tensor.
transform = transforms.Compose([
    transforms.Resize([img_size, img_size]),
    transforms.ToTensor()
])
23 |
class CustomDataset(Dataset):
    """Paired-image dataset: each file holds domains A and B side by side."""

    def __init__(self, data_dir, transform):
        self.filelist = []
        self.classes = sorted(os.listdir(data_dir))
        # Walk the tree and collect every file with a recognized image extension.
        for root, _, files in os.walk(data_dir):
            if not files:
                continue
            images = [
                os.path.join(root, name)
                for name in files
                if name.split(".")[-1].lower() in IMG_FORMAT
            ]
            self.filelist += images
        self.transform = transform

    def __len__(self):
        return len(self.filelist)

    def __getitem__(self, idx):
        # A combined image is [A | B]: the left square is domain A, the
        # remainder (up to the long side) is domain B.
        image = Image.open(self.filelist[idx])
        short_side = min(image.size)
        long_side = max(image.size)
        dom_a = image.crop((0, 0, short_side, short_side))
        dom_b = image.crop((short_side, 0, long_side, short_side))
        return self.transform(dom_a), self.transform(dom_b)
48 |
dataset = CustomDataset(PATH, transform)

loader = DataLoader(dataset, batch_size=batch_size, num_workers=4)

generator = Generator_Encoder_Decoder(A_channel=3, B_channel=3, num_features=64).to(device)
discriminator = Discriminator(A_channel=3, B_channel=3, num_features=64, n_layers=1).to(device)

# pix2pix objective: adversarial BCE + L1 reconstruction weighted by l1_lambda.
gan_loss = nn.BCELoss()
l1_loss = nn.L1Loss()
l1_lambda = 10
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
g_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0002)

with tqdm(total=len(loader)) as t:
    t.set_description(f'Loader')
    for i, (batch_img_a, batch_img_b) in enumerate(loader):
        # time.sleep(0.1)
        batch_img_a = batch_img_a.to(device)
        batch_img_b = batch_img_b.to(device)

        # Conditional discriminator sees (input A, candidate B) pairs;
        # the fake pair is detached so D's step doesn't touch G.
        gen_b = generator(batch_img_a)
        dis_pred_real = discriminator(torch.cat([batch_img_a, batch_img_b], dim=1))
        dis_pred_fake = discriminator(torch.cat([batch_img_a, gen_b], dim=1).detach())

        # Training Discriminator: real pairs -> 1, fake pairs -> 0.
        real_lab = torch.ones_like(dis_pred_real).to(device)
        fake_lab = torch.zeros_like(dis_pred_fake).to(device)

        dis_loss_real = gan_loss(dis_pred_real, real_lab)
        dis_loss_fake = gan_loss(dis_pred_fake, fake_lab)

        dis_loss = dis_loss_real + dis_loss_fake
        d_optimizer.zero_grad()
        dis_loss.backward()
        d_optimizer.step()

        # Training Generator: fool D (fake -> 1) plus L1 fidelity to B.
        gen_l1_loss = l1_loss(gen_b, batch_img_b)
        dis_pred_fake = discriminator(torch.cat([batch_img_a, gen_b], dim=1))
        dis_loss_real = gan_loss(dis_pred_fake, real_lab)
        gen_loss = dis_loss_real + l1_lambda*gen_l1_loss
        g_optimizer.zero_grad()
        gen_loss.backward()
        g_optimizer.step()

        # Logger
        t.set_postfix({"Generator loss": f"{gen_loss.item():.3f}", "Discriminator loss": f"{dis_loss.item():.3f}"})
        t.update()
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/EDSR/TensorFlow/model.py:
--------------------------------------------------------------------------------
1 | import math
2 | import tensorflow as tf
3 | from tensorflow.keras import layers, models
4 | from tensorflow.python.keras.layers.advanced_activations import PReLU
5 |
6 | # To-do
7 | # EDSR Model
8 |
def MeanShift(x, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1, name="MeanShift"):
    """Subtract (sign=-1) or add back (sign=+1) the dataset RGB mean, scaled by rgb_range."""
    offset = sign * rgb_range * tf.reshape(rgb_mean, [1, 1, -1]) / tf.reshape(rgb_std, [1, 1, -1])
    shifted = layers.Add(name=name)([x, offset])
    return shifted
13 |
def BasicBlock(x, filters, kernel_size, padding="same", use_bias=False, use_bn=True, act='relu', name="ConvBlock"):
    """Conv2D optionally followed by BatchNorm and an activation."""
    out = layers.Conv2D(filters=filters, kernel_size=kernel_size, padding=padding,
                        use_bias=use_bias, name=name+"_Conv")(x)
    if use_bn:
        out = layers.BatchNormalization(name=name+"_BN")(out)
    if act is None:
        return out
    return layers.Activation(act, name=name+"_Act")(out)
21 |
def ResBlock(x, filters, kernel_size, padding="same", use_bias=True, use_bn=False, act='relu', res_scale=1, name="ResBlock"):
    """EDSR residual block: Conv-(BN)-Act-Conv-(BN), scaled residual + skip.

    Fixes two defects in the original:
      * the activation's output was discarded (`layers.Activation(...)(out)`
        was never assigned back), so the block was purely linear;
      * `res_scale` was ignored and the residual branch was hard-coded as
        `2*out` — EDSR scales the residual by `res_scale` before the skip add.
    """
    out = x
    for i in range(2):
        out = layers.Conv2D(filters=filters, kernel_size=kernel_size, padding=padding, use_bias=use_bias, name=name+f"_Conv{i+1}")(out)
        if use_bn:
            out = layers.BatchNormalization(name=name+f"_BN{i+1}")(out)
        # Activation only between the two convs.
        if i == 0:
            out = layers.Activation(act, name=name+f"_Act{i+1}")(out)

    out = layers.Add(name=name+"_Skip")([res_scale * out, x])
    return out
33 |
def Upsampler(x, scale, filters, use_bias=True, use_bn=False, act="relu", name="Upsampler"):
    """Upsample `x` by `scale` using Conv2D + depth_to_space (sub-pixel) steps.

    Supports power-of-two scales (log2(scale) successive x2 pixel-shuffle
    steps) and scale == 3 (a single x3 step); anything else raises
    NotImplementedError.

    Fixes vs. the original:
      * each x2 step shuffles with block size 2 (the original passed
        `scale`, which breaks channel counts for scale >= 4: each conv
        emits 4*filters channels, i.e. exactly one x2 shuffle's worth);
      * the scale == 3 branch referenced `out` and `i` before assignment
        (NameError); it now starts from `x` with a fixed layer index.
    """
    if (scale & (scale-1)) == 0:
        out = x
        for i in range(int(math.log(scale, 2))):
            out = layers.Conv2D(4*filters, 3, padding="same", use_bias=use_bias, name=name+f"_Conv{i+1}")(out)
            out = tf.nn.depth_to_space(out, 2, name=name+"_SubPixel")

        if use_bn:
            out = layers.BatchNormalization(name=name+"_BN")(out)
        if act == "relu":
            out = layers.Activation("relu", name=name+"_Act")(out)
        elif act == "prelu":
            out = layers.PReLU(name=name+"_Act")(out)

    elif scale == 3:
        out = layers.Conv2D(9*filters, 3, padding="same", use_bias=use_bias, name=name+"_Conv1")(x)
        out = tf.nn.depth_to_space(out, scale, name=name+"_SubPixel")

        if use_bn:
            out = layers.BatchNormalization(name=name+"_BN")(out)
        if act == "relu":
            out = layers.Activation("relu", name=name+"_Act")(out)
        elif act == "prelu":
            out = layers.PReLU(name=name+"_Act")(out)
    else:
        raise NotImplementedError

    return out
62 |
63 |
def EDSR(img_channel=3, rgb_range=255, filters=64, n_resblocks=4, res_scale=1, act='relu', scale=4, name="EDSR", kernel_size=3):
    """Build the EDSR super-resolution network as a Keras Model.

    Pipeline: mean-shift -> head conv -> n_resblocks ResBlocks -> conv +
    long skip -> sub-pixel upsampler -> tail conv -> inverse mean-shift.

    Fix: the original body referenced an undefined `kernel_size`
    (NameError on first call); it is now a trailing keyword parameter
    defaulting to 3, matching the other 3x3 convolutions, so existing
    callers are unaffected.
    """
    input_layer = layers.Input(shape=(None, None, img_channel), name=name+"_Input")
    out = MeanShift(input_layer, rgb_range, name=name+"_MeanShift_Top")
    out = layers.Conv2D(filters, 3, padding="same", name=name+"_Conv1")(out)
    x = out
    for i in range(n_resblocks):
        out = ResBlock(out, filters, kernel_size, act=act, res_scale=res_scale, name=name+f"_ResBlock_{i+1}")
    out = layers.Conv2D(filters, 3, padding="same", name=name+"_Conv2")(out)
    # Long skip connection around the residual body.
    out = layers.Add(name=name+"_Add")([out, x])
    # act=False: no activation inside the upsampler (EDSR convention).
    out = Upsampler(out, scale, filters, act=False, name=name+"_Upsampler")
    out = layers.Conv2D(img_channel, 3, padding="same", name=name+"_Conv3")(out)
    out = MeanShift(out, rgb_range, sign=1, name=name+"_MeanShift_Bot")
    return models.Model(inputs=input_layer, outputs=out, name=name)
77 |
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/MXNet_Gluon.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os
3 | import numpy as np
4 | import mxnet as mx
5 | from mxnet import io, nd, gluon, init, autograd
6 | from mxnet.gluon.data.vision import datasets
7 | from mxnet.gluon import nn, data
8 | from matplotlib import pyplot as plt
9 | from multiprocessing import cpu_count
10 | CPU_COUNT = cpu_count()
11 | print("Package Loaded!")
12 |
13 | # %%
# Load MNIST via Gluon, then materialize both splits as numpy arrays
# so they can be fed through NDArrayIter below.
train_raw_data = datasets.MNIST(train=True)
val_raw_data = datasets.MNIST(train=False)

train_data = {}
train_data['data'] = np.array([i[0].asnumpy() for i in train_raw_data])
train_data['label'] = np.array([i[1] for i in train_raw_data])

print(train_data['data'].shape)
print(train_data['label'].shape)

val_data = {}
val_data['data'] = np.array([i[0].asnumpy() for i in val_raw_data])
val_data['label'] = np.array([i[1] for i in val_raw_data])

print(val_data['data'].shape)
print(val_data['label'].shape)

# %%
# Simple CNN: two conv/pool stages, then a dense read-out.
# NOTE(review): a sigmoid on the output layer before SoftmaxCELoss is
# unusual (the loss applies softmax to what it treats as logits) — confirm
# this is intended.
net = nn.Sequential()
net.add(
    nn.Conv2D(16, (3, 3), (1, 1), (1, 1), activation='relu'),
    nn.MaxPool2D((2, 2), (2, 2)),
    nn.Conv2D(32, (3, 3), (1, 1), (1, 1), activation='relu'),
    nn.MaxPool2D((2, 2), (2, 2)),
    nn.Flatten(),
    nn.Dense(10, activation='sigmoid')
)

# Use the first GPU when available, otherwise CPU.
gpus = mx.test_utils.list_gpus()
ctx = [mx.gpu()] if gpus else [mx.cpu()]

net.initialize(init=init.Xavier(), ctx=ctx)

cross_entropy = gluon.loss.SoftmaxCELoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})
print("Setting Done!")

# %%

epochs=100
batch_size=16
55 |
class DataIterLoader():
    """Wraps io.NDArrayIter so epochs can iterate it as (data, label) pairs."""

    def __init__(self, X, Y, batch_size=1, shuffle=True, ctx=mx.cpu()):
        # Channel-last numpy input is transposed to NCHW for NDArrayIter.
        self.data_iter = io.NDArrayIter(
            data=gluon.utils.split_and_load(np.transpose(X, [0, 3, 1, 2]), ctx_list=ctx, batch_axis=0),
            label=gluon.utils.split_and_load(Y, ctx_list=ctx, batch_axis=0),
            batch_size=batch_size,
            shuffle=shuffle,
        )
        self.len = len(X)

    def __iter__(self):
        # Rewind so the loader can be traversed once per epoch.
        self.data_iter.reset()
        return self

    def __next__(self):
        batch = self.data_iter.__next__()
        assert len(batch.data) == len(batch.label) == 1
        return batch.data[0], batch.label[0]
73 |
train_loader = DataIterLoader(train_data['data'], train_data['label'], batch_size, ctx=ctx)
validation_loader = DataIterLoader(val_data['data'], val_data['label'], batch_size, ctx=ctx)

print("Start Training!")
for epoch in range(epochs):
    train_loss, train_acc, valid_loss, valid_acc = 0., 0., 0., 0.
    #tic = time.time()
    # forward + backward
    for step, (batch_img, batch_lab) in enumerate(train_loader):

        with autograd.record():
            output = net(batch_img)
            loss = cross_entropy(output, batch_lab)
        loss.backward()
        # update parameters (normalized by batch_size)
        trainer.step(batch_size)
        # running metrics for this epoch
        correct = nd.argmax(output, axis = 1).asnumpy()
        acc = np.mean(correct == batch_lab.asnumpy())
        train_loss += loss.mean().asnumpy()
        train_acc += acc

    # Validation pass: forward only, no gradient recording.
    for idx, (val_img, val_lab) in enumerate(validation_loader):
        output = net(val_img)
        loss = cross_entropy(output, val_lab)
        correct = nd.argmax(output, axis = 1).asnumpy()
        acc = np.mean(correct == val_lab.asnumpy())
        valid_loss += loss.asnumpy().mean()
        valid_acc += acc

    print("Epoch : %d, loss : %f, acc : %f, val_loss : %f, val_acc : %f"%(epoch+1, train_loss/(step+1), train_acc/(step+1), valid_loss/(idx+1), valid_acc/(idx+1)))
--------------------------------------------------------------------------------
/03_Advance/GAN/LSGAN/tf_keras.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare: MNIST images only (labels unused by the GAN), scaled to [0, 1].

(train_x, _), (test_x, _) = datasets.mnist.load_data()
train_x, test_x = train_x/255., test_x/255.

print("Train Data's Shape : ", train_x.shape)
print("Test Data's Shape : ", test_x.shape)
18 |
19 | # %%
20 | # Build Network
21 |
def Build_Generator(input_shape=(100, ), output_size=(28, 28), name="Generator"):
    """Build the MLP generator mapping a latent vector to an image.

    Args:
        input_shape: shape of the latent input (excluding batch dimension).
        output_size: spatial shape of the generated image.
        name: prefix used for the model and all layer names.

    Returns:
        An uncompiled keras Sequential model.
    """
    # FIX: the output width must follow `output_size`; the original
    # hard-coded 784 and silently broke any non-28x28 output.
    n_out = int(np.prod(output_size))

    model = models.Sequential(name=name)
    model.add(layers.Dense(1200, input_shape=input_shape, name=name+"_Dense_1"))
    model.add(layers.BatchNormalization(name=name+"_BN_1"))
    model.add(layers.LeakyReLU(0.03, name=name+"_Act_1"))
    model.add(layers.Dense(1200, name=name+"_Dense_2"))
    model.add(layers.BatchNormalization(name=name+"_BN_2"))
    model.add(layers.LeakyReLU(0.03, name=name+"_Act_2"))
    # Sigmoid keeps pixels in [0, 1], matching the /255-scaled training data.
    # Layer renamed "_Dense_3" for consistency with the other layer names.
    model.add(layers.Dense(n_out, activation='sigmoid', name=name+"_Dense_3"))
    model.add(layers.Reshape(output_size, name=name+"_Output"))
    return model
34 |
def Build_Discriminator(input_shape=(28,28), name="Discriminator"):
    """Build the MLP discriminator mapping an image to a real/fake probability."""
    # Assemble the layer stack first, then wrap it in a Sequential model.
    stack = [
        layers.Flatten(input_shape=input_shape, name=name+"_Flatten"),
        layers.Dense(240, name=name+"_Dense_1"),
        layers.LeakyReLU(0.03, name=name+"_Act_1"),
        layers.Dense(240, name=name+"_Dense_2"),
        layers.LeakyReLU(0.03, name=name+"_Act_2"),
        # Sigmoid output: probability that the input image is real.
        layers.Dense(1, activation='sigmoid', name=name+"_Output"),
    ]
    return models.Sequential(stack, name=name)
46 |
47 |
n_latent = 100
input_shape = (n_latent, )
img_size = train_x.shape[1:]

# Discriminator compiled with MSE -- the least-squares GAN objective --
# instead of the usual binary cross-entropy.
D = Build_Discriminator(input_shape=img_size, name="Discriminator")
D.compile(optimizer=optimizers.Adam(), loss=losses.mean_squared_error, metrics=['acc'])
# Freeze D *after* compiling it: D.train_on_batch still updates D's weights,
# while the stacked model A (compiled below) treats D as non-trainable, so
# generator updates never touch the discriminator.
D.trainable = False

G = Build_Generator(input_shape=input_shape, output_size=img_size, name="Generator")

# Stacked model latent -> G -> D(frozen); used only to train the generator.
A = models.Model(inputs=G.input, outputs=D(G.output), name="GAN")
A.compile(optimizer=optimizers.Adam(), loss=losses.mean_squared_error)

D.summary()

G.summary()

A.summary()
66 |
# %%
# Training Network
epochs=100
batch_size=100

# Discriminator targets: 0 for generated samples, 1 for real images.
fake_label = np.zeros((batch_size, 1))
real_label = np.ones((batch_size, 1))

for epoch in range(epochs):

    G_loss_epoch = 0
    D_loss_epoch = 0
    D_acc_epoch = 0

    # Fresh random permutation of the training set each epoch.
    shuffle_idx = np.random.choice(len(train_x), len(train_x), replace=False)

    n_batches = 0
    for i, idx in enumerate(range(0, len(shuffle_idx), batch_size)):

        # --- Discriminator step: one batch of fakes + one batch of reals ---
        latent = np.random.randn(batch_size, n_latent)
        fake_x = G.predict(latent)

        # BUG FIX: index through the shuffled permutation; the original
        # sliced train_x directly, leaving shuffle_idx computed but unused.
        real_x = train_x[shuffle_idx[idx:idx+batch_size]]

        D_loss, D_acc = D.train_on_batch(np.concatenate((fake_x, real_x), axis=0),
                                         np.concatenate((fake_label, real_label), axis=0))
        D_loss_epoch += D_loss
        D_acc_epoch += D_acc

        # --- Generator step: train G (through frozen D) to label fakes real ---
        latent = np.random.randn(batch_size, n_latent)
        G_loss = A.train_on_batch(latent, real_label)
        G_loss_epoch += G_loss
        n_batches = i + 1

    # BUG FIX: average over the batch count; the original divided by `i`
    # (one short) and raised ZeroDivisionError when there was one batch.
    print(f"{epoch+1}/{epochs}, G loss : {G_loss_epoch/n_batches}, D loss : {D_loss_epoch/n_batches}, D acc : {D_acc_epoch/n_batches}")
104 |
# Visualize a 4x8 grid of generated samples after training.
latent = np.random.randn(32, n_latent)
fake_x = G.predict(latent)

plt.figure(figsize=(8, 4))
for pos, img in enumerate(fake_x):
    plt.subplot(4, 8, pos+1)
    plt.imshow(img, cmap='gray')
plt.show()
--------------------------------------------------------------------------------
/03_Advance/GAN/Vanilla_GAN/tf_keras.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare

# Load MNIST digits; labels are discarded because GAN training is unsupervised.
(train_x, _), (test_x, _) = datasets.mnist.load_data()
# Scale pixel intensities from [0, 255] into [0, 1].
train_x = train_x / 255.
test_x = test_x / 255.

print("Train Data's Shape : ", train_x.shape)
print("Test Data's Shape : ", test_x.shape)
18 |
19 | # %%
20 | # Build Network
21 |
def Build_Generator(input_shape=(100, ), output_size=(28, 28), name="Generator"):
    """Build the MLP generator mapping a latent vector to an image.

    Args:
        input_shape: shape of the latent input (excluding batch dimension).
        output_size: spatial shape of the generated image.
        name: prefix used for the model and all layer names.

    Returns:
        An uncompiled keras Sequential model.
    """
    # FIX: the output width must follow `output_size`; the original
    # hard-coded 784 and silently broke any non-28x28 output.
    n_out = int(np.prod(output_size))

    model = models.Sequential(name=name)
    model.add(layers.Dense(1200, input_shape=input_shape, name=name+"_Dense_1"))
    model.add(layers.BatchNormalization(name=name+"_BN_1"))
    model.add(layers.LeakyReLU(0.03, name=name+"_Act_1"))
    model.add(layers.Dense(1200, name=name+"_Dense_2"))
    model.add(layers.BatchNormalization(name=name+"_BN_2"))
    model.add(layers.LeakyReLU(0.03, name=name+"_Act_2"))
    # Sigmoid keeps pixels in [0, 1], matching the /255-scaled training data.
    # Layer renamed "_Dense_3" for consistency with the other layer names.
    model.add(layers.Dense(n_out, activation='sigmoid', name=name+"_Dense_3"))
    model.add(layers.Reshape(output_size, name=name+"_Output"))
    return model
34 |
def Build_Discriminator(input_shape=(28,28), name="Discriminator"):
    """Build the MLP discriminator mapping an image to a real/fake probability."""
    # Assemble the layer stack first, then wrap it in a Sequential model.
    stack = [
        layers.Flatten(input_shape=input_shape, name=name+"_Flatten"),
        layers.Dense(240, name=name+"_Dense_1"),
        layers.LeakyReLU(0.03, name=name+"_Act_1"),
        layers.Dense(240, name=name+"_Dense_2"),
        layers.LeakyReLU(0.03, name=name+"_Act_2"),
        # Sigmoid output: probability that the input image is real.
        layers.Dense(1, activation='sigmoid', name=name+"_Output"),
    ]
    return models.Sequential(stack, name=name)
46 |
47 |
n_latent = 100
input_shape = (n_latent, )
img_size = train_x.shape[1:]

# Standard (vanilla) GAN objective: binary cross-entropy on real/fake labels.
D = Build_Discriminator(input_shape=img_size, name="Discriminator")
D.compile(optimizer=optimizers.Adam(), loss=losses.binary_crossentropy, metrics=['acc'])
# Freeze D *after* compiling it: D.train_on_batch still updates D's weights,
# while the stacked model A (compiled below) treats D as non-trainable, so
# generator updates never touch the discriminator.
D.trainable = False

G = Build_Generator(input_shape=input_shape, output_size=img_size, name="Generator")

# Stacked model latent -> G -> D(frozen); used only to train the generator.
A = models.Model(inputs=G.input, outputs=D(G.output), name="GAN")
A.compile(optimizer=optimizers.Adam(), loss=losses.binary_crossentropy)

D.summary()

G.summary()

A.summary()
66 |
# %%
# Training Network
epochs=100
batch_size=100

# Discriminator targets: 0 for generated samples, 1 for real images.
fake_label = np.zeros((batch_size, 1))
real_label = np.ones((batch_size, 1))

for epoch in range(epochs):

    G_loss_epoch = 0
    D_loss_epoch = 0
    D_acc_epoch = 0

    # Fresh random permutation of the training set each epoch.
    shuffle_idx = np.random.choice(len(train_x), len(train_x), replace=False)

    n_batches = 0
    for i, idx in enumerate(range(0, len(shuffle_idx), batch_size)):

        # --- Discriminator step: one batch of fakes + one batch of reals ---
        latent = np.random.randn(batch_size, n_latent)
        fake_x = G.predict(latent)

        # BUG FIX: index through the shuffled permutation; the original
        # sliced train_x directly, leaving shuffle_idx computed but unused.
        real_x = train_x[shuffle_idx[idx:idx+batch_size]]

        D_loss, D_acc = D.train_on_batch(np.concatenate((fake_x, real_x), axis=0),
                                         np.concatenate((fake_label, real_label), axis=0))
        D_loss_epoch += D_loss
        D_acc_epoch += D_acc

        # --- Generator step: train G (through frozen D) to label fakes real ---
        latent = np.random.randn(batch_size, n_latent)
        G_loss = A.train_on_batch(latent, real_label)
        G_loss_epoch += G_loss
        n_batches = i + 1

    # BUG FIX: average over the batch count; the original divided by `i`
    # (one short) and raised ZeroDivisionError when there was one batch.
    print(f"{epoch+1}/{epochs}, G loss : {G_loss_epoch/n_batches}, D loss : {D_loss_epoch/n_batches}, D acc : {D_acc_epoch/n_batches}")
104 |
# Visualize a 4x8 grid of generated samples after training.
latent = np.random.randn(32, n_latent)
fake_x = G.predict(latent)

plt.figure(figsize=(8, 4))
for pos, img in enumerate(fake_x):
    plt.subplot(4, 8, pos+1)
    plt.imshow(img, cmap='gray')
plt.show()
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Recurrent_Neural_Network/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import numpy as np
3 | from tqdm import tqdm
4 |
5 | import torch
6 | from torch import nn
7 | from torch import optim
8 | from torch.utils.data import DataLoader
9 |
10 | from torchvision import datasets
11 | from torchvision import transforms
12 | import numpy as np
13 | from matplotlib import pyplot as plt
14 |
# Device Configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# MNIST dataset
# ToTensor() scales pixels to [0, 1] and yields (1, 28, 28) float tensors.
mnist_train = datasets.MNIST(root='../../data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root='../../data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Test Data Done ! ")

batch_size = 256

# Shuffle only the training split; the test split doubles as validation data.
train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=2)
35 |
36 | # Defining Model
class Model(nn.Module):
    """Many-to-one RNN classifier.

    Each 28x28 image is read as a sequence of 28 rows (28 features per
    step); the hidden state of the last time step is classified.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(Model, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> input shape (batch, seq_len, input_size).
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Zero initial hidden state, allocated on the same device as the
        # input. FIX: the original read the module-level `device` global,
        # which coupled the model to script state and broke mixed-device use.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                         device=x.device)

        out, hidden = self.rnn(x, h0)

        # Classify from the hidden state of the final time step.
        out = self.fc(out[:, -1, :])
        return out
54 |
model = Model(28, 128, 2, 10).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

epochs = 5

# Training
for epoch in range(epochs):
    model.train()
    avg_loss = 0
    avg_acc = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        total = 0
        correct = 0
        for i, (batch_img, batch_lab) in enumerate(train_loader):
            # Drop only the channel dim: B, 1, 28, 28 -> B, 28, 28.
            # BUG FIX: squeeze(1) instead of bare squeeze(), which would
            # also drop the batch dim when the last batch has size 1.
            X = batch_img.squeeze(1).to(device)
            Y = batch_lab.to(device)

            optimizer.zero_grad()

            y_pred = model(X)

            loss = criterion(y_pred, Y)

            loss.backward()
            optimizer.step()
            avg_loss += loss.item()

            # Running training accuracy over the epoch so far.
            _, predicted = torch.max(y_pred.data, 1)
            total += Y.size(0)
            correct += (predicted == Y).sum().item()

            t.set_postfix({"loss": f"{loss.item():05.3f}"})
            t.update()
        acc = (100 * correct / total)

    # Validation pass: eval mode and no gradient bookkeeping.
    model.eval()
    with tqdm(total=len(val_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        with torch.no_grad():
            val_loss = 0
            total = 0
            correct = 0
            for i, (batch_img, batch_lab) in enumerate(val_loader):
                X = batch_img.squeeze(1).to(device)
                Y = batch_lab.to(device)
                y_pred = model(X)
                # FIX: accumulate a plain float instead of 0-dim tensors.
                val_loss += criterion(y_pred, Y).item()
                _, predicted = torch.max(y_pred.data, 1)
                total += Y.size(0)
                correct += (predicted == Y).sum().item()
                t.set_postfix({"val_loss": f"{val_loss/(i+1):05.3f}"})
                t.update()

    val_loss /= len(val_loader)
    val_acc = (100 * correct / total)

    print(f"Epoch : {epoch+1}, Loss : {(avg_loss/len(train_loader)):.3f}, Acc: {acc:.3f}, Val Loss : {val_loss:.3f}, Val Acc : {val_acc:.3f}\n")

print("Training Done !")
--------------------------------------------------------------------------------
/03_Advance/AutoEncoder/Vanilla/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import random
3 | from tqdm import tqdm
4 |
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn
9 | from torch import optim
10 | from torch.utils.data import DataLoader
11 |
12 | from torchvision import datasets
13 | from torchvision import transforms
14 |
15 | from matplotlib import pyplot as plt
16 |
# Device Configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set randomness
# Fix every RNG (Python, NumPy, Torch CPU/GPU) for reproducible runs.
seed = 777
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed) # if use multi-GPU
    # Deterministic cuDNN kernels; disables auto-tuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Set hyperparameter
epochs= 10
batch_size= 256

# MNIST dataset
# ToTensor() scales pixels to [0, 1] and yields (1, 28, 28) float tensors.
mnist_train = datasets.MNIST(root='../../../data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root='../../../data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Test Data Done ! ")

train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=2)
51 |
52 | # Defining Model
class BuildAE(nn.Module):
    """Fully-connected autoencoder: input -> 64 -> 16 -> 64 -> input.

    The decoder ends in a sigmoid so reconstructions live in [0, 1],
    matching ToTensor()-scaled MNIST pixels.
    """

    def __init__(self, input_features=784):
        super(BuildAE, self).__init__()

        self.encoder = nn.Sequential(
            nn.Linear(input_features, 64),
            nn.ReLU(),
            nn.Linear(64, 16),
            nn.ReLU()
        )

        self.decoder = nn.Sequential(
            nn.Linear(16, 64),
            nn.ReLU(),
            nn.Linear(64, input_features),
            nn.Sigmoid()
        )

        # BUG FIX: apply() walks every submodule. The original called
        # init_weights(self.encoder) directly; a Sequential is never an
        # nn.Linear, so the custom initialization silently never ran.
        self.encoder.apply(self.init_weights)
        self.decoder.apply(self.init_weights)

    def init_weights(self, m):
        """Xavier-uniform weights and small constant bias for Linear layers."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, x):
        """Encode then decode; returns the reconstruction of x."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
83 |
model = BuildAE(input_features=784).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = 0.0001)

for epoch in range(epochs):
    model.train()
    avg_loss = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        for i, (batch_img, batch_lab) in enumerate(train_loader):

            # Flatten to (B, 784); labels are unused -- reconstruction task.
            X = batch_img.to(device).view(batch_img.shape[0], -1)

            optimizer.zero_grad()
            y_pred = model.forward(X)
            # Reconstruction loss against the input itself.
            loss = criterion(y_pred, X)

            loss.backward()
            optimizer.step()
            avg_loss += loss.item()

            t.set_postfix({"loss": f"{loss.item():05.3f}"})
            t.update()

    # Validation pass: eval mode and no gradient bookkeeping.
    model.eval()
    with tqdm(total=len(val_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        with torch.no_grad():
            val_loss = 0
            for i, (batch_img, batch_lab) in enumerate(val_loader):

                X = batch_img.to(device).view(batch_img.shape[0], -1)

                y_pred = model(X)
                # Accumulates 0-dim tensors; .item() is taken when printing.
                val_loss += criterion(y_pred, X)
                t.set_postfix({"val_loss": f"{val_loss.item()/(i+1):05.3f}"})
                t.update()

    # Average validation loss over batches.
    val_loss /= len(val_loader)

    print(f"Epoch : {epoch+1}, Loss : {(avg_loss/len(train_loader)):.3f}, Val Loss : {val_loss.item():.3f}")

print("Training Done !")
--------------------------------------------------------------------------------
/03_Advance/AutoEncoder/CAE/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import random
3 | from tqdm import tqdm
4 |
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn
9 | from torch import optim
10 | from torch.utils.data import DataLoader
11 |
12 | from torchvision import datasets
13 | from torchvision import transforms
14 |
15 | from matplotlib import pyplot as plt
16 |
# Device Configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set randomness
# Fix every RNG (Python, NumPy, Torch CPU/GPU) for reproducible runs.
seed = 777
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed) # if use multi-GPU
    # Deterministic cuDNN kernels; disables auto-tuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Set hyperparameter
epochs= 10
batch_size= 256

# MNIST dataset
# ToTensor() scales pixels to [0, 1] and yields (1, 28, 28) float tensors.
mnist_train = datasets.MNIST(root='../../../data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root='../../../data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Test Data Done ! ")

train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=2)
51 |
52 | # Defining Model
class BuildCAE(nn.Module):
    """Convolutional autoencoder for 1-channel 28x28 images.

    The encoder halves the spatial size twice (28 -> 14 -> 7); the decoder's
    stride-2 transposed convolutions restore it (7 -> 14 -> 28). A sigmoid
    keeps reconstructions in [0, 1], matching ToTensor()-scaled pixels.
    """

    def __init__(self, input_features=1):
        super(BuildCAE, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(input_features, 16, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 64, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )

        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(64, 16, 4, 2, 1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, input_features, 4, 2, 1),
            nn.Sigmoid()
        )

        # BUG FIX: apply() visits every submodule. The original passed the
        # Sequential itself to init_weights (never an nn.Linear), so no
        # layer was ever initialized. The type check now also covers the
        # conv layers this model actually contains.
        self.encoder.apply(self.init_weights)
        self.decoder.apply(self.init_weights)

    def init_weights(self, m):
        """Xavier-uniform weights and small constant bias for conv/linear layers."""
        if isinstance(m, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, x):
        """Encode then decode; returns the reconstruction of x."""
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded
85 |
model = BuildCAE(input_features=1).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = 0.0001)

for epoch in range(epochs):
    model.train()
    avg_loss = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        for i, (batch_img, batch_lab) in enumerate(train_loader):

            # Keep the (B, 1, 28, 28) shape; labels are unused -- reconstruction task.
            X = batch_img.to(device)

            optimizer.zero_grad()
            y_pred = model.forward(X)
            # Reconstruction loss against the input itself.
            loss = criterion(y_pred, X)

            loss.backward()
            optimizer.step()
            avg_loss += loss.item()

            t.set_postfix({"loss": f"{loss.item():05.3f}"})
            t.update()

    # Validation pass: eval mode and no gradient bookkeeping.
    model.eval()
    with tqdm(total=len(val_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        with torch.no_grad():
            val_loss = 0
            for i, (batch_img, batch_lab) in enumerate(val_loader):

                X = batch_img.to(device)

                y_pred = model(X)
                # Accumulates 0-dim tensors; .item() is taken when printing.
                val_loss += criterion(y_pred, X)
                t.set_postfix({"val_loss": f"{val_loss.item()/(i+1):05.3f}"})
                t.update()

    # Average validation loss over batches.
    val_loss /= len(val_loader)

    print(f"Epoch : {epoch+1}, Loss : {(avg_loss/len(train_loader)):.3f}, Val Loss : {val_loss.item():.3f}")

print("Training Done !")
--------------------------------------------------------------------------------
/02_Intermediate/Simple_Convolutional_Neural_Network/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import numpy as np
3 | from tqdm import tqdm
4 |
5 | import torch
6 | from torch import nn
7 | from torch import optim
8 | from torch.utils.data import DataLoader
9 |
10 | from torchvision import datasets
11 | from torchvision import transforms
12 | import numpy as np
13 | from matplotlib import pyplot as plt
14 |
# Device Configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# MNIST dataset
# ToTensor() scales pixels to [0, 1] and yields (1, 28, 28) float tensors.
mnist_train = datasets.MNIST(root='../../data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
print("Downloading Train Data Done ! ")

mnist_test = datasets.MNIST(root='../../data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Test Data Done ! ")

batch_size = 256

# Shuffle only the training split; the test split doubles as validation data.
train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=2)
35 |
36 | # Defining Model
class Model(nn.Module):
    """Two-block CNN classifier for 28x28 single-channel images.

    Each block is Conv(3x3, pad 1) -> BatchNorm -> ReLU -> MaxPool(2), so
    the spatial size shrinks 28 -> 14 -> 7 before the linear classifier.
    """

    def __init__(self):
        super(Model, self).__init__()
        # Block 1: 1 -> 16 channels, spatial 28 -> 14.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        # Block 2: 16 -> 32 channels, spatial 14 -> 7.
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, 3, 1, 1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, 2))
        # Flattened 7x7x32 feature map -> 10 class logits.
        self.fc = nn.Linear(7*7*32, 10)

    def forward(self, X):
        features = self.layer2(self.layer1(X))
        flat = features.reshape(features.size(0), -1)
        return self.fc(flat)
58 |
model = Model().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

epochs = 5

# Training
for epoch in range(epochs):
    model.train()
    avg_loss = 0
    avg_acc = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        total = 0
        correct = 0
        for i, (batch_img, batch_lab) in enumerate(train_loader):
            X = batch_img.to(device)
            Y = batch_lab.to(device)

            optimizer.zero_grad()

            y_pred = model.forward(X)

            # CrossEntropyLoss takes raw logits and integer class labels.
            loss = criterion(y_pred, Y)

            loss.backward()
            optimizer.step()
            avg_loss += loss.item()

            # Running training accuracy over the epoch so far.
            _, predicted = torch.max(y_pred.data, 1)
            total += Y.size(0)
            correct += (predicted == Y).sum().item()

            t.set_postfix({"loss": f"{loss.item():05.3f}"})
            t.update()
        acc = (100 * correct / total)

    # Validation: eval mode (BatchNorm uses running stats) and no gradients.
    model.eval()
    with tqdm(total=len(val_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        with torch.no_grad():
            val_loss = 0
            total = 0
            correct = 0
            for i, (batch_img, batch_lab) in enumerate(val_loader):
                X = batch_img.to(device)
                Y = batch_lab.to(device)
                y_pred = model(X)
                # Accumulates 0-dim tensors; .item() is taken when printing.
                val_loss += criterion(y_pred, Y)
                _, predicted = torch.max(y_pred.data, 1)
                total += Y.size(0)
                correct += (predicted == Y).sum().item()
                t.set_postfix({"val_loss": f"{val_loss.item()/(i+1):05.3f}"})
                t.update()

    val_loss /= len(val_loader)
    val_acc = (100 * correct / total)

    print(f"Epoch : {epoch+1}, Loss : {(avg_loss/len(train_loader)):.3f}, Acc: {acc:.3f}, Val Loss : {val_loss.item():.3f}, Val Acc : {val_acc:.3f}\n")

print("Training Done !")
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/SubPixel/TensorFlow/main.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os
3 | import numpy as np
4 | import tensorflow as tf
5 | from matplotlib import pyplot as plt
6 | from tensorflow import io as tfi
7 | from tensorflow import image as tfimg
8 | from tensorflow.keras import models, layers, losses, metrics, optimizers, callbacks
9 | from tensorflow.keras.preprocessing.image import load_img
10 | from tensorflow.keras.preprocessing import image_dataset_from_directory
11 |
12 | from model import *
13 |
# Pin CUDA device enumeration to PCI bus order and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"

# For Efficiency
# Enable memory growth so TF allocates GPU memory on demand instead of
# grabbing it all at startup.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before the GPUs have been initialized.
        print(e)
27 | # %%
# Data Loader
ROOT = "../../datasets"
data = "BSR/BSDS500/data/images/"
root_dir = os.path.join(ROOT, "BSR/BSDS500/data/images")
# NOTE(review): train_path/val_path are built but never used below; the
# split is done via validation_split instead.
train_path = os.path.join(ROOT, data, "train")
val_path = os.path.join(ROOT, data, "val")
# Images are loaded at the high-resolution size (input_size*scale); the
# low-res network input is produced later in process_input.
input_size = 64
scale = 2
epochs = 100
batch_size = 8

# 80/20 train/validation split of the same directory; the shared seed keeps
# the two subsets disjoint. label_mode=None yields images without labels.
train_ds = image_dataset_from_directory(
    root_dir,
    batch_size=batch_size,
    image_size=(input_size*scale, input_size*scale),
    validation_split=0.2,
    subset="training",
    seed=42,
    label_mode=None,
)

val_ds = image_dataset_from_directory(
    root_dir,
    batch_size=batch_size,
    image_size=(input_size*scale, input_size*scale),
    validation_split=0.2,
    subset="validation",
    seed=42,
    label_mode=None,
)
58 |
def process_input(image, input_size):
    """Build a (low-res input, full-res label) pair from an RGB batch.

    Only the luminance (Y) channel of YUV is super-resolved; U and V are
    discarded.
    """
    # Normalize to [0, 1] and convert to YUV color space.
    yuv = tf.image.rgb_to_yuv(image / 255.0)
    channel_axis = len(yuv.shape) - 1
    y, u, v = tf.split(yuv, 3, axis=channel_axis)
    # The full-resolution luminance is the regression target.
    label = y
    lowres = tf.image.resize(y, [input_size, input_size], method="area")
    return lowres, label
67 |
# Map to (low-res, label) pairs and prefetch to overlap I/O with training.
train_ds = train_ds.map(lambda x: process_input(x, input_size)).prefetch(tf.data.experimental.AUTOTUNE)
val_ds = val_ds.map(lambda x: process_input(x, input_size)).prefetch(tf.data.experimental.AUTOTUNE)
75 |
76 | # %%
class Metric():
    """Image-quality metrics (PSNR/SSIM) bound to a fixed dynamic range."""

    def __init__(self, max_val):
        # Maximum possible pixel value (1 for data scaled to [0, 1]).
        self.max_val = max_val

    def psnr(self, y_true, y_pred):
        """Mean PSNR over the batch."""
        per_image = tfimg.psnr(y_true, y_pred, max_val=self.max_val)
        return tf.reduce_mean(per_image)

    def ssim(self, y_true, y_pred):
        """Mean SSIM over the batch."""
        per_image = tfimg.ssim(y_true, y_pred, max_val=self.max_val)
        return tf.reduce_mean(per_image)
86 |
87 | # Define plot callback
class PlotCallback(callbacks.Callback):
    """Every 5 epochs, plot input / prediction / target for the first image
    of one fixed training batch and one fixed validation batch."""

    def __init__(self):
        super(PlotCallback, self).__init__()
        # Cache one batch per split so the panels are comparable across epochs.
        for batch in train_ds.take(1):
            self.train_img, self.train_lab = batch[0], batch[1]
        for batch in val_ds.take(1):
            self.test_img, self.test_lab = batch[0], batch[1]

    def on_epoch_end(self, epoch, logs=None):
        if (epoch+1) % 5 != 0:
            return
        # Top row: training sample; bottom row: validation sample.
        panels = [
            (231, self.train_img), (232, self.model(self.train_img)), (233, self.train_lab),
            (234, self.test_img), (235, self.model(self.test_img)), (236, self.test_lab),
        ]
        for pos, img in panels:
            plt.subplot(pos)
            plt.imshow(img[0, ..., 0], cmap='gray')
        plt.show()
113 |
114 |
# %%

# Build model
# SubPixel comes from model.py via the star import; upscale_factor controls
# the depth-to-space upsampling ratio.
model = SubPixel(upscale_factor=scale)

# max_val=1 in the metrics because inputs were scaled to [0, 1].
model.compile(loss = losses.MeanSquaredError(),
              optimizer = optimizers.Adam(learning_rate=0.001),
              metrics=[Metric(1).psnr, Metric(1).ssim])
# %%
model.fit(train_ds, epochs=epochs, validation_data=val_ds, callbacks = [PlotCallback()], verbose=2)
125 |
126 |
127 | # %%
128 |
--------------------------------------------------------------------------------
/04_Extra/Super_Resolution/SRCNN/TensorFlow/main.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os
3 | import numpy as np
4 | import tensorflow as tf
5 | from matplotlib import pyplot as plt
6 | from tensorflow import io as tfi
7 | from tensorflow import image as tfimg
8 | from tensorflow.keras import models, layers, losses, metrics, optimizers, callbacks
9 | from tensorflow.keras.preprocessing.image import load_img
10 | from tensorflow.keras.preprocessing import image_dataset_from_directory
11 |
12 | from model import *
13 |
# Pin CUDA device enumeration to PCI bus order and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"

# For Efficiency
# Enable memory growth so TF allocates GPU memory on demand instead of
# grabbing it all at startup.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before the GPUs have been initialized.
        print(e)
27 |
# %%
# Data Loader
ROOT = "../../datasets"
data = "BSR/BSDS500/data/images/"
root_dir = os.path.join(ROOT, "BSR/BSDS500/data/images")
# NOTE(review): train_path/val_path are built but never used below; the
# split is done via validation_split instead.
train_path = os.path.join(ROOT, data, "train")
val_path = os.path.join(ROOT, data, "val")
input_size = 132
scale = 3
batch_size = 32

# 80/20 train/validation split of the same directory; the shared seed keeps
# the two subsets disjoint. label_mode=None yields images without labels.
train_ds = image_dataset_from_directory(
    root_dir,
    batch_size=batch_size,
    image_size=(input_size, input_size),
    validation_split=0.2,
    subset="training",
    seed=42,
    label_mode=None,
)

val_ds = image_dataset_from_directory(
    root_dir,
    batch_size=batch_size,
    image_size=(input_size, input_size),
    validation_split=0.2,
    subset="validation",
    seed=42,
    label_mode=None,
)
58 |
def process_input(image, input_size, scale):
    """Build an SRCNN training pair from an RGB batch.

    Returns (degraded full-size Y channel, center-cropped full-res Y label).
    The label loses a 6-pixel border, matching the output shrinkage of the
    network's valid convolutions.
    """
    # Normalize to [0, 1] and convert to YUV; only luminance is restored.
    yuv = tf.image.rgb_to_yuv(image / 255.0)
    channel_axis = len(yuv.shape) - 1
    y, u, v = tf.split(yuv, 3, axis=channel_axis)
    label = y
    # Degrade: downscale by `scale`, then resize back to the original size.
    small = tf.image.resize(y, [input_size//scale, input_size//scale], method="area")
    degraded = tf.image.resize(small, [input_size, input_size], method="area")
    return degraded, label[:, 6:-6, 6:-6, :]
69 |
# Map to (degraded, label) pairs and prefetch to overlap I/O with training.
train_ds = train_ds.map(lambda x: process_input(x, input_size, scale)).prefetch(tf.data.experimental.AUTOTUNE)
val_ds = val_ds.map(lambda x: process_input(x, input_size, scale)).prefetch(tf.data.experimental.AUTOTUNE)
77 |
78 | # %%
# Define psnr, ssim for metrics
class Metric():
    """Image-quality metrics (PSNR/SSIM) bound to a fixed dynamic range."""

    def __init__(self, max_val):
        # Maximum possible pixel value (1 for data scaled to [0, 1]).
        self.max_val = max_val

    def psnr(self, y_true, y_pred):
        """Mean PSNR over the batch."""
        per_image = tfimg.psnr(y_true, y_pred, max_val=self.max_val)
        return tf.reduce_mean(per_image)

    def ssim(self, y_true, y_pred):
        """Mean SSIM over the batch."""
        per_image = tfimg.ssim(y_true, y_pred, max_val=self.max_val)
        return tf.reduce_mean(per_image)
89 |
90 | # Define plot callback
class PlotCallback(callbacks.Callback):
    """Every 5 epochs, plot input / prediction / target for the first image
    of one fixed training batch and one fixed validation batch."""

    def __init__(self):
        super(PlotCallback, self).__init__()
        # Cache one batch per split so the panels are comparable across epochs.
        for batch in train_ds.take(1):
            self.train_img, self.train_lab = batch[0], batch[1]
        for batch in val_ds.take(1):
            self.test_img, self.test_lab = batch[0], batch[1]

    def on_epoch_end(self, epoch, logs=None):
        if (epoch+1) % 5 != 0:
            return
        # Top row: training sample; bottom row: validation sample.
        panels = [
            (231, self.train_img), (232, self.model(self.train_img)), (233, self.train_lab),
            (234, self.test_img), (235, self.model(self.test_img)), (236, self.test_lab),
        ]
        for pos, img in panels:
            plt.subplot(pos)
            plt.imshow(img[0, ..., 0], cmap='gray')
        plt.show()
116 |
# Build model
# SRCNN comes from model.py via the star import; its valid convolutions
# shrink the output border, matching the 6-pixel-cropped labels.
model = SRCNN()

# max_val=1 in the metrics because inputs were scaled to [0, 1].
model.compile(loss = losses.MeanSquaredError(),
              optimizer = optimizers.Adam(learning_rate=0.0001),
              metrics=[Metric(1).psnr, Metric(1).ssim])
# %%
model.fit(train_ds, epochs=50, validation_data=val_ds, callbacks = [PlotCallback()])
125 |
126 | # %%
127 |
--------------------------------------------------------------------------------
/03_Advance/CNN/VGGNet/tf_keras.py:
--------------------------------------------------------------------------------
1 | #%%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare

# Download and extract the TF flowers dataset (5 classes, one folder each).
URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
path_to_zip = utils.get_file('flower_photos.tgz', origin=URL, extract=True)

PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')

# Each class lives in its own subdirectory; directory names are the labels.
category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 150
23 |
def read_img(path, img_size):
    """Load an image from `path` as RGB and resize it to (img_size, img_size).

    Raises:
        FileNotFoundError: if OpenCV cannot read the file. cv.imread returns
        None instead of raising, which would otherwise surface later as a
        cryptic cvtColor error.
    """
    img = cv.imread(path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {path}")
    # OpenCV loads BGR; convert to RGB for matplotlib/Keras conventions.
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img
29 |
imgs_tr = []
labs_tr = []

imgs_val = []
labs_val = []

# Per class: read every image, hold out the first ~5% for validation and
# use the rest for training.
for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    # Number of validation images for this class.
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")

    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    # The class index doubles as the integer label.
    labs = [i]*len(imgs_list)

    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]

    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]

# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)

imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)

print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)
62 |
63 | # %%
64 | # Build Networks
def build_vgg(input_shape=(None, None, 3), num_classes=1, num_layer=16, name='vgg'):
    """Build a VGG-style Sequential model.

    num_layer selects the variant (11/13/16/19); the output layer uses
    sigmoid when num_classes == 1, softmax otherwise.
    """
    blocks_dict = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4]
    }
    num_channel_list = [64, 128, 256, 512, 512]

    assert num_layer in blocks_dict.keys(), "Number of layer must be in %s"%blocks_dict.keys()

    last_act = 'sigmoid' if num_classes==1 else 'softmax'
    name = name+str(num_layer)

    net = models.Sequential(name=name)
    net.add(layers.Input(shape=input_shape, name=name+"_Input"))
    # One (convs x repeats + pool) stage per entry of the block spec.
    for block_no, (repeats, channels) in enumerate(zip(blocks_dict[num_layer], num_channel_list), start=1):
        for conv_no in range(1, repeats + 1):
            net.add(layers.Conv2D(channels, 3, strides=1, padding='same', activation='relu',
                                  name=name+"_Block_%d_Conv%d"%(block_no, conv_no)))
        net.add(layers.MaxPool2D(name=name+"_Block%d_Pool"%block_no))
    net.add(layers.GlobalAveragePooling2D(name=name+"_GAP"))
    net.add(layers.Dense(512, activation='relu', name=name+"_Dense_1"))
    net.add(layers.Dense(512, activation='relu', name=name+"_Dense_2"))
    net.add(layers.Dense(num_classes, activation=last_act, name=name+"_Output"))
    return net
92 |
# VGG-11 over the flower images loaded above.
num_layer = 11
input_shape = imgs_tr.shape[1:]

vgg = build_vgg(input_shape=input_shape, num_classes=num_classes, num_layer=num_layer, name='vgg')
vgg.summary()

# Binary vs categorical loss mirrors the sigmoid/softmax choice in build_vgg.
loss = 'binary_crossentropy' if num_classes==1 else 'categorical_crossentropy'
vgg.compile(optimizer=optimizers.Adam(), loss=loss, metrics=['accuracy'])


# %%
# Training Network
epochs=100
batch_size=16

# validation_data as a tuple (Keras expects a tuple, not a list).
history=vgg.fit(imgs_tr, labs_tr, epochs = epochs, batch_size=batch_size, validation_data=(imgs_val, labs_val))

plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.title("Loss graph")
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.subplot(122)
plt.title("Acc graph")
# compile() used metrics=['accuracy'], so the TF2 history keys are
# 'accuracy'/'val_accuracy' -- 'acc'/'val_acc' raises KeyError here.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.show()
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/cyclegan/pytorch/models.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch.nn import functional as F
4 |
5 |
class NormLayer(nn.Module):
    """Selectable 2-D normalization: 'BN' (BatchNorm), 'IN' (InstanceNorm)
    or 'None' (identity).

    Raises:
        ValueError: if norm_type is not one of 'BN', 'IN', 'None'.
    """

    def __init__(self, features, norm_type='IN'):
        super(NormLayer, self).__init__()

        if norm_type == "BN":
            self.norm = nn.BatchNorm2d(features, affine=True, track_running_stats=True)
        elif norm_type == "IN":
            # CycleGAN convention: InstanceNorm without affine params or running stats.
            self.norm = nn.InstanceNorm2d(features, affine=False, track_running_stats=False)
        elif norm_type == "None":
            self.norm = nn.Identity()
        else:
            # The original raised a placeholder-free f-string; report the bad value.
            raise ValueError(f"Unknown norm_type {norm_type!r}; expected one of ['BN', 'IN', 'None']")

    def forward(self, x):
        return self.norm(x)
22 |
class ResidualBlock(nn.Module):
    """Two reflection-padded 3x3 convs with a skip connection: out = x + F(x)."""

    def __init__(self, features, norm_type):
        super(ResidualBlock, self).__init__()
        self.block = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(features, features, kernel_size=3),
            NormLayer(features, norm_type),
            nn.ReLU(True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(features, features, kernel_size=3),
            NormLayer(features, norm_type),
        )

    def forward(self, x):
        return x + self.block(x)
41 |
class Generator(nn.Module):
    """ResNet-style CycleGAN generator.

    Pipeline: 7x7 stem -> n_downsampling stride-2 convs -> n_blocks residual
    blocks -> mirrored stride-2 transposed convs -> 7x7 output conv + tanh.
    """

    def __init__(self, in_channels, out_channels, features, norm_type, n_downsampling, n_blocks):
        super(Generator, self).__init__()

        modules = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels, features, kernel_size=7, padding=0),
            NormLayer(features, norm_type),
            nn.ReLU(True),
        ]

        # Downsampling: channel count doubles at every stride-2 conv.
        for step in range(n_downsampling):
            ch_in = features * (2 ** step)
            ch_out = ch_in * 2
            modules += [
                nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=2, padding=1),
                NormLayer(ch_out, norm_type),
                nn.ReLU(True),
            ]

        # Residual blocks at the bottleneck resolution.
        bottleneck = features * (2 ** n_downsampling)
        modules += [ResidualBlock(bottleneck, norm_type) for _ in range(n_blocks)]

        # Upsampling mirrors the downsampling path, halving channels per step.
        for step in range(n_downsampling):
            ch_in = features * (2 ** (n_downsampling - step))
            ch_out = ch_in // 2
            modules += [
                nn.ConvTranspose2d(ch_in, ch_out, kernel_size=3, stride=2,
                                   padding=1, output_padding=1),
                NormLayer(ch_out, norm_type),
                nn.ReLU(True),
            ]

        modules += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(features, out_channels, kernel_size=7, padding=0),
            nn.Tanh(),
        ]

        self.model = nn.Sequential(*modules)

    def forward(self, x):
        return self.model(x)
87 |
class Discriminator(nn.Module):
    """PatchGAN discriminator (C64-C128-C256-C512 style): a stack of
    stride-2 convs ending in a 1-channel patch score map."""

    def __init__(self, in_channels, features, norm_type, n_blocks):
        super(Discriminator, self).__init__()

        modules = [
            nn.Conv2d(in_channels, features, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
        ]

        prev = features
        for i in range(1, n_blocks):
            # First block doubles uncapped; later blocks cap at 512
            # (mirrors the original's update-after-use ordering).
            curr = features * 2 if i == 1 else min(features * (2 ** i), 512)
            modules += [
                nn.Conv2d(prev, curr, kernel_size=4, stride=2, padding=1),
                NormLayer(curr, norm_type),
                nn.LeakyReLU(0.2, True),
            ]
            prev = curr

        modules.append(nn.Conv2d(prev, 1, kernel_size=4, stride=1, padding=1))
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        return self.model(x)
--------------------------------------------------------------------------------
/03_Advance/CNN/VGGNet/tf_subclassing.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os
3 | import cv2 as cv
4 | import numpy as np
5 | import tensorflow as tf
6 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
7 |
# %%
# Download and extract the flower_photos dataset; each sub-directory is one class.
URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
path_to_zip = tf.keras.utils.get_file('flower_photos.tgz', origin=URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')

category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 150
EPOCHS = 500
BATCH_SIZE = 128
def read_img(path, img_size):
    """Load the image at *path* as RGB, resized to (img_size, img_size)."""
    img = cv.imread(path)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img

imgs_tr = []
labs_tr = []

imgs_val = []
labs_val = []

# Per-category split: first ~5% of the (unshuffled) listing goes to validation.
for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")
    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    labs = [i]*len(imgs_list)
    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]
    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]

# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)

imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)

print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)

train_ds = tf.data.Dataset.from_tensor_slices((imgs_tr, labs_tr)).shuffle(10000).batch(BATCH_SIZE)

test_ds = tf.data.Dataset.from_tensor_slices((imgs_val, labs_val)).shuffle(10000).batch(BATCH_SIZE)

print("Data Prepared!")
61 |
class Build_VGG(models.Model):
    """VGG (11/13/16/19) classifier via Keras Model subclassing.

    Wraps an internal Sequential; the output activation is sigmoid for
    num_classes == 1 and softmax otherwise.
    """

    def __init__(self, input_shape=(None, None, 3), num_classes=1, num_layer=16, name='vgg'):
        super(Build_VGG, self).__init__()

        blocks_dict = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }
        num_channel_list = [64, 128, 256, 512, 512]
        assert num_layer in blocks_dict.keys(), "Number of layer must be in %s"%blocks_dict.keys()

        last_act = 'sigmoid' if num_classes==1 else 'softmax'
        name = name+str(num_layer)

        body = models.Sequential(name=name)
        body.add(layers.Input(shape=input_shape, name=name+"_Input"))
        for block_no, (repeats, channels) in enumerate(zip(blocks_dict[num_layer], num_channel_list), start=1):
            for conv_no in range(1, repeats + 1):
                body.add(layers.Conv2D(channels, 3, strides=1, padding='same', activation='relu',
                                       name=name+"_Block_%d_Conv%d"%(block_no, conv_no)))
            body.add(layers.MaxPool2D(name=name+"_Block%d_Pool"%block_no))
        body.add(layers.GlobalAveragePooling2D(name=name+"_GAP"))
        body.add(layers.Dense(512, activation='relu', name=name+"_Dense_1"))
        body.add(layers.Dense(512, activation='relu', name=name+"_Dense_2"))
        body.add(layers.Dense(num_classes, activation=last_act, name=name+"_Output"))
        self.model = body

    def call(self, x):
        return self.model(x)
92 |
93 |
num_layer = 11
input_shape = imgs_tr.shape[1:]

model = Build_VGG(input_shape=input_shape, num_classes=num_classes, num_layer=num_layer, name='vgg')

loss_object = losses.BinaryCrossentropy() if num_classes==1 else losses.CategoricalCrossentropy()

optimizer = optimizers.Adam()

# Custom training loop. The epoch summary reports the MEAN loss over all
# batches; the original printed only the last batch's loss, which is noisy
# and misleading as an epoch metric.
for epoch in range(EPOCHS):
    epoch_loss = 0.0
    num_batches = 0
    for batch_x, batch_y in train_ds:
        with tf.GradientTape() as tape:
            predictions = model(batch_x, training=True)
            loss = loss_object(batch_y, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        epoch_loss += float(loss)  # float() also avoids formatting an EagerTensor directly
        num_batches += 1

    print("{:5}|{:10.6f}".format(epoch+1, epoch_loss / num_batches))
112 |
--------------------------------------------------------------------------------
/03_Advance/GAN/DCGAN/tf_keras.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare

# Labels are discarded -- GAN training is unsupervised.
(train_x, _), (test_x, _) = datasets.mnist.load_data()
# Scale pixel values to [0, 1].
train_x, test_x = train_x/255., test_x/255.

print("Train Data's Shape : ", train_x.shape)
print("Test Data's Shape : ", test_x.shape)
18 |
19 | # %%
20 | # Build Network
21 |
def Build_Generator(input_shape=(100, ), output_size=(28, 28), name="Generator"):
    """DCGAN generator: latent vector -> (H/4, W/4, 512) map -> two stride-2
    transposed convs -> single-channel image of `output_size`.

    The final activation is sigmoid so generated pixels lie in [0, 1],
    matching the real images (scaled by /255.) that the discriminator sees
    and the vmin=0/vmax=1 plotting below. (The original ended in tanh, whose
    [-1, 1] range never overlaps the real data's, making real/fake trivially
    separable and clipping half the plotted range.)
    """
    sub_size = (output_size[0]//4, output_size[1]//4)
    model = models.Sequential(name=name)
    model.add(layers.Dense(np.prod(sub_size)*512, input_shape=input_shape, name=name+"_Dense"))
    model.add(layers.BatchNormalization(name=name+"_BN_1"))
    model.add(layers.ReLU(name=name+"_Act_1"))
    model.add(layers.Reshape((sub_size[0], sub_size[1], 512), name=name+"_Reshape"))
    model.add(layers.Conv2DTranspose(256, 4, 2, padding='same', name=name+"_Upconv_1"))
    model.add(layers.BatchNormalization(name=name+"_BN_2"))
    model.add(layers.ReLU(name=name+"_Act_2"))
    model.add(layers.Conv2DTranspose(64, 4, 2, padding='same', name=name+"_Upconv_2"))
    model.add(layers.BatchNormalization(name=name+"_BN_3"))
    model.add(layers.ReLU(name=name+"_Act_3"))
    model.add(layers.Conv2D(1, 3, 1, padding='same', activation='sigmoid', name=name+"_Upconv_3"))
    model.add(layers.Reshape(output_size, name=name+"_Output"))
    return model
39 |
def Build_Discriminator(input_shape=(28,28), name="Discriminator"):
    """DCGAN discriminator: image -> two stride-2 conv blocks -> sigmoid real/fake score."""
    conv_specs = [(128, "_Conv_1", "_BN_1", "_Act_1"),
                  (64, "_Conv_2", "_BN_2", "_Act_2")]

    model = models.Sequential(name=name)
    model.add(layers.Reshape((input_shape[0], input_shape[1], 1), input_shape=input_shape, name=name+"_Reshape"))
    for filters, conv_tag, bn_tag, act_tag in conv_specs:
        model.add(layers.Conv2D(filters, 4, 2, padding='same', name=name+conv_tag))
        model.add(layers.BatchNormalization(name=name+bn_tag))
        model.add(layers.LeakyReLU(0.01, name=name+act_tag))
    model.add(layers.Flatten(name=name+"_Flatten"))
    model.add(layers.Dense(1, activation='sigmoid', name=name+"_Output"))

    return model
54 |
55 |
n_latent = 100
input_shape = (n_latent, )
img_size = train_x.shape[1:]

# D is compiled first, then frozen: compile() snapshots trainability, so D
# still updates via D.train_on_batch but stays fixed inside the combined model A.
D = Build_Discriminator(input_shape=img_size, name="Discriminator")
D.compile(optimizer=optimizers.RMSprop(), loss=losses.binary_crossentropy, metrics=['acc'])
D.trainable = False

G = Build_Generator(input_shape=input_shape, output_size=img_size, name="Generator")

# A = D(G(z)): trains G so that D scores generated images as real.
A = models.Model(inputs=G.input, outputs=D(G.output), name="GAN")
A.compile(optimizer=optimizers.RMSprop(), loss=losses.binary_crossentropy)

D.summary()

G.summary()

A.summary()
74 |
# %%
# Training Network
epochs=100
batch_size=100

fake_label = np.zeros((batch_size, 1))
real_label = np.ones((batch_size, 1))

for epoch in range(epochs):

    G_loss_epoch = 0

    D_loss_epoch = 0

    D_acc_epoch = 0

    # Random permutation of the training set for this epoch.
    shuffle_idx = np.random.choice(len(train_x), len(train_x), replace=False)

    for i, idx in enumerate(range(0, len(shuffle_idx), batch_size)):

        # Train D on a half-fake / half-real batch.
        latent = np.random.randn(batch_size, n_latent)
        fake_x = G.predict(latent)

        # BUG FIX: index through shuffle_idx. The original sliced train_x
        # directly, so the permutation was computed but never used and every
        # epoch saw the same ordered batches.
        real_x = train_x[shuffle_idx[idx:idx+batch_size]]

        D_loss, D_acc = D.train_on_batch(np.concatenate((fake_x, real_x), axis=0),
                                         np.concatenate((fake_label, real_label), axis=0))
        D_loss_epoch += D_loss
        D_acc_epoch += D_acc

        # Train G (through the frozen D) to have fresh fakes labeled real.
        latent = np.random.randn(batch_size, n_latent)

        G_loss = A.train_on_batch(latent, real_label)

        G_loss_epoch += G_loss

    print(f"{epoch+1}/{epochs}, G loss : {G_loss_epoch/(i+1)}, D loss : {D_loss_epoch/(i+1)}, D acc : {D_acc_epoch/(i+1)}")

    # BUG FIX: sample from the SAME latent distribution used during training.
    # The original drew from np.random.normal(-1, 1, ...) -- i.e. N(mean=-1),
    # a distribution the generator was never trained on.
    latent = np.random.randn(20, n_latent)
    fake_x = G.predict(latent)

    plt.figure(figsize=(10, 3))
    for i in range(10):
        plt.subplot(2, 10, i+1)
        plt.imshow(fake_x[2*i], cmap='gray', vmin=0, vmax=1)
        plt.axis('off')
        plt.subplot(2, 10, i+1+10)
        plt.imshow(fake_x[2*i+1], cmap='gray', vmin=0, vmax=1)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
--------------------------------------------------------------------------------
/03_Advance/CNN/VGGNet/ver_mlx.py:
--------------------------------------------------------------------------------
1 | from tqdm import tqdm
2 |
3 | import numpy as np
4 |
5 | from mlx import nn
6 | from mlx import core as mx
7 | from mlx import optimizers as optim
8 |
9 | from mlx.data import datasets
10 |
11 | np.random.seed(777)
12 | mx.random.seed(777)
13 |
14 |
EPOCHS = 5
BATCH_SIZE = 16
IMG_SIZE = 192
LEARNING_RATE = 1e-4

train_dataset = datasets.load_images_from_folder("../../../data/flower_photos/train")
val_dataset = datasets.load_images_from_folder("../../../data/flower_photos/validation")

# ImageNet channel statistics, shaped to broadcast over (H, W, C) images.
mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))

def normalize(x):
    """Standardize an HWC image with ImageNet mean/std.

    NOTE(review): assumes pixel values already in [0, 1]; if the loader
    yields uint8 [0, 255], this is off by a factor of 255 -- confirm.
    """
    return (x - mean) / std

# Streamed pipelines: resize -> normalize -> batch (training also shuffles).
train_loader = (
    train_dataset.shuffle()
    .to_stream()
    .image_resize("image", IMG_SIZE, IMG_SIZE)
    .key_transform("image", normalize)
    .batch(BATCH_SIZE)
)
val_loader = (
    val_dataset
    .to_stream()
    .image_resize("image", IMG_SIZE, IMG_SIZE)
    .key_transform("image", normalize)
    .batch(BATCH_SIZE)
)

# Number of batches per epoch (streams do not expose len()).
len_train_loader = int(np.ceil(len(train_dataset)/BATCH_SIZE))
len_val_loader = int(np.ceil(len(val_dataset)/BATCH_SIZE))
46 |
# Defining Model
class Model(nn.Module):
    """VGG-style classifier (MLX): conv blocks -> global average pool -> linear head."""

    def __init__(self, input_channel= 3, num_classes=1000, num_layer=16):
        super().__init__()

        blocks_dict = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }

        num_channel_list = [64, 128, 256, 512, 512]

        assert num_layer in blocks_dict.keys(), "Number of layer must be in %s"%blocks_dict.keys()

        layer_list = []

        input_features = input_channel
        for idx, num_iter in enumerate(blocks_dict[num_layer]):
            for _ in range(num_iter):
                layer_list.append(nn.Conv2d(input_features, num_channel_list[idx], 3, padding=1))
                layer_list.append(nn.ReLU())
                input_features = num_channel_list[idx]
            layer_list.append(nn.MaxPool2d(2, 2))

        self.vgg = nn.Sequential(*layer_list)
        self.classifier = nn.Linear(512, num_classes)

        self.init_weights()

    def init_weights(self):
        """He-normal weights and zero biases for all conv/linear layers.

        BUG FIX: mlx.nn.init functions are factories -- he_normal() returns
        an initializer, and the initializer returns a NEW array that must be
        assigned back. The original called nn.init.he_normal(m.weight) and
        discarded the result, so no parameter was actually re-initialized.
        """
        he_init = nn.init.he_normal()
        zero_init = nn.init.constant(0.0)
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                m.weight = he_init(m.weight)
                if m.bias is not None:
                    m.bias = zero_init(m.bias)

    def __call__(self, x):
        x = self.vgg(x)
        # NHWC layout: average over the spatial axes, keeping (N, C).
        x = mx.mean(x, axis=[1, 2]).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x
93 |
def loss_fn(model, x, y):
    """Mean cross-entropy between model(x) logits and targets y."""
    logits = model(x)
    return mx.mean(nn.losses.cross_entropy(logits, y))
96 |
def eval_fn(model, x, y):
    """Return (mean cross-entropy loss, accuracy) for one batch.

    Runs the forward pass once and reuses the logits for both values;
    the original called model(x) a second time to compute the accuracy.
    """
    output = model(x)
    loss = mx.mean(nn.losses.cross_entropy(output, y))
    metric = mx.mean(mx.argmax(output, axis=1) == y)
    return loss, metric
102 |
model = Model(input_channel=3, num_classes=5, num_layer=11)
# Materialize lazily-created parameters before training.
mx.eval(model.parameters())

# Transform computing both the loss and its gradients w.r.t. model parameters.
loss_and_grad_fn = nn.value_and_grad(model, loss_fn)
optimizer = optim.Adam(learning_rate=LEARNING_RATE)
# %%
for epoch in range(EPOCHS):
    # --- training pass ---
    train_loss = 0
    with tqdm(enumerate(train_loader), total=len_train_loader) as pbar:
        pbar.set_description(f"{epoch+1}/{EPOCHS}")
        for i, (batch) in pbar:
            batch_x = mx.array(batch["image"])
            batch_y = mx.array(batch["label"])
            batch_loss, batch_grads = loss_and_grad_fn(model, batch_x, batch_y)
            optimizer.update(model, batch_grads)
            # Force evaluation so the lazy graph does not grow across steps.
            mx.eval(model.parameters(), optimizer.state)
            train_loss += batch_loss.item()
            pbar.set_postfix(loss=f"{train_loss/(i+1):.3f}")
    # --- validation pass ---
    val_loss = 0
    val_acc = 0
    with tqdm(enumerate(val_loader), total=len_val_loader) as pbar:
        pbar.set_description(f"{epoch+1}/{EPOCHS}")
        for i, (batch) in pbar:
            batch_x = mx.array(batch["image"])
            batch_y = mx.array(batch["label"])
            batch_loss, batch_accuracy = eval_fn(model, batch_x, batch_y)
            val_loss += batch_loss.item()
            val_acc += batch_accuracy.item()
            pbar.set_postfix(val_loss=f"{val_loss/(i+1):.3f}")
        val_acc /= len_val_loader
        pbar.set_postfix(val_loss=f"{val_loss/(i+1):.3f}", val_acc=f"{val_acc:.3f}")
    print(f"{epoch+1}/{EPOCHS}: Train Loss: {train_loss/len_train_loader:.3f}, Val Loss: {val_loss/(i+1):.3f}, Val Accuracy: {val_acc:.3f}\n")

    # Streams are exhausted after one pass; rewind them for the next epoch.
    train_loader.reset()
    val_loader.reset()
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/cyclegan/tf_keras/models.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.keras import layers, models
3 | import tensorflow_addons as tfa
4 |
def norm_layer(mode="BN", name="Norm"):
    """Return a callable normalization layer: BatchNorm ('BN'), InstanceNorm
    ('IN'), LayerNorm ('LN'), or an identity function for anything else.

    BUG FIX: the fallback used to be `lambda: lambda x: x` -- a zero-argument
    lambda. Every caller in this file invokes the result as
    `norm_layer(...)(tensor)`, which raised TypeError. It is now a plain
    identity `lambda x: x`.
    """
    if mode == "BN":
        layer = layers.BatchNormalization(name=name+"_BN")
    elif mode == "IN":
        layer = tfa.layers.InstanceNormalization(name=name+"_IN")
    elif mode == "LN":
        layer = layers.LayerNormalization(name=name+"_LN")
    else :
        layer = lambda x: x
    return layer
15 |
def residual_block(x, filters=32, padding_type='reflect', norm_type="BN", use_dropout=True, use_bias=True, name="RB"):
    """Residual block: two (pad -> 3x3 conv -> norm) stages, ReLU between
    them, added back onto the input."""
    pad_spec = [[0, 0], [1, 1], [1, 1], [0, 0]]

    branch = tf.pad(x, pad_spec, mode=padding_type, name=name+"_Pad_1")
    branch = layers.Conv2D(filters, 3, padding='valid', use_bias=False, name=name+"_Conv_1")(branch)
    branch = norm_layer(mode = norm_type, name=name+"_Norm_1")(branch)
    branch = layers.ReLU(name=name+"_Act_1")(branch)

    branch = tf.pad(branch, pad_spec, mode=padding_type, name=name+"_Pad_2")
    branch = layers.Conv2D(filters, 3, padding='valid', use_bias=False, name=name+"_Conv_2")(branch)
    branch = norm_layer(mode = norm_type, name=name+"_Norm_2")(branch)

    return layers.Add(name=name+"_Add")([x, branch])
27 |
def ResnetGenerator(input_size=256, input_nc=3, output_nc=3, ngf=64, norm_type="BN", use_dropout=False, n_blocks=6, padding_type='reflect', name="Generator"):
    """ResNet-based generator: 7x7 stem, 2 stride-2 downsampling convs,
    n_blocks residual blocks, 2 stride-2 transposed convs, 7x7 tanh output.

    NOTE(review): use_dropout is currently unused (kept for interface
    compatibility); residual_block also ignores its own use_dropout flag.
    """
    # To do
    # [ ] make unetgenerator
    input_layer = layers.Input(shape=(input_size, input_size, input_nc), name=name+"_Input")

    out = tf.pad(input_layer, [[0, 0], [3, 3], [3, 3], [0, 0]], mode=padding_type, name=name+"_Pad_1")
    out = layers.Conv2D(ngf, 7, padding='valid', use_bias=False, name=name+"_Conv_1")(out)
    out = norm_layer(mode = norm_type, name=name+"_Norm_1")(out)
    out = layers.ReLU(name=name+"_Act_1")(out)

    n_downsampling = 2

    for i in range(n_downsampling):
        ngf *= 2
        out = layers.Conv2D(ngf, 3, strides=2, padding='same', use_bias=False, name=name+f"_Down_Conv_{i+2}")(out)
        out = norm_layer(mode = norm_type, name=name+f"_Down_Norm_{i+2}")(out)
        out = layers.ReLU(name=name+f"_Down_Act_{i+2}")(out)

    for i in range(n_blocks):
        # BUG FIX: propagate norm/padding settings. The original left
        # residual_block on its defaults, so a generator built with
        # norm_type="IN" silently got BatchNorm residual blocks.
        out = residual_block(out, filters=ngf, padding_type=padding_type, norm_type=norm_type, name=name+f"_RB_{i+1}")

    for i in range(n_downsampling):
        ngf //= 2
        out = layers.Conv2DTranspose(ngf, 3, strides=2, padding='same', use_bias=False, name=name+f"_Up_Conv_{i+1}")(out)
        out = norm_layer(mode = norm_type, name=name+f"_Up_Norm_{i+1}")(out)
        out = layers.ReLU(name=name+f"_Up_Act_{i+1}")(out)

    # Use the configured padding mode; tf.pad modes are case-insensitive, so
    # this matches the previously hard-coded 'REFLECT' for the default config.
    out = tf.pad(out, [[0, 0], [3, 3], [3, 3], [0, 0]], mode=padding_type, name=name+"_Out_Pad")
    out = layers.Conv2D(output_nc, 7, padding='valid', name=name+"_Out_Conv")(out)
    out = layers.Activation('tanh', name=name+"_Output")(out)

    return models.Model(inputs=input_layer, outputs=out, name=name)
60 |
def NLayerDiscriminator(input_size = 256, input_channel = 3, ndf=64, n_layers=3, norm_type="BN", name="Discriminator"):
    """PatchGAN discriminator: n_layers stride-2 blocks with the filter
    multiplier doubling per block (capped at 8), then a stride-1 block and a
    1-channel patch-score conv."""
    kernel = 4

    input_layer = layers.Input(shape=(input_size, input_size, input_channel), name=name+"_Input")

    out = layers.Conv2D(ndf, kernel_size=kernel, strides=2, padding='same', name=name+"_Conv_1")(input_layer)
    out = layers.LeakyReLU(0.2, name=name+"_Act_1")(out)

    for i in range(1, n_layers):  # gradually increase the number of filters
        mult = min(2 ** i, 8)
        out = layers.Conv2D(ndf * mult, kernel_size=kernel, strides=2, padding='same', name=name+f"_Conv_{i+1}")(out)
        out = norm_layer(mode=norm_type, name=name+f"_Norm_{i+1}")(out)
        out = layers.LeakyReLU(0.2, name=name+f"_Act_{i+1}")(out)

    mult = min(2 ** n_layers, 8)
    out = layers.Conv2D(ndf * mult, kernel_size=kernel, strides=1, padding='same', name=name+f"_Conv_{n_layers+1}")(out)
    out = norm_layer(mode=norm_type, name=name+f"_Norm_{n_layers+1}")(out)
    out = layers.LeakyReLU(0.2, name=name+f"_Act_{n_layers+1}")(out)

    out = layers.Conv2D(1, kernel_size=kernel, strides=1, padding='same', name=name+"_Output")(out)
    return models.Model(inputs=input_layer, outputs=out, name=name)
84 |
def PixelDiscriminator(input_size = 256, input_channel = 3, ndf=64, norm_type="BN", name="Discriminator"):
    """1x1-conv ('pixel') discriminator: every conv has kernel size 1, so
    each output pixel is classified from its input pixel alone."""
    input_layer = layers.Input(shape=(input_size, input_size, input_channel), name=name+"_Input")

    out = layers.Conv2D(ndf, kernel_size=1, strides=1, padding='same', name=name+"_Conv_1")(input_layer)
    out = layers.LeakyReLU(0.2, name=name+"_Act_1")(out)
    out = layers.Conv2D(ndf*2, kernel_size=1, strides=1, padding='same', name=name+"_Conv_2")(out)
    out = norm_layer(mode=norm_type, name=name+"_Norm_2")(out)
    out = layers.LeakyReLU(0.2, name=name+"_Act_2")(out)
    out = layers.Conv2D(1, kernel_size=1, strides=1, padding="same", name=name+"_Output")(out)

    # Consistency fix: pass name= like the other builders in this file
    # (the original dropped it, leaving an auto-generated model name).
    return models.Model(inputs=input_layer, outputs=out, name=name)
--------------------------------------------------------------------------------
/03_Advance/CNN/SqueezeNet/tf_keras.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 | from tensorflow.keras import backend as K
10 |
# %%
# Data Prepare

# Download and extract the flower_photos dataset; each sub-directory is one class.
URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
path_to_zip = utils.get_file('flower_photos.tgz', origin=URL, extract=True)

PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')

category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 150

def read_img(path, img_size):
    """Load the image at *path* as RGB, resized to (img_size, img_size)."""
    img = cv.imread(path)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img

imgs_tr = []
labs_tr = []

imgs_val = []
labs_val = []

# Per-category split: first ~5% of the (unshuffled) listing goes to validation.
for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")

    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    labs = [i]*len(imgs_list)

    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]

    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]

# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)

imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)

print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)
63 |
64 | # %%
65 | # Build Network
66 |
def Conv_Block(input, filters, ksize, stride, padding, activation, use_bn=False, name="Conv"):
    """Conv2D -> optional BatchNorm -> activation.

    NOTE: the parameter name 'input' shadows the builtin; kept unchanged for
    keyword-call compatibility.
    """
    x = layers.Conv2D(filters, ksize, stride, padding, name=name+"_Conv")(input)
    if use_bn:
        x = layers.BatchNormalization(name=name+"_BN")(x)
    return layers.Activation(activation, name=name+"_Act")(x)
73 |
def Fire_Module(input, squ, exp_1x1, exp_3x3, use_bn=False, name="Fire"):
    """SqueezeNet Fire module: 1x1 squeeze conv, then parallel 1x1 and 3x3
    expand convs concatenated and passed through a shared ReLU."""
    squeezed = Conv_Block(input, squ, 1, 1, 'valid', 'relu', name=name+"_Squeeze")

    branch_1x1 = Conv_Block(squeezed, exp_1x1, 1, 1, 'valid', 'linear', name=name+"_Expand_1x1")
    branch_3x3 = Conv_Block(squeezed, exp_3x3, 3, 1, 'same', 'linear', name=name+"_Expand_3x3")

    merged = layers.Concatenate(name=name+"_Expand")([branch_1x1, branch_3x3])
    return layers.ReLU(name=name+"_Act")(merged)
85 |
def build_squeezenet(input_shape=(None, None, 3), num_classes=1, name='SqueezeNet'):
    """SqueezeNet: conv stem, fire modules 2-9 (max-pool after fire 4 and 8),
    dropout, 1x1 class conv, global average pooling, final activation."""
    last_act = 'sigmoid' if num_classes==1 else 'softmax'

    input = layers.Input(shape=input_shape, name=name+"_input")

    x = Conv_Block(input, 96, 7, 2, 'same', 'relu', name=name+"_Block_1")
    x = layers.MaxPool2D(3, 2, name=name+"_Pool_1")(x)

    # (module number, squeeze, expand_1x1, expand_3x3, pool after module?)
    fire_cfg = [
        (2, 16, 64, 64, False),
        (3, 16, 64, 64, False),
        (4, 32, 128, 128, True),
        (5, 32, 128, 128, False),
        (6, 48, 192, 192, False),
        (7, 48, 192, 192, False),
        (8, 64, 256, 256, True),
        (9, 64, 256, 256, False),
    ]
    for module_no, squ, e1, e3, pool_after in fire_cfg:
        x = Fire_Module(x, squ, e1, e3, name=name+"_Fire_%d"%module_no)
        if pool_after:
            x = layers.MaxPool2D(3, 2, name=name+"_Pool_%d"%module_no)(x)

    x = layers.Dropout(0.5, name=name+"_Dropout")(x)
    x = Conv_Block(x, num_classes, 1, 1, 'valid', 'relu', name=name+"_Block_10")

    x = layers.GlobalAveragePooling2D(name=name+"_GAP")(x)
    x = layers.Activation(last_act, name=name+"_Output")(x)

    return models.Model(input, x)
114 |
input_shape = imgs_tr.shape[1:]
alpha = 1  # NOTE(review): unused in this file -- width-multiplier leftover?

squeeze = build_squeezenet(input_shape=input_shape, num_classes=num_classes, name="Squeeze")
squeeze.summary()

loss = 'binary_crossentropy' if num_classes==1 else 'categorical_crossentropy'
# NOTE(review): lr 0.04 matches the SqueezeNet paper's SGD schedule but is
# very high for Adam -- training may diverge; confirm intent.
squeeze.compile(optimizer=optimizers.Adam(0.04), loss=loss, metrics=['accuracy'])
123 |
# %%
# Training Network
epochs=100
batch_size=16

history=squeeze.fit(imgs_tr, labs_tr, epochs = epochs, batch_size=batch_size,
                    validation_data=(imgs_val, labs_val))

plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.title("Loss graph")
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.subplot(122)
plt.title("Acc graph")
# compile() used metrics=['accuracy'], so the TF2 history keys are
# 'accuracy'/'val_accuracy' -- 'acc'/'val_acc' raises KeyError here.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.show()
--------------------------------------------------------------------------------
/04_Extra/Image_Translation/Neural_Style_Transfer/PyTorch/Neural_Style_Transfer.py:
--------------------------------------------------------------------------------
import argparse

# CLI: content/style image paths, optional content rescale factor, step count.
parser = argparse.ArgumentParser()
parser.add_argument("--content", help="Path of Content Image", type=str)
parser.add_argument("--style", help="Path of Style Image", type=str)
parser.add_argument("--scale", help="Scaling Factor", type=float, default=1.0)
parser.add_argument("--steps", help="Steps of Training", type=int, default=2000)
args = parser.parse_args()
9 |
10 | import torch
11 | from torch import nn
12 | import torch.optim as opti
13 | from torch.autograd import Variable
14 |
15 | import PIL
16 | import numpy as np
17 | from PIL import Image
18 | from matplotlib import pyplot as plt
19 | from keras.utils import Progbar
20 | import torchvision.transforms as transforms
21 | import torchvision.models as models
22 |
23 | print("Loading Packages!")
24 |
25 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
26 |
def load_img(img_path, scale=None, resize=None, rescale=None):
    '''
    Load an image from disk, optionally rescaling or resizing it.

    Parameter
    =========
    img_path : str, Path of Image
    scale : float, Scaling Factor applied to both width and height
    resize : (int(w), int(h)), Size to resize
    rescale : float, alias for ``scale`` (the call sites in this script
              pass ``rescale=``)

    Return
    =========
    img : PIL.Image in RGB mode
    '''
    # FIX: the original signature declared ``scale`` but the body tested an
    # undefined name ``rescale`` (NameError), and the caller passes
    # ``rescale=`` (TypeError). Accept both spellings.
    if rescale is not None:
        scale = rescale
    img = Image.open(img_path)
    if scale:
        w, h = img.size
        w = int(w*scale)
        h = int(h*scale)
        img = img.resize((w, h), PIL.Image.BICUBIC)
    if resize:
        img = img.resize(resize, PIL.Image.BICUBIC)
    if img.mode == 'RGBA':
        img = img.convert("RGB")
    return img
53 |
def preproc4torch(img):
    """Convert a PIL image to a normalized (1, C, H, W) torch tensor on ``device``."""
    mean = np.array([[[0.485, 0.456, 0.406]]])
    std = np.array([[[0.229, 0.224, 0.225]]])
    arr = np.array(img) / 255.
    arr = np.flip(arr, axis=2)       # reverse the channel order
    arr = (arr - mean) / std         # ImageNet normalization
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    return torch.Tensor(arr).unsqueeze(0).to(device)
62 |
def deproc4plot(img):
    """Undo preproc4torch: normalized tensor -> uint8 HWC image array."""
    mean = np.array([[[0.485, 0.456, 0.406]]])
    std = np.array([[[0.229, 0.224, 0.225]]])
    arr = img.detach().cpu().squeeze().numpy()
    arr = np.transpose(arr, [1, 2, 0])              # CHW -> HWC
    arr = np.clip((arr * std + mean) * 255., 0, 255)  # de-normalize to pixel range
    arr = np.flip(arr, axis=2)                      # restore channel order
    return np.uint8(arr)
72 |
# Make Network for Style Transfer Using Pretrained VGG19
class Extractor(nn.Module):
    """Frozen VGG19 feature extractor for neural style transfer."""

    def __init__(self):
        super(Extractor, self).__init__()
        # Indices (as strings, matching named_children keys) of the layers
        # whose activations feed the style / content losses.
        self.style_idx = ['0', '5', '10', '19', '28']
        self.content_idx = ['20']
        self.extractor = models.vgg19(pretrained=True).features

    def forward(self, x, mode=None):
        """Extract multiple convolutional feature maps."""
        assert mode, "Please input mode of Extractor"
        wanted = self.content_idx if mode == 'content' else self.style_idx
        collected = []
        for name, layer in self.extractor.named_children():
            x = layer(x)
            if name in wanted:
                collected.append(x)
        return collected
92 |
print("Define Done!")

# Load image
# NOTE(review): this call passes ``rescale=`` while load_img's signature
# declares ``scale`` -- confirm the two agree before running.
content_img = load_img(args.content, rescale=args.scale)
style_img = load_img(args.style, resize=content_img.size)

# Normalize both images and move them to ``device``.
content_img = preproc4torch(content_img)
style_img = preproc4torch(style_img)
print('Content image shape : ', content_img.shape)
print('Style image shape : ', style_img.shape)

# The variable being optimized: initialized as a copy of the content image.
# (.to(device) is a no-op here since preproc4torch already placed it there,
# so target_img stays a leaf tensor the optimizer can update.)
target_img = Variable(content_img.data.clone(), requires_grad=True).to(device)
print('Target imate shape : ', target_img.shape)

print("Loading Image Donw!")

# Pretrained extractor in eval mode; its weights are never updated.
extractor = Extractor().to(device).eval()

# NOTE(review): betas=[0.5, 0.1] uses an unusually small beta2 -- confirm intended.
optim = torch.optim.Adam([target_img], lr=0.001, betas=[0.5, 0.1])
112 |
113 |
def Content_Loss(content, target):
    """Mean-squared error between the first content and target feature maps."""
    diff = content[0] - target[0]
    return (diff ** 2).mean()
116 |
117 |
def Style_Loss(style, target):
    """Sum over layers of the normalized MSE between Gram matrices."""
    total = 0
    for style_feat, target_feat in zip(style, target):
        b, c, h, w = style_feat.size()
        flat_s = style_feat.view(b, c, h * w)
        flat_t = target_feat.view(b, c, h * w)

        # Gram matrices: channel-to-channel feature correlations.
        gram_s = flat_s.bmm(flat_s.transpose(1, 2))
        gram_t = flat_t.bmm(flat_t.transpose(1, 2))
        total = total + ((gram_s - gram_t) ** 2).mean() / (c * h * w)
    return total
129 |
print("Start Styling!")

# Optimize target_img so its content features match the content image and
# its Gram matrices match the style image.
steps = args.steps
progbar = Progbar(steps)
for step in range(steps):
    content = extractor(content_img, 'content')
    style = extractor(style_img, 'style')
    target_content = extractor(target_img, 'content')
    target_style = extractor(target_img, 'style')

    c_loss = Content_Loss(content, target_content)
    s_loss = Style_Loss(style, target_style)

    # Style term weighted 100x relative to content.
    loss = c_loss + 100*s_loss

    optim.zero_grad()
    loss.backward()
    optim.step()
    # Periodic snapshot every 500 steps.
    if (step + 1) % 500 == 0:
        snapshot = Image.fromarray(deproc4plot(target_img))
        snapshot.save('new_style_image_%06d.jpg'%(step+1))
    progbar.update(step+1, [('Content loss', c_loss.cpu().detach().numpy()), ('Style loss', s_loss.cpu().detach().numpy()*100)])


# FIX: the original reused ``new_img`` from inside the snapshot branch above,
# which is undefined (NameError) whenever steps < 500; always derive the
# final image from the optimized tensor explicitly.
new_img = deproc4plot(target_img)
save_img = Image.fromarray(new_img)
save_img.save('new_style_image.jpg')
--------------------------------------------------------------------------------
/04_Extra/ViT/PyTorch.py:
--------------------------------------------------------------------------------
1 | # %%
2 | import os, torch
3 | import cv2 as cv
4 | import numpy as np
5 | from torch import nn, optim
6 | from torch.nn import functional as F
7 | from torch.utils.data import Dataset, DataLoader
8 | from torchvision import transforms, datasets, utils
9 |
# Device Configuration: prefer the first CUDA GPU, fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
12 |
13 | # %%
class ScaledDotProductAttention(nn.Module):
    """Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V."""

    def forward(self, Q, K, V, mask=None):
        d_K = K.size()[-1]  # key dimension
        # Scaled similarity between every query and every key.
        scores = torch.matmul(Q, K.transpose(-2, -1)) / np.sqrt(d_K)
        if mask is not None:
            # Masked positions receive -1e9 so softmax drives them to ~0.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = F.softmax(scores, dim=-1)
        out = torch.matmul(weights, V)
        return out, weights
23 |
class MultiHeadedAttention(nn.Module):
    """Multi-head attention: project Q/K/V, attend per head, merge, project out.

    Args:
        d_feat: model/feature dimension; must be divisible by ``n_head``.
        n_head: number of attention heads.
        actv: activation kept for interface compatibility (not used here).
        use_bias: whether the four linear projections carry a bias term.
        dropout_rate: probability for the (currently unused) dropout layer.
    """

    def __init__(self, d_feat=128, n_head=5, actv=F.relu, use_bias=True, dropout_rate=0.1):

        super(MultiHeadedAttention, self).__init__()
        if (d_feat%n_head) != 0:
            raise ValueError("d_feat(%d) should be divisible by b_head(%d)"%(d_feat,n_head))
        self.d_feat = d_feat
        self.n_head = n_head
        self.d_head = self.d_feat // self.n_head
        self.actv = actv
        self.use_bias = use_bias
        # FIX: the original read ``self.dropout_rate`` below without ever
        # defining it, so construction raised AttributeError; expose it as a
        # backward-compatible keyword parameter instead.
        self.dropout_rate = dropout_rate

        self.SDPA = ScaledDotProductAttention()
        self.lin_Q = nn.Linear(self.d_feat,self.d_feat,self.use_bias)
        self.lin_K = nn.Linear(self.d_feat,self.d_feat,self.use_bias)
        self.lin_V = nn.Linear(self.d_feat,self.d_feat,self.use_bias)
        self.lin_O = nn.Linear(self.d_feat,self.d_feat,self.use_bias)

        # NOTE(review): defined but never applied in forward -- confirm intent.
        self.dropout = nn.Dropout(p=self.dropout_rate)

    def forward(self, Q, K, V, mask=None):
        """Return (output, attention): (B, L, d_feat) and (B, n_head, L, L)."""
        n_batch = Q.shape[0]
        Q_emb = self.lin_Q(Q)
        K_emb = self.lin_K(K)
        V_emb = self.lin_V(V)

        # Split the feature dimension into heads: (B, L, D) -> (B, H, L, D/H).
        Q_emb = Q_emb.view(n_batch, -1, self.n_head, self.d_head).permute(0, 2, 1, 3)
        K_emb = K_emb.view(n_batch, -1, self.n_head, self.d_head).permute(0, 2, 1, 3)
        V_emb = V_emb.view(n_batch, -1, self.n_head, self.d_head).permute(0, 2, 1, 3)

        out, attention = self.SDPA(Q_emb, K_emb, V_emb, mask)

        # Merge heads back: (B, H, L, D/H) -> (B, L, D).
        out = out.permute(0,2,1,3).contiguous()
        out = out.view(n_batch,-1,self.d_feat)

        # Final output projection.
        out = self.lin_O(out)

        return out, attention
64 |
class EncoderLyaer(nn.Module):
    """Transformer encoder layer: self-attention + position-wise FFN, each with
    dropout, a residual connection, and LayerNorm.

    (Class name kept as in the original file -- including the ``Lyaer`` typo --
    because other code may reference it.)
    """

    def __init__(self, d_feat=128, n_head=5, actv=F.relu, use_bias=True, features=256, rate=0.1):
        super(EncoderLyaer, self).__init__()
        self.d_feat = d_feat
        self.n_head = n_head
        self.d_head = self.d_feat // self.n_head
        self.actv = actv
        self.use_bias = use_bias
        self.features = features
        self.rate = rate  # dropout probability

        self.MHA = MultiHeadedAttention(self.d_feat, self.n_head, self.actv, self.use_bias)
        # FIX: nn.Sequential takes modules as positional arguments (passing a
        # list raises TypeError), and the activation was misspelled
        # ``nn.ReUL(replace=True)`` -> ``nn.ReLU(inplace=True)``.
        self.FFN = nn.Sequential(
            nn.Linear(self.d_feat, self.features, self.use_bias),
            nn.ReLU(inplace=True),
            nn.Linear(self.features, self.d_feat, self.use_bias),
        )

        self.layernorm1 = nn.LayerNorm(self.d_feat)
        self.layernorm2 = nn.LayerNorm(self.d_feat)

        self.dropout1 = nn.Dropout(self.rate)
        self.dropout2 = nn.Dropout(self.rate)

    def forward(self, x, mask):
        """Return the encoded sequence; same shape as ``x``."""
        # Self-attention sub-layer with residual connection.
        out1, _ = self.MHA(x, x, x, mask)
        out1 = self.dropout1(out1)
        out1 = self.layernorm1(out1 + x)

        # Feed-forward sub-layer with residual connection.
        out2 = self.FFN(out1)
        out2 = self.dropout2(out2)
        out2 = self.layernorm2(out2 + out1)

        return out2
99 |
class DecoderLayer(nn.Module):
    """Transformer decoder layer: masked self-attention, encoder-decoder
    cross-attention, and a position-wise FFN, each with dropout, a residual
    connection, and LayerNorm."""

    def __init__(self, d_feat=128, n_head=5, actv=F.relu, use_bias=True, features=256, rate=0.1):
        # FIX: the original called super(EncoderLyaer, self).__init__(), which
        # raises TypeError because DecoderLayer does not inherit EncoderLyaer.
        super(DecoderLayer, self).__init__()
        self.d_feat = d_feat
        self.n_head = n_head
        self.d_head = self.d_feat // self.n_head
        self.actv = actv
        self.use_bias = use_bias
        self.features = features
        self.rate = rate  # dropout probability

        self.MHA1 = MultiHeadedAttention(self.d_feat, self.n_head, self.actv, self.use_bias)
        self.MHA2 = MultiHeadedAttention(self.d_feat, self.n_head, self.actv, self.use_bias)
        # FIX: nn.Sequential takes modules as positional arguments (not a
        # list), and ``nn.ReUL(replace=True)`` -> ``nn.ReLU(inplace=True)``.
        self.FFN = nn.Sequential(
            nn.Linear(self.d_feat, self.features, self.use_bias),
            nn.ReLU(inplace=True),
            nn.Linear(self.features, self.d_feat, self.use_bias),
        )

        self.layernorm1 = nn.LayerNorm(self.d_feat)
        self.layernorm2 = nn.LayerNorm(self.d_feat)
        self.layernorm3 = nn.LayerNorm(self.d_feat)

        self.dropout1 = nn.Dropout(self.rate)
        self.dropout2 = nn.Dropout(self.rate)
        self.dropout3 = nn.Dropout(self.rate)

    def forward(self, x, encoder_output, look_mask, padding_mask):
        """Return (decoded sequence, self-attn weights, cross-attn weights)."""
        # Masked self-attention over the decoder input.
        out1, attn1 = self.MHA1(x, x, x, look_mask)
        out1 = self.dropout1(out1)
        out1 = self.layernorm1(out1 + x)

        # FIX: cross-attention queries must come from the decoder stream
        # (out1) while keys/values come from the encoder output. The original
        # passed encoder_output as Q, which also breaks the residual addition
        # whenever encoder and decoder sequence lengths differ.
        out2, attn2 = self.MHA2(out1, encoder_output, encoder_output, padding_mask)
        out2 = self.dropout2(out2)
        out2 = self.layernorm2(out2 + out1)

        out3 = self.FFN(out2)
        out3 = self.dropout3(out3)
        out3 = self.layernorm3(out3 + out2)

        return out3, attn1, attn2
--------------------------------------------------------------------------------
/03_Advance/CNN/MobileNetV1/tf_keras.py:
--------------------------------------------------------------------------------
1 | # %%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare

# Download and extract the TF flowers dataset; one sub-directory per class.
URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
path_to_zip = utils.get_file('flower_photos.tgz', origin=URL, extract=True)

PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')

category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 150

def read_img(path, img_size):
    # Read one image as RGB and resize it to (img_size, img_size).
    img = cv.imread(path)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img

imgs_tr = []
labs_tr = []

imgs_val = []
labs_val = []

for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    # First ~5% of each category's (unshuffled) listing becomes validation.
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")

    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    labs = [i]*len(imgs_list)

    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]

    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]

# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)

imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)

print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)
62 |
63 | # %%
64 | # Build Network
65 |
def conv_block(x, filters, ksize=3, strides=1, padding="same", name="Block"):
    """Conv2D -> BatchNorm -> ReLU; layer names derive from ``name``."""
    out = layers.Conv2D(filters, ksize, strides=strides, padding=padding, name=name+"_Conv")(x)
    out = layers.BatchNormalization(name=name+"_BN")(out)
    return layers.ReLU(name=name+"_Act")(out)
71 |
def depthwise_separable_block(x, filters, ksize=3, strides=1, padding="same", depth_multiplier=1, alpha=1, name="Block"):
    """Depthwise conv + BN + ReLU, then 1x1 pointwise conv + BN + ReLU.

    ``alpha`` is the MobileNet width multiplier applied to the pointwise filters.
    """
    out = layers.DepthwiseConv2D(ksize, strides=strides, padding=padding,
                                 depth_multiplier=depth_multiplier, name=name+"_Depthwise")(x)
    out = layers.BatchNormalization(name=name+"_BN_1")(out)
    out = layers.ReLU(name=name+"_Act_1")(out)

    out = layers.Conv2D(int(filters*alpha), 1, name=name+"_Pointwise")(out)
    out = layers.BatchNormalization(name=name+"_BN_2")(out)
    return layers.ReLU(name=name+"_Act_2")(out)
81 |
def build_mobilenet(input_shape=(None, None, 3), num_classes=1, depth_multiplier=1, alpha=1, name='mobile'):
    """Assemble MobileNetV1: stem conv, 13 depthwise-separable blocks, GAP head."""

    last_act = 'sigmoid' if num_classes==1 else 'softmax'

    input = layers.Input(shape=input_shape, name=name+"_input")

    x = conv_block(input, 32, 3, 2, "same", name+"_Stem")

    # (filters, strides) for blocks 1..13; stride-2 entries halve the resolution.
    plan = [(64, 1), (128, 2), (128, 1), (256, 2), (256, 1), (512, 2),
            (512, 1), (512, 1), (512, 1), (512, 1), (512, 1),
            (1024, 2), (1024, 1)]
    for idx, (filters, strides) in enumerate(plan, start=1):
        x = depthwise_separable_block(x, filters, strides=strides,
                                      depth_multiplier=depth_multiplier, alpha=alpha,
                                      name=name+"_Block_%d"%idx)

    x = layers.GlobalAveragePooling2D(name=name+"_GAP")(x)
    x = layers.Dense(num_classes, activation=last_act, name=name+"_Output")(x)

    return models.Model(input, x)
107 |
# Build, compile, and train MobileNetV1 on the prepared flower data.
input_shape = imgs_tr.shape[1:]
depth_multiplier = 1
alpha = 1

mobile = build_mobilenet(input_shape=input_shape, num_classes=num_classes, depth_multiplier=1, alpha=1, name="Mobile")
mobile.summary()

loss = 'binary_crossentropy' if num_classes==1 else 'categorical_crossentropy'
mobile.compile(optimizer=optimizers.Adam(), loss=loss, metrics=['accuracy'])

# %%
# Training Network
epochs=100
batch_size=16

# FIX: validation_data should be a tuple, not a list, on current tf.keras.
history=mobile.fit(imgs_tr, labs_tr, epochs = epochs, batch_size=batch_size, validation_data=(imgs_val, labs_val))

plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.title("Loss graph")
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.subplot(122)
plt.title("Acc graph")
# FIX: with metrics=['accuracy'], tf.keras records the history keys
# 'accuracy'/'val_accuracy' -- 'acc'/'val_acc' raise KeyError on TF2.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.show()
139 |
--------------------------------------------------------------------------------
/03_Advance/GAN/LSGAN/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import random
3 | from tqdm import tqdm
4 |
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn
9 | from torch.nn import functional as F
10 | from torch import optim
11 | from torch.utils.data import DataLoader
12 |
13 | from torchvision import datasets
14 | from torchvision import transforms
15 |
16 | from matplotlib import pyplot as plt
17 |
# Device Configuration: prefer the first CUDA GPU, fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set randomness: seed every RNG so runs are reproducible.
seed = 777
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed) # if use multi-GPU
    # Deterministic cuDNN kernels trade speed for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Set hyperparameter
epochs= 10
batch_size= 256

# MNIST dataset (downloaded on first run).
mnist_train = datasets.MNIST(root='../../../data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Train Data Done ! ")

train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
45 |
# Defining Model
class Generator(nn.Module):
    """Maps a 100-d noise vector to a flattened 28x28 image in [0, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        self.linear1 = nn.Linear(100, 256)
        self.bnorm1 = nn.BatchNorm1d(256)
        self.linear2 = nn.Linear(256, 512)
        self.bnorm2 = nn.BatchNorm1d(512)
        self.linear3 = nn.Linear(512, 784)

    def forward(self, X):
        h = self.linear1(X)
        h = F.leaky_relu(self.bnorm1(h), negative_slope=0.03)
        h = self.linear2(h)
        h = F.leaky_relu(self.bnorm2(h), negative_slope=0.03)
        return torch.sigmoid(self.linear3(h))
61 |
class Discriminator(nn.Module):
    """Scores a flattened 28x28 image with a value in (0, 1)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        self.linear1 = nn.Linear(784, 256)
        self.linear2 = nn.Linear(256, 64)
        self.linear3 = nn.Linear(64, 1)

    def forward(self, X):
        h = F.leaky_relu(self.linear1(X), negative_slope=0.03)
        h = F.leaky_relu(self.linear2(h), negative_slope=0.03)
        return torch.sigmoid(self.linear3(h))
74 |
G = Generator().to(device)
D = Discriminator().to(device)

# LSGAN: least-squares (MSE) adversarial loss instead of BCE.
# NOTE(review): the original LSGAN formulation omits the sigmoid on D's
# output; here D ends in a sigmoid -- confirm this is intended.
criterion = nn.MSELoss()
d_optimizer = optim.Adam(D.parameters(), lr=0.0002)
g_optimizer = optim.Adam(G.parameters(), lr=0.0002)
81 |
# Helper Function
def plot_generator(G, num=10):
    """Sample ``num`` images from G and display them in a single row."""
    z = torch.randn(num, 100).to(device)
    samples = G.forward(z).cpu().detach()

    plt.figure(figsize=(8, 2))
    for idx in range(num):
        plt.subplot(1, num, idx + 1)
        plt.imshow(samples[idx].view(28, 28), cmap=plt.cm.gray)
        plt.axis('off')
    plt.show()
93 |
# Preview the untrained generator's output.
plot_generator(G)

# Training loop: alternate one discriminator step and one generator step
# per mini-batch.
for epoch in range(epochs):
    G.train()
    D.train()
    avg_g_loss = 0
    avg_d_loss = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        for i, (batch_img, _) in enumerate(train_loader):

            # NOTE: "siae" is a typo for "size" (kept to avoid code changes).
            tmp_batch_siae = batch_img.shape[0]

            # Flatten images to (batch, 784).
            X = batch_img.view(tmp_batch_siae, -1).to(device)

            # Target labels: 1 for real samples ...
            real_lab = torch.ones(tmp_batch_siae, 1).to(device)

            # ... and 0 for generated samples.
            fake_lab = torch.zeros(tmp_batch_siae, 1).to(device)

            # Training Discriminator: real batch ...
            D_pred = D.forward(X)
            d_loss_real = criterion(D_pred, real_lab)
            real_score = D_pred

            z = torch.randn(tmp_batch_siae, 100).to(device)

            # ... and a fresh fake batch.
            fake_images = G.forward(z)
            G_pred = D.forward(fake_images)
            d_loss_fake = criterion(G_pred, fake_lab)
            fake_score = G_pred

            d_loss = d_loss_real + d_loss_fake
            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()

            # Training Generator: make D label fresh fakes as real.
            z = torch.randn(tmp_batch_siae, 100).to(device)
            fake_images = G.forward(z)
            G_pred = D.forward(fake_images)
            g_loss = criterion(G_pred, real_lab)

            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            avg_g_loss += g_loss.item()
            avg_d_loss += d_loss.item()


            t.set_postfix({
                "D loss": f"{d_loss.item():05.3f}",
                "G loss": f"{g_loss.item():05.3f}"
            })
            t.update()


    print(f"Epoch : {epoch+1}, D Loss : {avg_d_loss/len(train_loader):.3f}, G Loss : {avg_g_loss/len(train_loader):.3f}")
    plot_generator(G)
print("Training Done !")
156 |
# Sample: draw 64 images from the trained generator and plot an 8x8 grid.
num_sample = 64
z = torch.randn(num_sample, 100).to(device)

generated = G.forward(z).cpu().detach()
plt.figure(figsize=(8, 8))
for i in range(num_sample):
    plt.subplot(8, 8, i+1)
    plt.imshow(generated[i].view(28, 28), cmap=plt.cm.gray)
    plt.axis('off')
plt.show()
--------------------------------------------------------------------------------
/03_Advance/GAN/Vanilla_GAN/PyTorch.py:
--------------------------------------------------------------------------------
1 | # Importing Modules
2 | import random
3 | from tqdm import tqdm
4 |
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn
9 | from torch.nn import functional as F
10 | from torch import optim
11 | from torch.utils.data import DataLoader
12 |
13 | from torchvision import datasets
14 | from torchvision import transforms
15 |
16 | from matplotlib import pyplot as plt
17 |
# Device Configuration: prefer the first CUDA GPU, fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Set randomness: seed every RNG so runs are reproducible.
seed = 777
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed) # if use multi-GPU
    # Deterministic cuDNN kernels trade speed for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Set hyperparameter
epochs= 10
batch_size= 256

# MNIST dataset (downloaded on first run).
mnist_train = datasets.MNIST(root='../../../data/',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)
print("Downloading Train Data Done ! ")

train_loader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=2)
45 |
# Defining Model
class Generator(nn.Module):
    """Generator: 100-d noise -> flattened 28x28 image with values in [0, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        self.linear1 = nn.Linear(100, 256)
        self.bnorm1 = nn.BatchNorm1d(256)
        self.linear2 = nn.Linear(256, 512)
        self.bnorm2 = nn.BatchNorm1d(512)
        self.linear3 = nn.Linear(512, 784)

    def forward(self, X):
        hidden = F.leaky_relu(self.bnorm1(self.linear1(X)), negative_slope=0.03)
        hidden = F.leaky_relu(self.bnorm2(self.linear2(hidden)), negative_slope=0.03)
        out = self.linear3(hidden)
        return torch.sigmoid(out)
61 |
class Discriminator(nn.Module):
    """Discriminator: flattened 28x28 image -> realness score in (0, 1)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        self.linear1 = nn.Linear(784, 256)
        self.linear2 = nn.Linear(256, 64)
        self.linear3 = nn.Linear(64, 1)

    def forward(self, X):
        hidden = F.leaky_relu(self.linear1(X), negative_slope=0.03)
        hidden = F.leaky_relu(self.linear2(hidden), negative_slope=0.03)
        score = self.linear3(hidden)
        return torch.sigmoid(score)
74 |
G = Generator().to(device)
D = Discriminator().to(device)

# Vanilla GAN: binary cross-entropy adversarial loss (D ends in a sigmoid).
criterion = nn.BCELoss()
d_optimizer = optim.Adam(D.parameters(), lr=0.0002)
g_optimizer = optim.Adam(G.parameters(), lr=0.0002)
81 |
# Helper Function
def plot_generator(G, num=10):
    """Draw ``num`` generator samples and display them side by side."""
    z = torch.randn(num, 100).to(device)
    imgs = G.forward(z).cpu().detach()

    plt.figure(figsize=(8, 2))
    for col in range(num):
        plt.subplot(1, num, col + 1)
        plt.imshow(imgs[col].view(28, 28), cmap=plt.cm.gray)
        plt.axis('off')
    plt.show()
93 |
# Preview the untrained generator's output.
plot_generator(G)

# Training loop: alternate one discriminator step and one generator step
# per mini-batch.
for epoch in range(epochs):
    G.train()
    D.train()
    avg_g_loss = 0
    avg_d_loss = 0

    with tqdm(total=len(train_loader)) as t:
        t.set_description(f'[{epoch+1}/{epochs}]')
        for i, (batch_img, _) in enumerate(train_loader):

            # NOTE: "siae" is a typo for "size" (kept to avoid code changes).
            tmp_batch_siae = batch_img.shape[0]

            # Flatten images to (batch, 784).
            X = batch_img.view(tmp_batch_siae, -1).to(device)

            # Target labels: 1 for real samples ...
            real_lab = torch.ones(tmp_batch_siae, 1).to(device)

            # ... and 0 for generated samples.
            fake_lab = torch.zeros(tmp_batch_siae, 1).to(device)

            # Training Discriminator: real batch ...
            D_pred = D.forward(X)
            d_loss_real = criterion(D_pred, real_lab)
            real_score = D_pred

            z = torch.randn(tmp_batch_siae, 100).to(device)

            # ... and a fresh fake batch.
            fake_images = G.forward(z)
            G_pred = D.forward(fake_images)
            d_loss_fake = criterion(G_pred, fake_lab)
            fake_score = G_pred

            d_loss = d_loss_real + d_loss_fake
            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()

            # Training Generator: make D label fresh fakes as real.
            z = torch.randn(tmp_batch_siae, 100).to(device)
            fake_images = G.forward(z)
            G_pred = D.forward(fake_images)
            g_loss = criterion(G_pred, real_lab)

            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            avg_g_loss += g_loss.item()
            avg_d_loss += d_loss.item()


            t.set_postfix({
                "D loss": f"{d_loss.item():05.3f}",
                "G loss": f"{g_loss.item():05.3f}"
            })
            t.update()


    print(f"Epoch : {epoch+1}, D Loss : {avg_d_loss/len(train_loader):.3f}, G Loss : {avg_g_loss/len(train_loader):.3f}")
    plot_generator(G)
print("Training Done !")
156 |
# Sample: draw 64 images from the trained generator and plot an 8x8 grid.
num_sample = 64
z = torch.randn(num_sample, 100).to(device)

generated = G.forward(z).cpu().detach()
plt.figure(figsize=(8, 8))
for i in range(num_sample):
    plt.subplot(8, 8, i+1)
    plt.imshow(generated[i].view(28, 28), cmap=plt.cm.gray)
    plt.axis('off')
plt.show()
--------------------------------------------------------------------------------
/03_Advance/CNN/ResNet/tf_keras.py:
--------------------------------------------------------------------------------
1 | #%%
2 | # Import Package
3 | import os
4 | import cv2 as cv
5 | import numpy as np
6 | import tensorflow as tf
7 | from matplotlib import pyplot as plt
8 | from tensorflow.keras import layers, models, losses, optimizers, datasets, utils
9 |
# %%
# Data Prepare

# Download and extract the TF flowers dataset; one sub-directory per class.
URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
path_to_zip = utils.get_file('flower_photos.tgz', origin=URL, extract=True)

PATH = os.path.join(os.path.dirname(path_to_zip), 'flower_photos')

category_list = [i for i in os.listdir(PATH) if os.path.isdir(os.path.join(PATH, i)) ]
print(category_list)

num_classes = len(category_list)
img_size = 150

def read_img(path, img_size):
    # Read one image as RGB and resize it to (img_size, img_size).
    img = cv.imread(path)
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    img = cv.resize(img, (img_size, img_size))
    return img

imgs_tr = []
labs_tr = []

imgs_val = []
labs_val = []

for i, category in enumerate(category_list):
    path = os.path.join(PATH, category)
    imgs_list = os.listdir(path)
    print("Total '%s' images : %d"%(category, len(imgs_list)))
    # First ~5% of each category's (unshuffled) listing becomes validation.
    ratio = int(np.round(0.05 * len(imgs_list)))
    print("%s Images for Training : %d"%(category, len(imgs_list[ratio:])))
    print("%s Images for Validation : %d"%(category, len(imgs_list[:ratio])))
    print("=============================")

    imgs = [read_img(os.path.join(path, img),img_size) for img in imgs_list]
    labs = [i]*len(imgs_list)

    imgs_tr += imgs[ratio:]
    labs_tr += labs[ratio:]

    imgs_val += imgs[:ratio]
    labs_val += labs[:ratio]

# Scale pixels to [0, 1] and one-hot encode the labels.
imgs_tr = np.array(imgs_tr)/255.
labs_tr = utils.to_categorical(np.array(labs_tr), num_classes)

imgs_val = np.array(imgs_val)/255.
labs_val = utils.to_categorical(np.array(labs_val), num_classes)

print(imgs_tr.shape, labs_tr.shape)
print(imgs_val.shape, labs_val.shape)
62 |
63 | # %%
64 | # Build Networks
def conv_block(x, num_filters, ksize, strides=(1, 1), padding='same', activation='relu', name='conv_block'):
    """Conv2D (linear) -> BatchNorm -> Activation; layer names derive from ``name``."""
    out = layers.Conv2D(num_filters, ksize, strides=strides, padding=padding,
                        activation="linear", name=name+"_conv")(x)
    out = layers.BatchNormalization(name=name+"_bn")(out)
    return layers.Activation(activation, name=name+"_Act")(out)
70 |
def residual_block(x, num_filters, strides=(1, 1), activation='relu', use_branch=True, name='res_block'):
    """Bottleneck residual unit: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.

    When ``use_branch`` is True the shortcut is a 1x1 projection conv;
    otherwise the input is added through unchanged (identity shortcut).
    """
    if use_branch:
        shortcut = conv_block(x, num_filters, 1, strides=strides, padding='valid',
                              activation='linear', name=name+"_Branch1")
    else:
        shortcut = x

    main = conv_block(x, num_filters//4, 1, strides=strides, padding='valid',
                      activation=activation, name=name+"_Branch2a")
    main = conv_block(main, num_filters//4, 3, activation=activation, name=name+"_Branch2b")
    main = conv_block(main, num_filters, 1, activation='linear', name=name+"_Branch2c")

    merged = layers.Add(name=name+"_Add")([shortcut, main])
    return layers.Activation(activation, name=name+"_Act")(merged)
85 |
def build_resnet(input_shape=(None, None, 3, ), num_classes=10, num_layer = 50, name="Net"):
    """Assemble a ResNet-50/101/152 classifier from bottleneck residual blocks."""

    # Units per stage for each supported depth.
    blocks_dict = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3]
    }
    assert num_layer in blocks_dict.keys(), "Number of layer must be in %s"%blocks_dict.keys()

    num_channel_list = [256, 512, 1024, 2048]
    block_name = ['a', 'b', 'c', 'd']

    name = name+str(num_layer)
    last_act = 'sigmoid' if num_classes==1 else 'softmax'

    _input = layers.Input(shape=input_shape, name=name+"_input")

    # Stem: padded 7x7/2 conv followed by max pooling.
    x = layers.ZeroPadding2D((3, 3), name=name+"_pad")(_input)
    x = conv_block(x, 64, 7, (2, 2), 'valid', 'relu', name=name+"_stem")
    x = layers.MaxPool2D(name=name+'_pool')(x)

    for stage, repeats in enumerate(blocks_dict[num_layer]):
        for unit in range(repeats):
            block_id = name+"_res_"+block_name[stage]+str(unit)
            if unit == 0:
                # First unit of each stage downsamples and projects the shortcut.
                x = residual_block(x, num_channel_list[stage], activation='relu', strides=(2, 2), name=block_id)
            else:
                x = residual_block(x, num_channel_list[stage], activation='relu', use_branch=False, name=block_id)

    x = layers.GlobalAveragePooling2D(name=name+"_GAP")(x)
    x = layers.Dense(num_classes, activation=last_act, name=name+"_Output")(x)
    return models.Model(_input, x, name=name)
119 |
# Build, compile, and train ResNet-50 on the prepared flower data.
num_layer = 50
input_shape = imgs_tr.shape[1:]

resnet = build_resnet(input_shape=input_shape, num_classes=num_classes, num_layer=num_layer, name="ResNet")
resnet.summary()


loss = 'binary_crossentropy' if num_classes==1 else 'categorical_crossentropy'
resnet.compile(optimizer=optimizers.Adam(), loss=loss, metrics=['accuracy'])

# %%
# Training Network
epochs=100
batch_size=16

# FIX: validation_data should be a tuple, not a list, on current tf.keras.
history=resnet.fit(imgs_tr, labs_tr, epochs = epochs, batch_size=batch_size, validation_data=(imgs_val, labs_val))

plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.title("Loss graph")
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.subplot(122)
plt.title("Acc graph")
# FIX: with metrics=['accuracy'], tf.keras records the history keys
# 'accuracy'/'val_accuracy' -- 'acc'/'val_acc' raise KeyError on TF2.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['Train', 'Validation'], loc='upper right')

plt.show()
--------------------------------------------------------------------------------