├── .gitignore
├── Autoencoder
│   ├── MyColor.py
│   ├── model.py
│   └── train.py
├── GANS
│   ├── Recolor_GAN.ipynb
│   └── readme
├── README.md
└── Results
    └── Autoencoder
        ├── 1.jpg
        ├── 2.jpg
        ├── 3.jpg
        ├── 4.jpg
        ├── 5.jpg
        ├── 6.jpg
        ├── img_0.png
        ├── img_6 .png
        └── res2.jpg

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

--------------------------------------------------------------------------------
/Autoencoder/MyColor.py:
--------------------------------------------------------------------------------
from keras.models import Model
from keras.layers import Input, UpSampling2D, RepeatVector, Reshape
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate


def Colorize():
    embed_input = Input(shape=(1000,))

    # Encoder: 256x256x1 grayscale (L channel) down to 32x32x256 features
    encoder_input = Input(shape=(256, 256, 1))
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_input)
    encoder_output = MaxPooling2D((2, 2), padding='same')(encoder_output)
    encoder_output = Conv2D(128, (4, 4), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_output)
    encoder_output = MaxPooling2D((2, 2), padding='same')(encoder_output)
    encoder_output = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same', strides=1)(encoder_output)
    encoder_output = MaxPooling2D((2, 2), padding='same')(encoder_output)
    encoder_output = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)

    # Fusion: tile the 1000-d Inception embedding over the 32x32 feature map
    fusion_output = RepeatVector(32 * 32)(embed_input)
    fusion_output = Reshape((32, 32, 1000))(fusion_output)
    fusion_output = concatenate([encoder_output, fusion_output], axis=3)
    fusion_output = Conv2D(256, (1, 1), activation='relu', padding='same')(fusion_output)

    # Decoder: upsample back to 256x256x2 (a/b channels, tanh in [-1, 1])
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(fusion_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(64, (4, 4), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(32, (2, 2), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)

    # Second branch: the same encoder/decoder topology, without the embedding fusion
    encoder_output_2 = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_input)
    encoder_output_2 = MaxPooling2D((2, 2), padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(128, (4, 4), activation='relu', padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_output_2)
    encoder_output_2 = MaxPooling2D((2, 2), padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(256, (3, 3), activation='relu', padding='same', strides=1)(encoder_output_2)
    encoder_output_2 = MaxPooling2D((2, 2), padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output_2)
    encoder_output_2 = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output_2)

    # Fusion
    fusion_output_2 = Conv2D(512, (1, 1), activation='relu', padding='same')(encoder_output_2)

    # Decoder
    decoder_output_2 = Conv2D(128, (3, 3), activation='relu', padding='same')(fusion_output_2)
    decoder_output_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output_2)
    decoder_output_2 = UpSampling2D((2, 2))(decoder_output_2)
    decoder_output_2 = Conv2D(128, (3, 3), activation='relu', padding='same')(decoder_output_2)
    decoder_output_2 = UpSampling2D((2, 2))(decoder_output_2)
    decoder_output_2 = Conv2D(64, (4, 4), activation='relu', padding='same')(decoder_output_2)
    decoder_output_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output_2)
    decoder_output_2 = Conv2D(32, (2, 2), activation='relu', padding='same')(decoder_output_2)
    decoder_output_2 = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output_2)
    decoder_output_2 = UpSampling2D((2, 2))(decoder_output_2)

    # Third branch: re-encode the concatenated a/b predictions of the first two branches
    encoder_input_3 = concatenate([decoder_output, decoder_output_2], axis=3)
    encoder_output_3 = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_input_3)
    encoder_output_3 = MaxPooling2D((2, 2), padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(128, (4, 4), activation='relu', padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(128, (3, 3), activation='relu', padding='same', strides=1)(encoder_output_3)
    encoder_output_3 = MaxPooling2D((2, 2), padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(256, (3, 3), activation='relu', padding='same', strides=1)(encoder_output_3)
    encoder_output_3 = MaxPooling2D((2, 2), padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(256, (4, 4), activation='relu', padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output_3)
    encoder_output_3 = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output_3)

    # Fusion
    fusion_output_3 = Conv2D(512, (1, 1), activation='relu', padding='same')(encoder_output_3)

    # Decoder
    decoder_output_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(fusion_output_3)
    decoder_output_3 = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output_3)
    decoder_output_3 = UpSampling2D((2, 2))(decoder_output_3)
    decoder_output_3 = Conv2D(128, (3, 3), activation='relu', padding='same')(decoder_output_3)
    decoder_output_3 = UpSampling2D((2, 2))(decoder_output_3)
    decoder_output_3 = Conv2D(64, (4, 4), activation='relu', padding='same')(decoder_output_3)
    decoder_output_3 = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output_3)
    decoder_output_3 = Conv2D(32, (2, 2), activation='relu', padding='same')(decoder_output_3)
    decoder_output_3 = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output_3)
    decoder_output_3 = UpSampling2D((2, 2))(decoder_output_3)

    return Model(inputs=[encoder_input, embed_input], outputs=decoder_output_3)

--------------------------------------------------------------------------------
/Autoencoder/model.py:
--------------------------------------------------------------------------------
import os

import numpy as np
import tensorflow as tf

from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.models import load_model
from keras.preprocessing.image import img_to_array, load_img
from skimage.color import rgb2gray, gray2rgb, rgb2lab, lab2rgb
from skimage.io import imsave
from skimage.transform import resize

# Load the trained colorization model
model = load_model('/New_Data/Art_Colorization_Model.h5')
model.compile(optimizer='adam', loss='mean_squared_error')


# Create the Inception-ResNet-v2 embedding for a batch of grayscale RGB images
def create_inception_embedding(grayscaled_rgb):
    def resize_gray(x):
        return resize(x, (299, 299, 3), mode='constant')
    grayscaled_rgb_resized = np.array([resize_gray(x) for x in grayscaled_rgb])
    grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
    with inception.graph.as_default():
        embed = inception.predict(grayscaled_rgb_resized)
    return embed


inception = InceptionResNetV2(weights='imagenet', include_top=True)
inception.graph = tf.get_default_graph()

# Load the test images
color_me = []
for filename in os.listdir('/Dataset/Test/'):
    color_me.append(img_to_array(load_img('/Dataset/Test/'+filename)))
color_me = np.array(color_me, dtype=float)
gray_me = gray2rgb(rgb2gray(1.0/255*color_me))
color_me_embed = create_inception_embedding(gray_me)
color_me = rgb2lab(1.0/255*color_me)[:, :, :, 0]
color_me = color_me.reshape(color_me.shape+(1,))

# Test model: predict the a/b channels from the L channel + embedding
output = model.predict([color_me, color_me_embed])
output = output * 128

# Output colorizations: recombine predicted a/b channels with the input L channel
for i in range(len(output)):
    cur = np.zeros((256, 256, 3))
    cur[:, :, 0] = color_me[i][:, :, 0]
    cur[:, :, 1:] = output[i]
    imsave("/Dataset/img_"+str(i)+".png", lab2rgb(cur))

--------------------------------------------------------------------------------
/Autoencoder/train.py:
--------------------------------------------------------------------------------
import argparse
import os
import random
import sys
import warnings

import numpy as np
import tensorflow as tf

import MyColor
from tqdm import tqdm
from skimage.io import imread
from skimage.transform import resize
from skimage.color import rgb2gray, gray2rgb, rgb2lab
from sklearn.model_selection import train_test_split
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
random.seed(seed)
np.random.seed(seed)


ap = argparse.ArgumentParser()
ap.add_argument("-lr", "--learning_rate", type=float, default=0.0001,
                help="Learning Rate")
ap.add_argument("-e", "--epochs", type=int, default=10,
                help="Number of epochs to run")
args = vars(ap.parse_args())

IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNELS = 3
INPUT_SHAPE = (IMG_HEIGHT, IMG_WIDTH, 1)
TRAIN_PATH = '/Dataset/Train/'

train_ids = next(os.walk(TRAIN_PATH))[2]

# Load and resize the training images
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
print('Loading Images ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
    path = TRAIN_PATH + id_
    try:
        img = imread(path)
        img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
        X_train[n] = img
    except Exception:
        print('image missed: ' + path)
X_train = X_train.astype('float32') / 255.
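
# Optional sanity check (illustrative, not required): rgb2lab inside
# image_a_b_gen below expects RGB floats in [0, 1], which the division
# by 255 above guarantees.
assert X_train.dtype == np.float32 and X_train.max() <= 1.0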

inception = InceptionResNetV2(weights='imagenet', include_top=True)
inception.graph = tf.get_default_graph()

model = MyColor.Colorize()
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()

# Image transformer / augmentation
datagen = ImageDataGenerator(
    shear_range=0.2,
    zoom_range=0.2,
    rotation_range=20,
    horizontal_flip=True)


# Create the Inception-ResNet-v2 embedding for a batch of grayscale RGB images
def create_inception_embedding(grayscaled_rgb):
    def resize_gray(x):
        return resize(x, (299, 299, 3), mode='constant')
    grayscaled_rgb_resized = np.array([resize_gray(x) for x in grayscaled_rgb])
    grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
    with inception.graph.as_default():
        embed = inception.predict(grayscaled_rgb_resized)
    return embed


# Generate training data: L channel + embedding as inputs, a/b channels as target
def image_a_b_gen(dataset=X_train, batch_size=50):
    for batch in datagen.flow(dataset, batch_size=batch_size):
        X_batch = rgb2gray(batch)
        grayscaled_rgb = gray2rgb(X_batch)
        lab_batch = rgb2lab(batch)
        X_batch = lab_batch[:, :, :, 0]
        X_batch = X_batch.reshape(X_batch.shape+(1,))
        Y_batch = lab_batch[:, :, :, 1:] / 128
        yield [X_batch, create_inception_embedding(grayscaled_rgb)], Y_batch


learning_rate_reduction = ReduceLROnPlateau(monitor='loss',
                                            patience=3,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=args["learning_rate"])
filepath = "/Dataset/Art_Colorization_Model.h5"
checkpoint = ModelCheckpoint(filepath,
                             save_best_only=True,
                             monitor='loss',
                             mode='min')

model_callbacks = [learning_rate_reduction, checkpoint]

BATCH_SIZE = 40
with tf.device('/gpu:0'):
    model.fit_generator(image_a_b_gen(X_train, BATCH_SIZE),
                        epochs=args["epochs"],
                        verbose=1,
                        steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
                        callbacks=model_callbacks)

model.save(filepath)
model.save_weights("/Dataset/Art_Colorization_Weights.h5")

--------------------------------------------------------------------------------
/GANS/readme:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Image-Colorization
Coloring black & white images with autoencoders, variational autoencoders, and generative adversarial networks.

Note: This repository currently covers only the autoencoder model. The VAE- and GAN-based implementations will be added soon.

## Image Colorization with Inception-ResNet
Using Inception-ResNet-v2, we train our model to colorize grayscale images.
Colorization is useful in a wide variety of domains, which makes it an important problem in
computer vision. Due to resource constraints, the dataset used here contains only 30,000 images collected from the web.
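
The pipeline works in the Lab color space: the network takes the L (lightness) channel plus a 1000-d Inception-ResNet-v2 embedding of the grayscale image, predicts the a/b color channels, and recombines them with the input lightness. The sketch below mirrors the inference path in `Autoencoder/model.py`; it is a minimal illustration that assumes the `model` and `create_inception_embedding` defined in this repository, and the helper name `colorize_one` is hypothetical:

```
import numpy as np
from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb

def colorize_one(rgb_img, model, create_inception_embedding):
    """Colorize a single 256x256 RGB (uint8) image with a trained model."""
    img = rgb_img.astype(float) / 255.0                      # RGB in [0, 1]
    L = rgb2lab(img)[:, :, 0]                                # lightness channel
    embed = create_inception_embedding(gray2rgb(rgb2gray(img))[np.newaxis])
    ab = model.predict([L[np.newaxis, :, :, np.newaxis], embed])[0] * 128
    lab = np.zeros((256, 256, 3))
    lab[:, :, 0] = L                                         # keep input lightness
    lab[:, :, 1:] = ab                                       # predicted a/b channels
    return lab2rgb(lab)                                      # back to RGB floats
```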

## Prerequisites:
- Linux or Windows 10
- Python 3

## Requirements:
- Keras
- TensorFlow (or tensorflow-gpu)
- NumPy
- Pandas
- scikit-image
- OpenCV

## How to use:
To train, run train.py:
```
python train.py -lr 0.0001 -e 10
```

To colorize the test images with a pre-trained model:
```
python model.py
```

## Results:
![](Results/Autoencoder/img_0.png)
![](Results/Autoencoder/res2.jpg)


## Model Architecture:
- This autoencoder model is based on the research paper listed in the references; we implemented the code for that paper.

**References**:
- Deep Koalarization: Image Colorization using CNNs and Inception-ResNet-v2
- https://arxiv.org/abs/1712.03400

--------------------------------------------------------------------------------
/Results/Autoencoder/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/1.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/2.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/3.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/4.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/5.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/6.jpg

--------------------------------------------------------------------------------
/Results/Autoencoder/img_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/img_0.png

--------------------------------------------------------------------------------
/Results/Autoencoder/img_6 .png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/img_6 .png

--------------------------------------------------------------------------------
/Results/Autoencoder/res2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajaychaudhary7/Image-Colorization/a439c2503d15074e292e5102d3de9dc8b6b528d1/Results/Autoencoder/res2.jpg

--------------------------------------------------------------------------------