├── .gitattributes
├── .gitignore
├── 1) CNN Review
│   ├── fruit
│   │   ├── cnn_fruit_acc.png
│   │   ├── cnn_fruit_anlatım.py
│   │   ├── cnn_fruit_hist.json
│   │   └── cnn_fruit_loss.png
│   └── mnist
│       ├── cnn_mnist.py
│       ├── cnn_mnist_acc.png
│       ├── cnn_mnist_hist.json
│       └── cnn_mnist_loss.png
├── 2) Deep Residual Network
│   ├── CNN_DRN.pdf
│   ├── cnn_anlatim.py
│   └── drn_anlatim.py
├── 3) Transfer Learning
│   ├── cfar10_vgg19
│   │   ├── transferLearning_vgg19_cfar10.py
│   │   ├── transfer_learning_vgg19_cfar10.json
│   │   ├── transfer_learning_vgg19_cfar10_acc.png
│   │   └── transfer_learning_vgg19_cfar10_loss.png
│   └── fruit_vgg16
│       ├── transfer_learning_fruit_hist.json
│       ├── transfer_learning_vgg16_fruit_anlatım.py
│       ├── vgg16_accuracy.png
│       └── vgg16_loss.png
├── 4) Autoencoders
│   ├── autoencoder_fashion_mnist_loss.png
│   ├── autoencoder_result.png
│   ├── autoencoders_FashionMNIST.py
│   └── autoencoders_hist.json
├── 5) GANs
│   ├── Figure_1.png
│   ├── Figure_2.png
│   ├── Figure_3.png
│   └── gans_.py
└── Advance Deep Learning.pptx
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 | 
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .nox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | db.sqlite3 59 | 60 | # Flask stuff: 61 | instance/ 62 | .webassets-cache 63 | 64 | # Scrapy stuff: 65 | .scrapy 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | # Jupyter Notebook 74 | .ipynb_checkpoints 75 | 76 | # IPython 77 | profile_default/ 78 | ipython_config.py 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # mypy 109 | .mypy_cache/ 110 | .dmypy.json 111 | dmypy.json 112 | 113 | # Pyre type checker 114 | .pyre/ 115 | -------------------------------------------------------------------------------- /1) CNN Review/fruit/cnn_fruit_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/1) CNN Review/fruit/cnn_fruit_acc.png -------------------------------------------------------------------------------- /1) CNN Review/fruit/cnn_fruit_anlatım.py: -------------------------------------------------------------------------------- 1 | # libraries 2 | from keras.models import Sequential 3 | from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense 4 | from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img 5 | import matplotlib.pyplot as plt 6 | from glob import glob 7 | 8 | train_path = "fruits-360/Training/" 9 | test_path = "fruits-360/Test/" 10 | 11 | img = load_img(train_path + "Apple Braeburn/0_100.jpg") 12 | plt.imshow(img) 13 | plt.axis("off") 14 | plt.show() 15 | 16 | x = img_to_array(img) 17 | print(x.shape) 18 | 19 | className = glob(train_path + '/*' ) 20 | numberOfClass = len(className) 21 | print("NumberOfClass: ",numberOfClass) 22 | 23 | #%% CNN Model 24 | model = Sequential() 25 | model.add(Conv2D(32,(3,3),input_shape = x.shape)) 26 | model.add(Activation("relu")) 27 | model.add(MaxPooling2D()) 28 | 29 | model.add(Conv2D(32,(3,3))) 30 | model.add(Activation("relu")) 31 | model.add(MaxPooling2D()) 32 | 33 | model.add(Conv2D(64,(3,3))) 34 | model.add(Activation("relu")) 35 | model.add(MaxPooling2D()) 36 | 37 | model.add(Flatten()) 38 | model.add(Dense(1024)) 39 | model.add(Activation("relu")) 40 | model.add(Dropout(0.5)) 41 | model.add(Dense(numberOfClass)) # output 42 | model.add(Activation("softmax")) 43 | 44 | model.compile(loss = "categorical_crossentropy", 45 | optimizer = "rmsprop", 46 | metrics = ["accuracy"]) 47 | 48 | batch_size = 32 49 | 50 | #%% Data Generation - Train - Test 51 | train_datagen = ImageDataGenerator(rescale= 1./255, 52 | shear_range = 0.3, 53 | horizontal_flip=True, 54 | zoom_range = 0.3) 55 | 56 | test_datagen = ImageDataGenerator(rescale= 1./255) 57 | 58 | train_generator = train_datagen.flow_from_directory( 
59 | train_path, 60 | target_size=x.shape[:2], 61 | batch_size = batch_size, 62 | color_mode= "rgb", 63 | class_mode= "categorical") 64 | 65 | test_generator = test_datagen.flow_from_directory( 66 | test_path, 67 | target_size=x.shape[:2], 68 | batch_size = batch_size, 69 | color_mode= "rgb", 70 | class_mode= "categorical") 71 | 72 | hist = model.fit_generator( 73 | generator = train_generator, 74 | steps_per_epoch = 1600 // batch_size, 75 | epochs=100, 76 | validation_data = test_generator, 77 | validation_steps = 800 // batch_size) 78 | 79 | #%% model save 80 | model.save_weights("deneme.h5") 81 | 82 | #%% model evaluation 83 | print(hist.history.keys()) 84 | plt.plot(hist.history["loss"], label = "Train Loss") 85 | plt.plot(hist.history["val_loss"], label = "Validation Loss") 86 | plt.legend() 87 | plt.show() 88 | plt.figure() 89 | plt.plot(hist.history["acc"], label = "Train acc") 90 | plt.plot(hist.history["val_acc"], label = "Validation acc") 91 | plt.legend() 92 | plt.show() 93 | 94 | #%% save history 95 | import json 96 | with open("deneme.json","w") as f: 97 | json.dump(hist.history, f) 98 | 99 | #%% load history 100 | import codecs 101 | with codecs.open("cnn_fruit_hist.json", "r",encoding = "utf-8") as f: 102 | h = json.loads(f.read()) 103 | plt.plot(h["loss"], label = "Train Loss") 104 | plt.plot(h["val_loss"], label = "Validation Loss") 105 | plt.legend() 106 | plt.show() 107 | plt.figure() 108 | plt.plot(h["acc"], label = "Train acc") 109 | plt.plot(h["val_acc"], label = "Validation acc") 110 | plt.legend() 111 | plt.show() 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | -------------------------------------------------------------------------------- /1) CNN Review/fruit/cnn_fruit_hist.json: -------------------------------------------------------------------------------- 1 | {"val_loss": [3.7828673934936523, 2.830067319869995, 1.941979079246521, 1.3527477741241456, 0.898037543296814, 0.8079891896247864, 0.7916615748405457, 0.5038800704479217, 0.6403790581226348, 0.4695431685447693, 0.39719865918159486, 0.5403569304943084, 0.5720959496498108, 0.3291285020112991, 0.2297844684123993, 0.3021917423605919, 0.3168895775079727, 0.19027065321803094, 0.2532686027884483, 0.20705562680959702, 0.22148142602742876, 0.14230416409671306, 0.35564872086048127, 0.2382924537360668, 0.32132767885923386, 0.14943477667868138, 0.12058869205415249, 0.1335323731601238, 0.1416072715818882, 0.09804250054061413, 0.2781789118051529, 0.28275610573589804, 0.18073273688554764, 0.20158658638596536, 0.13997661836445333, 0.09964026518166065, 0.12439317828975618, 0.16328035255894066, 0.13258844073861836, 0.15468545965850353, 0.24457417972385884, 0.10243735816161044, 0.07369299702346326, 0.11435789695009589, 0.07989154034294188, 0.12224260807037353, 0.2529451330006123, 0.21092954337596892, 0.1706612992659211, 0.12674777051433922, 0.06364587476477027, 0.11244662242010235, 0.18005702324211598, 0.15707935094833375, 0.14458620401099323, 0.089594431349542, 0.06953159496188163, 0.674744873009622, 0.1200331768207252, 0.0524421969638206, 0.05115071479929611, 0.36798069409105594, 0.03145641772076488, 0.14994407329708337, 0.11932969559915364, 0.15335341319441795, 0.04149526544846594, 0.05807728186831809, 0.06088544747792184, 0.07139332332182675, 0.0625040021026507, 0.042036964282160624, 0.060076724365353586, 0.08951836733613164, 0.0658582309866324, 0.08818725784309209, 0.10484588572755456, 
0.06448203517589718, 0.09862718860269525, 0.14093998269177974, 0.06716217202949337, 0.5279600621759891, 0.07232448260358898, 0.07218029372626916, 0.046950384224473965, 0.17332372000440954, 0.09695710237370804, 0.06857708707553684, 0.1096513191901613, 0.03885058881482109, 0.13839924602769316, 0.06812051651766524, 0.11003472164789856, 0.11424933394417167, 0.05662335770299251, 0.12824114846996962, 0.037746845725923774, 0.08849344835281954, 0.12121636484029295, 0.0633680422234147], "val_acc": [0.11, 0.305, 0.4775, 0.625, 0.73125, 0.765, 0.74125, 0.82875, 0.79625, 0.84625, 0.8775, 0.82625, 0.8075, 0.885, 0.92625, 0.9075, 0.88875, 0.945, 0.9175, 0.9275, 0.9379042690815006, 0.94375, 0.8775, 0.93125, 0.8975, 0.9425, 0.96125, 0.9675, 0.94875, 0.96875, 0.905, 0.89875, 0.94125, 0.9375, 0.95125, 0.97, 0.95, 0.94625, 0.9625, 0.94125, 0.94, 0.9521345408274317, 0.97375, 0.9575, 0.97625, 0.95, 0.9175, 0.92875, 0.935, 0.97875, 0.98, 0.9575, 0.9325, 0.93875, 0.96125, 0.9625, 0.9725, 0.8725, 0.97, 0.9875, 0.97875, 0.908150064683053, 0.9875, 0.95875, 0.95875, 0.9575, 0.99125, 0.98125, 0.98, 0.97375, 0.98125, 0.98875, 0.97875, 0.97125, 0.97375, 0.9675, 0.965, 0.97875, 0.9775, 0.96875, 0.98, 0.86625, 0.9650711513583441, 0.97125, 0.9875, 0.945, 0.96625, 0.98625, 0.97, 0.98375, 0.96625, 0.98875, 0.97375, 0.97375, 0.98375, 0.96375, 0.98625, 0.98625, 0.975, 0.9825], "loss": [4.373511033058167, 3.523947548866272, 2.7437290000915526, 2.2504486656188964, 1.8288328075408935, 1.5054595232009889, 1.2628457009792329, 1.164657962322235, 0.9086183226108551, 0.8599660986661911, 0.7759626024961471, 0.6828628140687942, 0.7039060515165328, 0.5393660950660706, 0.5484953501820564, 0.4907473999261856, 0.5021174994111061, 0.41482330948114393, 0.3876559276878834, 0.4376536241173744, 0.394152866601944, 0.3468692748248577, 0.3237726443260908, 0.3379783045500517, 0.3495334392040968, 0.2918773806467652, 0.31027585826814175, 0.2856320247799158, 0.27776105411350727, 0.28935639768838883, 0.2746781243732436, 0.27137677170336244, 0.27930697187781334, 0.2208851984888315, 0.20958189746364952, 0.23517126992344856, 0.20914970837533475, 0.22323588823899626, 0.17044791417196392, 0.218344993442297, 0.20663137243594976, 0.15712751243263484, 0.22375287855044007, 0.20923717573285103, 0.18545481984503567, 0.1535607006214559, 0.2057445796020329, 0.1762756627704948, 0.1754169911891222, 0.18945513175800444, 0.17190624948590993, 0.15076971042901277, 0.12666484860237687, 0.1615709424391389, 0.13801486240699887, 0.11947452697902917, 0.1455083518382162, 0.1438488830672577, 0.13431545921601354, 0.14578889971133321, 0.15066021306440233, 0.14117282179828566, 0.15585356794297694, 0.14744831504533068, 0.10234634860418737, 0.18703707697801292, 0.1651340716984123, 0.09737541435752065, 0.14738180029904469, 0.14747105601010843, 0.12269209465011954, 0.1287458043883089, 0.13881998261320405, 0.11391135648329509, 0.1298411136341747, 0.12027814875822514, 0.09145401814952493, 0.11198649274650961, 0.11387528217863291, 0.12432203976903111, 0.1494224447896704, 0.1017628982939641, 0.09450711626326665, 0.118243357129395, 0.11282241088338196, 0.14076472945511342, 0.08763410389743513, 0.14761159070767463, 0.09828108060872182, 0.12231326757231727, 0.09293715218664148, 0.1293115272005393, 0.10492944307858125, 0.12276715873973444, 0.09283890256425366, 0.10978195942007005, 0.11642159299088234, 0.0925765351334121, 0.10549348525382811, 0.13669762818644812], "acc": [0.045625, 0.12875, 0.260625, 0.349375, 0.45, 0.53125, 0.60125, 0.630625, 0.700625, 0.725, 0.75625, 0.78, 0.77625, 0.820625, 
0.821875, 0.834375, 0.8325, 0.85625, 0.875625, 0.865625, 0.883125, 0.890625, 0.896875, 0.8925, 0.888125, 0.90125, 0.896875, 0.91125, 0.91125, 0.915625, 0.9137603043753963, 0.925, 0.910625, 0.925, 0.93375, 0.9275, 0.93125, 0.934375, 0.944375, 0.925625, 0.930625, 0.949375, 0.934375, 0.94, 0.94125, 0.95125, 0.93875, 0.94875, 0.9475, 0.950625, 0.948125, 0.9525, 0.96, 0.949375, 0.954375, 0.96125, 0.955, 0.9575, 0.961875, 0.9525, 0.95375, 0.9632213062777425, 0.95125, 0.9575, 0.9675, 0.946875, 0.95, 0.96625, 0.960625, 0.954375, 0.965, 0.96375, 0.969375, 0.961875, 0.966875, 0.9675, 0.9675, 0.96125, 0.963125, 0.964375, 0.959375, 0.968125, 0.973125, 0.9675, 0.96625, 0.966875, 0.970625, 0.958125, 0.969375, 0.965, 0.970625, 0.9644895370957515, 0.975, 0.969375, 0.96875, 0.97, 0.96875, 0.9725, 0.970625, 0.96375]}
--------------------------------------------------------------------------------
/1) CNN Review/fruit/cnn_fruit_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/1) CNN Review/fruit/cnn_fruit_loss.png
--------------------------------------------------------------------------------
/1) CNN Review/mnist/cnn_mnist.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization
3 | from keras.utils import to_categorical
4 | import matplotlib.pyplot as plt
5 | import pandas as pd
6 | import numpy as np
7 | import warnings
8 | warnings.filterwarnings("ignore")
9 | 
10 | # load and preprocess
11 | def load_and_preprocess(data_path):
12 |     data = pd.read_csv(data_path)
13 |     data = data.values  # DataFrame.as_matrix() was removed in pandas 1.0; .values is the drop-in replacement
14 |     np.random.shuffle(data)
15 |     x = data[:,1:].reshape(-1,28,28,1)/255.0
16 |     y = data[:,0].astype(np.int32)
17 |     y = to_categorical(y, num_classes=len(set(y)))
18 | 
19 |     return x,y
20 | 
21 | train_data_path = r"mnist-in-csv\mnist_train.csv"  # raw strings keep the Windows backslashes literal
22 | test_data_path = r"mnist-in-csv\mnist_test.csv"
23 | 
24 | x_train,y_train = load_and_preprocess(train_data_path)
25 | x_test, y_test = load_and_preprocess(test_data_path)
26 | 
27 | # %% visualize
28 | index = 55
29 | vis = x_train.reshape(60000,28,28)
30 | plt.imshow(vis[index,:,:])
31 | plt.legend()
32 | plt.axis("off")
33 | plt.show()
34 | print(np.argmax(y_train[index]))
35 | 
36 | #%% CNN
37 | numberOfClass = y_train.shape[1]
38 | 
39 | model = Sequential()
40 | 
41 | model.add(Conv2D(input_shape = (28,28,1), filters = 16, kernel_size = (3,3)))
42 | model.add(BatchNormalization())
43 | model.add(Activation("relu"))
44 | model.add(MaxPooling2D())
45 | 
46 | model.add(Conv2D(filters = 64, kernel_size = (3,3)))
47 | model.add(BatchNormalization())
48 | model.add(Activation("relu"))
49 | model.add(MaxPooling2D())
50 | 
51 | model.add(Conv2D(filters = 128, kernel_size = (3,3)))
52 | model.add(BatchNormalization())
53 | model.add(Activation("relu"))
54 | model.add(MaxPooling2D())
55 | 
56 | model.add(Flatten())
57 | model.add(Dense(units = 256))
58 | model.add(Activation("relu"))
59 | model.add(Dropout(0.2))
60 | model.add(Dense(units = numberOfClass))
61 | model.add(Activation("softmax"))
62 | 
63 | model.compile(loss = "categorical_crossentropy",
64 |               optimizer = "adam",
65 |               metrics = ["accuracy"])
66 | 
67 | # Train
68 | hist = model.fit(x_train,y_train, validation_data=(x_test,y_test), epochs= 25, batch_size= 4000)
69 | 
70 | #%%
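# (illustrative addition, not in the original script) After training, the
# held-out metrics can be read off directly; model.evaluate returns values in
# the order [loss, accuracy] given to compile():
test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=4000)
print("test loss:", test_loss, "test accuracy:", test_accuracy)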
model.save_weights('cnn_mnist_model.h5') # always save your weights after training or during training 72 | #%% evaluation 73 | print(hist.history.keys()) 74 | plt.plot(hist.history["loss"],label = "Train Loss") 75 | plt.plot(hist.history["val_loss"],label = "Validation Loss") 76 | plt.legend() 77 | plt.show() 78 | plt.figure() 79 | plt.plot(hist.history["acc"],label = "Train Accuracy") 80 | plt.plot(hist.history["val_acc"],label = "Validation Accuracy") 81 | plt.legend() 82 | plt.show() 83 | 84 | #%% save history 85 | import json 86 | with open('cnn_mnist_hist.json', 'w') as f: 87 | json.dump(hist.history, f) 88 | 89 | #%% load history 90 | import codecs 91 | with codecs.open("cnn_mnist_hist.json", 'r', encoding='utf-8') as f: 92 | h = json.loads(f.read()) 93 | 94 | plt.figure() 95 | plt.plot(h["loss"],label = "Train Loss") 96 | plt.plot(h["val_loss"],label = "Validation Loss") 97 | plt.legend() 98 | plt.show() 99 | plt.figure() 100 | plt.plot(h["acc"],label = "Train Accuracy") 101 | plt.plot(h["val_acc"],label = "Validation Accuracy") 102 | plt.legend() 103 | plt.show() 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /1) CNN Review/mnist/cnn_mnist_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/1) CNN Review/mnist/cnn_mnist_acc.png -------------------------------------------------------------------------------- /1) CNN Review/mnist/cnn_mnist_hist.json: -------------------------------------------------------------------------------- 1 | {"val_loss": [0.37443819642066956, 0.2207856297492981, 0.17974503934383393, 0.14607862532138824, 0.15787744224071504, 0.16923997104167937, 0.17517462074756623, 0.14431844651699066, 0.132527194917202, 0.12442253828048706, 0.09384540915489196, 0.08888400793075561, 0.0936183288693428, 0.07282561510801315, 0.08093169331550598, 0.07179485261440277, 0.0695055216550827, 0.06224901378154755, 0.06845839470624923, 0.06503757834434509, 0.05898387655615807, 0.05937715396285057, 0.055677573382854464, 0.06460635513067245, 0.05455688536167145], "val_acc": [0.8904999971389771, 0.9315999984741211, 0.942199993133545, 0.953000009059906, 0.9523000001907349, 0.9473999857902526, 0.9458000063896179, 0.9553999900817871, 0.960099995136261, 0.9614999890327454, 0.9710000038146973, 0.9723000168800354, 0.9708999991416931, 0.976800000667572, 0.9755000114440918, 0.978000009059906, 0.9782999992370606, 0.9812000036239624, 0.9785000085830688, 0.9794000029563904, 0.9828999996185303, 0.9827999949455262, 0.9834000110626221, 0.9799999952316284, 0.9833999872207642], "loss": [1.0776045620441437, 0.2784407466650009, 0.1571031888326009, 0.11143920173247655, 0.09028110404809316, 0.07355750103791554, 0.06292072882254919, 0.0553840604921182, 0.04789880414803823, 0.0418184148768584, 0.03732488031188647, 0.033084701374173166, 0.029455275336901347, 0.026102326065301894, 0.023187400152285893, 0.02028365358710289, 0.018152614248295625, 0.01584945401797692, 0.013527192547917366, 0.012547054328024387, 0.010836684020857017, 0.009715889394283295, 0.008798306373258432, 0.00731104202568531, 0.006850371633966764], "acc": [0.6835500001907349, 0.9199166615804036, 0.9535000006357829, 0.9668999989827474, 0.9728833357493083, 0.977483340104421, 0.981416666507721, 0.9836666584014893, 0.9857499996821085, 0.9879999955495199, 0.9889499942461649, 0.9906500101089477, 0.9916166623433431, 0.9924333294232687, 
0.9938833395640055, 0.9946166753768921, 0.9953166643778483, 0.9963499943415324, 0.9968833327293396, 0.9972499966621399, 0.9978666623433431, 0.9980166594187418, 0.9982333342234294, 0.9988999923070272, 0.9988499959309896]}
--------------------------------------------------------------------------------
/1) CNN Review/mnist/cnn_mnist_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/1) CNN Review/mnist/cnn_mnist_loss.png
--------------------------------------------------------------------------------
/2) Deep Residual Network/CNN_DRN.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/2) Deep Residual Network/CNN_DRN.pdf
--------------------------------------------------------------------------------
/2) Deep Residual Network/cnn_anlatim.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from PIL import Image
5 | import matplotlib.pyplot as plt
6 | import numpy as np
7 | import os
8 | import time
9 | 
10 | #%% device config (runs on CPU by default, uses the GPU when available)
11 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
12 | print("Device: ",device)
13 | 
14 | #%%
15 | def read_images(path, num_img):
16 |     array = np.zeros([num_img, 64*32])
17 |     i = 0
18 |     for img in os.listdir(path):
19 |         img_path = path + "\\" + img
20 |         img = Image.open(img_path, mode = "r")
21 |         data = np.asarray(img, dtype = "uint8")
22 |         data = data.flatten()
23 |         array[i,:] = data
24 |         i += 1
25 |     return array
26 | 
27 | # read train negative
28 | train_negative_path = r"D:\resnet\LSIFIR.tar\LSIFIR\Classification\Train\neg"
29 | num_train_negative_img = 43390
30 | train_negative_array = read_images(train_negative_path, num_train_negative_img)
31 | 
32 | x_train_negative_tensor = torch.from_numpy(train_negative_array)
33 | print("x_train_negative_tensor:", x_train_negative_tensor.size())
34 | 
35 | y_train_negative_tensor = torch.zeros(num_train_negative_img, dtype = torch.long)
36 | print("y_train_negative_tensor:", y_train_negative_tensor.size())
37 | 
38 | # read train positive
39 | train_positive_path = r"D:\resnet\LSIFIR.tar\LSIFIR\Classification\Train\pos"
40 | num_train_positive_img = 10208
41 | train_positive_array = read_images(train_positive_path, num_train_positive_img)
42 | 
43 | x_train_positive_tensor = torch.from_numpy(train_positive_array)
44 | print("x_train_positive_tensor:", x_train_positive_tensor.size())
45 | 
46 | y_train_positive_tensor = torch.ones(num_train_positive_img, dtype = torch.long)
47 | print("y_train_positive_tensor:", y_train_positive_tensor.size())
48 | 
49 | # concat train
50 | x_train = torch.cat((x_train_negative_tensor,x_train_positive_tensor),0)
51 | y_train = torch.cat((y_train_negative_tensor,y_train_positive_tensor),0)
52 | print("x_train: ",x_train.size())
53 | print("y_train: ",y_train.size())
54 | 
55 | # --------------------------------------------------------
56 | # read test negative 22050
57 | test_negative_path = r"D:\resnet\LSIFIR.tar\LSIFIR\Classification\Test\neg"
58 | num_test_negative_img = 22050
59 | test_negative_array = read_images(test_negative_path,num_test_negative_img)
60 | x_test_negative_tensor = torch.from_numpy(test_negative_array[:20855,:])
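# note: the slice [:20855,:] above is presumably chosen so that every batch is
# full -- the loops below reshape with view(batch_size, 1, 64, 32), which fails
# on a partial final batch. With batch_size = 8933 (set further down),
# 20855 + 5944 = 26799 = 3 * 8933 test images, and
# 43390 + 10208 = 53598 = 6 * 8933 training images.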
print("x_test_negative_tensor: ",x_test_negative_tensor.size()) 62 | y_test_negative_tensor = torch.zeros(20855,dtype = torch.long) 63 | print("y_test_negative_tensor: ",y_test_negative_tensor.size()) 64 | 65 | # read test positive 5944 66 | test_positive_path = r"D:\resnet\LSIFIR.tar\LSIFIR\Classification\Test\pos" 67 | num_test_positive_img = 5944 68 | test_positive_array = read_images(test_positive_path,num_test_positive_img) 69 | x_test_positive_tensor = torch.from_numpy(test_positive_array) 70 | print("x_test_positive_tensor: ",x_test_positive_tensor.size()) 71 | y_test_positive_tensor = torch.zeros(num_test_positive_img,dtype = torch.long) 72 | print("y_test_positive_tensor: ",y_test_positive_tensor.size()) 73 | 74 | # concat test 75 | x_test = torch.cat((x_test_negative_tensor, x_test_positive_tensor), 0) 76 | y_test = torch.cat((y_test_negative_tensor, y_test_positive_tensor), 0) 77 | print("x_test: ",x_test.size()) 78 | print("y_test: ",y_test.size()) 79 | 80 | #%% visualize 81 | plt.imshow(x_train[39900,:].reshape(64,32), cmap = "gray") 82 | 83 | #%% CNN 84 | 85 | # Hyperparameter 86 | num_epochs = 5000 87 | num_classes = 2 88 | batch_size = 8933 89 | learning_rate = 0.00001 90 | 91 | class Net(nn.Module): 92 | 93 | def __init__(self): 94 | super(Net,self).__init__() 95 | 96 | self.conv1 = nn.Conv2d(1,10,5) 97 | self.pool = nn.MaxPool2d(2,2) 98 | self.conv2 = nn.Conv2d(10,16,5) 99 | 100 | self.fc1 = nn.Linear(16*13*5,520) 101 | self.fc2 = nn.Linear(520,130) 102 | self.fc3 = nn.Linear(130,num_classes) 103 | 104 | def forward(self, x): 105 | 106 | x = self.pool(F.relu((self.conv1(x)))) 107 | x = self.pool(F.relu(self.conv2(x))) 108 | 109 | x = x.view(-1,16*13*5) 110 | x = F.relu(self.fc1(x)) 111 | x = F.relu(self.fc2(x)) 112 | x = self.fc3(x) 113 | return x 114 | 115 | import torch.utils.data 116 | 117 | train = torch.utils.data.TensorDataset(x_train, y_train) 118 | trainloader = torch.utils.data.DataLoader(train, batch_size = batch_size, shuffle = True ) 119 | 120 | test = torch.utils.data.TensorDataset(x_test, y_test) 121 | testloader = torch.utils.data.DataLoader(test, batch_size = batch_size, shuffle = False ) 122 | 123 | net = Net() 124 | # net = Net().to(device) 125 | 126 | #%% loss and optimizer 127 | criterion = nn.CrossEntropyLoss() 128 | 129 | import torch.optim as optim 130 | optimizer = optim.SGD(net.parameters(), lr = learning_rate, momentum=0.8) 131 | 132 | #%% train a network 133 | start = time.time() 134 | train_acc = [] 135 | test_acc = [] 136 | loss_list = [] 137 | use_gpu = False # True 138 | 139 | for epoch in range(num_epochs): 140 | for i, data in enumerate(trainloader, 0): 141 | 142 | inputs, labels = data 143 | inputs = inputs.view(batch_size, 1, 64, 32) # reshape 144 | inputs = inputs.float() # float 145 | 146 | # use gpu 147 | if use_gpu: 148 | if torch.cuda.is_available(): 149 | inputs, labels = inputs.to(device), labels.to(device) 150 | 151 | # zero gradient 152 | optimizer.zero_grad() 153 | 154 | # forward 155 | outputs = net(inputs) 156 | 157 | # loss 158 | loss = criterion(outputs, labels) 159 | 160 | # back 161 | loss.backward() 162 | 163 | # update weights 164 | optimizer.step() 165 | 166 | # test 167 | correct = 0 168 | total = 0 169 | with torch.no_grad(): 170 | for data in testloader: 171 | images, labels= data 172 | 173 | images = images.view(batch_size,1,64,32) 174 | images = images.float() 175 | 176 | # gpu 177 | if use_gpu: 178 | if torch.cuda.is_available(): 179 | images, labels = images.to(device), labels.to(device) 180 | 181 | outputs = 
net(images) 182 | 183 | _, predicted = torch.max(outputs.data,1) 184 | 185 | total += labels.size(0) 186 | correct += (predicted == labels).sum().item() 187 | 188 | acc1 = 100*correct/total 189 | print("accuracy test: ",acc1) 190 | test_acc.append(acc1) 191 | 192 | # train 193 | correct = 0 194 | total = 0 195 | with torch.no_grad(): 196 | for data in trainloader: 197 | images, labels= data 198 | 199 | images = images.view(batch_size,1,64,32) 200 | images = images.float() 201 | 202 | # gpu 203 | if use_gpu: 204 | if torch.cuda.is_available(): 205 | images, labels = images.to(device), labels.to(device) 206 | 207 | outputs = net(images) 208 | 209 | _, predicted = torch.max(outputs.data,1) 210 | 211 | total += labels.size(0) 212 | correct += (predicted == labels).sum().item() 213 | 214 | acc2 = 100*correct/total 215 | print("accuracy train: ",acc2) 216 | train_acc.append(acc2) 217 | 218 | 219 | print("train is done.") 220 | 221 | 222 | 223 | end = time.time() 224 | process_time = (end - start)/60 225 | print("process time: ",process_time) 226 | 227 | 228 | #%% visualize 229 | fig, ax1 = plt.subplots() 230 | 231 | plt.plot(loss_list,label = "Loss",color = "black") 232 | 233 | ax2 = ax1.twinx() 234 | 235 | ax2.plot(np.array(test_acc)/100,label = "Test Acc",color="green") 236 | ax2.plot(np.array(train_acc)/100,label = "Train Acc",color= "red") 237 | ax1.legend() 238 | ax2.legend() 239 | ax1.set_xlabel('Epoch') 240 | fig.tight_layout() 241 | plt.title("Loss vs Test Accuracy") 242 | plt.show() 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | -------------------------------------------------------------------------------- /2) Deep Residual Network/drn_anlatim.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from PIL import Image 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | import os 7 | import torch.utils.data 8 | #%% Device config 9 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 10 | print("Device: ",device) 11 | 12 | #%% Dataset 13 | def read_images(path, num_img): 14 | array = np.zeros([num_img,64*32]) 15 | i = 0 16 | for img in os.listdir(path): 17 | img_path = path + "\\" + img 18 | img = Image.open(img_path, mode = 'r') 19 | data = np.asarray(img,dtype = "uint8") 20 | data = data.flatten() 21 | array[i,:] = data 22 | i += 1 23 | return array 24 | 25 | # read train negative 43390 26 | train_negative_path = r"LSIFIR.tar\LSIFIR\Classification\Train\neg" 27 | num_train_negative_img = 43390 28 | train_negative_array = read_images(train_negative_path,num_train_negative_img) 29 | x_train_negative_tensor = torch.from_numpy(train_negative_array[:42000,:]) 30 | print("x_train_negative_tensor: ",x_train_negative_tensor.size()) 31 | y_train_negative_tensor = torch.zeros(42000,dtype = torch.long) 32 | print("y_train_negative_tensor: ",y_train_negative_tensor.size()) 33 | 34 | # read train positive 10208 35 | train_positive_path = r"LSIFIR.tar\LSIFIR\Classification\Train\pos" 36 | num_train_positive_img = 10208 37 | train_positive_array = read_images(train_positive_path,num_train_positive_img) 38 | x_train_positive_tensor = torch.from_numpy(train_positive_array[:10000,:]) 39 | print("x_train_positive_tensor: ",x_train_positive_tensor.size()) 40 | y_train_positive_tensor = torch.ones(10000,dtype = torch.long) 41 | 
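# note: the [:42000] / [:10000] truncations above likely serve the same purpose
# as the slices in cnn_anlatim.py: 42000 + 10000 = 52000 = 26 * 2000, so every
# batch delivered by the loaders below is full and view(batch_size, 1, 64, 32)
# never sees a ragged batch. Also note the roughly 4:1 negative/positive class
# imbalance -- plain accuracy can look flattering on such data.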
print("y_train_positive_tensor: ",y_train_positive_tensor.size()) 42 | 43 | # concat train 44 | x_train = torch.cat((x_train_negative_tensor, x_train_positive_tensor), 0) 45 | y_train = torch.cat((y_train_negative_tensor, y_train_positive_tensor), 0) 46 | print("x_train: ",x_train.size()) 47 | print("y_train: ",y_train.size()) 48 | 49 | # read test negative 22050 50 | test_negative_path = r"LSIFIR.tar\LSIFIR\Classification\Test\neg" 51 | num_test_negative_img = 22050 52 | test_negative_array = read_images(test_negative_path,num_test_negative_img) 53 | x_test_negative_tensor = torch.from_numpy(test_negative_array[:18056,:]) 54 | print("x_test_negative_tensor: ",x_test_negative_tensor.size()) 55 | y_test_negative_tensor = torch.zeros(18056,dtype = torch.long) 56 | print("y_test_negative_tensor: ",y_test_negative_tensor.size()) 57 | 58 | # read test positive 5944 59 | test_positive_path = r"LSIFIR.tar\LSIFIR\Classification\Test\pos" 60 | num_test_positive_img = 5944 61 | test_positive_array = read_images(test_positive_path,num_test_positive_img) 62 | x_test_positive_tensor = torch.from_numpy(test_positive_array) 63 | print("x_test_positive_tensor: ",x_test_positive_tensor.size()) 64 | y_test_positive_tensor = torch.zeros(num_test_positive_img,dtype = torch.long) 65 | print("y_test_positive_tensor: ",y_test_positive_tensor.size()) 66 | 67 | # concat test 68 | x_test = torch.cat((x_test_negative_tensor, x_test_positive_tensor), 0) 69 | y_test = torch.cat((y_test_negative_tensor, y_test_positive_tensor), 0) 70 | print("x_test: ",x_test.size()) 71 | print("y_test: ",y_test.size()) 72 | 73 | 74 | #%% visualize 75 | plt.imshow(x_train[45001,:].reshape(64,32), cmap='gray') # 45002 ve 1001 76 | 77 | # %% 78 | num_classes = 2 79 | # Hyper parameters 80 | num_epochs = 100 81 | batch_size = 2000 82 | learning_rate = 0.0001 83 | 84 | train = torch.utils.data.TensorDataset(x_train,y_train) 85 | trainloader = torch.utils.data.DataLoader(train, batch_size = batch_size, shuffle = True) 86 | 87 | test = torch.utils.data.TensorDataset(x_test,y_test) 88 | testloader = torch.utils.data.DataLoader(test, batch_size = batch_size, shuffle = False) 89 | 90 | 91 | # %% 92 | 93 | def conv3x3(in_planes, out_planes, stride = 1): 94 | return nn.Conv2d(in_planes, out_planes, kernel_size = 3, stride = stride, padding = 1, bias = False) 95 | 96 | def conv1x1(in_planes, out_planes, stride = 1): 97 | return nn.Conv2d(in_planes, out_planes, kernel_size = 1, stride = stride, bias = False) 98 | 99 | class BasicBlock(nn.Module): 100 | 101 | expansion = 1 102 | 103 | def __init__(self,inplanes, planes, stride = 1, downsample = None): 104 | super(BasicBlock,self).__init__() 105 | self.conv1 = conv3x3(inplanes, planes, stride) 106 | self.bn1 = nn.BatchNorm2d(planes) 107 | self.relu = nn.ReLU(inplace = True) 108 | self.drop = nn.Dropout(0.9) 109 | self.conv2 = conv3x3(planes, planes) 110 | self.bn2 = nn.BatchNorm2d(planes) 111 | self.downsample = downsample 112 | self.stride = stride 113 | 114 | def forward(self, x): 115 | identity = x 116 | 117 | out = self.conv1(x) 118 | out = self.bn1(out) 119 | out = self.relu(out) 120 | out = self.drop(out) 121 | out = self.conv2(out) 122 | out = self.bn2(out) 123 | out = self.drop(out) 124 | 125 | if self.downsample is not None: 126 | identity = self.downsample(x) 127 | 128 | out += identity 129 | out = self.relu(out) 130 | return out 131 | 132 | class ResNet(nn.Module): 133 | 134 | def __init__(self, block, layers, num_classes = num_classes): 135 | super(ResNet,self).__init__() 136 | 
self.inplanes = 64 137 | self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride = 2, padding = 3, bias= False) 138 | self.bn1 = nn.BatchNorm2d(64) 139 | self.relu = nn.ReLU(inplace = True) 140 | self.maxpool = nn.MaxPool2d(kernel_size= 3, stride = 2, padding = 1) 141 | self.layer1 = self._make_layer(block, 64, layers[0], stride = 1) 142 | self.layer2 = self._make_layer(block, 128, layers[1], stride = 2) 143 | self.layer3 = self._make_layer(block, 256, layers[2], stride = 2) 144 | 145 | self.avgpool = nn.AdaptiveAvgPool2d((1,1)) 146 | self.fc = nn.Linear(256*block.expansion, num_classes) 147 | 148 | for m in self.modules(): 149 | if isinstance(m,nn.Conv2d): 150 | nn.init.kaiming_normal_(m.weight, mode = "fan_out", nonlinearity = "relu") 151 | elif isinstance(m, nn.BatchNorm2d): 152 | nn.init.constant_(m.weight,1) 153 | nn.init.constant_(m.bias,0) 154 | 155 | def _make_layer(self, block, planes, blocks, stride = 1): 156 | downsample = None 157 | if stride != 1 or self.inplanes != planes*block.expansion: 158 | downsample = nn.Sequential( 159 | conv1x1(self.inplanes, planes*block.expansion, stride), 160 | nn.BatchNorm2d(planes*block.expansion)) 161 | layers = [] 162 | layers.append(block(self.inplanes, planes, stride, downsample)) 163 | self.inplanes = planes*block.expansion 164 | for _ in range(1,blocks): 165 | layers.append(block(self.inplanes, planes)) 166 | 167 | return nn.Sequential(*layers) 168 | 169 | 170 | def forward(self,x): 171 | 172 | x = self.conv1(x) 173 | x = self.bn1(x) 174 | x = self.relu(x) 175 | x = self.maxpool(x) 176 | x = self.layer1(x) 177 | x = self.layer2(x) 178 | x = self.layer3(x) 179 | x = self.avgpool(x) 180 | x = x.view(x.size(0),-1) 181 | x = self.fc(x) 182 | 183 | return x 184 | 185 | model = ResNet(BasicBlock, [2,2,2]) 186 | 187 | # model = ResNet(BasicBlock, [2,2,2]).to(device) 188 | 189 | 190 | #%% 191 | 192 | criterion = nn.CrossEntropyLoss() 193 | optimizer = torch.optim.Adam(model.parameters(),lr = learning_rate) 194 | 195 | 196 | #%% train 197 | 198 | loss_list = [] 199 | train_acc = [] 200 | test_acc = [] 201 | use_gpu = False 202 | 203 | total_step = len(trainloader) 204 | 205 | for epoch in range(num_epochs): 206 | for i, (images, labels) in enumerate(trainloader): 207 | 208 | images = images.view(batch_size,1,64,32) 209 | images = images.float() 210 | 211 | # gpu 212 | if use_gpu: 213 | if torch.cuda.is_available(): 214 | images, labels = images.to(device), labels.to(device) 215 | 216 | outputs = model(images) 217 | 218 | loss = criterion(outputs, labels) 219 | 220 | # backward and optimization 221 | optimizer.zero_grad() 222 | loss.backward() 223 | optimizer.step() 224 | 225 | if i % 2 == 0: 226 | print("epoch: {} {}/{}".format(epoch,i,total_step)) 227 | 228 | # train 229 | correct = 0 230 | total = 0 231 | with torch.no_grad(): 232 | for data in trainloader: 233 | images, labels = data 234 | images = images.view(batch_size,1,64,32) 235 | images = images.float() 236 | 237 | # gpu 238 | if use_gpu: 239 | if torch.cuda.is_available(): 240 | images, labels = images.to(device), labels.to(device) 241 | 242 | outputs = model(images) 243 | _, predicted = torch.max(outputs.data,1) 244 | total += labels.size(0) 245 | correct += (predicted == labels).sum().item() 246 | print("Accuracy train %d %%"%(100*correct/total)) 247 | train_acc.append(100*correct/total) 248 | 249 | # test 250 | correct = 0 251 | total = 0 252 | with torch.no_grad(): 253 | for data in testloader: 254 | images, labels = data 255 | images = images.view(batch_size,1,64,32) 256 | images = 
images.float()
257 | 
258 |             # gpu
259 |             if use_gpu:
260 |                 if torch.cuda.is_available():
261 |                     images, labels = images.to(device), labels.to(device)
262 | 
263 |             outputs = model(images)
264 |             _, predicted = torch.max(outputs.data,1)
265 |             total += labels.size(0)
266 |             correct += (predicted == labels).sum().item()
267 |     print("Accuracy test %d %%"%(100*correct/total))
268 |     test_acc.append(100*correct/total)  # test-set accuracy belongs in test_acc
269 | 
270 |     loss_list.append(loss.item())
271 | 
272 | #%% visualize
273 | 
274 | fig, ax1 = plt.subplots()
275 | plt.plot(loss_list,label = "Loss",color = "black")
276 | ax2 = ax1.twinx()
277 | ax2.plot(np.array(test_acc)/100,label = "Test Acc",color="green")
278 | ax2.plot(np.array(train_acc)/100,label = "Train Acc",color= "red")
279 | ax1.legend()
280 | ax2.legend()
281 | ax1.set_xlabel('Epoch')
282 | fig.tight_layout()
283 | plt.title("Loss vs Test Accuracy")
284 | plt.show()
285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 |
--------------------------------------------------------------------------------
/3) Transfer Learning/cfar10_vgg19/transferLearning_vgg19_cfar10.py:
--------------------------------------------------------------------------------
1 | from keras.applications.vgg19 import VGG19
2 | from keras.models import Sequential
3 | from keras.utils import to_categorical
4 | from keras.layers import Dense, Flatten
5 | from keras.datasets import cifar10
6 | import matplotlib.pyplot as plt
7 | import cv2
8 | import numpy as np
9 | 
10 | #%%
11 | (x_train, y_train),(x_test, y_test) = cifar10.load_data()
12 | print("x_train shape",x_train.shape)
13 | print("train sample:",x_train.shape[0])
14 | 
15 | numberOfClass = 10
16 | 
17 | y_train = to_categorical(y_train, numberOfClass)
18 | y_test = to_categorical(y_test, numberOfClass)
19 | 
20 | input_shape = x_train.shape[1:]
21 | 
22 | #%% visualize
23 | plt.imshow(x_train[5511].astype(np.uint8))
24 | plt.axis("off")
25 | plt.show()
26 | 
27 | # %% increase dimension
28 | def resize_img(img):
29 |     numberOfImage = img.shape[0]
30 |     new_array = np.zeros((numberOfImage, 48,48,3))
31 |     for i in range(numberOfImage):
32 |         new_array[i] = cv2.resize(img[i,:,:,:],(48,48))
33 |     return new_array
34 | 
35 | x_train = resize_img(x_train)
36 | x_test = resize_img(x_test)
37 | print("increased dim x_train: ",x_train.shape)
38 | 
39 | plt.figure()
40 | plt.imshow(x_train[5511].astype(np.uint8))
41 | plt.axis("off")
42 | plt.show()
43 | 
44 | #%% vgg19
45 | 
46 | vgg = VGG19(include_top = False, weights = "imagenet", input_shape = (48,48,3))
47 | 
48 | print(vgg.summary())
49 | 
50 | vgg_layer_list = vgg.layers
51 | print(vgg_layer_list)
52 | 
53 | model = Sequential()
54 | for layer in vgg_layer_list:
55 |     model.add(layer)
56 | 
57 | print(model.summary())
58 | 
59 | for layer in model.layers:
60 |     layer.trainable = False
61 | 
62 | # fully connected layers
63 | model.add(Flatten())
64 | model.add(Dense(128))
65 | model.add(Dense(numberOfClass, activation= "softmax"))
66 | 
67 | print(model.summary())
68 | 
69 | 
70 | model.compile(loss = "categorical_crossentropy",
71 |               optimizer = "rmsprop",
72 |               metrics = ["accuracy"])
73 | 
74 | hist = model.fit(x_train, y_train, validation_split = 0.2, epochs = 5, batch_size = 1000)
75 | 
76 | #%% model save
77 | model.save_weights("example.h5")
78 | 
79 | #%%
80 | plt.plot(hist.history["loss"], label = "train loss")
81 | plt.plot(hist.history["val_loss"], label = "val loss")
82 | plt.legend()
83 | plt.show()
84 | 
85 | plt.figure()
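# note: "acc"/"val_acc" are the history keys written by Keras < 2.3; newer
# Keras names them "accuracy"/"val_accuracy". If this script is run on a newer
# version, a tolerant lookup such as
#     acc_key = "acc" if "acc" in hist.history else "accuracy"
# avoids a KeyError.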
plt.plot(hist.history["acc"], label = "train acc") 87 | plt.plot(hist.history["val_acc"], label = "val acc") 88 | plt.legend() 89 | plt.show() 90 | 91 | #%% load 92 | import json, codecs 93 | with codecs.open("transfer_learning_vgg19_cfar10.json","r",encoding = "utf-8") as f: 94 | n = json.loads(f.read()) 95 | 96 | plt.plot(n["acc"], label = "train acc") 97 | plt.plot(n["val_acc"], label = "val acc") 98 | plt.legend() 99 | plt.show() 100 | 101 | 102 | #%% save 103 | with open('transfer_learning_vgg19_cfar10.json', 'w') as f: 104 | json.dump(hist.history, f) 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | -------------------------------------------------------------------------------- /3) Transfer Learning/cfar10_vgg19/transfer_learning_vgg19_cfar10.json: -------------------------------------------------------------------------------- 1 | {"val_loss": [4.939669799804688, 4.349473571777343, 3.985617995262146, 4.033215808868408, 3.736153268814087], "val_acc": [0.526199996471405, 0.5359000027179718, 0.5454999923706054, 0.5455999970436096, 0.5272999942302704], "loss": [6.903065609931946, 4.631463956832886, 3.914230924844742, 3.511655879020691, 3.3830894410610197], "acc": [0.4298749979585409, 0.5448749989271164, 0.563475002348423, 0.5671000018715858, 0.5582749947905541]} -------------------------------------------------------------------------------- /3) Transfer Learning/cfar10_vgg19/transfer_learning_vgg19_cfar10_acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/3) Transfer Learning/cfar10_vgg19/transfer_learning_vgg19_cfar10_acc.png -------------------------------------------------------------------------------- /3) Transfer Learning/cfar10_vgg19/transfer_learning_vgg19_cfar10_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/3) Transfer Learning/cfar10_vgg19/transfer_learning_vgg19_cfar10_loss.png -------------------------------------------------------------------------------- /3) Transfer Learning/fruit_vgg16/transfer_learning_fruit_hist.json: -------------------------------------------------------------------------------- 1 | {"val_loss": [1.8168636560440063, 1.3159908509254457, 0.9140003436803817, 1.34772301197052, 0.9027495175600052, 0.6429956570267678, 0.8772050695121288, 0.7249424248933792, 0.9851230728626251, 0.7767874189466238, 0.7426828277483583, 0.989865493029356, 0.722635422796011, 0.7258544232696295, 0.5485492765903472, 0.4903283538261894, 0.6581058548018336, 0.5763356258533895, 0.5484539793804288, 0.5581577056250535, 0.6186219661822708, 0.41206534934091904, 0.46490364662546196, 0.594343911694159, 0.6180022718009422], "val_acc": [0.6725, 0.805, 0.82375, 0.845, 0.9025, 0.8975, 0.89625, 0.905, 0.885, 0.92375, 0.92375, 0.9, 0.92125, 0.935, 0.93125, 0.94125, 0.93, 0.9475, 0.9425, 0.94375, 0.9417852522639069, 0.95375, 0.96125, 0.945, 0.94], "loss": [4.341708356142044, 1.2780172103643417, 0.9341228514909744, 0.846471039801836, 0.6757170380651951, 0.7653973357472569, 0.7585247777216136, 0.6729798558354377, 0.7053993166889996, 0.6915440997999395, 0.6166774104113574, 0.6251583324093372, 0.7974036473870365, 0.7297086812008637, 
0.558818646928994, 0.46388216651714176, 0.40846112178245675, 0.6206256602622671, 0.5341988766631403, 0.4590899243945296, 0.44003716348030136, 0.4425451439609969, 0.47574402926583387, 0.34723615419031373, 0.48007544795987994], "acc": [0.43625, 0.803125, 0.865625, 0.8975, 0.926875, 0.926875, 0.9325, 0.946875, 0.944375, 0.945, 0.951875, 0.954375, 0.94375, 0.9475, 0.9575, 0.966875, 0.97, 0.95875, 0.959375, 0.968125, 0.96875, 0.96875, 0.9675, 0.976875, 0.9675]} -------------------------------------------------------------------------------- /3) Transfer Learning/fruit_vgg16/transfer_learning_vgg16_fruit_anlatım.py: -------------------------------------------------------------------------------- 1 | from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img 2 | from keras.models import Sequential 3 | from keras.layers import Dense 4 | from keras.applications.vgg16 import VGG16 5 | import matplotlib.pyplot as plt 6 | from glob import glob 7 | 8 | train_path = "fruits-360/Training/" 9 | test_path = "fruits-360/Test/" 10 | 11 | img = load_img(train_path + "Avocado/0_100.jpg") 12 | plt.imshow(img) 13 | plt.axis("off") 14 | plt.show() 15 | 16 | x = img_to_array(img) 17 | print(x.shape) 18 | 19 | numberOfClass = len(glob(train_path+"/*")) 20 | 21 | vgg = VGG16() 22 | 23 | print(vgg.summary()) 24 | print(type(vgg)) 25 | 26 | vgg_layer_list = vgg.layers 27 | print(vgg_layer_list) 28 | 29 | model = Sequential() 30 | for i in range(len(vgg_layer_list)-1): 31 | model.add(vgg_layer_list[i]) 32 | 33 | print(model.summary()) 34 | 35 | for layers in model.layers: 36 | layers.trainable = False 37 | 38 | model.add(Dense(numberOfClass, activation="softmax")) 39 | 40 | print(model.summary()) 41 | 42 | model.compile(loss = "categorical_crossentropy", 43 | optimizer = "rmsprop", 44 | metrics = ["accuracy"]) 45 | 46 | # train 47 | train_data = ImageDataGenerator().flow_from_directory(train_path,target_size = (224,224)) 48 | test_data = ImageDataGenerator().flow_from_directory(test_path,target_size = (224,224)) 49 | 50 | batch_size = 32 51 | 52 | hist = model.fit_generator(train_data, 53 | steps_per_epoch=1600//batch_size, 54 | epochs= 25, 55 | validation_data=test_data, 56 | validation_steps= 800//batch_size) 57 | 58 | #%% 59 | model.save_weights("deneme.h5") 60 | 61 | #%% evaluation 62 | print(hist.history.keys()) 63 | plt.plot(hist.history["loss"],label = "training loss") 64 | plt.plot(hist.history["val_loss"],label = "validation loss") 65 | plt.legend() 66 | plt.show() 67 | plt.figure() 68 | plt.plot(hist.history["acc"],label = "training acc") 69 | plt.plot(hist.history["val_acc"],label = "validation acc") 70 | plt.legend() 71 | plt.show() 72 | 73 | #%% save history 74 | import json, codecs 75 | with open("deneme.json","w") as f: 76 | json.dump(hist.history,f) 77 | 78 | #%% load history 79 | with codecs.open("transfer_learning_fruit_hist.json","r",encoding = "utf-8") as f: 80 | n = json.loads(f.read()) 81 | 82 | plt.plot(n["loss"],label = "training loss") 83 | plt.plot(n["val_loss"],label = "validation loss") 84 | plt.legend() 85 | plt.show() 86 | plt.figure() 87 | plt.plot(n["acc"],label = "training acc") 88 | plt.plot(n["val_acc"],label = "validation acc") 89 | plt.legend() 90 | plt.show() 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /3) Transfer Learning/fruit_vgg16/vgg16_accuracy.png: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/3) Transfer Learning/fruit_vgg16/vgg16_accuracy.png
--------------------------------------------------------------------------------
/3) Transfer Learning/fruit_vgg16/vgg16_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/3) Transfer Learning/fruit_vgg16/vgg16_loss.png
--------------------------------------------------------------------------------
/4) Autoencoders/autoencoder_fashion_mnist_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/4) Autoencoders/autoencoder_fashion_mnist_loss.png
--------------------------------------------------------------------------------
/4) Autoencoders/autoencoder_result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/4) Autoencoders/autoencoder_result.png
--------------------------------------------------------------------------------
/4) Autoencoders/autoencoders_FashionMNIST.py:
--------------------------------------------------------------------------------
1 | # https://blog.keras.io/building-autoencoders-in-keras.html
2 | from keras.models import Model
3 | from keras.layers import Input, Dense
4 | from keras.datasets import fashion_mnist
5 | import matplotlib.pyplot as plt
6 | import json, codecs
7 | import warnings
8 | warnings.filterwarnings("ignore")
9 | 
10 | (x_train, _), (x_test, _) = fashion_mnist.load_data()
11 | 
12 | x_train = x_train.astype("float32") / 255.0
13 | x_test = x_test.astype("float32") / 255.0
14 | 
15 | x_train = x_train.reshape((len(x_train), x_train.shape[1:][0]*x_train.shape[1:][1]))
16 | x_test = x_test.reshape((len(x_test), x_test.shape[1:][0]*x_test.shape[1:][1]))
17 | 
18 | plt.imshow(x_train[4000].reshape(28,28))
19 | plt.axis("off")
20 | plt.show()
21 | 
22 | #%%
23 | 
24 | input_img = Input(shape = (784,))
25 | 
26 | encoded = Dense(32, activation="relu")(input_img)
27 | 
28 | encoded = Dense(16, activation="relu")(encoded)
29 | 
30 | decoded = Dense(32, activation="relu")(encoded)
31 | 
32 | decoded = Dense(784, activation="sigmoid")(decoded)
33 | 
34 | autoencoder = Model(input_img,decoded)
35 | 
36 | autoencoder.compile(optimizer="rmsprop",loss="binary_crossentropy")
37 | 
38 | hist = autoencoder.fit(x_train,
39 |                        x_train,
40 |                        epochs=200,
41 |                        batch_size=256,
42 |                        shuffle=True,
43 |                        validation_data = (x_test,x_test))  # validate on held-out data, as in the Keras blog post cited above
44 | 
45 | #%% save model
46 | autoencoder.save_weights("autoencoder_model.h5")
47 | 
48 | #%% evaluation
49 | print(hist.history.keys())
50 | 
51 | plt.plot(hist.history["loss"],label = "Train loss")
52 | plt.plot(hist.history["val_loss"],label = "Val loss")
53 | 
54 | plt.legend()
55 | plt.show()
56 | 
57 | # %% save hist
58 | with open("autoencoders_hist.json","w") as f:
59 |     json.dump(hist.history,f)
60 | 
61 | 
62 | # %% load history
63 | with codecs.open("autoencoders_hist.json","r", encoding="utf-8") as f:
64 |     n = json.loads(f.read())
65 | #%%
66 | print(n.keys())
67 | plt.plot(n["loss"],label = "Train loss")
68 | plt.plot(n["val_loss"],label = "Val loss")
69 | 
70 | #%%
71 | encoder =
Model(input_img,encoded) 72 | encoded_img = encoder.predict(x_test) 73 | 74 | plt.imshow(x_test[1500].reshape(28,28)) 75 | plt.axis("off") 76 | plt.show() 77 | 78 | plt.figure() 79 | plt.imshow(encoded_img[1500].reshape(4,4)) 80 | plt.axis("off") 81 | plt.show() 82 | 83 | decoded_imgs = autoencoder.predict(x_test) 84 | 85 | n = 10 86 | plt.figure(figsize=(20, 4)) 87 | for i in range(n): 88 | # display original 89 | ax = plt.subplot(2, n, i+1) 90 | plt.imshow(x_test[i].reshape(28, 28)) 91 | plt.axis("off") 92 | 93 | # display reconstruction 94 | ax = plt.subplot(2, n, i + n+1) 95 | plt.imshow(decoded_imgs[i].reshape(28, 28)) 96 | plt.axis("off") 97 | plt.show() 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /4) Autoencoders/autoencoders_hist.json: -------------------------------------------------------------------------------- 1 | {"val_loss": [0.35104556244214374, 0.3315355701287587, 0.32315587882995606, 0.31843218232790627, 0.31275415592193606, 0.31010723174413046, 0.3119535127480825, 0.3058676630020142, 0.30524764394760134, 0.30419478810628253, 0.3032996253967285, 0.3036788475195567, 0.3012390224615733, 0.3003977311134338, 0.29999243788719177, 0.29990589195887246, 0.2976150538921356, 0.29788812979062396, 0.2979808695634206, 0.29767934188842776, 0.2966845703601837, 0.2965118071715037, 0.29786051173210143, 0.29629203837712603, 0.29497634857495625, 0.29504072108268736, 0.29501926970481873, 0.29511476821899413, 0.2960813595294952, 0.29431544016202293, 0.2944883994420369, 0.2941987255891164, 0.294267126194636, 0.29397137347857155, 0.29317945551872254, 0.29352189938227335, 0.29310042578379314, 0.2934252440929413, 0.2931195564746857, 0.2931386455059051, 0.29379524914423627, 0.29325362764994306, 0.29257285965283714, 0.2929176423072815, 0.2924707183678945, 0.2921142006556193, 0.292410249265035, 0.29232323184013365, 0.2926126448790232, 0.2922850304444631, 0.29167872071266177, 0.2919350328286489, 0.2918873261610667, 0.2909308761755625, 0.29223345861434935, 0.2911651215394338, 0.29092901953061423, 0.2911173722743988, 0.29089440286954243, 0.2908920912265778, 0.2910698149045308, 0.29112284558614093, 0.2894557463169098, 0.2915674413045247, 0.2905148502508799, 0.29087014711697895, 0.29009828708966573, 0.28981346093813576, 0.2906120039621989, 0.29016547287305194, 0.29023827713330586, 0.29082699723243716, 0.29079870433807375, 0.289779008547465, 0.2903683053811391, 0.28988783882459007, 0.2894431853135427, 0.29105593078931175, 0.28974051140149437, 0.2893448098341624, 0.29007371074358623, 0.28877084107398987, 0.2895082736333211, 0.2900600020726522, 0.28950928713480634, 0.29012569403648375, 0.2894775043487549, 0.2888314267317454, 0.28874885789553323, 0.28869562714894614, 0.2895863983631134, 0.2887211909135183, 0.28919788570404054, 0.2895414846261342, 0.2891392202536265, 0.28947320291201273, 0.2891333858013153, 0.2892201992511749, 0.2897293736298879, 0.2886059762954712, 0.28949923357963564, 0.2890092464605967, 0.2886545252799988, 0.28985222172737124, 0.28813949116071064, 0.2891260205427806, 0.28871346406936643, 0.29075771123568217, 0.28918146783510845, 0.28859489555358886, 0.28918137532869975, 0.28919345525105794, 0.2891382485707601, 0.2895365938663483, 0.2883090797106425, 0.28803715710639954, 0.28789175470670064, 0.2899027242501577, 0.2899403013388316, 0.2884860431512197, 0.287987442557017, 0.28875958414077757, 0.288214759683609, 0.28831324825286864, 0.28890070317586264, 
0.28874722186724344, 0.2889058013280233, 0.2883011415640513, 0.2875629458268483, 0.2883696989059448, 0.2885770081202189, 0.2884081219673157, 0.28783357818921407, 0.2873425280412038, 0.2883300118605296, 0.2882498580932617, 0.28834532710711164, 0.2882757963021596, 0.2890041501204173, 0.2890433860460917, 0.2880326321919759, 0.2878123074054718, 0.2889702167987824, 0.2884920196056366, 0.28809367593129476, 0.28834841663042704, 0.28763330330848697, 0.28787634380658467, 0.2887246868610382, 0.2884905102888743, 0.2881044488588969, 0.28773555680910745, 0.2880965451081594, 0.2879000828584035, 0.28846963877677917, 0.28822086304028827, 0.2883337746302287, 0.2881063288529714, 0.2884898586591085, 0.287928528992335, 0.28765009846687317, 0.28840198826789853, 0.28756276593208313, 0.2878229407628377, 0.2879813578128815, 0.28850641255378723, 0.2883028877735138, 0.28735308264096576, 0.28860155879656474, 0.28774413523674014, 0.2884238718509674, 0.287517919921875, 0.28718329205513, 0.2876647917111715, 0.2878899426619212, 0.2871463088830312, 0.28718072028160097, 0.28812304081916806, 0.2882084362665812, 0.2877818782329559, 0.2885885398387909, 0.287789755821228, 0.2876224332968394, 0.2875818597793579, 0.2879179347197215, 0.288107359568278, 0.2876808299223582, 0.2876656592845917, 0.28807436931928, 0.28778472412427264, 0.28734011295636497, 0.28883622234662376, 0.28754282177289325, 0.28700951495170596, 0.28797704909642535, 0.2876306019306183, 0.2875739332675934, 0.2877576710542043, 0.2870710236390432, 0.2883935274600983], "loss": [0.40631904368400573, 0.335887051598231, 0.3234317870616913, 0.3181187033494314, 0.31443366470336914, 0.3116470457235972, 0.30940552741686506, 0.3075385718186696, 0.30598472323417664, 0.30462591977119446, 0.30342928312619527, 0.3024543677488963, 0.30152256768544516, 0.30073364016215004, 0.3000302422205607, 0.29944243221282957, 0.2989174078623454, 0.2984269819418589, 0.2979898315270742, 0.29757813873291017, 0.29718417185147605, 0.2968438533306122, 0.29649428571065267, 0.2961659527460734, 0.295855171362559, 0.29560512614250184, 0.2952632597446442, 0.2950288572152456, 0.2947860993862152, 0.29458545101483663, 0.2943809573968251, 0.29416246116956074, 0.293989843861262, 0.2938047372659047, 0.29366648518244426, 0.29350395901997883, 0.2933552785396576, 0.29321741711298627, 0.2931029602686564, 0.2929667091369629, 0.2928667401154836, 0.292743456141154, 0.29261898212432863, 0.2925090696175893, 0.29239543449083966, 0.2922853254795075, 0.2921979304154714, 0.2920770096619924, 0.29201931071281434, 0.2918827534198761, 0.2918016941388448, 0.2917001677354177, 0.2915947482585907, 0.29151070764859516, 0.2913984397093455, 0.2913264107545217, 0.2912094365596771, 0.29113267537752785, 0.2910393513361613, 0.29097088980674746, 0.2908411209106445, 0.29075377343495684, 0.2906472281138102, 0.290582027276357, 0.2904905914624532, 0.29038992311159767, 0.2903278832912445, 0.29025029697418214, 0.29018317057291665, 0.29011953156789144, 0.290045019642512, 0.28998843251864115, 0.2899306829452515, 0.2898773930867513, 0.28983139640490213, 0.2897631931145986, 0.28974583503405255, 0.2896775565783183, 0.28961640469233196, 0.28957679435412087, 0.28953888176282244, 0.28950280232429504, 0.289454118569692, 0.289405596335729, 0.28938532923062643, 0.2893377481619517, 0.2892995227495829, 0.2892524670124054, 0.28921574495633445, 0.289208212741216, 0.2891632685820262, 0.2891409017721812, 0.2891092318375905, 0.2890647158145905, 0.28904866388638817, 0.2890300871690114, 0.2889893098990122, 0.2889703429063161, 0.2889191348393758, 
0.2889139458497365, 0.28888235861460365, 0.28885909857749936, 0.28881739996274314, 0.28880496247609455, 0.28879331130981445, 0.28877546865145365, 0.2887439258257548, 0.28871684810320536, 0.2887105759620667, 0.28867638386090594, 0.2886665771325429, 0.2886427855014801, 0.28861621748606364, 0.28860544096628826, 0.2885957125822703, 0.28856059284210206, 0.2885479523976644, 0.2885156926314036, 0.2885105883280436, 0.2884830638408661, 0.2884686575571696, 0.28844710844357807, 0.28846198031107584, 0.28841932536760967, 0.28839864012400307, 0.28836504604021707, 0.28839286573727924, 0.28836548953056335, 0.28834734552701313, 0.28833186755180357, 0.2883061303933461, 0.2883096843242645, 0.2882940903663635, 0.28828299876848856, 0.28824247473080955, 0.2882546569983164, 0.28821807516415915, 0.28821525632540385, 0.2881963453133901, 0.2881745911916097, 0.2881769662221273, 0.2881560717423757, 0.2881592515627543, 0.2881334566116333, 0.2881228663285573, 0.28812047634124754, 0.28811834060351055, 0.28809577120145163, 0.28807875350316364, 0.28804942536354067, 0.28807085258165993, 0.2880355875492096, 0.28803663194974266, 0.28802522298494976, 0.28798514285087584, 0.2880206364949544, 0.2879720485051473, 0.28797439430554705, 0.2879900932153066, 0.28794606773058573, 0.2879394373734792, 0.28793945983250935, 0.2879342004776001, 0.2878960621992747, 0.2879113248984019, 0.28789786562919617, 0.28786836965878804, 0.28787393798828126, 0.287859480714798, 0.2878239347139994, 0.2878308758417765, 0.2878116850694021, 0.28781903726259866, 0.2878033707300822, 0.2877989290396373, 0.2877717144012451, 0.2877805277665456, 0.28775954874356585, 0.28775694365501403, 0.2877379759947459, 0.28774243324597676, 0.28774326872825623, 0.2877170466899872, 0.28773249265352885, 0.287699103864034, 0.2876856326897939, 0.28769730025927226, 0.2876707080682119, 0.2876471073150635, 0.2876641202290853, 0.28765378376642864, 0.2876265848318736, 0.2876308104038239, 0.2876195884545644, 0.28760668390591937, 0.2876108578999837, 0.2875728511333466, 0.28756922165552773, 0.2875768598397573, 0.28756771144866944]}
--------------------------------------------------------------------------------
/5) GANs/Figure_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/5) GANs/Figure_1.png
--------------------------------------------------------------------------------
/5) GANs/Figure_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/5) GANs/Figure_2.png
--------------------------------------------------------------------------------
/5) GANs/Figure_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/5) GANs/Figure_3.png
--------------------------------------------------------------------------------
/5) GANs/gans_.py:
--------------------------------------------------------------------------------
from keras.layers import Dense, Dropout, Input, ReLU
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt

# load MNIST and rescale pixel values
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
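# (editorial note, not in the original script) the (x - 127.5) / 127.5 scaling above
# maps pixel values from [0, 255] to [-1, 1], the output range of the tanh activation
# on the generator's last layer; the reshape below flattens each 28x28 image into a
# 784-dimensional vector, since both networks here are built from Dense layers only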
print(x_train.shape)

x_train = x_train.reshape(x_train.shape[0], x_train.shape[1]*x_train.shape[2])
print(x_train.shape)

#%%
#plt.imshow(x_test[12])

#%% create generator
def create_generator():

    generator = Sequential()
    generator.add(Dense(units = 512, input_dim = 100))  # 100-dim noise vector in
    generator.add(ReLU())

    generator.add(Dense(units = 512))
    generator.add(ReLU())

    generator.add(Dense(units = 1024))
    generator.add(ReLU())

    # tanh keeps the fake pixels in [-1, 1], the same range as the scaled real images
    generator.add(Dense(units = 784, activation = "tanh"))

    generator.compile(loss = "binary_crossentropy",
                      optimizer = Adam(lr = 0.0001, beta_1 = 0.5))
    return generator

g = create_generator()
g.summary()

#%% discriminator

def create_discriminator():
    discriminator = Sequential()
    discriminator.add(Dense(units = 1024, input_dim = 784))
    discriminator.add(ReLU())
    discriminator.add(Dropout(0.4))

    discriminator.add(Dense(units = 512))
    discriminator.add(ReLU())
    discriminator.add(Dropout(0.4))

    discriminator.add(Dense(units = 256))
    discriminator.add(ReLU())

    # single sigmoid unit: estimated probability that the input image is real
    discriminator.add(Dense(units = 1, activation = "sigmoid"))

    discriminator.compile(loss = "binary_crossentropy",
                          optimizer = Adam(lr = 0.0001, beta_1 = 0.5))
    return discriminator

d = create_discriminator()
d.summary()


#%% gans
def create_gan(discriminator, generator):
    # freeze the discriminator inside the combined model, so that
    # gan.train_on_batch updates only the generator's weights
    discriminator.trainable = False
    gan_input = Input(shape = (100,))
    x = generator(gan_input)
    gan_output = discriminator(x)
    gan = Model(inputs = gan_input, outputs = gan_output)
    gan.compile(loss = "binary_crossentropy", optimizer = "adam")
    return gan

gan = create_gan(d, g)
gan.summary()


# %% train

epochs = 50
batch_size = 256
batch_count = x_train.shape[0] // batch_size  # updates per epoch: roughly one pass over the data

for e in range(epochs):
    for _ in range(batch_count):

        # sample noise and let the generator produce a batch of fake images
        noise = np.random.normal(0, 1, [batch_size, 100])
        generated_images = g.predict(noise)

        # draw a random batch of real images
        image_batch = x_train[np.random.randint(low = 0, high = x_train.shape[0], size = batch_size)]

        # discriminator step: real images labelled 1, fakes labelled 0
        x = np.concatenate([image_batch, generated_images])
        y_dis = np.zeros(batch_size*2)
        y_dis[:batch_size] = 1

        d.trainable = True
        d.train_on_batch(x, y_dis)

        # generator step: train through the frozen discriminator,
        # asking it to classify fresh fakes as real (label 1)
        noise = np.random.normal(0, 1, [batch_size, 100])
        y_gen = np.ones(batch_size)

        d.trainable = False
        gan.train_on_batch(noise, y_gen)
    print("epochs: ", e)


#%% save model
g.save_weights('gans_model.h5')  # always save your weights after or during training


#%% visualize
noise = np.random.normal(loc = 0, scale = 1, size = [100, 100])
generated_images = g.predict(noise)
generated_images = generated_images.reshape(100, 28, 28)
plt.imshow(generated_images[66], interpolation = 'nearest')
plt.axis('off')
plt.show()
--------------------------------------------------------------------------------
/Advance Deep Learning.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dataiteam/Advanced-Deep-Learning/d3123095cec5b860c29d50a1ce4f25d33ca0042f/Advance Deep Learning.pptx
--------------------------------------------------------------------------------
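Appendix (editorial, not part of the repository):
--------------------------------------------------------------------------------
The saved training histories above can be replotted without rerunning the models.
A minimal sketch, assuming it runs from the repository root next to
"4) Autoencoders/autoencoders_hist.json"; the "loss" and "val_loss" keys are the
ones visible in that file:

import json
import matplotlib.pyplot as plt

# load the Keras history dict written by the autoencoder script
with open("4) Autoencoders/autoencoders_hist.json") as f:
    hist = json.load(f)

plt.plot(hist["loss"], label="train loss")
plt.plot(hist["val_loss"], label="validation loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

Likewise, the generator weights saved at the end of gans_.py can be reloaded for
sampling without retraining. A sketch under the assumptions that "gans_model.h5" was
produced by the training run above and that create_generator() from gans_.py is in
scope (load_weights requires the identical architecture):

g = create_generator()
g.load_weights("gans_model.h5")

# sample 25 new digits from random noise
noise = np.random.normal(loc=0, scale=1, size=[25, 100])
samples = g.predict(noise).reshape(25, 28, 28)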