├── README.md ├── chapter_02 ├── __init__.py ├── augmentation_demo.py ├── minivggnet_flowers17.py ├── minivggnet_flowers17_data_aug.py └── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── datasets │ ├── SimpleDatasetLoader.py │ ├── SimpleDatasetLoader.pyc │ ├── __init__.py │ ├── __init__.pyc │ └── __pycache__ │ │ ├── SimpleDatasetLoader.cpython-36.pyc │ │ └── __init__.cpython-36.pyc │ ├── nn │ ├── __init__.py │ ├── __init__.pyc │ └── conv │ │ ├── MiniVGGNet.py │ │ ├── MiniVGGNet.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ └── preprocessing │ ├── AspectAwarePreprocessor.py │ ├── AspectAwarePreprocessor.pyc │ ├── ImageMove.py │ ├── ImageMove.pyc │ ├── ImageToArrayPreprocessor.py │ ├── ImageToArrayPreprocessor.pyc │ ├── SimplePreprocessor.py │ ├── __init__.py │ └── __init__.pyc ├── chapter_03 ├── __init__.py ├── extract_features.py ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ ├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── hdf5datasetwriter.cpython-36.pyc │ │ └── hdf5datasetwriter.py │ ├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── __init__.py │ │ │ └── __init__.pyc │ └── preprocessing │ │ ├── AspectAwarePreprocessor.py │ │ ├── AspectAwarePreprocessor.pyc │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── ImageToArrayPreprocessor.py │ │ ├── ImageToArrayPreprocessor.pyc │ │ ├── SimplePreprocessor.py │ │ ├── __init__.py │ │ └── __init__.pyc └── train_model.py ├── chapter_04 ├── __init__.py ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ ├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ └── hdf5datasetwriter.py │ 
├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── __init__.py │ │ │ └── __init__.pyc │ ├── preprocessing │ │ ├── AspectAwarePreprocessor.py │ │ ├── AspectAwarePreprocessor.pyc │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── ImageToArrayPreprocessor.py │ │ ├── ImageToArrayPreprocessor.pyc │ │ ├── SimplePreprocessor.py │ │ ├── __init__.py │ │ └── __init__.pyc │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── ranked.cpython-36.pyc │ │ └── ranked.py └── rank_accuracy.py ├── chapter_05 ├── __init__.py ├── finetune_flowers17.py ├── inspact_model.py └── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ └── __init__.cpython-36.pyc │ ├── datasets │ ├── SimpleDatasetLoader.py │ ├── SimpleDatasetLoader.pyc │ ├── __init__.py │ ├── __init__.pyc │ └── __pycache__ │ │ ├── SimpleDatasetLoader.cpython-36.pyc │ │ └── __init__.cpython-36.pyc │ ├── io │ ├── __init__.py │ └── hdf5datasetwriter.py │ ├── nn │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ └── conv │ │ ├── MiniVGGNet.py │ │ ├── MiniVGGNet.pyc │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── fcheadnet.cpython-36.pyc │ │ ├── fcheadnet.py │ │ ├── lenet.py │ │ └── shallownet.py │ ├── preprocessing │ ├── AspectAwarePreprocessor.py │ ├── AspectAwarePreprocessor.pyc │ ├── ImageMove.py │ ├── ImageMove.pyc │ ├── ImageToArrayPreprocessor.py │ ├── ImageToArrayPreprocessor.pyc │ ├── SimplePreprocessor.py │ ├── __init__.py │ ├── __init__.pyc │ └── __pycache__ │ │ ├── AspectAwarePreprocessor.cpython-36.pyc │ │ ├── ImageToArrayPreprocessor.cpython-36.pyc │ │ └── __init__.cpython-36.pyc │ └── utils │ ├── __init__.py │ └── ranked.py ├── chapter_06 ├── __init__.py ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ 
├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ └── hdf5datasetwriter.py │ ├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── __init__.py │ │ │ ├── __init__.pyc │ │ │ ├── __pycache__ │ │ │ ├── MiniVGGNet.cpython-36.pyc │ │ │ └── __init__.cpython-36.pyc │ │ │ ├── fcheadnet.py │ │ │ ├── lenet.py │ │ │ └── shallownet.py │ ├── preprocessing │ │ ├── AspectAwarePreprocessor.py │ │ ├── AspectAwarePreprocessor.pyc │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── ImageToArrayPreprocessor.py │ │ ├── ImageToArrayPreprocessor.pyc │ │ ├── SimplePreprocessor.py │ │ ├── __init__.py │ │ └── __init__.pyc │ └── utils │ │ ├── __init__.py │ │ └── ranked.py ├── test_ensemble.py └── train_models.py ├── chapter_09 ├── __init__.py └── dogs_vs_cats │ ├── __init__.py │ ├── build_dogs_vs_cats.py │ ├── config │ ├── __pycache__ │ │ └── dogs_vs_cats_config.cpython-36.pyc │ └── dogs_vs_cats_config.py │ └── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ └── __init__.cpython-36.pyc │ ├── datasets │ ├── SimpleDatasetLoader.py │ ├── SimpleDatasetLoader.pyc │ ├── __init__.py │ └── __init__.pyc │ ├── io │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── hdf5datasetwriter.cpython-36.pyc │ └── hdf5datasetwriter.py │ ├── nn │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ └── conv │ │ ├── MiniVGGNet.py │ │ ├── MiniVGGNet.pyc │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ ├── MiniVGGNet.cpython-36.pyc │ │ └── __init__.cpython-36.pyc │ │ ├── fcheadnet.py │ │ ├── lenet.py │ │ └── shallownet.py │ ├── preprocessing │ ├── AspectAwarePreprocessor.py │ ├── AspectAwarePreprocessor.pyc │ ├── ImageMove.py │ ├── ImageMove.pyc │ ├── ImageToArrayPreprocessor.py │ ├── ImageToArrayPreprocessor.pyc │ ├── SimplePreprocessor.py │ ├── 
__init__.py │ ├── __init__.pyc │ └── __pycache__ │ │ ├── AspectAwarePreprocessor.cpython-36.pyc │ │ └── __init__.cpython-36.pyc │ └── utils │ ├── __init__.py │ └── ranked.py ├── chapter_10 ├── datasets │ └── kaggle_dogs_vs_cats │ │ └── train │ │ └── README.md └── dogs_vs_cats │ ├── __init__.py │ ├── config │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── dogs_vs_cats_config.cpython-36.pyc │ └── dogs_vs_cats_config.py │ ├── crop_accuracy.py │ ├── dogs_vs_cats.pickle │ ├── extract_features.py │ ├── output │ └── dogs_vs_cats_mean.json │ ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── callbacks │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── trainingmonitor.cpython-36.pyc │ │ └── trainingmonitor.py │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ ├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── hdf5datasetgenerator.cpython-36.pyc │ │ │ └── hdf5datasetwriter.cpython-36.pyc │ │ ├── hdf5datasetgenerator.py │ │ └── hdf5datasetwriter.py │ ├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── __init__.py │ │ │ ├── __init__.pyc │ │ │ ├── __pycache__ │ │ │ ├── MiniVGGNet.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── alexnet.cpython-36.pyc │ │ │ ├── alexnet.py │ │ │ ├── fcheadnet.py │ │ │ ├── lenet.py │ │ │ └── shallownet.py │ ├── preprocessing │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ ├── AspectAwarePreprocessor.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── croppreprocessor.cpython-36.pyc │ │ │ ├── imagetoarraypreprocessor.cpython-36.pyc │ │ │ ├── meanpreprocessor.cpython-36.pyc │ │ │ ├── patchpreprocessor.cpython-36.pyc │ │ │ └── 
simplespreprocessor.cpython-36.pyc │ │ ├── aspectawarepreprocessor.py │ │ ├── croppreprocessor.py │ │ ├── imagetoarraypreprocessor.py │ │ ├── meanpreprocessor.py │ │ ├── patchpreprocessor.py │ │ └── simplespreprocessor.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── ranked.cpython-36.pyc │ │ └── ranked.py │ ├── train_alexnet.py │ └── train_model.py ├── chapter_11 ├── datasets │ └── README.md └── deepergooglenet │ ├── __init__.py │ ├── build_tiny_imagenet.py │ ├── config │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── dogs_vs_cats_config.cpython-36.pyc │ │ └── tiny_imagenet_config.cpython-36.pyc │ └── tiny_imagenet_config.py │ ├── googlenet_cifar10.py │ ├── output │ ├── checkpoints │ │ └── README.md │ ├── dogs_vs_cats_mean.json │ └── tiny-image-net-200-mean.json │ ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── callbacks │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── epochcheckpoint.cpython-36.pyc │ │ │ └── trainingmonitor.cpython-36.pyc │ │ ├── epochcheckpoint.py │ │ └── trainingmonitor.py │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ ├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── hdf5datasetgenerator.cpython-36.pyc │ │ │ └── hdf5datasetwriter.cpython-36.pyc │ │ ├── hdf5datasetgenerator.py │ │ └── hdf5datasetwriter.py │ ├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── __init__.py │ │ │ ├── __init__.pyc │ │ │ ├── __pycache__ │ │ │ ├── MiniVGGNet.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── alexnet.cpython-36.pyc │ │ │ ├── deepergooglenet.cpython-36.pyc │ │ │ └── minigooglenet.cpython-36.pyc │ │ │ ├── alexnet.py │ │ │ ├── deepergooglenet.py │ 
│ │ ├── fcheadnet.py │ │ │ ├── lenet.py │ │ │ ├── minigooglenet.py │ │ │ └── shallownet.py │ ├── preprocessing │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ ├── AspectAwarePreprocessor.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── croppreprocessor.cpython-36.pyc │ │ │ ├── imagetoarraypreprocessor.cpython-36.pyc │ │ │ ├── meanpreprocessor.cpython-36.pyc │ │ │ ├── patchpreprocessor.cpython-36.pyc │ │ │ └── simplespreprocessor.cpython-36.pyc │ │ ├── aspectawarepreprocessor.py │ │ ├── croppreprocessor.py │ │ ├── imagetoarraypreprocessor.py │ │ ├── meanpreprocessor.py │ │ ├── patchpreprocessor.py │ │ └── simplespreprocessor.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── ranked.cpython-36.pyc │ │ └── ranked.py │ ├── rank_accuracy.py │ └── train.py ├── chapter_12 ├── __init__.py └── resnet_tiny_imagenet │ ├── config │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── dogs_vs_cats_config.cpython-36.pyc │ └── tiny_imagenet_config.py │ ├── output │ └── dogs_vs_cats_mean.json │ ├── pyimagesearch │ ├── __init__.py │ ├── __init__.pyc │ ├── __pycache__ │ │ └── __init__.cpython-36.pyc │ ├── callbacks │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── epochcheckpoint.cpython-36.pyc │ │ │ └── trainingmonitor.cpython-36.pyc │ │ ├── epochcheckpoint.py │ │ └── trainingmonitor.py │ ├── datasets │ │ ├── SimpleDatasetLoader.py │ │ ├── SimpleDatasetLoader.pyc │ │ ├── __init__.py │ │ └── __init__.pyc │ ├── io │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── hdf5datasetgenerator.cpython-36.pyc │ │ │ └── hdf5datasetwriter.cpython-36.pyc │ │ ├── hdf5datasetgenerator.py │ │ └── hdf5datasetwriter.py │ ├── nn │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ └── conv │ │ │ ├── MiniVGGNet.py │ │ │ ├── MiniVGGNet.pyc │ │ │ ├── 
__init__.py │ │ │ ├── __init__.pyc │ │ │ ├── __pycache__ │ │ │ ├── MiniVGGNet.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── alexnet.cpython-36.pyc │ │ │ └── resnet.cpython-36.pyc │ │ │ ├── alexnet.py │ │ │ ├── deepergooglenet.py │ │ │ ├── fcheadnet.py │ │ │ ├── lenet.py │ │ │ ├── minigooglenet.py │ │ │ ├── resnet.py │ │ │ └── shallownet.py │ ├── preprocessing │ │ ├── ImageMove.py │ │ ├── ImageMove.pyc │ │ ├── __init__.py │ │ ├── __init__.pyc │ │ ├── __pycache__ │ │ │ ├── AspectAwarePreprocessor.cpython-36.pyc │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── croppreprocessor.cpython-36.pyc │ │ │ ├── imagetoarraypreprocessor.cpython-36.pyc │ │ │ ├── meanpreprocessor.cpython-36.pyc │ │ │ ├── patchpreprocessor.cpython-36.pyc │ │ │ └── simplespreprocessor.cpython-36.pyc │ │ ├── aspectawarepreprocessor.py │ │ ├── croppreprocessor.py │ │ ├── imagetoarraypreprocessor.py │ │ ├── meanpreprocessor.py │ │ ├── patchpreprocessor.py │ │ └── simplespreprocessor.py │ └── utils │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ └── ranked.cpython-36.pyc │ │ └── ranked.py │ ├── resnet_cifar10.py │ ├── resnet_cifar10_decay.py │ ├── train.py │ └── train_decay.py └── chapter_13 ├── build_imagenet.py ├── config └── imagenet_alexnet_config.py ├── imagenet └── tfrecords │ └── __init__.py └── pyimagesearch ├── __init__.py ├── callbacks ├── __init__.py └── trainingmonitor.py ├── datasets └── __init__.py ├── io └── __init__.py ├── nn ├── __init__.py ├── __init__.pyc └── conv │ ├── __init__.py │ └── __init__.pyc ├── preprocessing └── __init__.py └── utils ├── __init__.py ├── imagenethelper.py └── imagenettfrecord.py /README.md: -------------------------------------------------------------------------------- 1 | # Deep_Learning_For_Computer_Vision_With_Python 2 | 3 | - [深度学习与计算机视觉(PB-13)—ImageNet数据集准备](https://lonepatient.top/2018/07/01/Deep_Learning_For_Computer_Vision_With_Python_PB_13.html) 4 | - 
#encoding:utf-8
# Demo: generate augmented variants of a single input image with Keras'
# ImageDataGenerator and write them to an output directory.
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import numpy as np
import argparse

# Parse command-line arguments.
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True,
                help='path to the input image')
# BUG FIX: the flag was registered as '--ouput' (typo) while the code below
# reads args['output'], which raised a KeyError at runtime.  '--output' also
# matches the example invocation documented at the bottom of this script.
ap.add_argument('-o', '--output', required=True,
                help='path to output directory to store augmentation examples')
ap.add_argument('-p', '--prefix', type=str, default='image',
                help='output filename prefix')
args = vars(ap.parse_args())

# Load the image and convert it to a numpy array.
print('[INFO] loading example image...')
image = load_img(args['image'])
image = img_to_array(image)
# Add a leading batch dimension (shape becomes (1, H, W, C)).
image = np.expand_dims(image, axis=0)

aug = ImageDataGenerator(
    rotation_range=30,        # random rotation, in degrees
    width_shift_range=0.1,    # horizontal shift fraction
    height_shift_range=0.1,   # vertical shift fraction
    shear_range=0.2,          # counter-clockwise shear angle
    zoom_range=0.2,           # random zoom range
    horizontal_flip=True,     # randomly flip horizontally
    fill_mode='nearest'       # how to fill pixels created by the transform
)
# Number of images generated so far.
total = 0

print("[INFO] generating images...")
imageGen = aug.flow(image, batch_size=1, save_to_dir=args['output'],
                    save_prefix=args['prefix'], save_format='jpg')
# Pull 10 augmented samples from the (infinite) generator, then stop.
for image in imageGen:
    total += 1
    if total == 10:
        break

# python augmentation_demo.py --image jemma.png --output output
#encoding:utf-8
import numpy as np
import os


class SimpleDatasetLoader:
    """Load images from disk, applying an optional chain of preprocessors.

    The label of each image is taken from the name of its parent directory
    (i.e. the dataset layout is .../<label>/<filename>).
    """

    def __init__(self, preprocessors=None):
        # Fall back to an empty preprocessor chain when none is supplied.
        self.preprocessors = [] if preprocessors is None else preprocessors

    def load(self, imagePaths, verbose=-1):
        """Read every path in *imagePaths* and return (data, labels) arrays."""
        # Imported lazily so the module can be imported without OpenCV.
        import cv2

        data, labels = [], []
        for i, imagePath in enumerate(imagePaths):
            image = cv2.imread(imagePath)
            # Second-to-last path component is the class label.
            label = imagePath.split(os.path.sep)[-2]

            for preprocessor in (self.preprocessors or []):
                image = preprocessor.preprocess(image)

            data.append(image)
            labels.append(label)

            # Periodic progress report every `verbose` images.
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1, len(imagePaths)))

        return (np.array(data), np.array(labels))
#encoding:utf-8
from keras.models import Sequential
from keras.layers.core import Activation, Flatten, Dense, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K


class MiniVGGNet:
    """Small VGG-style CNN: two (CONV->RELU->BN)x2 -> POOL -> DROPOUT blocks
    followed by a fully-connected softmax head."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build and return the (uncompiled) Keras model.

        width/height/depth describe the input image; classes is the number
        of units in the final softmax layer.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # channel axis for BatchNormalization ("channels_last")

        # Support Theano-style channel ordering as well.
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block 1.  Only the first layer of a Sequential model needs
        # input_shape; the original code redundantly passed it to every
        # Conv2D, where it is ignored — removed here.
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape,
                         name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool'))
        model.add(Dropout(0.25))

        # Block 2: same structure with doubled filter count.
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool'))
        model.add(Dropout(0.25))

        # Fully-connected head.
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classifier.
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
#encoding:utf-8
import imutils
import cv2


class AspectAwarePreprocesser:
    """Resize an image to (width, height) without distorting its aspect
    ratio: scale along the shorter dimension, then center-crop the other."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target spatial dimensions and the OpenCV interpolation method.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        (h, w) = image.shape[:2]
        dw, dh = 0, 0

        # Resize along the smaller dimension, then work out how much of the
        # larger dimension must be cropped away on each side.
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
            dh = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
            dw = int((image.shape[1] - self.width) / 2.0)

        # Center-crop, then force the exact target size: integer rounding in
        # the crop offsets can leave the dimensions off by one pixel.
        (h, w) = image.shape[:2]
        image = image[dh:h - dh, dw:w - dw]
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
#encoding:utf-8
import os
from glob import glob


class MoveImageToLabel:
    """Sort the Flowers-17 'jpg' dump into one folder per class.

    The dataset stores 80 consecutive images per class, so image number N
    belongs to class (N - 1) // 80 — i.e. N // 80, except that an exact
    multiple of 80 still belongs to the previous class.
    """

    def __init__(self, dataPath):
        self.dataPath = dataPath

    def makeFolder(self):
        # Create the 17 class directories "0" .. "16" if missing.
        for i in range(17):
            foldername = "{0}/{1}".format(self.dataPath, i)
            if not os.path.isdir(foldername):
                os.makedirs(foldername)

    def move(self):
        # Imported lazily: Pillow is only needed when images are moved.
        from PIL import Image

        for imageName in glob(self.dataPath + "/jpg/*.jpg"):
            # The last four digits of the file name are the image number.
            imageNum = imageName.split(".")[0][-4:]
            quotient, remainder = divmod(int(imageNum), 80)
            # Exact multiples of 80 belong to the previous class folder.
            folder = quotient - 1 if remainder == 0 else quotient
            target = self.dataPath + "/{0}/image_{1}.jpg".format(str(folder), imageNum)
            Image.open(imageName).save(target)
#encoding:utf-8
import cv2


class SimplePreprocessor:
    """Resize every image to a fixed (width, height), ignoring aspect ratio."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target spatial dimensions and OpenCV interpolation method.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # One fixed-size resize; aspect ratio is not preserved.
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
#encoding:utf-8
import numpy as np
import os


class SimpleDatasetLoader:
    """Load an image dataset from disk.

    Each image's label is taken from its parent directory name; an optional
    list of preprocessors is applied to every image, in order.
    """

    def __init__(self, preprocessors=None):
        self.preprocessors = preprocessors if preprocessors is not None else []

    def load(self, imagePaths, verbose=-1):
        # OpenCV is only required when images are actually loaded.
        import cv2

        data = []
        labels = []
        total = len(imagePaths)
        for i, path in enumerate(imagePaths):
            image = cv2.imread(path)
            # .../<label>/<filename> -> the directory name is the label.
            label = path.split(os.path.sep)[-2]

            if self.preprocessors is not None:
                for p in self.preprocessors:
                    image = p.preprocess(image)

            data.append(image)
            labels.append(label)

            # Progress reporting every `verbose` images.
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1, total))

        return (np.array(data), np.array(labels))
#encoding:utf-8
import h5py
import os

class HDF5DatasetWriter:
    """Buffered writer that streams (row, label) pairs into an HDF5 file.

    Rows are accumulated in an in-memory buffer and flushed to disk in
    chunks of `bufSize` to avoid one dataset write per row.
    """

    def __init__(self, dims, outputPath, dataKey='images', bufSize=1000):
        # h5py opened with 'w' cannot reuse an existing file the way this
        # class expects, so delete a stale output file first.
        # BUGFIX: the old code shelled out via os.system("rm -rf %s" % path),
        # which is non-portable and unsafe for paths containing spaces or
        # shell metacharacters; os.remove() deletes the file directly.
        if os.path.isfile(outputPath):
            print("The supplied 'outputPath' already exists; "
                  "deleting it before continuing.", outputPath)
            os.remove(outputPath)
        # Create the HDF5 database.
        self.db = h5py.File(outputPath, 'w')
        # Dataset for the image data / feature vectors.
        self.data = self.db.create_dataset(dataKey, dims, dtype='float')
        # Dataset for the integer class labels (one per row).
        self.labels = self.db.create_dataset('labels', (dims[0],), dtype='int')
        # In-memory buffer flushed every `bufSize` rows.
        self.bufSize = bufSize
        self.buffer = {"data": [], "labels": []}
        # Index of the next row to be written in the datasets.
        self.idx = 0

    def add(self, rows, labels):
        """Append rows/labels to the buffer, flushing to disk when full."""
        self.buffer['data'].extend(rows)
        self.buffer['labels'].extend(labels)
        if len(self.buffer['data']) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write the buffered rows to the HDF5 datasets and reset the buffer."""
        i = self.idx + len(self.buffer['data'])
        self.data[self.idx:i] = self.buffer['data']
        self.labels[self.idx:i] = self.buffer['labels']
        self.idx = i  # advance the write pointer
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store the human-readable class names in a 'label_names' dataset."""
        # Variable-length string dtype (Python 3: str; Python 2 used unicode).
        dt = h5py.special_dtype(vlen=str)
        labelSet = self.db.create_dataset('label_names', (len(classLabels),), dtype=dt)
        labelSet[:] = classLabels

    def close(self):
        """Flush any remaining buffered rows and close the HDF5 file."""
        if len(self.buffer['data']) > 0:
            self.flush()
        self.db.close()
model.add(Activation('relu')) 25 | model.add(BatchNormalization(axis = chanDim)) 26 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block1_pool')) 27 | model.add(Dropout(0.25)) 28 | 29 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv1')) 30 | model.add(Activation('relu')) 31 | model.add(BatchNormalization(axis=chanDim)) 32 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv2')) 33 | model.add(Activation('relu')) 34 | model.add(BatchNormalization(axis = chanDim)) 35 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block2_pool')) 36 | model.add(Dropout(0.25)) 37 | 38 | model.add(Flatten()) 39 | model.add(Dense(512)) 40 | model.add(Activation('relu')) 41 | model.add(BatchNormalization()) 42 | model.add(Dropout(0.5)) 43 | 44 | model.add(Dense(classes)) 45 | model.add(Activation("softmax")) 46 | return model 47 | -------------------------------------------------------------------------------- /chapter_03/pyimagesearch/nn/conv/MiniVGGNet.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_03/pyimagesearch/nn/conv/MiniVGGNet.pyc -------------------------------------------------------------------------------- /chapter_03/pyimagesearch/nn/conv/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_03/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_03/pyimagesearch/nn/conv/__init__.pyc 
#encoding:utf-8
import imutils
import cv2

class AspectAwarePreprocesser:
    """Resize an image to (width, height) while preserving aspect ratio.

    The shorter dimension is resized to the target size, the longer
    dimension is center-cropped, and a final cv2.resize guarantees the
    exact requested output shape.
    """

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width        # target output width
        self.height = height      # target output height
        self.inter = inter        # OpenCV interpolation method

    def preprocess(self, image):
        """Return `image` resized to exactly (self.width, self.height)."""
        (h, w) = image.shape[:2]
        dw = 0
        dh = 0
        # Resize along the smaller dimension, then crop the larger one.
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
            dh = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
            dw = int((image.shape[1] - self.width) / 2.0)
        # BUGFIX: when the resized image is smaller than the target along
        # the crop axis, the old offset went negative and produced a bogus
        # slice (e.g. image[-5:h+5]); clamp offsets to zero.  The final
        # cv2.resize below still enforces the exact output size.
        dh = max(0, dh)
        dw = max(0, dw)
        (h, w) = image.shape[:2]
        image = image[dh:h - dh, dw:w - dw]
        return cv2.resize(image, (self.width, self.height), interpolation=self.inter)
#encoding:utf-8
from keras.preprocessing.image import img_to_array

class ImageToArrayPreprocess:
    """Adapter that exposes Keras' img_to_array as a pipeline preprocessor.

    dataFormat is passed straight through to img_to_array: None means
    "use the Keras global default"; otherwise "channels_first" or
    "channels_last".
    """

    def __init__(self, dataFormat=None):
        # Stored once and reused for every image fed through preprocess().
        self.dataFormat = dataFormat

    def preprocess(self, image):
        """Convert `image` to a Keras-compatible array in the configured
        channel ordering."""
        return img_to_array(image, data_format=self.dataFormat)
# -*- coding: utf-8 -*-
"""Tune and train a logistic-regression classifier on CNN features stored
in an HDF5 database, then pickle the best estimator to disk."""
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import argparse
import pickle
import h5py

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--db", required=True, help='path HDF5 database')
ap.add_argument("-m", "--model", required=True, help='path to output model')
ap.add_argument('-j', '--jobs', type=int, default=-1,
                help='# of jobs to run when tuning hyperparameters')
args = vars(ap.parse_args())

db = h5py.File(args['db'], 'r')
# First 75% of rows = training split, remainder = test split.
# NOTE(review): this assumes the rows were written in shuffled order —
# confirm against the feature-extraction script.
i = int(db['labels'].shape[0] * 0.75)

# Hyperparameter tuning over the regularization strength C.
print("[INFO] tuning hyperparameters...")
params = {"C": [0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0]}
model = GridSearchCV(LogisticRegression(), params, cv=3, n_jobs=args['jobs'])
model.fit(db['features'][:i], db['labels'][:i])
print("[INFO] best hyperparameters :{} ".format(model.best_estimator_))

# Evaluate on the held-out split.
print("[INFO] evaluating...")
preds = model.predict(db['features'][i:])
# BUGFIX: h5py variable-length string datasets may come back as bytes on
# Python 3; classification_report expects plain strings for target_names.
target_names = [n.decode("utf-8") if isinstance(n, bytes) else n
                for n in db['label_names']]
print(classification_report(db['labels'][i:], preds, target_names=target_names))

# Persist the best estimator.
print("[INFO] saving model...")
# BUGFIX: use a context manager so the file handle is closed even if
# pickling raises.
with open(args['model'], 'wb') as f:
    f.write(pickle.dumps(model.best_estimator_))
db.close()
#encoding:utf-8

import numpy as np
import cv2
import os

class SimpleDatasetLoader:
    """Read images from disk into memory, labelling each one with the name
    of its parent directory (dataset/<label>/<file> layout)."""

    def __init__(self, preprocessors=None):
        # Fall back to an empty pipeline when none is supplied.
        if preprocessors is None:
            preprocessors = []
        self.preprocessors = preprocessors

    def load(self, imagePaths, verbose=-1):
        """Load all paths in `imagePaths`; when verbose > 0, report
        progress every `verbose` images.  Returns (data, labels) arrays."""
        data, labels = [], []
        for i, imagePath in enumerate(imagePaths):
            image = cv2.imread(imagePath)
            # Class label comes from the directory that holds the file.
            label = imagePath.split(os.path.sep)[-2]
            if self.preprocessors is not None:
                for stage in self.preprocessors:
                    image = stage.preprocess(image)

            data.append(image)
            labels.append(label)

            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1, len(imagePaths)))

        return (np.array(data), np.array(labels))
-------------------------------------------------------------------------------- /chapter_04/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/io/hdf5datasetwriter.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import h5py 3 | import os 4 | class HDF5DatasetWriter: 5 | def __init__(self,dims,outputPath,dataKey='images',bufSize=1000): 6 | # 判断输出路径是否存在 7 | if os.path.exists(outputPath): 8 | raise ValueError("The supplied ‘outputPath‘ already " 9 | "exists and cannot be overwritten. Manually delete " 10 | "the file before continuing.", outputPath) 11 | # 初始化一个HDF5 12 | self.db = h5py.File(outputPath,'w') 13 | # 存储图片数据或者特征 14 | self.data = self.db.create_dataset(dataKey,dims,dtype='float') 15 | # 存储标签数据 16 | self.labels = self.db.create_dataset('labels',(dims[0],),dtype='int') 17 | # 缓冲大小 18 | self.bufSize = bufSize 19 | self.buffer = {"data":[],"labels":[]} 20 | # 索引 21 | self.idx = 0 22 | 23 | def add(self,rows,labels): 24 | # 增加数据 25 | self.buffer['data'].extend(rows) 26 | self.buffer['labels'].extend(labels) 27 | # 如果buffer数据大小超过bufSize,则将数据写入磁盘中 28 | if len(self.buffer['data']) >= self.bufSize: 29 | self.flush() 30 | 31 | def flush(self): 32 | # 将buffers写入磁盘并初始化buffer 33 | i = self.idx + len(self.buffer['data']) 34 | self.data[self.idx:i] = self.buffer['data'] 35 | self.labels[self.idx:i] = self.buffer['labels'] 36 | self.idx = i # 指针 37 | self.buffer = {"data":[],"labels":[]} 38 | 39 | def storeClassLabels(self,classLabels): 40 | # 一个dataset存储数据标签名称 41 | # dt = h5py.special_dtype(vlen = unicode) # python2.7 42 | dt = h5py.special_dtype(vlen = str) # python3 43 | labelSet =self.db.create_dataset('label_names',(len(classLabels),),dtype=dt) 44 | labelSet[:] = classLabels 45 | 46 | def close(self): 47 | if 
#encoding:utf-8
from keras.models import Sequential
from keras.layers.core import Activation, Flatten, Dense, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K

class MiniVGGNet:
    """A small VGG-style CNN: two (CONV-RELU-BN)x2 + POOL blocks followed
    by a fully-connected head with softmax output."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build and return the (uncompiled) Keras model.

        :param width: input image width in pixels
        :param height: input image height in pixels
        :param depth: number of input channels
        :param classes: number of output classes
        """
        model = Sequential()
        inputShape = (height, width, depth)
        # Axis for BatchNormalization: the channel axis.
        chanDim = -1

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block 1: two 3x3 CONV layers with 32 filters.
        # FIX: only the first layer takes input_shape; the old code passed
        # it to every Conv2D, where Keras silently ignores it — misleading.
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape, name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool'))
        model.add(Dropout(0.25))

        # Block 2: two 3x3 CONV layers with 64 filters.
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool'))
        model.add(Dropout(0.25))

        # Fully-connected head.
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classifier.
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
#encoding:utf-8
import os
from glob import glob

class MoveImageToLabel:
    """Sort the Oxford Flowers-17 "jpg" dump into one folder per class.

    The dataset numbers its images 1..1360 with 80 consecutive images per
    class, so image number N belongs to class (N - 1) // 80.
    """

    def __init__(self, dataPath):
        self.dataPath = dataPath  # dataset root containing the "jpg" folder

    def makeFolder(self):
        """Create the 17 per-class folders "0".."16" under dataPath."""
        for i in range(17):
            foldername = self.dataPath + "/{0}".format(str(i))
            if not os.path.isdir(foldername):
                os.makedirs(foldername)

    def move(self):
        """Copy every jpg into the folder of its class."""
        # PIL is only needed here; import lazily so makeFolder() works
        # without it installed.
        from PIL import Image
        for imageName in glob(self.dataPath + "/jpg/*.jpg"):
            # BUGFIX: the old code used imageName.split(".")[0], which
            # breaks when any directory in the path contains a dot;
            # os.path.splitext strips only the extension.
            imageNum = os.path.splitext(imageName)[0][-4:]
            # 80 images per class; multiples of 80 (b == 0) still belong to
            # the previous class, hence the a - 1 adjustment.
            a, b = divmod(int(imageNum), 80)
            label = a - 1 if b == 0 else a
            fl = self.dataPath + "/{0}/image_{1}.jpg".format(str(label), imageNum)
            Image.open(imageName).save(fl)
return cv2.resize(image,(self.width,self.height),interpolation=self.inter) 12 | 13 | -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/preprocessing/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_04/pyimagesearch/preprocessing/__init__.pyc -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_04/pyimagesearch/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/utils/__pycache__/ranked.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_04/pyimagesearch/utils/__pycache__/ranked.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_04/pyimagesearch/utils/ranked.py: 
#encoding:utf-8
import numpy as np

def rank5_accuracy(preds, labels):
    """Compute the (rank-1, rank-5) accuracy of a set of predictions.

    :param preds: iterable of per-sample class-score vectors
    :param labels: iterable of ground-truth class indices (same length)
    :return: tuple (rank1, rank5) as floats in [0, 1]
    """
    rank1 = 0
    rank5 = 0
    # BUGFIX: the old code divided by len(labels) unconditionally and
    # raised ZeroDivisionError on an empty input.
    if len(labels) == 0:
        return 0.0, 0.0
    for (p, gt) in zip(preds, labels):
        # Class indices sorted by descending score.
        p = np.argsort(p)[::-1]
        # Ground truth anywhere in the top 5 counts toward rank-5.
        if gt in p[:5]:
            rank5 += 1
        # Ground truth at the very top counts toward rank-1.
        if gt == p[0]:
            rank1 += 1
    rank1 /= float(len(labels))
    rank5 /= float(len(labels))

    return rank1, rank5
#encoding:utf-8
# Inspect the layer structure of a pretrained VGG16 network.
from keras.applications import VGG16
import argparse

# Command-line flag: keep or drop the fully-connected head of VGG16.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--include_top', type=int, default=1,
                    help='whether or not to include top of CNN')
cli = vars(parser.parse_args())

# Load the ImageNet-pretrained VGG16 model.
print('[INFO] loading network...')
keep_head = cli['include_top'] > 0
model = VGG16(weights='imagenet', include_top=keep_head)
print("[INFO] showing layers...")

# Print index and layer class name for every layer in the network.
for idx, layer in enumerate(model.layers):
    print("[INFO] {}\t{}".format(idx, layer.__class__.__name__))
class SimpleDatasetLoader: 8 | def __init__(self,preprocessors = None): 9 | self.preprocessors = preprocessors 10 | 11 | if self.preprocessors is None: 12 | self.preprocessors = [] 13 | 14 | 15 | def load(self,imagePaths,verbose = -1): 16 | data = [] 17 | labels = [] 18 | for (i,imagePath) in enumerate(imagePaths): 19 | image = cv2.imread(imagePath) 20 | label = imagePath.split(os.path.sep)[-2] 21 | if self.preprocessors is not None: 22 | for preprocessor in self.preprocessors: 23 | image = preprocessor.preprocess(image) 24 | 25 | data.append(image) 26 | labels.append(label) 27 | 28 | if verbose > 0 and i >0 and (i+1) % verbose == 0: 29 | print("[INFO] processed {0}/{1}".format(i+1,len(imagePaths))) 30 | 31 | return (np.array(data),np.array(labels)) 32 | -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/datasets/SimpleDatasetLoader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/datasets/SimpleDatasetLoader.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/datasets/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/datasets/__init__.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/datasets/__pycache__/SimpleDatasetLoader.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/datasets/__pycache__/SimpleDatasetLoader.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/datasets/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/datasets/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/io/hdf5datasetwriter.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import h5py 3 | import os 4 | class HDF5DatasetWriter: 5 | def __init__(self,dims,outputPath,dataKey='images',bufSize=1000): 6 | # 判断输出路径是否存在 7 | if os.path.exists(outputPath): 8 | raise ValueError("The supplied ‘outputPath‘ already " 9 | "exists and cannot be overwritten. 
Manually delete " 10 | "the file before continuing.", outputPath) 11 | # 初始化一个HDF5 12 | self.db = h5py.File(outputPath,'w') 13 | # 存储图片数据或者特征 14 | self.data = self.db.create_dataset(dataKey,dims,dtype='float') 15 | # 存储标签数据 16 | self.labels = self.db.create_dataset('labels',(dims[0],),dtype='int') 17 | # 缓冲大小 18 | self.bufSize = bufSize 19 | self.buffer = {"data":[],"labels":[]} 20 | # 索引 21 | self.idx = 0 22 | 23 | def add(self,rows,labels): 24 | # 增加数据 25 | self.buffer['data'].extend(rows) 26 | self.buffer['labels'].extend(labels) 27 | # 如果buffer数据大小超过bufSize,则将数据写入磁盘中 28 | if len(self.buffer['data']) >= self.bufSize: 29 | self.flush() 30 | 31 | def flush(self): 32 | # 将buffers写入磁盘并初始化buffer 33 | i = self.idx + len(self.buffer['data']) 34 | self.data[self.idx:i] = self.buffer['data'] 35 | self.labels[self.idx:i] = self.buffer['labels'] 36 | self.idx = i # 指针 37 | self.buffer = {"data":[],"labels":[]} 38 | 39 | def storeClassLabels(self,classLabels): 40 | # 一个dataset存储数据标签名称 41 | # dt = h5py.special_dtype(vlen = unicode) # python2.7 42 | dt = h5py.special_dtype(vlen = str) # python3 43 | labelSet =self.db.create_dataset('label_names',(len(classLabels),),dtype=dt) 44 | labelSet[:] = classLabels 45 | 46 | def close(self): 47 | if len(self.buffer['data']) >0 : 48 | self.flush() 49 | # 关闭dataset 50 | self.db.close() 51 | -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/__init__.pyc 
-------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/MiniVGGNet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | from keras.models import Sequential 3 | from keras.layers.core import Activation,Flatten,Dense,Dropout 4 | from keras.layers.convolutional import Conv2D 5 | from keras.layers.pooling import MaxPooling2D 6 | from keras.layers.normalization import BatchNormalization 7 | from keras import backend as K 8 | 9 | class MiniVGGNet: 10 | @staticmethod 11 | def build(width,height,depth,classes): 12 | model = Sequential() 13 | inputShape = (height,width,depth) 14 | chanDim = -1 15 | 16 | if K.image_data_format() == "channels_first": 17 | inputShape = (depth,height,width) 18 | chanDim = 1 19 | 20 | model.add(Conv2D(32,(3,3),padding='same',input_shape=inputShape,name='block1_conv1')) 21 | model.add(Activation('relu')) 22 | model.add(BatchNormalization(axis=chanDim)) 23 | model.add(Conv2D(32,(3,3),padding='same',input_shape=inputShape,name='block1_conv2')) 24 | model.add(Activation('relu')) 25 | model.add(BatchNormalization(axis = chanDim)) 26 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block1_pool')) 27 | model.add(Dropout(0.25)) 28 | 29 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv1')) 30 | model.add(Activation('relu')) 31 | model.add(BatchNormalization(axis=chanDim)) 32 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv2')) 33 | 
model.add(Activation('relu')) 34 | model.add(BatchNormalization(axis = chanDim)) 35 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block2_pool')) 36 | model.add(Dropout(0.25)) 37 | 38 | model.add(Flatten()) 39 | model.add(Dense(512)) 40 | model.add(Activation('relu')) 41 | model.add(BatchNormalization()) 42 | model.add(Dropout(0.5)) 43 | 44 | model.add(Dense(classes)) 45 | model.add(Activation("softmax")) 46 | return model 47 | -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/MiniVGGNet.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/conv/MiniVGGNet.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/conv/__init__.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- 
/chapter_05/pyimagesearch/nn/conv/__pycache__/fcheadnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/nn/conv/__pycache__/fcheadnet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/fcheadnet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | # 加载所需要的模块 3 | from keras.layers.core import Dropout 4 | from keras.layers.core import Flatten 5 | from keras.layers.core import Dense 6 | 7 | class FCHeadNet: 8 | @staticmethod 9 | def build(baseModel,classes,D): 10 | # 初始化top部分 11 | headModel = baseModel.output 12 | headModel = Flatten(name='flatten')(headModel) 13 | headModel = Dense(D,activation='relu')(headModel) 14 | headModel = Dropout(0.5)(headModel) 15 | # 增加一个softmaxc层 16 | headModel = Dense(classes,activation='softmax')(headModel) 17 | return headModel -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/lenet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/nn/conv/shallownet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/AspectAwarePreprocessor.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import imutils 3 | import cv2 4 | 5 | class AspectAwarePreprocesser: 6 | def __init__(self,width,height,inter=cv2.INTER_AREA): 7 | self.width = width 8 | self.height = height 9 | 
self.inter = inter 10 | 11 | def preprocess(self,image): 12 | (h,w) = image.shape[:2] 13 | dw = 0 14 | dh = 0 15 | if w < h: 16 | image = imutils.resize(image,width = self.width,inter = self.inter) 17 | dh = int((image.shape[0] - self.height) / 2.0) 18 | else: 19 | image = imutils.resize(image,height=self.height,inter = self.inter) 20 | dw = int((image.shape[1] - self.width) /2.0) 21 | (h,w) = image.shape[:2] 22 | image = image[dh:h - dh,dw:w-dw] 23 | return cv2.resize(image,(self.width,self.height),interpolation=self.inter) -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/AspectAwarePreprocessor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/AspectAwarePreprocessor.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/ImageMove.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import os 3 | from PIL import Image 4 | from glob import glob 5 | 6 | class MoveImageToLabel: 7 | def __init__(self,dataPath): 8 | self.dataPath = dataPath 9 | 10 | def makeFolder(self): 11 | for i in range(17): 12 | foldername = self.dataPath + "/{0}".format(str(i)) 13 | if not os.path.isdir(foldername): 14 | os.makedirs(foldername) 15 | 16 | def move(self): 17 | for imageName in glob(self.dataPath+"/jpg/*.jpg"): 18 | imageNum = imageName.split(".")[0][-4:] 19 | a = int(imageNum) // 80 20 | b = int(imageNum) % 80 21 | if b==0: 22 | fl=self.dataPath+"/{0}/image_{1}.jpg".format(str(a-1),imageNum) 23 | newimg = Image.open(imageName) 24 | newimg.save(fl) 25 | else: 26 | fl=self.dataPath+"/{0}/image_{1}.jpg".format(str(a),imageNum) 27 | newimg = Image.open(imageName) 28 | newimg.save(fl) 29 
| -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/ImageMove.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/ImageMove.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/ImageToArrayPreprocessor.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | from keras.preprocessing.image import img_to_array 3 | 4 | class ImageToArrayPreprocess: 5 | def __init__(self,dataFormat = None): 6 | self.dataFormat = dataFormat 7 | 8 | def preprocess(self,image): 9 | return img_to_array(image,data_format=self.dataFormat) -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/ImageToArrayPreprocessor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/ImageToArrayPreprocessor.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/SimplePreprocessor.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import cv2 3 | 4 | class SimplePreprocessor: 5 | def __init__(self,width,height,inter = cv2.INTER_AREA): 6 | self.width = width 7 | self.height = height 8 | self.inter = inter 9 | 10 | def preprocess(self,image): 11 | return cv2.resize(image,(self.width,self.height),interpolation=self.inter) 12 | 13 | -------------------------------------------------------------------------------- 
/chapter_05/pyimagesearch/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/__init__.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/__pycache__/ImageToArrayPreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/__pycache__/ImageToArrayPreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_05/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc 
-------------------------------------------------------------------------------- /chapter_05/pyimagesearch/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_05/pyimagesearch/utils/ranked.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import numpy as np 3 | 4 | def rank5_accuracy(preds,labels): 5 | #初始化 6 | rank1 = 0 7 | rank5 = 0 8 | # 遍历数据集 9 | for (p,gt) in zip(preds,labels): 10 | # 通过降序对概率进行排序 11 | p = np.argsort(p)[::-1] 12 | # 检查真实标签是否落在top5中 13 | if gt in p[:5]: 14 | rank5 += 1 15 | # 检验真实标签是否等于top1 16 | if gt == p[0]: 17 | rank1 += 1 18 | # 计算准确度 19 | rank1 /= float(len(labels)) 20 | rank5 /= float(len(labels)) 21 | 22 | return rank1,rank5 23 | 24 | 25 | -------------------------------------------------------------------------------- /chapter_06/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 18-6-19 下午11:26 3 | # @Author : lwt 4 | # @File : __init__.py.py 5 | # @Software: PyCharm -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/__init__.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/__pycache__/__init__.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/datasets/SimpleDatasetLoader.py: -------------------------------------------------------------------------------- 1 | #encodig:utf-8 2 | 3 | import numpy as np 4 | import cv2 5 | import os 6 | 7 | class SimpleDatasetLoader: 8 | def __init__(self,preprocessors = None): 9 | self.preprocessors = preprocessors 10 | 11 | if self.preprocessors is None: 12 | self.preprocessors = [] 13 | 14 | 15 | def load(self,imagePaths,verbose = -1): 16 | data = [] 17 | labels = [] 18 | for (i,imagePath) in enumerate(imagePaths): 19 | image = cv2.imread(imagePath) 20 | label = imagePath.split(os.path.sep)[-2] 21 | if self.preprocessors is not None: 22 | for preprocessor in self.preprocessors: 23 | image = preprocessor.preprocess(image) 24 | 25 | data.append(image) 26 | labels.append(label) 27 | 28 | if verbose > 0 and i >0 and (i+1) % verbose == 0: 29 | print("[INFO] processed {0}/{1}".format(i+1,len(imagePaths))) 30 | 31 | return (np.array(data),np.array(labels)) 32 | -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/datasets/SimpleDatasetLoader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/datasets/SimpleDatasetLoader.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 
-------------------------------------------------------------------------------- /chapter_06/pyimagesearch/datasets/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/datasets/__init__.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/io/hdf5datasetwriter.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import h5py 3 | import os 4 | class HDF5DatasetWriter: 5 | def __init__(self,dims,outputPath,dataKey='images',bufSize=1000): 6 | # 判断输出路径是否存在 7 | if os.path.exists(outputPath): 8 | raise ValueError("The supplied ‘outputPath‘ already " 9 | "exists and cannot be overwritten. 
Manually delete " 10 | "the file before continuing.", outputPath) 11 | # 初始化一个HDF5 12 | self.db = h5py.File(outputPath,'w') 13 | # 存储图片数据或者特征 14 | self.data = self.db.create_dataset(dataKey,dims,dtype='float') 15 | # 存储标签数据 16 | self.labels = self.db.create_dataset('labels',(dims[0],),dtype='int') 17 | # 缓冲大小 18 | self.bufSize = bufSize 19 | self.buffer = {"data":[],"labels":[]} 20 | # 索引 21 | self.idx = 0 22 | 23 | def add(self,rows,labels): 24 | # 增加数据 25 | self.buffer['data'].extend(rows) 26 | self.buffer['labels'].extend(labels) 27 | # 如果buffer数据大小超过bufSize,则将数据写入磁盘中 28 | if len(self.buffer['data']) >= self.bufSize: 29 | self.flush() 30 | 31 | def flush(self): 32 | # 将buffers写入磁盘并初始化buffer 33 | i = self.idx + len(self.buffer['data']) 34 | self.data[self.idx:i] = self.buffer['data'] 35 | self.labels[self.idx:i] = self.buffer['labels'] 36 | self.idx = i # 指针 37 | self.buffer = {"data":[],"labels":[]} 38 | 39 | def storeClassLabels(self,classLabels): 40 | # 一个dataset存储数据标签名称 41 | # dt = h5py.special_dtype(vlen = unicode) # python2.7 42 | dt = h5py.special_dtype(vlen = str) # python3 43 | labelSet =self.db.create_dataset('label_names',(len(classLabels),),dtype=dt) 44 | labelSet[:] = classLabels 45 | 46 | def close(self): 47 | if len(self.buffer['data']) >0 : 48 | self.flush() 49 | # 关闭dataset 50 | self.db.close() 51 | -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/__init__.pyc 
-------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/MiniVGGNet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | from keras.models import Sequential 3 | from keras.layers.core import Activation,Flatten,Dense,Dropout 4 | from keras.layers.convolutional import Conv2D 5 | from keras.layers.pooling import MaxPooling2D 6 | from keras.layers.normalization import BatchNormalization 7 | from keras import backend as K 8 | 9 | class MiniVGGNet: 10 | @staticmethod 11 | def build(width,height,depth,classes): 12 | model = Sequential() 13 | inputShape = (height,width,depth) 14 | chanDim = -1 15 | 16 | if K.image_data_format() == "channels_first": 17 | inputShape = (depth,height,width) 18 | chanDim = 1 19 | 20 | model.add(Conv2D(32,(3,3),padding='same',input_shape=inputShape,name='block1_conv1')) 21 | model.add(Activation('relu')) 22 | model.add(BatchNormalization(axis=chanDim)) 23 | model.add(Conv2D(32,(3,3),padding='same',input_shape=inputShape,name='block1_conv2')) 24 | model.add(Activation('relu')) 25 | model.add(BatchNormalization(axis = chanDim)) 26 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block1_pool')) 27 | model.add(Dropout(0.25)) 28 | 29 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv1')) 30 | model.add(Activation('relu')) 31 | model.add(BatchNormalization(axis=chanDim)) 32 | model.add(Conv2D(64,(3,3),padding='same',input_shape=inputShape,name='block2_conv2')) 33 | 
model.add(Activation('relu')) 34 | model.add(BatchNormalization(axis = chanDim)) 35 | model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),name='block2_pool')) 36 | model.add(Dropout(0.25)) 37 | 38 | model.add(Flatten()) 39 | model.add(Dense(512)) 40 | model.add(Activation('relu')) 41 | model.add(BatchNormalization()) 42 | model.add(Dropout(0.5)) 43 | 44 | model.add(Dense(classes)) 45 | model.add(Activation("softmax")) 46 | return model 47 | -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/MiniVGGNet.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/conv/MiniVGGNet.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/conv/__init__.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc -------------------------------------------------------------------------------- 
/chapter_06/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/fcheadnet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | # 加载所需要的模块 3 | from keras.layers.core import Dropout 4 | from keras.layers.core import Flatten 5 | from keras.layers.core import Dense 6 | 7 | class FCHeadNet: 8 | @staticmethod 9 | def build(baseModel,classes,D): 10 | # 初始化top部分 11 | headModel = baseModel.output 12 | headModel = Flatten(name='flatten')(headModel) 13 | headModel = Dense(D,activation='relu')(headModel) 14 | headModel = Dropout(0.5)(headModel) 15 | # 增加一个softmaxc层 16 | headModel = Dense(classes,activation='softmax')(headModel) 17 | return headModel -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/lenet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/nn/conv/shallownet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/preprocessing/AspectAwarePreprocessor.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import imutils 3 | import cv2 4 | 5 | class AspectAwarePreprocesser: 6 | def __init__(self,width,height,inter=cv2.INTER_AREA): 7 | self.width = width 8 | self.height = height 9 | 
self.inter = inter 10 | 11 | def preprocess(self,image): 12 | (h,w) = image.shape[:2] 13 | dw = 0 14 | dh = 0 15 | if w < h: 16 | image = imutils.resize(image,width = self.width,inter = self.inter) 17 | dh = int((image.shape[0] - self.height) / 2.0) 18 | else: 19 | image = imutils.resize(image,height=self.height,inter = self.inter) 20 | dw = int((image.shape[1] - self.width) /2.0) 21 | (h,w) = image.shape[:2] 22 | image = image[dh:h - dh,dw:w-dw] 23 | return cv2.resize(image,(self.width,self.height),interpolation=self.inter) -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/preprocessing/AspectAwarePreprocessor.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_06/pyimagesearch/preprocessing/AspectAwarePreprocessor.pyc -------------------------------------------------------------------------------- /chapter_06/pyimagesearch/preprocessing/ImageMove.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import os 3 | from PIL import Image 4 | from glob import glob 5 | 6 | class MoveImageToLabel: 7 | def __init__(self,dataPath): 8 | self.dataPath = dataPath 9 | 10 | def makeFolder(self): 11 | for i in range(17): 12 | foldername = self.dataPath + "/{0}".format(str(i)) 13 | if not os.path.isdir(foldername): 14 | os.makedirs(foldername) 15 | 16 | def move(self): 17 | for imageName in glob(self.dataPath+"/jpg/*.jpg"): 18 | imageNum = imageName.split(".")[0][-4:] 19 | a = int(imageNum) // 80 20 | b = int(imageNum) % 80 21 | if b==0: 22 | fl=self.dataPath+"/{0}/image_{1}.jpg".format(str(a-1),imageNum) 23 | newimg = Image.open(imageName) 24 | newimg.save(fl) 25 | else: 26 | fl=self.dataPath+"/{0}/image_{1}.jpg".format(str(a),imageNum) 27 | newimg = Image.open(imageName) 28 | newimg.save(fl) 29 
class ImageToArrayPreprocess:
    """Preprocessor that turns a raw image into a Keras-ordered numpy array."""

    def __init__(self, dataFormat=None):
        # None defers to the globally configured Keras image_data_format;
        # otherwise "channels_first" / "channels_last" forces an ordering.
        self.dataFormat = dataFormat

    def preprocess(self, image):
        """Return *image* converted via Keras' img_to_array helper."""
        array = img_to_array(image, data_format=self.dataFormat)
        return array
# Build the CLI: the only argument is the directory holding the *.model files.
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--models', required=True, help='path to models directory')
args = vars(parser.parse_args())

# CIFAR-10 test split, pixel values scaled to [0, 1].
(testX, testY) = cifar10.load_data()[1]
testX = testX.astype('float') / 255.0

labelNames = ["airplane", "automobile", "bird", "cat", "deer",
              "dog", "frog", "horse", "ship", "truck"]
# One-hot encode the ground-truth labels.
testY = LabelBinarizer().fit_transform(testY)

# Collect every serialized model found in the models directory.
pattern = os.path.sep.join([args['models'], "*.model"])
modelPaths = list(glob.glob(pattern))
models = []
for (i, path) in enumerate(modelPaths):
    print("[INFO] loading model {}/{}".format(i + 1, len(modelPaths)))
    models.append(load_model(path))

print("[INFO] evaluating ensemble...")
# Average the per-model class probabilities to form the ensemble prediction.
predictions = np.average([m.predict(testX, batch_size=64) for m in models],
                         axis=0)

# Report per-class precision/recall/F1 for the averaged predictions.
print(classification_report(testY.argmax(axis=1),
                            predictions.argmax(axis=1),
                            target_names=labelNames))
# -*- coding: utf-8 -*-

# Path to the directory of raw training images.
IMAGES_PATH = "../datasets/kaggle_dogs_vs_cats/train"

# Total number of classes (dog vs. cat).
NUM_CLASSES = 2
# Size of the validation split (1250 images per class).
NUM_VAL_IMAGES = 1250 * NUM_CLASSES
# Size of the test split (1250 images per class).
NUM_TEST_IMAGES = 1250 * NUM_CLASSES

# Output paths of the serialized HDF5 datasets.
TRAIN_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/train.hdf5"
VAL_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/val.hdf5"
TEST_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/test.hdf5"

# Where the trained model is serialized.
MODEL_PATH = "output/alexnet_dogs_vs_cats.model"

# Where the per-channel dataset mean (JSON) is stored.
DATASET_MEAN = "output/dogs_vs_cats_mean.json"

# Directory for all remaining output artifacts.
OUTPUT_PATH = "output"
class SimpleDatasetLoader:
    """Load an image dataset from disk, applying an optional chain of
    preprocessors to every image.

    The class label of each image is taken from the name of its parent
    directory (".../<label>/<file>").
    """

    def __init__(self, preprocessors=None):
        # Normalize to a list so load() can always iterate it.
        self.preprocessors = preprocessors
        if self.preprocessors is None:
            self.preprocessors = []

    def load(self, imagePaths, verbose=-1):
        """Read every path in *imagePaths* and return (data, labels) arrays.

        verbose: print a progress line every `verbose` images (<= 0 disables).
        """
        data = []
        labels = []
        for (i, imagePath) in enumerate(imagePaths):
            image = cv2.imread(imagePath)
            # Label comes from the image's parent directory name.
            label = imagePath.split(os.path.sep)[-2]
            # __init__ guarantees a list, so the original None-check
            # was redundant and has been removed.
            for preprocessor in self.preprocessors:
                image = preprocessor.preprocess(image)

            data.append(image)
            labels.append(label)

            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1, len(imagePaths)))

        return (np.array(data), np.array(labels))
class HDF5DatasetWriter:
    """Incrementally write a (data, labels) dataset to an HDF5 file.

    Rows are buffered in memory and flushed to disk every *bufSize* rows.
    """

    def __init__(self, dims, outputPath, dataKey='images', bufSize=1000):
        # Refuse to clobber an existing file: opening with mode 'w' would
        # silently truncate it. (Fixed: the message used typographic quotes
        # ‘outputPath‘ instead of ASCII quotes.)
        if os.path.exists(outputPath):
            raise ValueError("The supplied 'outputPath' already "
                             "exists and cannot be overwritten. Manually delete "
                             "the file before continuing.", outputPath)
        # Create the HDF5 file with two datasets:
        self.db = h5py.File(outputPath, 'w')
        # raw images (or extracted feature vectors), shape given by dims
        self.data = self.db.create_dataset(dataKey, dims, dtype='float')
        # one integer class label per row
        self.labels = self.db.create_dataset('labels', (dims[0],), dtype='int')
        # in-memory buffer, flushed once it reaches bufSize rows
        self.bufSize = bufSize
        self.buffer = {"data": [], "labels": []}
        # index of the next row to write in the HDF5 datasets
        self.idx = 0

    def add(self, rows, labels):
        """Append *rows* / *labels* to the buffer, flushing when full."""
        self.buffer['data'].extend(rows)
        self.buffer['labels'].extend(labels)
        if len(self.buffer['data']) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write the buffered rows to disk and reset the buffer."""
        i = self.idx + len(self.buffer['data'])
        self.data[self.idx:i] = self.buffer['data']
        self.labels[self.idx:i] = self.buffer['labels']
        self.idx = i  # advance the write pointer
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store the string class names in a dedicated 'label_names' dataset."""
        # dt = h5py.special_dtype(vlen = unicode) # python2.7
        dt = h5py.special_dtype(vlen=str)  # variable-length strings (python3)
        labelSet = self.db.create_dataset('label_names', (len(classLabels),), dtype=dt)
        labelSet[:] = classLabels

    def close(self):
        """Flush any pending rows and close the underlying HDF5 file."""
        if len(self.buffer['data']) > 0:
            self.flush()
        self.db.close()
class MiniVGGNet:
    """A small VGG-style CNN: two (CONV->RELU->BN)x2 + POOL blocks followed
    by a fully-connected head, with dropout for regularization."""

    @staticmethod
    def build(width, height, depth, classes):
        """Construct and return the (uncompiled) Keras model.

        width/height/depth: input image dimensions and channel count
        classes: number of softmax output units
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # BatchNormalization axis for channels-last

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block 1: two 3x3 CONV layers with 32 filters.
        # input_shape is only meaningful on the first layer; the original
        # redundantly passed it to every Conv2D, which Keras ignores.
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape, name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool'))
        model.add(Dropout(0.25))

        # Block 2: two 3x3 CONV layers with 64 filters.
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool'))
        model.add(Dropout(0.25))

        # Fully-connected head with a stronger dropout.
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classifier.
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
class FCHeadNet:
    """Fully-connected 'head' attached on top of a frozen base network
    for fine-tuning."""

    @staticmethod
    def build(baseModel, classes, D):
        """Return the head tensor: FLATTEN -> FC(D)+RELU -> DROPOUT -> softmax.

        baseModel: base network whose output the head consumes
        classes: number of output classes
        D: number of units in the hidden fully-connected layer
        """
        x = baseModel.output
        x = Flatten(name='flatten')(x)
        x = Dense(D, activation='relu')(x)
        x = Dropout(0.5)(x)
        # Final softmax classification layer.
        x = Dense(classes, activation='softmax')(x)
        return x
class AspectAwarePreprocessor:
    """Resize an image to width x height while preserving aspect ratio:
    resize along the shorter side, center-crop the longer side, then force
    the exact target size."""

    def __init__(self,width,height,inter=cv2.INTER_AREA):
        # Target output size and the OpenCV interpolation method to use.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self,image):
        # Original (h, w) of the input image.
        (h,w) = image.shape[:2]
        # Center-crop offsets; only one of them becomes non-zero below.
        dw = 0
        dh = 0
        if w < h:
            # Width is the shorter side: resize by width, then crop height.
            image = imutils.resize(image,width = self.width,inter = self.inter)
            dh = int((image.shape[0] - self.height) / 2.0)
        else:
            # Height is shorter (or square): resize by height, crop width.
            image = imutils.resize(image,height=self.height,inter = self.inter)
            dw = int((image.shape[1] - self.width) /2.0)
        # Center crop. NOTE(review): when the size difference is odd the
        # crop can be one pixel off target — the final resize corrects it.
        (h,w) = image.shape[:2]
        image = image[dh:h - dh,dw:w-dw]
        # Guarantee the exact (width, height) output size.
        return cv2.resize(image,(self.width,self.height),interpolation=self.inter)
class ImageToArrayPreprocess:
    """Preprocessor that converts a raw image to a Keras-ordered numpy array."""

    def __init__(self,dataFormat = None):
        # dataFormat: "channels_first", "channels_last", or None to use the
        # global Keras image_data_format setting.
        self.dataFormat = dataFormat

    def preprocess(self,image):
        # Delegate to Keras' img_to_array helper.
        return img_to_array(image,data_format=self.dataFormat)
class SimplePreprocessor:
    """Resize every image to a fixed width x height, ignoring aspect ratio."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # Target size and OpenCV interpolation method.
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        """Return *image* resized to exactly (width, height)."""
        target = (self.width, self.height)
        return cv2.resize(image, target, interpolation=self.inter)
def rank5_accuracy(preds, labels):
    """Compute (rank-1, rank-5) accuracy.

    preds: per-sample class scores/probabilities
    labels: ground-truth integer class indices
    Returns a tuple of floats in [0, 1].
    """
    hits_top1 = 0
    hits_top5 = 0
    for scores, truth in zip(preds, labels):
        # Class indices sorted by descending score.
        ranked = np.argsort(scores)[::-1]
        # Rank-5 hit: the true class appears among the five best guesses.
        hits_top5 += int(truth in ranked[:5])
        # Rank-1 hit: the true class is the single best guess.
        hits_top1 += int(truth == ranked[0])
    total = float(len(labels))
    return hits_top1 / total, hits_top5 / total
# -*- coding: utf-8 -*-

# Path to the directory of raw training images.
IMAGES_PATH = "../datasets/kaggle_dogs_vs_cats/train"

# Total number of classes (dog vs. cat).
NUM_CLASSES = 2
# Size of the validation split (1250 images per class).
NUM_VAL_IMAGES = 1250 * NUM_CLASSES
# Size of the test split (1250 images per class).
NUM_TEST_IMAGES = 1250 * NUM_CLASSES

# Output paths of the serialized HDF5 datasets.
TRAIN_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/train.hdf5"
VAL_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/val.hdf5"
TEST_HDF5 = "../datasets/kaggle_dogs_vs_cats/hdf5/test.hdf5"

# Where the trained model is serialized.
MODEL_PATH = "output/alexnet_dogs_vs_cats.model"

# Where the per-channel dataset mean (JSON) is stored.
DATASET_MEAN = "output/dogs_vs_cats_mean.json"

# Directory for all remaining output artifacts.
OUTPUT_PATH = "output"
https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/dogs_vs_cats.pickle -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/output/dogs_vs_cats_mean.json: -------------------------------------------------------------------------------- 1 | {"R": 124.97444899064222, "G": 115.97971080586517, "B": 106.13593549819402} -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/__init__.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .trainingmonitor import TrainingMonitor 2 | -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/callbacks/__pycache__/__init__.cpython-36.pyc: 
class SimpleDatasetLoader:
    """Read images from disk, run them through an optional preprocessor
    chain, and return them with their folder-derived labels."""

    def __init__(self, preprocessors=None):
        # Fall back to an empty chain so load() can iterate unconditionally.
        self.preprocessors = preprocessors
        if self.preprocessors is None:
            self.preprocessors = []

    def load(self, imagePaths, verbose=-1):
        """Return (data, labels) numpy arrays for *imagePaths*.

        A progress message is printed every *verbose* images (verbose > 0).
        """
        data = []
        labels = []
        total = len(imagePaths)
        for (i, path) in enumerate(imagePaths):
            image = cv2.imread(path)
            # The label is the name of the image's parent directory.
            label = path.split(os.path.sep)[-2]
            if self.preprocessors is not None:
                for p in self.preprocessors:
                    image = p.preprocess(image)

            data.append(image)
            labels.append(label)

            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1, total))

        return (np.array(data), np.array(labels))
/chapter_10/dogs_vs_cats/pyimagesearch/datasets/SimpleDatasetLoader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/datasets/SimpleDatasetLoader.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/datasets/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/datasets/__init__.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/io/__pycache__/hdf5datasetgenerator.cpython-36.pyc: -------------------------------------------------------------------------------- 
class HDF5DatasetGenerator:
    """Yield batches of (images, labels) from an HDF5 dataset for Keras training.

    Parameters
    ----------
    dbPath : path of an HDF5 file containing 'images' and 'labels' datasets
    batchSize : number of samples per yielded batch
    preprocessors : optional list of objects with a ``preprocess(image)`` method
    aug : optional Keras ImageDataGenerator for on-the-fly augmentation
    binarize : one-hot encode the integer labels when True
    classes : number of classes used for one-hot encoding
    """

    def __init__(self, dbPath, batchSize, preprocessors=None, aug=None,
                 binarize=True, classes=2):
        self.batchSize = batchSize
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        # Open read-only: the generator never writes, and recent h5py
        # versions require an explicit file mode.
        self.db = h5py.File(dbPath, "r")
        self.numImages = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        """Loop over the whole dataset *passes* times (default: forever)."""
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.numImages, self.batchSize):
                # Slice the next batch straight out of the HDF5 datasets.
                images = self.db['images'][i: i + self.batchSize]
                labels = self.db['labels'][i: i + self.batchSize]
                # One-hot encode integer labels if requested.
                if self.binarize:
                    labels = np_utils.to_categorical(labels, self.classes)
                # Apply the preprocessor chain to every image in the batch.
                if self.preprocessors is not None:
                    proImages = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proImages.append(image)
                    images = np.array(proImages)
                # Optional in-memory data augmentation.
                if self.aug is not None:
                    (images, labels) = next(self.aug.flow(
                        images, labels, batch_size=self.batchSize))
                yield (images, labels)
            epochs += 1

    def close(self):
        """Release the underlying HDF5 file handle."""
        self.db.close()
class HDF5DatasetWriter:
    """Incrementally write a fixed-size dataset (data + integer labels) to HDF5.

    Rows are buffered in memory and flushed to disk whenever the buffer
    reaches ``buffSize`` entries.
    """

    def __init__(self, dims, outputPath, dataKey='images', buffSize=1000):
        # Refuse to clobber an existing file: HDF5 output is expensive to
        # regenerate, so the caller must delete it explicitly first.
        if os.path.exists(outputPath):
            raise ValueError("The supplied 'outputPath' already "
                "exists and cannot be overwritten. Manually delete "
                "the file before continuing.", outputPath)
        self.db = h5py.File(outputPath, 'w')
        # Dataset for the raw images / feature vectors.
        self.data = self.db.create_dataset(dataKey, dims, dtype='float')
        # One integer label per row.
        self.labels = self.db.create_dataset('labels', (dims[0],), dtype='int')
        # In-memory buffer and its flush threshold.
        self.bufSize = buffSize
        self.buffer = {"data": [], "labels": []}
        # Next row index to write in the HDF5 datasets.
        self.idx = 0

    def add(self, rows, labels):
        """Append *rows*/*labels*; flush to disk once the buffer is full."""
        self.buffer['data'].extend(rows)
        self.buffer['labels'].extend(labels)
        if len(self.buffer['data']) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write the buffered rows to disk and reset the buffer."""
        i = self.idx + len(self.buffer['data'])
        self.data[self.idx:i] = self.buffer['data']
        self.labels[self.idx:i] = self.buffer['labels']
        self.idx = i
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store the human-readable class names in a 'label_names' dataset."""
        # Variable-length string dtype (Python 3).
        dt = h5py.special_dtype(vlen=str)
        labelSet = self.db.create_dataset('label_names',
                                          (len(classLabels),), dtype=dt)
        labelSet[:] = classLabels

    def close(self):
        """Flush any remaining buffered rows and close the HDF5 file."""
        if len(self.buffer['data']) > 0:
            self.flush()
        self.db.close()
=self.db.create_dataset('label_names',(len(classLabels),),dtype=dt) 44 | labelSet[:] = classLabels 45 | 46 | def close(self): 47 | if len(self.buffer['data']) >0 : 48 | self.flush() 49 | # 关闭dataset 50 | self.db.close() 51 | -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/__init__.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/MiniVGGNet.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | from keras.models import Sequential 3 | from keras.layers.core import Activation,Flatten,Dense,Dropout 4 | from keras.layers.convolutional import Conv2D 5 | from keras.layers.pooling import MaxPooling2D 6 | from keras.layers.normalization import BatchNormalization 7 | from keras import backend as K 8 | 9 | class MiniVGGNet: 10 | @staticmethod 11 | def build(width,height,depth,classes): 12 | model = Sequential() 13 | inputShape = 
class MiniVGGNet:
    """VGG-style CNN: two (CONV->RELU->BN)x2 -> POOL blocks + an FC head."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build and return the (uncompiled) Keras Sequential model.

        width/height/depth describe the input image; *classes* sets the
        size of the softmax output layer.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # BatchNorm axis for channels-last data

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block 1. Only the first layer needs input_shape; Keras ignores it
        # on subsequent layers, so it is omitted there to avoid confusion.
        model.add(Conv2D(32, (3, 3), padding='same',
                         input_shape=inputShape, name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                               name='block1_pool'))
        model.add(Dropout(0.25))

        # Block 2: same structure, doubled filter count.
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                               name='block2_pool'))
        model.add(Dropout(0.25))

        # Fully connected head.
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Softmax classifier.
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
-------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__init__.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/nn/conv/fcheadnet.py: 
class FCHeadNet:
    """Fully-connected classification head placed on top of a CNN body."""

    @staticmethod
    def build(baseModel, classes, D):
        """Return FLATTEN -> DENSE(D, relu) -> DROPOUT -> softmax(classes).

        *baseModel* is the (usually frozen) convolutional body whose output
        tensor the head is attached to.
        """
        x = baseModel.output
        x = Flatten(name='flatten')(x)
        x = Dense(D, activation='relu')(x)
        x = Dropout(0.5)(x)
        # Final softmax layer sized to the number of classes.
        x = Dense(classes, activation='softmax')(x)
        return x
class MoveImageToLabel:
    """Sort the Flowers-17 jpg dump into per-class folders (80 images/class)."""

    def __init__(self, dataPath):
        self.dataPath = dataPath

    def makeFolder(self):
        """Create the 17 class folders (named '0'..'16') if they are missing."""
        for i in range(17):
            foldername = self.dataPath + "/{0}".format(str(i))
            if not os.path.isdir(foldername):
                os.makedirs(foldername)

    def move(self):
        """Copy each image into the folder of its class.

        Images are numbered consecutively with 80 per class; an index that
        is an exact multiple of 80 is the *last* image of the previous
        class, hence the ``a - 1`` case.
        """
        for imageName in glob(self.dataPath + "/jpg/*.jpg"):
            imageNum = imageName.split(".")[0][-4:]
            a = int(imageNum) // 80
            b = int(imageNum) % 80
            # Multiples of 80 belong to the previous class folder.
            folder = a - 1 if b == 0 else a
            fl = self.dataPath + "/{0}/image_{1}.jpg".format(str(folder),
                                                             imageNum)
            Image.open(imageName).save(fl)
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/preprocessing/__pycache__/patchpreprocessor.cpython-36.pyc: 
class AspectAwarePreprocessor:
    """Resize to (width, height) without distortion.

    Scales the shorter side to the target size, then center-crops the
    overflow along the longer side.
    """

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        """Return *image* resized and center-cropped to the target size."""
        (h, w) = image.shape[:2]
        dW, dH = 0, 0
        if w < h:
            # Width is the short side: fix it, crop the excess height.
            image = imutils.resize(image, width=self.width, inter=self.inter)
            dH = int((image.shape[0] - self.height) / 2.0)
        else:
            # Height is the short side: fix it, crop the excess width.
            image = imutils.resize(image, height=self.height, inter=self.inter)
            dW = int((image.shape[1] - self.width) / 2.0)
        (h, w) = image.shape[:2]
        image = image[dH:h - dH, dW:w - dW]
        # Final resize guards against off-by-one from integer cropping.
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
class CropPreprocessor:
    """10-crop oversampling: four corner crops plus a center crop, each
    optionally mirrored horizontally (classic test-time augmentation)."""

    def __init__(self, width, height, horiz=True, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.horiz = horiz
        self.inter = inter

    def preprocess(self, image):
        """Return an array of 5 (or 10 with *horiz*) crops of *image*."""
        (h, w) = image.shape[:2]
        # Corner rectangles: top-left, top-right, bottom-right, bottom-left.
        coords = [
            [0, 0, self.width, self.height],
            [w - self.width, 0, w, self.height],
            [w - self.width, h - self.height, w, h],
            [0, h - self.height, self.width, h],
        ]
        # Center rectangle.
        dW = int(0.5 * (w - self.width))
        dH = int(0.5 * (h - self.height))
        coords.append([dW, dH, w - dW, h - dH])

        crops = []
        for (startX, startY, endX, endY) in coords:
            patch = image[startY:endY, startX:endX]
            # Integer arithmetic can leave a one-pixel size mismatch, so
            # resize every patch to the exact target size.
            patch = cv2.resize(patch, (self.width, self.height),
                               interpolation=self.inter)
            crops.append(patch)
        if self.horiz:
            # Add the horizontal mirror of every crop.
            mirrors = [cv2.flip(c, 1) for c in crops]
            crops.extend(mirrors)
        return np.array(crops)
def rank5_accuracy(preds, labels):
    """Compute (rank-1, rank-5) accuracy.

    Parameters
    ----------
    preds : iterable of per-class probability/score vectors
    labels : sized iterable of ground-truth integer class indices

    Returns
    -------
    (rank1, rank5) : tuple of floats in [0, 1]
    """
    rank1 = 0
    rank5 = 0
    total = len(labels)
    # Guard: an empty evaluation set would otherwise divide by zero.
    if total == 0:
        return (0.0, 0.0)
    for (p, gt) in zip(preds, labels):
        # Class indices sorted by descending score.
        p = np.argsort(p)[::-1]
        # Ground truth anywhere in the top 5 predictions?
        if gt in p[:5]:
            rank5 += 1
        # Ground truth is the single top prediction?
        if gt == p[0]:
            rank1 += 1
    return (rank1 / float(total), rank5 / float(total))
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/utils/__pycache__/ranked.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_10/dogs_vs_cats/pyimagesearch/utils/__pycache__/ranked.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/pyimagesearch/utils/ranked.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import numpy as np 3 | 4 | def rank5_accuracy(preds,labels): 5 | #初始化 6 | rank1 = 0 7 | rank5 = 0 8 | # 遍历数据集 9 | for (p,gt) in zip(preds,labels): 10 | # 通过降序对概率进行排序 11 | p = np.argsort(p)[::-1] 12 | # 检查真实标签是否落在top5中 13 | if gt in p[:5]: 14 | rank5 += 1 15 | # 检验真实标签是否等于top1 16 | if gt == p[0]: 17 | rank1 += 1 18 | # 计算准确度 19 | rank1 /= float(len(labels)) 20 | rank5 /= float(len(labels)) 21 | 22 | return rank1,rank5 23 | 24 | 25 | -------------------------------------------------------------------------------- /chapter_10/dogs_vs_cats/train_alexnet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | #加载所需模块 3 | import matplotlib 4 | matplotlib.use('Agg') 5 | from config import dogs_vs_cats_config as config 6 | from pyimagesearch.preprocessing import imagetoarraypreprocessor as IAP 7 | from pyimagesearch.preprocessing import simplespreprocessor as SP 8 | from pyimagesearch.preprocessing import patchpreprocessor as PP 9 | from 
# Single source of truth for the batch size: it drives the generators,
# the step counts and the queue size below (was hard-coded as 128 in
# four separate places).
BATCH_SIZE = 128

# Data augmentation applied to training images only.
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.15, horizontal_flip=True,
                         fill_mode='nearest')

# Load the per-channel RGB means computed over the training set.
means = json.loads(open(config.DATASET_MEAN).read())

# Preprocessors: random 227x227 patches for training, a plain resize for
# validation; both subtract the channel means and convert to arrays.
sp = SP.SimplePreprocessor(227, 227)
pp = PP.PatchPreprocessor(227, 227)
mp = MP.MeanPreprocessor(means['R'], means['G'], means['B'])
iap = IAP.ImageToArrayPreprocessor()

# Training and validation HDF5 batch generators.
trainGen = HDF.HDF5DatasetGenerator(dbPath=config.TRAIN_HDF5,
                                    batchSize=BATCH_SIZE, aug=aug,
                                    preprocessors=[pp, mp, iap], classes=2)
valGen = HDF.HDF5DatasetGenerator(config.VAL_HDF5, BATCH_SIZE,
                                  preprocessors=[sp, mp, iap], classes=2)

# Build and compile AlexNet for 2-class (dogs vs cats) classification.
print("[INFO] compiling model...")
opt = Adam(lr=1e-3)
model = alexnet.AlexNet.build(width=227, height=227, depth=3,
                              classes=2, reg=0.0002)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])

# Live training-curve monitor; one plot file per process id.
path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
callbacks = [TM.TrainingMonitor(path)]

# Train the network.
model.fit_generator(
    trainGen.generator(),
    steps_per_epoch=trainGen.numImages // BATCH_SIZE,
    validation_data=valGen.generator(),
    validation_steps=valGen.numImages // BATCH_SIZE,
    epochs=75,
    max_queue_size=BATCH_SIZE * 2,
    callbacks=callbacks,
    verbose=1)

# Persist the trained model.
print("[INFO] serializing model ....")
model.save(config.MODEL_PATH, overwrite=True)

# Close the HDF5 readers.
trainGen.close()
valGen.close()
# -*- coding: utf-8 -*-
# Train a logistic-regression classifier on CNN features stored in HDF5,
# tuning the regularization strength C via grid search.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import argparse
import pickle
import h5py

# Command-line arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-d","--db",required = True,
	help = 'path HDF5 datasetbase')
ap.add_argument('-m','--model',required=True,
	help='path to output model')
ap.add_argument('-j','--jobs',type=int,default=-1,
	help = '# of jobs to run when tuning hyperparameters')

args = vars(ap.parse_args())

# Open the feature database; the first 75% of rows form the training split.
db = h5py.File(args['db'],'r')
i = int(db['labels'].shape[0] * 0.75)# split point

print("[INFO] tuning hyperparameters...")
# Candidate values for the inverse regularization strength C.
params = {"C":[0.0001,0.001,0.01,0.1,1.0]}
# Grid search with 3-fold cross-validation.
# NOTE(review): dataset key is 'feature' here — feature-extraction scripts
# in this repo typically write 'features'; verify against the extractor.
model = GridSearchCV(LogisticRegression(),params,cv =3,
	n_jobs = args['jobs'])
model.fit(db['feature'][:i],db['labels'][:i])
print('[INFO] best hyperparameters: {}'.format(model.best_params_))

# Evaluate on the held-out 25%.
# NOTE(review): under Python 3, h5py may return label_names as bytes —
# confirm classification_report accepts them or decode first.
print('[INFO] evaluating...')
preds = model.predict(db['feature'][i:])
print(classification_report(db['labels'][i:],preds,
	target_names = db['label_names']))
# Overall accuracy on the test split.
acc = accuracy_score(db['labels'][i:],preds)
print('[INFO] score: {}'.format(acc))

# Serialize the best estimator found by the grid search.
print('[INFO] saving model...')
with open(args['model'],'wb') as fw:
    fw.write(pickle.dumps(model.best_estimator_))

db.close()
-------------------------------------------------------------------------------- 1 | data 2 | -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/config/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/config/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/config/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/config/__pycache__/dogs_vs_cats_config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/config/__pycache__/dogs_vs_cats_config.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/config/__pycache__/tiny_imagenet_config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/config/__pycache__/tiny_imagenet_config.cpython-36.pyc 
# -*- coding: utf-8 -*-
# Configuration for training DeeperGoogLeNet on Tiny ImageNet 200.
from os import path

# Raw training / validation image directories.
TRAIN_IMAGES = "../datasets/tiny-imagenet-200/train"
VAL_IMAGES = "../datasets/tiny-imagenet-200/val/images"

# File mapping each validation image to its ground-truth label.
VAL_MAPPINGS = "../datasets/tiny-imagenet-200/val/val_annotations.txt"

# WordNet hierarchy files used to build human-readable class labels.
WORDNET_IDS = '../datasets/tiny-imagenet-200/wnids.txt'
WORD_LABELS = '../datasets/tiny-imagenet-200/words.txt'

# The test split is carved out of the training data: 30 images per class.
NUM_CLASSES = 100
NUM_TEST_IMAGES = 30 * NUM_CLASSES

# Serialized HDF5 datasets.
TRAIN_HDF5 = "../datasets/tiny-imagenet-200/hdf5/train.hdf5"
VAL_HDF5 = "../datasets/tiny-imagenet-200/hdf5/val.hdf5"
TEST_HDF5 = "../datasets/tiny-imagenet-200/hdf5/test.hdf5"

# Per-channel mean of the training images.
DATASET_MEAN = "output/tiny-image-net-200-mean.json"

# Output artifacts: checkpointed model, training curve and training log.
OUTPUT_PATH = "output"
MODEL_PATH = path.sep.join([OUTPUT_PATH,
    "checkpoints/epoch_70.hdf5"])
FIG_PATH = path.sep.join([OUTPUT_PATH,
    'deepergooglenet_tinyimagenet.png'])
JSON_PATH = path.sep.join([OUTPUT_PATH,
    'deepergooglenet_tinyimagenet.json'])
-------------------------------------------------------------------------------- /chapter_11/deepergooglenet/output/tiny-image-net-200-mean.json: -------------------------------------------------------------------------------- 1 | {"R": 120.73113376731423, "G": 112.79646927929156, "B": 101.5318787785731} -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/__init__.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .trainingmonitor import TrainingMonitor 2 | -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/callbacks/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
# -*- coding: utf-8 -*-
"""Keras callback that checkpoints the model every N epochs, plus a
simple on-disk dataset loader."""
from keras.callbacks import Callback
import os
import numpy as np
import cv2


class EpochCheckpoint(Callback):
    """Serialize the model to `outputPath` every `every` epochs."""

    def __init__(self, outputPath, every=5, startAt=0):
        # BUGFIX: the original called super(Callback, self).__init__(),
        # which resolves to Callback's *parent* and skips
        # Callback.__init__ entirely; call through the subclass instead.
        super(EpochCheckpoint, self).__init__()

        # base output path, checkpoint interval, and the epoch to resume
        # counting from (supports restarting interrupted training)
        self.outputPath = outputPath
        self.every = every
        self.intEpoch = startAt

    def on_epoch_end(self, epoch, logs=None):
        # BUGFIX: `logs={}` was a mutable default argument.
        # check to see if the model should be serialized to disk
        if (self.intEpoch + 1) % self.every == 0:
            path = os.path.sep.join(
                [self.outputPath, "epoch_{}.hdf5".format(self.intEpoch + 1)])
            self.model.save(path, overwrite=True)

        # increment the internal epoch counter
        self.intEpoch += 1


class SimpleDatasetLoader:
    """Load images from disk, applying an optional preprocessor chain.

    The class label of each image is taken from the name of its parent
    directory (.../<label>/<image>).
    """

    def __init__(self, preprocessors=None):
        # default to an empty preprocessor list
        self.preprocessors = preprocessors if preprocessors is not None else []

    def load(self, imagePaths, verbose=-1):
        """Return (data, labels) numpy arrays for the given paths.

        verbose -- print a progress line every `verbose` images
                   (values <= 0 disable progress output)
        """
        data = []
        labels = []

        for (i, imagePath) in enumerate(imagePaths):
            image = cv2.imread(imagePath)
            label = imagePath.split(os.path.sep)[-2]

            # self.preprocessors is always a list after __init__, so the
            # original `is not None` guard was redundant
            for preprocessor in self.preprocessors:
                image = preprocessor.preprocess(image)

            data.append(image)
            labels.append(label)

            # show progress updates
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {0}/{1}".format(i + 1,
                                                        len(imagePaths)))

        return (np.array(data), np.array(labels))
-------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/datasets/SimpleDatasetLoader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/datasets/SimpleDatasetLoader.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/datasets/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/datasets/__init__.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/io/__pycache__/hdf5datasetgenerator.cpython-36.pyc: 
# -*- coding: utf-8 -*-
"""HDF5 dataset generator/writer plus a small VGG-style reference net."""
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Activation, Flatten, Dense, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import numpy as np
import h5py
import os


class HDF5DatasetGenerator:
    """Yield batches of (images, labels) from an HDF5 database."""

    def __init__(self, dbPath, batchSize, preprocessors=None, aug=None,
                 binarize=True, classes=2):
        # store the parameter list
        self.batchSize = batchSize
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes

        # open the HDF5 database.
        # BUGFIX: h5py.File(dbPath) without an explicit mode is
        # deprecated in h5py >= 2.9 (and invalid in h5py 3); the
        # generator only reads, so open read-only.
        self.db = h5py.File(dbPath, "r")
        self.numImages = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        """Loop over the database `passes` times (infinite by default)."""
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.numImages, self.batchSize):
                # extract the next batch from the HDF5 datasets
                images = self.db['images'][i: i + self.batchSize]
                labels = self.db['labels'][i: i + self.batchSize]

                # one-hot encode the labels if requested
                if self.binarize:
                    labels = np_utils.to_categorical(labels, self.classes)

                # apply the preprocessing chain to each image
                if self.preprocessors is not None:
                    procImages = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        procImages.append(image)
                    images = np.array(procImages)

                # apply data augmentation if an augmentor was supplied
                if self.aug is not None:
                    (images, labels) = next(self.aug.flow(
                        images, labels, batch_size=self.batchSize))

                yield (images, labels)

            epochs += 1

    def close(self):
        # close the HDF5 database
        self.db.close()


class HDF5DatasetWriter:
    """Buffered writer that streams (data, labels) pairs into HDF5."""

    def __init__(self, dims, outputPath, dataKey='images', buffSize=1000):
        # refuse to clobber an existing database.
        # BUGFIX: the original message used typographic quotes around
        # outputPath (a copy/paste artifact).
        if os.path.exists(outputPath):
            raise ValueError("The supplied 'outputPath' already "
                             "exists and cannot be overwritten. Manually delete "
                             "the file before continuing.", outputPath)

        # create the HDF5 file: one dataset for the images/features and
        # one for the integer class labels
        self.db = h5py.File(outputPath, 'w')
        self.data = self.db.create_dataset(dataKey, dims, dtype='float')
        self.labels = self.db.create_dataset('labels', (dims[0],), dtype='int')

        # in-memory buffer, flushed to disk every bufSize rows
        self.bufSize = buffSize
        self.buffer = {"data": [], "labels": []}
        # index of the next row to be written
        self.idx = 0

    def add(self, rows, labels):
        """Append rows/labels to the buffer, flushing when full."""
        self.buffer['data'].extend(rows)
        self.buffer['labels'].extend(labels)
        if len(self.buffer['data']) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write the buffer to disk and reset it."""
        i = self.idx + len(self.buffer['data'])
        self.data[self.idx:i] = self.buffer['data']
        self.labels[self.idx:i] = self.buffer['labels']
        self.idx = i
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store the human-readable label names in their own dataset."""
        dt = h5py.special_dtype(vlen=str)
        labelSet = self.db.create_dataset(
            'label_names', (len(classLabels),), dtype=dt)
        labelSet[:] = classLabels

    def close(self):
        # flush any rows still buffered, then close the file
        if len(self.buffer['data']) > 0:
            self.flush()
        self.db.close()


class MiniVGGNet:
    """Two (CONV->RELU->BN)x2 -> POOL blocks followed by an FC head."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build and return the (uncompiled) Keras model.

        width/height/depth -- input image dimensions
        classes            -- number of softmax output units
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1

        # adjust for channels-first image ordering
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # block 1 -- only the first layer needs input_shape (the
        # original redundantly passed it to every CONV layer, where
        # Keras ignores it)
        model.add(Conv2D(32, (3, 3), padding='same',
                         input_shape=inputShape, name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                               name='block1_pool'))
        model.add(Dropout(0.25))

        # block 2
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                               name='block2_pool'))
        model.add(Dropout(0.25))

        # fully-connected head
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
-------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/nn/conv/__init__.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc 
#encoding:utf-8
# Fully-connected "head" placed on top of a (typically frozen) base
# network when fine-tuning a pre-trained model.
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense

class FCHeadNet:
    @staticmethod
    def build(baseModel, classes, D):
        """Return the new head: FLATTEN -> DENSE(D, relu) -> DROPOUT(0.5)
        -> DENSE(classes, softmax).

        baseModel -- the headless base network whose output we extend
        classes   -- number of output classes
        D         -- number of units in the fully-connected layer
        """
        # start from the base model's output tensor
        head = baseModel.output
        head = Flatten(name='flatten')(head)
        head = Dense(D, activation='relu')(head)
        head = Dropout(0.5)(head)

        # softmax classifier on top
        head = Dense(classes, activation='softmax')(head)
        return head
#encoding:utf-8
"""Sort the Flowers-17 jpg dump into one directory per class.

Images are numbered consecutively with 80 images per class, so image N
belongs to class (N - 1) // 80.
"""
import os
from glob import glob


class MoveImageToLabel:
    def __init__(self, dataPath):
        # root directory that contains the "jpg" sub-directory
        self.dataPath = dataPath

    def makeFolder(self, numClasses=17):
        """Create one target directory per class (0 .. numClasses-1).

        numClasses defaults to 17 (Flowers-17); parameterizing the
        hard-coded constant generalizes the helper to other datasets
        while staying backward compatible.
        """
        for i in range(numClasses):
            foldername = self.dataPath + "/{0}".format(str(i))
            if not os.path.isdir(foldername):
                os.makedirs(foldername)

    def move(self):
        """Copy every jpg into the directory of its class."""
        # lazy import so the os-only parts of this class work without
        # Pillow installed
        from PIL import Image

        for imageName in glob(self.dataPath + "/jpg/*.jpg"):
            imageNum = imageName.split(".")[0][-4:]
            a = int(imageNum) // 80
            b = int(imageNum) % 80
            # image numbers start at 1, so an exact multiple of 80 still
            # belongs to the previous class
            folder = a - 1 if b == 0 else a
            fl = self.dataPath + "/{0}/image_{1}.jpg".format(str(folder),
                                                             imageNum)
            # de-duplicated: the original repeated open/save in both
            # branches of the if/else with only `fl` differing
            Image.open(imageName).save(fl)
-------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__init__.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- 
/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/patchpreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/patchpreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/simplespreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_11/deepergooglenet/pyimagesearch/preprocessing/__pycache__/simplespreprocessor.cpython-36.pyc 
#encoding:utf-8
"""Image preprocessors used throughout the chapter."""
import imutils
import cv2
import numpy as np
from keras.preprocessing.image import img_to_array
from sklearn.feature_extraction.image import extract_patches_2d


class AspectAwarePreprocessor:
    """Resize to (width, height) while preserving the aspect ratio:
    resize along the shorter side, center-crop the longer side, then
    force the exact target size."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        (h, w) = image.shape[:2]
        dW = 0
        dH = 0

        # resize along the shorter dimension, then compute how much of
        # the longer dimension to crop from each side
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
            dH = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
            dW = int((image.shape[1] - self.width) / 2.0)

        # center-crop, then resize exactly (rounding during the crop can
        # leave the image one pixel off target)
        (h, w) = image.shape[:2]
        image = image[dH:h - dH, dW:w - dW]
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)


class CropPreprocessor:
    """10-crop oversampling: four corners + center, optionally mirrored."""

    def __init__(self, width, height, horiz=True, inter=cv2.INTER_AREA):
        # target crop size, whether to add horizontal flips, and the
        # interpolation method
        self.width = width
        self.height = height
        self.horiz = horiz
        self.inter = inter

    def preprocess(self, image):
        (h, w) = image.shape[:2]

        # the four corner crops ...
        coords = [
            [0, 0, self.width, self.height],
            [w - self.width, 0, w, self.height],
            [w - self.width, h - self.height, w, h],
            [0, h - self.height, self.width, h],
        ]

        # ... plus the center crop
        dW = int(0.5 * (w - self.width))
        dH = int(0.5 * (h - self.height))
        coords.append([dW, dH, w - dW, h - dH])

        crops = []
        for (startX, startY, endX, endY) in coords:
            crop = image[startY:endY, startX:endX]
            # rounding may leave the crop a pixel off; resize exactly
            crop = cv2.resize(crop, (self.width, self.height),
                              interpolation=self.inter)
            crops.append(crop)

        # add the horizontal mirror of every crop
        if self.horiz:
            mirrored = [cv2.flip(c, 1) for c in crops]
            crops.extend(mirrored)
        return np.array(crops)


class ImageToArrayPreprocessor:
    """Convert an image to a Keras-ordered numpy array."""

    def __init__(self, dataFormat=None):
        # None defers to the default data format in keras.json
        self.dataFormat = dataFormat

    def preprocess(self, image):
        return img_to_array(image, data_format=self.dataFormat)


class MeanPreprocessor:
    """Subtract per-channel means (mean normalization)."""

    def __init__(self, rMean, gMean, bMean):
        # channel means, typically computed over the training set
        self.rMean = rMean
        self.gMean = gMean
        self.bMean = bMean

    def preprocess(self, image):
        # OpenCV splits channels in BGR order, not RGB
        (B, G, R) = cv2.split(image.astype("float32"))
        R -= self.rMean
        G -= self.gMean
        B -= self.bMean
        return cv2.merge([B, G, R])


class PatchPreprocessor:
    """Extract one random (height, width) patch from the image."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def preprocess(self, image):
        return extract_patches_2d(image, (self.height, self.width),
                                  max_patches=1)[0]


class SimplePreprocessor:
    """Resize to a fixed size, ignoring the aspect ratio."""

    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)
#encoding:utf-8
import numpy as np

def rank5_accuracy(preds, labels):
    """Compute (rank-1, rank-5) accuracy over a set of predictions.

    preds  -- per-class probabilities, one row per sample
    labels -- ground-truth class index for each sample
    """
    rank1 = 0
    rank5 = 0

    for (p, gt) in zip(preds, labels):
        # class indices ordered by descending probability
        ordered = np.argsort(p)[::-1]

        # rank-5 hit: ground truth anywhere in the top five
        if gt in ordered[:5]:
            rank5 += 1

        # rank-1 hit: ground truth is the single best prediction
        if gt == ordered[0]:
            rank1 += 1

    # normalize the hit counts to fractions of the dataset
    rank1 /= float(len(labels))
    rank5 /= float(len(labels))

    return rank1, rank5
计算rank-1和rank5准确度 33 | (rank1,rank5) = rank5_accuracy(predictions,testGen.db['labels']) 34 | print("[INFO] rank-1: {:.2f}%".format(rank1 * 100)) 35 | print("[INFO] rank-5: {:.2f}%".format(rank5 * 100)) 36 | 37 | #关闭数据库 38 | testGen.close() 39 | -------------------------------------------------------------------------------- /chapter_12/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/config/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/config/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/config/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/config/__pycache__/dogs_vs_cats_config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/config/__pycache__/dogs_vs_cats_config.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/config/tiny_imagenet_config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from os import path 3 | 4 | #训练数据集和验证数据集路径 5 | TRAIN_IMAGES = "../datasets/tiny-imagenet-200/train" 6 | 
VAL_IMAGES = "../datasets/tiny-imagenet-200/val/images" 7 | 8 | # 验证数据集与标签映射文件 9 | VAL_MAPPINGS = "../datasets/tiny-imagenet-200/val/val_annotations.txt" 10 | 11 | # WordNet hierarchy文件路径 12 | WORDNET_IDS = '../datasets/tiny-imagenet-200/wnids.txt' 13 | WORD_LABELS = '../datasets/tiny-imagenet-200/words.txt' 14 | 15 | # 从train数据中构造test数据 16 | NUM_CLASSES = 200 17 | NUM_TEST_IMAGES = 50 * NUM_CLASSES 18 | 19 | # 定义输出路径 20 | TRAIN_HDF5 = "../datasets/tiny-imagenet-200/hdf5/train.hdf5" 21 | VAL_HDF5 = "../datasets/tiny-imagenet-200/hdf5/val.hdf5" 22 | TEST_HDF5 = "../datasets/tiny-imagenet-200/hdf5/test.hdf5" 23 | 24 | # 数据均值文件 25 | DATASET_MEAN = "output/tiny-image-net-200-mean.json" 26 | 27 | # 输出路径和性能结果 28 | OUTPUT_PATH = "output" 29 | MODEL_PATH = path.sep.join([OUTPUT_PATH, 30 | "resnet_tinyimagenet.hdf5"]) 31 | FIG_PATH = path.sep.join([OUTPUT_PATH, 32 | 'resnet56_tinyimagenet.png']) 33 | JSON_PATH = path.sep.join([OUTPUT_PATH, 34 | 'resnet56_tinyimage.json']) 35 | 36 | 37 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/output/dogs_vs_cats_mean.json: -------------------------------------------------------------------------------- 1 | {"R": 124.97444899064222, "G": 115.97971080586517, "B": 106.13593549819402} -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/__init__.pyc 
-------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .trainingmonitor import TrainingMonitor 2 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__pycache__/epochcheckpoint.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__pycache__/epochcheckpoint.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/callbacks/__pycache__/trainingmonitor.cpython-36.pyc: -------------------------------------------------------------------------------- 
#encoding:utf-8
from keras.callbacks import ModelCheckpoint
import os

class EpochCheckpoint(ModelCheckpoint):
    """Save the model to disk every `every` epochs.

    `filepath` is treated as a directory; checkpoints are written inside it
    as ``epoch_XX.hdf5``.  `startAt` offsets the epoch counter so training
    resumed from a checkpoint keeps consistent file numbering.
    """

    def __init__(self, filepath, every, startAt=0):
        super(EpochCheckpoint, self).__init__(filepath)
        self.every = every      # checkpoint interval, in epochs
        self.startAt = startAt  # epoch number training was resumed at

    def on_epoch_end(self, epoch, logs=None):
        # FIX: the original declared `epoch=24` (a meaningless default) and
        # `logs={}` (a shared mutable default).  Keras always passes `epoch`
        # positionally; the signature now matches the Callback contract.
        filepath = os.path.join(self.filepath, "epoch_{epoch:02d}.hdf5")
        # save only on every `self.every`-th epoch (1-based, offset by startAt)
        if (epoch + self.startAt + 1) % self.every == 0:
            filepath = filepath.format(epoch=epoch + self.startAt + 1)
            self.model.save(filepath, overwrite=True)
verbose == 0: 29 | print("[INFO] processed {0}/{1}".format(i+1,len(imagePaths))) 30 | 31 | return (np.array(data),np.array(labels)) 32 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/datasets/SimpleDatasetLoader.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/datasets/SimpleDatasetLoader.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/datasets/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/datasets/__init__.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/io/__pycache__/__init__.cpython-36.pyc 
# -*- coding: utf-8 -*-
from keras.utils import np_utils
import numpy as np
import h5py

class HDF5DatasetGenerator:
    """Yield (images, labels) batches from an HDF5 dataset for Keras training.

    Supports optional one-hot binarization of labels, a chain of image
    preprocessors, and a Keras data-augmentation object.
    """

    def __init__(self, dbPath, batchSize, preprocessors=None, aug=None, binarize=True, classes=2):
        # store configuration
        self.batchSize = batchSize
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        # FIX: open the HDF5 file explicitly read-only.  The original relied
        # on h5py's legacy default mode ('a'), which is deprecated and can
        # lock or even create/modify the dataset file.
        self.db = h5py.File(dbPath, "r")
        self.numImages = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        """Generate batches forever by default (`passes` bounds epoch count)."""
        epochs = 0
        while epochs < passes:
            # walk the dataset in batch-sized strides
            for i in np.arange(0, self.numImages, self.batchSize):
                # slice the raw images/labels for this batch out of HDF5
                images = self.db['images'][i: i + self.batchSize]
                labels = self.db['labels'][i: i + self.batchSize]
                # one-hot encode integer labels when requested
                if self.binarize:
                    labels = np_utils.to_categorical(labels, self.classes)
                # run each image through the preprocessor chain
                if self.preprocessors is not None:
                    proImages = []
                    for image in images:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        proImages.append(image)
                    images = np.array(proImages)
                # apply on-the-fly data augmentation if configured
                if self.aug is not None:
                    (images, labels) = next(self.aug.flow(images,
                        labels, batch_size=self.batchSize))
                yield (images, labels)
            epochs += 1

    def close(self):
        # release the underlying HDF5 file handle
        self.db.close()
Manually delete " 10 | "the file before continuing.", outputPath) 11 | # 初始化一个HDF5 12 | self.db = h5py.File(outputPath,'w') 13 | # 存储图片数据或者特征 14 | self.data = self.db.create_dataset(dataKey,dims,dtype='float') 15 | # 存储标签数据 16 | self.labels = self.db.create_dataset('labels',(dims[0],),dtype='int') 17 | # 缓冲大小 18 | self.bufSize = buffSize 19 | self.buffer = {"data":[],"labels":[]} 20 | # 索引 21 | self.idx = 0 22 | 23 | def add(self,rows,labels): 24 | # 增加数据 25 | self.buffer['data'].extend(rows) 26 | self.buffer['labels'].extend(labels) 27 | # 如果buffer数据大小超过bufSize,则将数据写入磁盘中 28 | if len(self.buffer['data']) >= self.bufSize: 29 | self.flush() 30 | 31 | def flush(self): 32 | # 将buffers写入磁盘并初始化buffer 33 | i = self.idx + len(self.buffer['data']) 34 | self.data[self.idx:i] = self.buffer['data'] 35 | self.labels[self.idx:i] = self.buffer['labels'] 36 | self.idx = i # 指针 37 | self.buffer = {"data":[],"labels":[]} 38 | 39 | def storeClassLabels(self,classLabels): 40 | # 一个dataset存储数据标签名称 41 | # dt = h5py.special_dtype(vlen = unicode) # python2.7 42 | dt = h5py.special_dtype(vlen = str) # python3 43 | labelSet =self.db.create_dataset('label_names',(len(classLabels),),dtype=dt) 44 | labelSet[:] = classLabels 45 | 46 | def close(self): 47 | if len(self.buffer['data']) >0 : 48 | self.flush() 49 | # 关闭dataset 50 | self.db.close() 51 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/__init__.pyc: -------------------------------------------------------------------------------- 
#encoding:utf-8
from keras.models import Sequential
from keras.layers.core import Activation,Flatten,Dense,Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K

class MiniVGGNet:
    """VGG-style CNN: two (CONV => RELU => BN) * 2 => POOL => DROPOUT stages
    followed by a fully-connected softmax classifier head."""

    @staticmethod
    def build(width, height, depth, classes):
        """Build and return the (uncompiled) MiniVGGNet model.

        Parameters:
            width, height, depth: input image dimensions.
            classes: number of output classes.
        """
        model = Sequential()
        inputShape = (height, width, depth)
        chanDim = -1  # BatchNormalization axis for channels-last

        # adapt input shape / channel axis for Theano-style ordering
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
            chanDim = 1

        # Block 1
        model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape, name='block1_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        # FIX: `input_shape` belongs only on the first layer; the original
        # passed it to every CONV layer, where Keras silently ignores it.
        model.add(Conv2D(32, (3, 3), padding='same', name='block1_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool'))
        model.add(Dropout(0.25))

        # Block 2
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv1'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(Conv2D(64, (3, 3), padding='same', name='block2_conv2'))
        model.add(Activation('relu'))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool'))
        model.add(Dropout(0.25))

        # Classifier head: FC => RELU => BN => DROPOUT => softmax
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
/chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/MiniVGGNet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/alexnet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/resnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/__pycache__/resnet.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/nn/conv/fcheadnet.py: 
#encoding:utf-8
# Modules needed for the fully-connected head
from keras.layers.core import Dropout
from keras.layers.core import Flatten
from keras.layers.core import Dense

class FCHeadNet:
    """Fully-connected classification head for transfer learning."""

    @staticmethod
    def build(baseModel, classes, D):
        """Attach FLATTEN => FC(D, relu) => DROPOUT(0.5) => softmax(classes)
        on top of *baseModel*'s output and return the resulting tensor."""
        x = baseModel.output
        x = Flatten(name='flatten')(x)
        x = Dense(D, activation='relu')(x)
        x = Dropout(0.5)(x)
        # final softmax layer over the target classes
        x = Dense(classes, activation='softmax')(x)
        return x
= Image.open(imageName) 24 | newimg.save(fl) 25 | else: 26 | fl=self.dataPath+"/{0}/image_{1}.jpg".format(str(a),imageNum) 27 | newimg = Image.open(imageName) 28 | newimg.save(fl) 29 | -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/ImageMove.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/ImageMove.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__init__.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/AspectAwarePreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- 
/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/croppreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/imagetoarraypreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/meanpreprocessor.cpython-36.pyc 
-------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/patchpreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/patchpreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/simplespreprocessor.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/__pycache__/simplespreprocessor.cpython-36.pyc -------------------------------------------------------------------------------- /chapter_12/resnet_tiny_imagenet/pyimagesearch/preprocessing/aspectawarepreprocessor.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import imutils 3 | import cv2 4 | 5 | class AspectAwarePreprocessor: 6 | def __init__(self,width,height,inter=cv2.INTER_AREA): 7 | self.width = width 8 | self.height = height 9 | self.inter = inter 10 | 11 | def preprocess(self,image): 12 | (h,w) = image.shape[:2] 13 | dw = 0 14 | dh = 0 15 | if w < h: 16 | image = imutils.resize(image,width = self.width,inter = self.inter) 17 | dh = int((image.shape[0] - self.height) / 2.0) 18 | else: 19 | image = imutils.resize(image,height=self.height,inter = self.inter) 20 | dw = int((image.shape[1] - self.width) /2.0) 21 | (h,w) = image.shape[:2] 22 | image = image[dh:h - dh,dw:w-dw] 23 | return 
# -*- coding: utf-8 -*-

import numpy as np
import cv2

class CropPreprocessor:
    """Ten-crop oversampling: four corner crops plus a center crop,
    optionally doubled with horizontal mirrors."""

    def __init__(self, width, height, horiz=True, inter=cv2.INTER_AREA):
        # crop size, whether to add flipped copies, and resize interpolation
        self.width = width
        self.height = height
        self.horiz = horiz
        self.inter = inter

    def preprocess(self, image):
        """Return an array of 5 (or 10 with mirrors) crops of *image*."""
        (h, w) = image.shape[:2]
        # offsets that center a width x height window inside the image
        dW = int(0.5 * (w - self.width))
        dH = int(0.5 * (h - self.height))
        # four corner boxes plus the center box as (startX, startY, endX, endY)
        coords = [
            [0, 0, self.width, self.height],
            [w - self.width, 0, w, self.height],
            [w - self.width, h - self.height, w, h],
            [0, h - self.height, self.width, h],
            [dW, dH, w - dW, h - dH],
        ]

        crops = []
        for (startX, startY, endX, endY) in coords:
            window = image[startY:endY, startX:endX]
            # integer rounding can leave the window off by ~1px, so
            # force the exact target size with an interpolated resize
            window = cv2.resize(window, (self.width, self.height),
                interpolation=self.inter)
            crops.append(window)

        if self.horiz:
            # append a horizontally mirrored copy of every crop
            crops += [cv2.flip(c, 1) for c in crops]
        return np.array(crops)
# -*- coding: utf-8 -*-
import cv2

class MeanPreprocessor:
    """Subtract per-channel dataset means from an image (mean normalization)."""

    def __init__(self, rMean, gMean, bMean):
        # dataset-wide average intensity for each colour channel
        self.rMean = rMean
        self.gMean = gMean
        self.bMean = bMean

    def preprocess(self, image):
        """Return *image* as float32 with channel means subtracted (BGR order)."""
        # OpenCV splits channels in BGR order, not RGB
        (B, G, R) = cv2.split(image.astype("float32"))
        B -= self.bMean
        G -= self.gMean
        R -= self.rMean
        # reassemble in the same BGR order
        return cv2.merge([B, G, R])
#encoding:utf-8
import numpy as np

def rank5_accuracy(preds, labels):
    """Return (rank-1, rank-5) accuracy for predicted probability vectors
    against ground-truth class indices."""
    hits_top1 = 0
    hits_top5 = 0
    for (scores, truth) in zip(preds, labels):
        # class indices ordered from most to least probable
        ranking = np.argsort(scores)[::-1]
        if truth in ranking[:5]:
            hits_top5 += 1
        if truth == ranking[0]:
            hits_top1 += 1
    total = float(len(labels))
    return hits_top1 / total, hits_top5 / total
# encoding:utf-8
import cv2
import json
import numpy as np
from sklearn.model_selection import train_test_split
from config import imagenet_alexnet_config as config
from pyimagesearch.utils.imagenethelper import ImageNetHelper
from pyimagesearch.utils.imagenettfrecord import ImageNetTfrecord

# Build the training and validation image/label lists from the raw layout.
print('[INFO] loading image paths...')
inh = ImageNetHelper(config)
(trainPaths, trainLabels) = inh.buildTrainingSet()
(valPaths, valLabels) = inh.buildValidationSet()

# Carve a stratified test split out of the training data.
print('[INFO] constructing splits...')
split = train_test_split(trainPaths, trainLabels,
                         test_size=config.NUM_TEST_IMAGES,
                         stratify=trainLabels,
                         random_state=42)
(trainPaths, testPaths, trainLabels, testLabels) = split

datasets = [
    ('train', trainPaths, trainLabels, config.TRAIN_TFRECORD),
    ('val', valPaths, valLabels, config.VAL_TFRECORD),
    ('test', testPaths, testLabels, config.TEST_TFRECORD)
]

# Per-image channel means, accumulated over the training split only.
(R, G, B) = ([], [], [])

for (dType, paths, labels, outputPath) in datasets:
    print('[INFO] building {}...'.format(outputPath))
    inr = ImageNetTfrecord(outputPath)

    probar = inh._pbar(name='Building %s List: ' % dType, maxval=len(paths))
    for (i, (path, label)) in enumerate(zip(paths, labels)):
        inr._save_one(label=label, filename=path, isTrain=True)
        if dType == 'train':
            image = cv2.imread(path)
            # cv2.mean returns (B, G, R, alpha); keep the colour channels.
            (b, g, r) = cv2.mean(image)[:3]
            R.append(r)
            G.append(g)
            B.append(b)
        probar.update(i)
    probar.finish()
    inr.tfwriter.close()

print('[INFO] serializing means...')
D = {'R': np.mean(R), 'G': np.mean(G), 'B': np.mean(B)}
# BUG FIX: the original called f.write(json.dumps) — writing the function
# object itself, which raises TypeError and never persists the means.
# Serialize the dict D instead.
with open(config.DATASET_MEAN, 'w') as f:
    f.write(json.dumps(D))
# encoding:utf-8
from os import path

# Root directory of the raw ImageNet (ILSVRC) dataset.
BASE_PATH = 'ILSVRC'

# Raw image and devkit locations, derived from the base path.
IMAGES_PATH = path.sep.join([BASE_PATH, 'Data/CLS-LOC'])
IMAGE_SETS_PATH = path.sep.join([BASE_PATH, 'ImageSets/CLS-LOC/'])
DEVKIT_PATH = path.sep.join([BASE_PATH, 'devkit/data'])

# Mapping file of WordNet IDs to class labels.
WORD_IDS = path.sep.join([DEVKIT_PATH, 'map_clsloc.txt'])

# List of training images.
TRAIN_LIST = path.sep.join([IMAGE_SETS_PATH, 'train_cls.txt'])

# Validation image list and its ground-truth label file.
VAL_LIST = path.sep.join([IMAGE_SETS_PATH, 'val.txt'])
VAL_LABELS = path.sep.join([DEVKIT_PATH,
                            'ILSVRC2015_clsloc_validation_ground_truth.txt'])
# Validation images excluded (blacklisted) from evaluation.
VAL_BLACKLIST = path.sep.join([DEVKIT_PATH,
                               'ILSVRC2015_clsloc_validation_blacklist.txt'])

# Number of classes, and how many training images to hold out for testing.
NUM_CLASSES = 1000
NUM_TEST_IMAGES = 50 * NUM_CLASSES

# Output locations of the serialized TFRecord files.
TF_OUTPUT = 'imagenet'
TRAIN_TFRECORD = path.sep.join([TF_OUTPUT, 'tfrecords/train.tfrecords'])
VAL_TFRECORD = path.sep.join([TF_OUTPUT, 'tfrecords/val.tfrecords'])
TEST_TFRECORD = path.sep.join([TF_OUTPUT, 'tfrecords/test.tfrecords'])

# Where the per-channel RGB mean is written as JSON.
DATASET_MEAN = 'outputs/imagenet_mean.json'

# Mini-batch size used during training.
BATCH_SIZE = 128
-------------------------------------------------------------------------------- /chapter_13/pyimagesearch/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/io/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/nn/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/nn/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_13/pyimagesearch/nn/__init__.pyc -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/nn/conv/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/nn/conv/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lonePatient/Deep_Learning_For_Computer_Vision_With_Python/9211e179d65f23e1645849024121c8ca8c49dba3/chapter_13/pyimagesearch/nn/conv/__init__.pyc -------------------------------------------------------------------------------- /chapter_13/pyimagesearch/preprocessing/__init__.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 -------------------------------------------------------------------------------- 
# encoding:utf-8
from os import path
import tensorflow as tf


class ImageNetTfrecord(object):
    """Serialize ImageNet images into a TFRecord file (TF 1.x API)."""

    def __init__(self, tfrecord_name):
        # Output path; the writer stays open until the caller closes
        # self.tfwriter explicitly.
        self.tfrecord_name = tfrecord_name
        self.tfwriter = tf.python_io.TFRecordWriter(self.tfrecord_name)

    def _int64_feature(self, value):
        """Wrapper for inserting int64 features into an Example proto."""
        if not isinstance(value, list):
            value = [value]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

    def _float_feature(self, value):
        """Wrapper for inserting float features into an Example proto."""
        if not isinstance(value, list):
            value = [value]
        return tf.train.Feature(float_list=tf.train.FloatList(value=value))

    def _bytes_feature(self, value):
        """Wrapper for inserting bytes features into an Example proto."""
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _process_image(self, filename):
        """Read one image file and return its raw (already encoded) bytes."""
        with tf.gfile.FastGFile(filename, 'rb') as f:
            return f.read()

    def _save_one(self, label, filename, isTrain=True):
        """Write one image, its label and its basename as an Example record.

        When isTrain is False the label is masked with -1, so ground
        truth is never written into val/test record files.
        """
        image_data = self._process_image(filename)
        name = path.split(filename)[-1]
        # The two original branches duplicated the entire Example
        # construction and differed only in the label value; build the
        # record once instead.
        if not isTrain:
            label = -1
        example = tf.train.Example(features=tf.train.Features(feature={
            'image': self._bytes_feature(tf.compat.as_bytes(image_data)),
            'label': self._int64_feature(label),
            'name': self._bytes_feature(tf.compat.as_bytes(name))
        }))
        self.tfwriter.write(example.SerializeToString())