├── demo_pic.png ├── linear2.pickle ├── .gitignore ├── README.md ├── config.py.example ├── requirements.txt ├── requirements2.txt ├── predict.py ├── app.py └── age.prototxt /demo_pic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makagan/pier38_age_estimator/master/demo_pic.png -------------------------------------------------------------------------------- /linear2.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/makagan/pier38_age_estimator/master/linear2.pickle -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dex_imdb_wiki.caffemodel 2 | imagenet_mean.binaryproto 3 | /env 4 | secrets.py 5 | config.py 6 | *.pyc 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Age-estimation demo: a Flask API (app.py) serving a Caffe VGG-16 age model (age.prototxt + dex_imdb_wiki.caffemodel), plus a standalone prediction script (predict.py). Setup: 2 | ``` 3 | virtualenv env 4 | source env/bin/activate 5 | pip install -U -r requirements.txt 6 | ``` 7 | -------------------------------------------------------------------------------- /config.py.example: -------------------------------------------------------------------------------- 1 | AWS_ACCESS_KEY_ID="YOUR AWS ACCESS KEY" 2 | AWS_SECRET_ACCESS_KEY="YOUR AWS SECRET" 3 | BUCKET="bucket-name" 4 | 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | numpy 3 | pandas 4 | scikit-image 5 | scipy 6 | scikit-learn 7 | virtualenv 8 | gunicorn 9 | boto3 10 | celery 11 | requests 12 | redis 13 | -------------------------------------------------------------------------------- /requirements2.txt: 
-------------------------------------------------------------------------------- 1 | backports.shutil-get-terminal-size==1.0.0 2 | cycler==0.10.0 3 | Cython==0.27.1 4 | decorator==4.1.2 5 | enum34==1.1.6 6 | functools32==3.2.3.post2 7 | h5py==2.7.1 8 | ipython==5.5.0 9 | ipython-genutils==0.2.0 10 | leveldb==0.194 11 | matplotlib==2.0.2 12 | networkx==2.0 13 | nose==1.3.7 14 | numpy==1.11.0 15 | olefile==0.44 16 | pandas==0.20.3 17 | pathlib2==2.3.0 18 | pexpect==4.2.1 19 | pickleshare==0.7.4 20 | Pillow==4.3.0 21 | prompt-toolkit==1.0.15 22 | protobuf==3.4.0 23 | ptyprocess==0.5.2 24 | pydot==1.2.3 25 | Pygments==2.2.0 26 | pyparsing==2.2.0 27 | python-dateutil==1.5 28 | python-gflags==3.1.1 29 | pytz==2017.2 30 | PyWavelets==0.5.2 31 | PyYAML==3.12 32 | scandir==1.6 33 | scikit-image==0.13.1 34 | scipy==0.17.0 35 | simplegeneric==0.8.1 36 | six==1.11.0 37 | subprocess32==3.2.7 38 | traitlets==4.3.2 39 | virtualenv==15.1.0 40 | wcwidth==0.1.7 41 | Flask 42 | boto3 43 | requests 44 | -------------------------------------------------------------------------------- /predict.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | import sys 5 | """ 6 | If you do not have caffe root setup 7 | 8 | caffe_root = '~/caffe/' #Path to you caffe root 9 | sys.path.insert(0, caffe_root + 'python') 10 | """ 11 | 12 | import caffe 13 | # NOTE: Comment the line below, if you want to run in CPU mode 14 | #caffe.set_mode_gpu() 15 | caffe.set_mode_cpu() 16 | 17 | # age.prototxt dex_imdb_wiki.caffemodel imagenet_mean.binaryproto 18 | 19 | 20 | mean_filename=os.path.join('imagenet_mean.binaryproto') 21 | proto_data = open(mean_filename, "rb").read() 22 | a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data) 23 | mean = caffe.io.blobproto_to_array(a)[0].mean(1).mean(1) 24 | 25 | net_pretrained = os.path.join("dex_imdb_wiki.caffemodel") 26 | net_model_file = os.path.join("age.prototxt") 27 | Net = 
caffe.Classifier(net_model_file, net_pretrained, 28 | mean=mean, 29 | channel_swap=(2,1,0), 30 | raw_scale=255, 31 | image_dims=(256, 256)) 32 | 33 | 34 | def predict(image_path, verbose=False): 35 | input_image = caffe.io.load_image(image_path) 36 | prediction = Net.predict([input_image],oversample=False) 37 | 38 | if verbose: print "="*100 39 | cum_sum = 0 40 | for _idx, val in enumerate(prediction[0]): 41 | if verbose: print _idx , ": ", val*100,"%" 42 | cum_sum += _idx*val 43 | if verbose: 44 | print "="*100 45 | print 'predicted category is {0}'.format(prediction.argmax()) 46 | print "Weighted mean prediction ", cum_sum 47 | print "Integreified Weighted mean prediction ", int(cum_sum) 48 | return cum_sum 49 | 50 | image_path = "demo_pic.png" 51 | for k in range(10): 52 | print "Age : ", predict(image_path) 53 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import sys 4 | import caffe 5 | from flask import Flask 6 | from flask import jsonify 7 | from flask import request 8 | import requests 9 | import json 10 | 11 | import boto3 12 | import io 13 | import tempfile 14 | import json 15 | import os 16 | from sklearn.linear_model import LinearRegression 17 | import pickle 18 | import config as config 19 | 20 | app = Flask(__name__) 21 | 22 | #caffe.set_mode_gpu() 23 | caffe.set_mode_cpu() 24 | 25 | mean_filename=os.path.join('imagenet_mean.binaryproto') 26 | proto_data = open(mean_filename, "rb").read() 27 | a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data) 28 | mean = caffe.io.blobproto_to_array(a)[0].mean(1).mean(1) 29 | 30 | net_pretrained = os.path.join("dex_imdb_wiki.caffemodel") 31 | net_model_file = os.path.join("age.prototxt") 32 | Net = caffe.Classifier(net_model_file, net_pretrained, 33 | mean=mean, 34 | channel_swap=(2,1,0), 35 | raw_scale=255, 36 | image_dims=(256, 256)) 37 | 38 | 
audio_model = LinearRegression() 39 | ### hard code linear coefficiencts ### 40 | audio_model.coef_ = np.array([-0.00414669]) 41 | audio_model.intercept_ = 98.2135216968 42 | 43 | @app.route('/',methods=['GET']) 44 | def index(): 45 | return "
Hello, world!
" 46 | 47 | @app.route('/api/predict',methods=['GET']) 48 | def api_predict(): 49 | try: 50 | payload = request.json 51 | prediction_uuid = request.json.get('prediction_uuid') 52 | image_path_s3 = request.json.get('image_s3_key') 53 | 54 | image_path = "demo_pic.png" 55 | prediction = predict(image_path_s3) 56 | resp = jsonify({"status": "cool", "prediction": prediction, "prediction_uuid": prediction_uuid, "image_s3_key": image_path_s3 }) 57 | resp.status_code = 200 58 | return resp 59 | except Exception as e: 60 | raise e 61 | 62 | 63 | @app.route('/api/audio',methods=['GET']) 64 | def api_audio(): 65 | try: 66 | audio_threshold = request.json.get('audio_threshold') 67 | x = [[audio_threshold]] 68 | prediction = audio_model.predict(x) 69 | prediction = 25 70 | resp = jsonify({"prediction": prediction}) 71 | resp.status_code = 200 72 | return resp 73 | except Exception as e: 74 | raise e 75 | 76 | 77 | def predict(image_path_s3, verbose=False): 78 | image_path = s3_to_tempfile(image_path_s3) 79 | input_image = caffe.io.load_image(image_path) 80 | prediction = Net.predict([input_image],oversample=False) 81 | 82 | if verbose: 83 | print "="*100 84 | cum_sum = 0 85 | for _idx, val in enumerate(prediction[0]): 86 | if verbose: print _idx , ": ", val*100,"%" 87 | cum_sum += _idx*val 88 | if verbose: 89 | print "="*100 90 | print 'predicted category is {0}'.format(prediction.argmax()) 91 | print "Weighted mean prediction ", cum_sum 92 | print "Integreified Weighted mean prediction ", int(cum_sum) 93 | return prediction.argmax() 94 | 95 | def s3_to_tempfile(key): 96 | s3_obj = get_s3_obj(key) 97 | f = tempfile.NamedTemporaryFile(delete=False) 98 | f.write(s3_obj.read()) 99 | f.seek(0) 100 | return f.name 101 | 102 | def get_s3_obj(key): 103 | try: 104 | s3 = boto3.client('s3',aws_access_key_id=config.AWS_ACCESS_KEY_ID,aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY) 105 | #s3_url = '{}/{}/{}'.format(s3.meta.endpoint_url, BUCKET, key) 106 | s3_obj = 
s3.get_object(Bucket=config.BUCKET, Key=key)['Body'] 107 | app.logger.info("Downloading S3 key: %s " % key) 108 | return s3_obj 109 | except Exception as e: 110 | app.logger.info(e) 111 | raise e 112 | 113 | #image_path = "demo_pic.png" 114 | #for k in range(10): 115 | # print "Age : ", predict(image_path) 116 | 117 | if __name__ == '__main__': 118 | app.run(host="0.0.0.0") 119 | -------------------------------------------------------------------------------- /age.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_ILSVRC_16_layers" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 224 6 | input_dim: 224 7 | layer { 8 | bottom: "data" 9 | top: "conv1_1" 10 | name: "conv1_1" 11 | type: "Convolution" 12 | convolution_param { 13 | num_output: 64 14 | pad: 1 15 | kernel_size: 3 16 | } 17 | } 18 | layer { 19 | bottom: "conv1_1" 20 | top: "conv1_1" 21 | name: "relu1_1" 22 | type: "ReLU" 23 | } 24 | layer { 25 | bottom: "conv1_1" 26 | top: "conv1_2" 27 | name: "conv1_2" 28 | type: "Convolution" 29 | convolution_param { 30 | num_output: 64 31 | pad: 1 32 | kernel_size: 3 33 | } 34 | } 35 | layer { 36 | bottom: "conv1_2" 37 | top: "conv1_2" 38 | name: "relu1_2" 39 | type: "ReLU" 40 | } 41 | layer { 42 | bottom: "conv1_2" 43 | top: "pool1" 44 | name: "pool1" 45 | type: "Pooling" 46 | pooling_param { 47 | pool: MAX 48 | kernel_size: 2 49 | stride: 2 50 | } 51 | } 52 | layer { 53 | bottom: "pool1" 54 | top: "conv2_1" 55 | name: "conv2_1" 56 | type: "Convolution" 57 | convolution_param { 58 | num_output: 128 59 | pad: 1 60 | kernel_size: 3 61 | } 62 | } 63 | layer { 64 | bottom: "conv2_1" 65 | top: "conv2_1" 66 | name: "relu2_1" 67 | type: "ReLU" 68 | } 69 | layer { 70 | bottom: "conv2_1" 71 | top: "conv2_2" 72 | name: "conv2_2" 73 | type: "Convolution" 74 | convolution_param { 75 | num_output: 128 76 | pad: 1 77 | kernel_size: 3 78 | } 79 | } 80 | layer { 81 | bottom: "conv2_2" 82 | top: "conv2_2" 83 | 
name: "relu2_2" 84 | type: "ReLU" 85 | } 86 | layer { 87 | bottom: "conv2_2" 88 | top: "pool2" 89 | name: "pool2" 90 | type: "Pooling" 91 | pooling_param { 92 | pool: MAX 93 | kernel_size: 2 94 | stride: 2 95 | } 96 | } 97 | layer { 98 | bottom: "pool2" 99 | top: "conv3_1" 100 | name: "conv3_1" 101 | type: "Convolution" 102 | convolution_param { 103 | num_output: 256 104 | pad: 1 105 | kernel_size: 3 106 | } 107 | } 108 | layer { 109 | bottom: "conv3_1" 110 | top: "conv3_1" 111 | name: "relu3_1" 112 | type: "ReLU" 113 | } 114 | layer { 115 | bottom: "conv3_1" 116 | top: "conv3_2" 117 | name: "conv3_2" 118 | type: "Convolution" 119 | convolution_param { 120 | num_output: 256 121 | pad: 1 122 | kernel_size: 3 123 | } 124 | } 125 | layer { 126 | bottom: "conv3_2" 127 | top: "conv3_2" 128 | name: "relu3_2" 129 | type: "ReLU" 130 | } 131 | layer { 132 | bottom: "conv3_2" 133 | top: "conv3_3" 134 | name: "conv3_3" 135 | type: "Convolution" 136 | convolution_param { 137 | num_output: 256 138 | pad: 1 139 | kernel_size: 3 140 | } 141 | } 142 | layer { 143 | bottom: "conv3_3" 144 | top: "conv3_3" 145 | name: "relu3_3" 146 | type: "ReLU" 147 | } 148 | layer { 149 | bottom: "conv3_3" 150 | top: "pool3" 151 | name: "pool3" 152 | type: "Pooling" 153 | pooling_param { 154 | pool: MAX 155 | kernel_size: 2 156 | stride: 2 157 | } 158 | } 159 | layer { 160 | bottom: "pool3" 161 | top: "conv4_1" 162 | name: "conv4_1" 163 | type: "Convolution" 164 | convolution_param { 165 | num_output: 512 166 | pad: 1 167 | kernel_size: 3 168 | } 169 | } 170 | layer { 171 | bottom: "conv4_1" 172 | top: "conv4_1" 173 | name: "relu4_1" 174 | type: "ReLU" 175 | } 176 | layer { 177 | bottom: "conv4_1" 178 | top: "conv4_2" 179 | name: "conv4_2" 180 | type: "Convolution" 181 | convolution_param { 182 | num_output: 512 183 | pad: 1 184 | kernel_size: 3 185 | } 186 | } 187 | layer { 188 | bottom: "conv4_2" 189 | top: "conv4_2" 190 | name: "relu4_2" 191 | type: "ReLU" 192 | } 193 | layer { 194 | bottom: 
"conv4_2" 195 | top: "conv4_3" 196 | name: "conv4_3" 197 | type: "Convolution" 198 | convolution_param { 199 | num_output: 512 200 | pad: 1 201 | kernel_size: 3 202 | } 203 | } 204 | layer { 205 | bottom: "conv4_3" 206 | top: "conv4_3" 207 | name: "relu4_3" 208 | type: "ReLU" 209 | } 210 | layer { 211 | bottom: "conv4_3" 212 | top: "pool4" 213 | name: "pool4" 214 | type: "Pooling" 215 | pooling_param { 216 | pool: MAX 217 | kernel_size: 2 218 | stride: 2 219 | } 220 | } 221 | layer { 222 | bottom: "pool4" 223 | top: "conv5_1" 224 | name: "conv5_1" 225 | type: "Convolution" 226 | convolution_param { 227 | num_output: 512 228 | pad: 1 229 | kernel_size: 3 230 | } 231 | } 232 | layer { 233 | bottom: "conv5_1" 234 | top: "conv5_1" 235 | name: "relu5_1" 236 | type: "ReLU" 237 | } 238 | layer { 239 | bottom: "conv5_1" 240 | top: "conv5_2" 241 | name: "conv5_2" 242 | type: "Convolution" 243 | convolution_param { 244 | num_output: 512 245 | pad: 1 246 | kernel_size: 3 247 | } 248 | } 249 | layer { 250 | bottom: "conv5_2" 251 | top: "conv5_2" 252 | name: "relu5_2" 253 | type: "ReLU" 254 | } 255 | layer { 256 | bottom: "conv5_2" 257 | top: "conv5_3" 258 | name: "conv5_3" 259 | type: "Convolution" 260 | convolution_param { 261 | num_output: 512 262 | pad: 1 263 | kernel_size: 3 264 | } 265 | } 266 | layer { 267 | bottom: "conv5_3" 268 | top: "conv5_3" 269 | name: "relu5_3" 270 | type: "ReLU" 271 | } 272 | layer { 273 | bottom: "conv5_3" 274 | top: "pool5" 275 | name: "pool5" 276 | type: "Pooling" 277 | pooling_param { 278 | pool: MAX 279 | kernel_size: 2 280 | stride: 2 281 | } 282 | } 283 | layer { 284 | bottom: "pool5" 285 | top: "fc6" 286 | name: "fc6" 287 | type: "InnerProduct" 288 | inner_product_param { 289 | num_output: 4096 290 | } 291 | } 292 | layer { 293 | bottom: "fc6" 294 | top: "fc6" 295 | name: "relu6" 296 | type: "ReLU" 297 | } 298 | layer { 299 | bottom: "fc6" 300 | top: "fc6" 301 | name: "drop6" 302 | type: "Dropout" 303 | dropout_param { 304 | 
dropout_ratio: 0.5 305 | } 306 | } 307 | layer { 308 | bottom: "fc6" 309 | top: "fc7" 310 | name: "fc7" 311 | type: "InnerProduct" 312 | inner_product_param { 313 | num_output: 4096 314 | } 315 | } 316 | layer { 317 | bottom: "fc7" 318 | top: "fc7" 319 | name: "relu7" 320 | type: "ReLU" 321 | } 322 | layer { 323 | bottom: "fc7" 324 | top: "fc7" 325 | name: "drop7" 326 | type: "Dropout" 327 | dropout_param { 328 | dropout_ratio: 0.5 329 | } 330 | } 331 | layer { 332 | bottom: "fc7" 333 | top: "fc8-101" 334 | name: "fc8-101" 335 | type: "InnerProduct" 336 | inner_product_param { 337 | num_output: 101 338 | } 339 | } 340 | layer { 341 | bottom: "fc8-101" 342 | top: "prob" 343 | name: "prob" 344 | type: "Softmax" 345 | } 346 | --------------------------------------------------------------------------------