├── DownloadFaceData.m ├── FaceWarp ├── AlignError.m ├── Load3DFace.m ├── Recover3DPose.m ├── Ref_face_mesh.mat ├── WarpFace.m ├── WarpToFrontalFace.m └── face_pts.txt ├── LICENSE ├── README.md ├── SelectGoodFace.m ├── SetConfig.m ├── SetPaths.m ├── data └── demo │ └── IntraFace_cache │ └── detection.mat ├── demo_download.mat ├── feature ├── COPYING ├── ExtractFeature.m ├── features31.cc ├── features31.mexa64 └── features31.mexw64 ├── models ├── CLMWILD_attractive_model.mat ├── CLMWILD_serious_model.mat ├── IntraFace_attractive_model.mat └── IntraFace_serious_model.mat ├── points.png ├── svm ├── TestModel.m ├── libsvm_COPYRIGHT ├── svmpredict.mexa64 └── svmpredict.mexw64 ├── utils ├── ImCropPad.m ├── LoadImageList.m └── mkdirs.m └── wrapper ├── CLMWILDDetectPoints.m └── IntraFaceDetectPoints.m /DownloadFaceData.m: -------------------------------------------------------------------------------- 1 | % Jun-Yan Zhu (junyanz at eecs dot berkeley dot edu) 2 | % University of California, Berkeley 3 | % a script to download portrait data from the Gallagher Collection Person Dataset 4 | % (http://chenlab.ece.cornell.edu/people/Andy/GallagherDataset.html) 5 | % See the first row of Figure 17 for details. 6 | 7 | %% get image urls and bounding boxes 8 | SetPaths; 9 | load('demo_download.mat', 'imgList', 'imgURLs', 'bboxes'); 10 | nImgs = numel(imgList); 11 | 12 | imgFold_o = 'data/demo/imgs_o'; % original images 13 | imgFold = 'data/demo/imgs'; % cropped faces 14 | mkdirs({imgFold_o, imgFold}); 15 | 16 | disp('install the Gallagher Collection Person Dataset'); 17 | parfor n = 1 : nImgs 18 | name = imgList{n}; 19 | impath = fullfile(imgFold_o, name); 20 | if ~exist(impath, 'file') 21 | im = imread(imgURLs{n}); 22 | fprintf('download image (%s)\n', name); 23 | imwrite(im, impath); 24 | end 25 | end 26 | 27 | %% crop faces using the pre-computed bounding boxes 28 | % The face bounding boxes are generated by Viola-Jones Face detector 29 | % http://www.mathworks.com/help/vision/ref/vision.cascadeobjectdetector-class.html 30 | disp('crop faces'); 31 | for n = 1 : nImgs 32 | im = imread(fullfile(imgFold_o, imgList{n})); 33 | crop = ImCropPad(im, bboxes{n}); 34 | crop = imresize(crop, [250, 250]); 35 | imwrite(crop, fullfile(imgFold, imgList{n})); 36 | end 37 | 38 | -------------------------------------------------------------------------------- /FaceWarp/AlignError.m: -------------------------------------------------------------------------------- 1 | function [error] = AlignError(inputpts,Ref ) 2 | Ref.alignpoints(10:13,:) = []; 3 | refpts = Ref.alignpoints(:,1:2); % ref points in 2D 4 | Input.alignpoints = inputpts; % input 2D points 5 | numfp = length(inputpts); % number of points - 13 6 | Input.numfp = numfp; 7 | Input.TFORM = cp2tform(inputpts,refpts,'affine'); % estimate a global affine transformation 8 | 9 | refalignpoints = Ref.alignpoints; % 3D pnts 10 | p2 = tformfwd(Input.TFORM, Input.alignpoints); % apply transformation to 2D points 11 | p2 = p2(:,[2 1])'; % exchange x,y for 3D points 12 | p3 = refalignpoints(:,[2 1 3])'; % exchagne x, y for 3D points 13 | 14 | % remove translation 15 | p22 = mean(p2,2); 16 | p33 = mean(p3,2); 17 | p2c = p2-p22*ones(1,numfp); 18 | p3c = p3-p33*ones(1,numfp); 19 | 20 | % find 2D to 3D affine 21 | AR = p2c*p3c'*inv(p3c*p3c'); % solve AR * p3c = p2c 22 | error = (AR*p3c-p2c).^2; 23 | error = mean(sqrt(error(1,:)+error(2,:))); 24 | 25 | end 26 | 27 | -------------------------------------------------------------------------------- /FaceWarp/Load3DFace.m: 
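AlignError.m above and Load3DFace.m below work as a pair: Load3DFace.m is a script (not a function) that loads Ref_face_mesh.mat into the struct Ref, appends four extra landmarks (rows 10-13) as midpoints of pairs of the nine base reference points, and looks up the depth of every point in Ref.Z; AlignError.m drops those four again and reports, for a new face, the mean per-point residual of the best 2D-to-3D affine fit against the nine base reference points. A minimal sketch of how SelectGoodFace.m combines them (pts is a 9x2 matrix of tracked points; err replaces the script's variable name "error", which shadows a MATLAB built-in):
Load3DFace;                       % defines Ref (reference 3D face) in the workspace
err = AlignError(pts, Ref);       % mean residual of the 2D-to-3D affine fit, in pixels
if err > CONF.alignErrorThres     % 8 by default, see SetConfig.m
    % treat this face as poorly aligned and skip it
end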
-------------------------------------------------------------------------------- 1 | modelName = 'Ref_face_mesh'; 2 | load(modelName); 3 | 4 | Ref.alignpoints(10,:) = mean(Ref.alignpoints(1:2,:),1); 5 | Ref.alignpoints(11,:) = mean(Ref.alignpoints(3:4,:),1); 6 | Ref.alignpoints(12,:) = mean(Ref.alignpoints(2:3,:),1); 7 | Ref.alignpoints(13,:) = mean(Ref.alignpoints(8:9,:),1); 8 | numfp = 13; 9 | 10 | for i = 1:numfp; 11 | Ref.alignpoints(i,3) = Ref.Z(round(Ref.alignpoints(i,2)),round(Ref.alignpoints(i,1))); 12 | end 13 | -------------------------------------------------------------------------------- /FaceWarp/Recover3DPose.m: -------------------------------------------------------------------------------- 1 | function Input = Recover3DPose(Img,inputpts,Ref) 2 | Ref.alignpoints(10:13,:) = []; 3 | szeModel = size(Ref.RGB); % 3D model image size 4 | refpts = Ref.alignpoints(:,1:2); % ref points in 2D 5 | Input.alignpoints = inputpts; % input 2D points 6 | Input.Img = Img; % original image 7 | numfp = length(inputpts); % number of points - 13 8 | Input.numfp = numfp; 9 | % align 2D xy with 3D xy 10 | Input.TFORM = cp2tform(inputpts,refpts,'affine'); % estimate a global affine transformation 11 | Input.alignImg = imtransform(Img, Input.TFORM,'XData',... 12 | [1 szeModel(2)],'YData',[1 szeModel(1)],'Size',szeModel(1:2)); % apply global transformation 13 | % align 14 | Input.TFORM2 = cp2tform(inputpts,refpts,'nonreflective similarity'); 15 | yadd=150; xadd=150; %.7*yadd; 16 | Input.alignImg2 = imtransform(Img,Input.TFORM2,'XData',... 17 | [-xadd szeModel(2)+xadd],'YData',[-yadd szeModel(1)+yadd*.2],'Size',szeModel(1:2)+[yadd*1.2 xadd*2]); 18 | 19 | 20 | refalignpoints = Ref.alignpoints; % 3D pnts 21 | p2 = tformfwd(Input.TFORM, Input.alignpoints); % apply transformation to 2D points 22 | p2 = p2(:,[2 1])'; % exchange x,y for 3D points 23 | p3 = refalignpoints(:,[2 1 3])'; % exchagne x, y for 3D points 24 | 25 | 26 | % remove translation 27 | p22 = mean(p2,2); 28 | p33 = mean(p3,2); 29 | p2c = p2-p22*ones(1,numfp); 30 | p3c = p3-p33*ones(1,numfp); 31 | 32 | % find 2D to 3D affine 33 | AR = p2c*p3c'*inv(p3c*p3c'); % solve AR * p3c = p2c 34 | A = [AR ; cross(AR(1,:),AR(2,:))]; 35 | t = p22 - A(1:2,:)*p33; % remove tranlation 36 | t(3) = 0; 37 | 38 | 39 | % set data 40 | Input.A = A; 41 | Input.t = t; 42 | Input.p3 = p3; 43 | -------------------------------------------------------------------------------- /FaceWarp/Ref_face_mesh.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/FaceWarp/Ref_face_mesh.mat -------------------------------------------------------------------------------- /FaceWarp/WarpFace.m: -------------------------------------------------------------------------------- 1 | function img_aligned = WarpFace(img, Ref, pts) 2 | Input = Recover3DPose(img, pts, Ref); 3 | P3 = Input.A* Ref.P' + repmat(Input.t, 1, numel(Ref.Z)); 4 | img_aligned = WarpToFrontalFace(Input.alignImg,P3,Ref.P',Ref.M); 5 | end 6 | 7 | -------------------------------------------------------------------------------- /FaceWarp/WarpToFrontalFace.m: -------------------------------------------------------------------------------- 1 | function newimg = WarpToFrontalFace(im, P3m, P3o, mask) 2 | % im -- aligned image; 3 | % P3m -- P3 (ref model points after 3d alignment transformation) 4 | % P3o -- the points before the transformation -- Ref.Points 5 | 6 | [h, w, c]=size(im); 7 | 
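% mask marks the reference-mesh vertices to keep; the corresponding columns
% of P3m (vertex positions under the recovered pose, i.e. coordinates in the
% pose-aligned image) and P3o (the same vertices in the frontal reference
% frame) are removed together below. The loop that follows is a forward
% nearest-neighbor scatter: each remaining vertex copies one pixel from the
% aligned image at round(P3m) to its frontal position at round(P3o), and
% pixels that receive no vertex stay black in newimg.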
P3m(:,~mask)=[]; 8 | P3o(:,~mask)=[]; 9 | 10 | newimg=zeros(h, w, c); 11 | 12 | for i = 1 : length(P3m) 13 | x = round(P3m(2,i)); 14 | y = round(P3m(1,i)); 15 | 16 | xnew = round(P3o(2,i)); 17 | ynew = round(P3o(1,i)); 18 | 19 | if xnew > 0 && ynew > 0 && xnew < w && ynew < h ... 20 | && x < w && y < h && x > 0 && y > 0 21 | newimg(ynew, xnew, :) = im(y, x, :); 22 | end 23 | end 24 | 25 | newimg = uint8(newimg); 26 | 27 | -------------------------------------------------------------------------------- /FaceWarp/face_pts.txt: -------------------------------------------------------------------------------- 1 | 98.02 198.40 2 | 136.58 198.40 3 | 189.60 183.94 4 | 232.98 183.94 5 | 136.58 256.24 6 | 165.50 265.88 7 | 199.24 246.60 8 | 131.76 289.98 9 | 223.34 275.52 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Jun-Yan Zhu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SelectGoodFace 2 | #### [[Project](http://efrosprojects.eecs.berkeley.edu/mirrormirror/)] [[Paper](http://efrosprojects.eecs.berkeley.edu/mirrormirror/mirrormirror.pdf)] 3 | Contact: Jun-Yan Zhu (junyanz at cs dot cmu dot edu) 4 | 5 | ## Overview 6 | This program can select attractive/serious portraits from a personal photo collection. Given the photo collection of the *same* person as input, our program computes the attractiveness/seriousness scores on all the faces. The scores are predicted by the SVM models pre-trained on the face data that we collected for our paper. 7 | 8 | The program assumes only one person in each input image. Please use other software (e.g. Picasa) to identify and localize the subject before running our code. See Section 8 and Figure 17 in the paper for details. 9 | 10 | The code can only be used for non-commercial purposes. Please cite the following work if you use our code and data for your research: 11 | 12 | [Mirror Mirror: Crowdsourcing Better Portraits](http://efrosprojects.eecs.berkeley.edu/mirrormirror/) 13 | [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Aseem Agarwala](http://www.agarwala.org/), [Alexei A. 
Efros](https://people.eecs.berkeley.edu/~efros/), [Eli Shechtman](https://research.adobe.com/person/eli-shechtman/), [Jue Wang](http://www.juew.org/) 14 | In ACM Transactions on Graphics (Proceedings of SIGGRAPH Asia 2014) 15 | 16 | [Project Page](http://efrosprojects.eecs.berkeley.edu/mirrormirror/) 17 | 18 | Please cite our paper if you use our code for your research. 19 | ``` 20 | @article{zhu2014mirror, 21 | author = {Jun-Yan Zhu and Aseem Agarwala and Alexei A Efros and Eli Shechtman and Jue Wang}, 22 | title = {Mirror Mirror: Crowdsourcing Better Portraits}, 23 | journal = {ACM Transactions on Graphics (SIGGRAPH Asia 2014)}, 24 | volume = {33}, 25 | number = {6}, 26 | year = {2014}, 27 | } 28 | ``` 29 | 30 | ## Installation 31 | * Download and unzip the code. 32 | * Install a face tracker: our program currently supports two face trackers: 33 | - IntraFace: download the MATLAB version from http://www.humansensing.cs.cmu.edu/intraface/ and put the software in the "IntraFace" folder. 34 | - CLM-Wild: download "DRMF Code Version 2.0" from https://sites.google.com/site/akshayasthana/clm-wild-code 35 | and put the software in the "CLMWILD" folder. 36 | * Download the test face dataset: run "DownloadFaceData.m" to get the face images from http://chenlab.ece.cornell.edu/people/Andy/GallagherDataset.html 37 | * Run "SelectGoodFace.m" to rank the faces by attractiveness and seriousness. "SelectGoodFace.m" is the main entry function. You can find the results in "data/demo/<CONF.tracker>\_result". 38 | * Modify the configuration file "SetConfig.m" if you want to run the code on your own data. 39 | 40 | 41 | ## System Requirement 42 | * Windows and Linux. 43 | * Mex files compiled for 64-bit Windows and Linux (features31.mexw64, features31.mexa64) are provided. If they don't work for you, recompile "features31.cc" (mex features31.cc). 44 | 45 | 46 | ## Notes on face trackers 47 | * We used IntraFace in our paper. However, IntraFace is temporarily suspended according to the authors' website. I will update my code once the new version of IntraFace is released. The CLM-WILD tracker is publicly available now. 48 | * You can also use other face trackers if you like. Please modify Lines 40-44 in "SelectGoodFace.m" and write your own wrapper to adapt other face trackers. For each image, my code needs: 49 | - Nine facial points: \[9x2 double\] (see "points.png") 50 | - 3D pose (Pitch, Yaw, Roll): \[3x1 double\] (set the pose to \[ \] if your face tracker doesn't provide pose information) 51 | - Confidence score: set the confidence score to 1 if your face tracker doesn't provide one. 52 | * For "the Gallagher Collection Person Dataset", I have provided the precomputed facial points ("data/demo/IntraFace_cache/detection.mat") detected by IntraFace so that you can reproduce the first row of Figure 17 in our paper. 53 | 54 | 55 | ## Usage 56 | * Input: please set the image folder and the parameters in "SetConfig.m" 57 | - Folder: set CONF.dataFold to your data folder 58 | - Images: put your images in CONF.dataFold/imgs/ 59 | - Tracker: specify your tracker in "CONF.tracker" (IntraFace or CLMWILD). We include pre-trained SVM models for both trackers (e.g. models/CLMWILD_attractive_model.mat for the CLM-Wild tracker, models/IntraFace_attractive_model.mat for the IntraFace tracker, etc.). 60 | - Parameters for filtering out "bad" faces: 61 | * CONF.smallFace = 250; % ignore small face 62 | * CONF.poseThres = 15; % ignore non-frontal face (e.g.
15 degree) 63 | * CONF.trackConfThres = 0.5; % ignore tracking failure (e.g. confidence <0.5) 64 | * CONF.alignErrorThres = 8; % ignore poor alignment (e.g. mean pixel error > 8) 65 | * Output: our program outputs ranking results for both attractiveness and seriousness in the following folder: CONF.dataFold/[CONF.tracker '\_result'] 66 | 67 | ## Acknowledgement 68 | Part of the face alignment code is based on the work by Fei Yang. 69 | -------------------------------------------------------------------------------- /SelectGoodFace.m: -------------------------------------------------------------------------------- 1 | %% Jun-Yan Zhu (junyanz at eecs dot berkeley dot edu) 2 | % University of California, Berkeley 3 | % select attractive/serious portraits from a personal photo collection 4 | 5 | %% set parameters 6 | SetPaths; 7 | CONF = SetConfig; 8 | imgFold = CONF.imgFold; 9 | 10 | small_face_size = CONF.smallFace; % ignore small face 11 | pose_thres = CONF.poseThres; % ignore non-frontal face 12 | conf_thres = CONF.trackConfThres; % ignore tracking failure 13 | error_thres = CONF.alignErrorThres; % ignore poor alignment 14 | Load3DFace; 15 | 16 | %% download dataset for 'girl' 17 | if strcmp(imgFold, 'data/girl/imgs') && ~exist(imgFold, 'dir') 18 | DownloadFaceData; 19 | end 20 | 21 | %% load images 22 | imgList = LoadImageList(imgFold); 23 | nImgs = numel(imgList); 24 | fprintf('load (%d) images\n', nImgs); 25 | ims = cell(nImgs, 1); 26 | for n = 1 : nImgs 27 | ims{n} = imread(fullfile(imgFold, imgList{n})); 28 | end 29 | 30 | %% detect facial points 31 | detectPath = fullfile(CONF.cacheFold, 'detection.mat'); 32 | 33 | if ~exist(detectPath, 'file') 34 | disp('(run) facial point tracking'); 35 | points = cell(nImgs, 1); 36 | poses = cell(nImgs, 1); 37 | confs = zeros(nImgs, 1); 38 | 39 | % detect facial points (replace your face detection code here) 40 | if strcmp(CONF.tracker, 'IntraFace') 41 | [points, poses, confs] = IntraFaceDetectPoints(ims); % IntraFace wrapper 42 | elseif strcmp(CONF.tracker, 'CLMWILD') 43 | [points, poses, confs] = CLMWILDDetectPoints(ims); % CLM-WILD wrapper 44 | end 45 | 46 | save(detectPath, 'points', 'poses', 'confs'); 47 | else 48 | disp('(load) facial point detection'); 49 | load(detectPath); 50 | end 51 | 52 | %% ignore "bad" faces 53 | filterPath = fullfile(CONF.cacheFold, 'filter.mat'); 54 | if ~exist(filterPath, 'file') 55 | disp('(run) bad face filtering'); 56 | filterIdx = ones(nImgs,1); 57 | for n = 1 : nImgs 58 | % ignore small face 59 | if size(ims{n}, 1) * size(ims{n}, 2) < small_face_size^2 60 | filterIdx(n) = 0; 61 | continue; 62 | end 63 | 64 | pts = points{n}; 65 | pose = poses{n}; 66 | conf = confs(n); 67 | 68 | % ignore tracking failure 69 | if isempty(pts) || conf < conf_thres 70 | filterIdx(n) = 0; 71 | continue; 72 | end 73 | 74 | % ignore non-frontal face 75 | if ~isempty(pose) 76 | if abs(pose(1)) > pose_thres ... 77 | || abs(pose(2)) > pose_thres... 
78 | || abs(pose(3)) > pose_thres 79 | filterIdx(n) = 0; 80 | continue; 81 | end 82 | end 83 | 84 | % ignore poor alignment 85 | error = AlignError(pts, Ref);% large alignment error 86 | if error > error_thres 87 | filterIdx(n) = 0; 88 | continue; 89 | end 90 | end 91 | 92 | save(filterPath, 'filterIdx'); 93 | else 94 | disp('(load) bad face filtering'); 95 | load(filterPath, 'filterIdx'); 96 | end 97 | 98 | remain_ids = find(filterIdx == 1); 99 | imgList = imgList(remain_ids); 100 | nImgs = numel(imgList); 101 | fprintf('(%d) remaining images\n', nImgs); 102 | points = points(remain_ids); 103 | ims = ims(remain_ids); 104 | 105 | %% compute features 106 | featPath = fullfile(CONF.cacheFold, 'features.mat'); 107 | 108 | if ~exist(featPath, 'file') 109 | disp('(run) feature extraction'); 110 | features = cell(nImgs, 1); 111 | 112 | for n = 1: nImgs 113 | pts = points{n}; 114 | warp_im = WarpFace(ims{n}, Ref, pts); % warp face to the canonical pose 115 | features{n} = ExtractFeature(warp_im); % compute features 116 | end 117 | save(featPath, 'features'); 118 | else 119 | disp('(load) feature extraction'); 120 | load(featPath); 121 | end 122 | 123 | scorePath = fullfile(CONF.rstFold, 'scores.mat'); 124 | 125 | %% predict scores 126 | if ~exist(scorePath, 'file') 127 | disp('(run) score prediction'); 128 | features = cat(2, features{:}); 129 | features = double(features'); 130 | 131 | % load models 132 | load(CONF.modelAttractive); 133 | attractive_model = model; 134 | load(CONF.modelSerious); 135 | serious_model = model; 136 | 137 | attractive_scores = TestModel(features, attractive_model); 138 | serious_scores = TestModel(features, serious_model); 139 | save(scorePath, 'imgList', 'attractive_scores', 'serious_scores'); 140 | else 141 | disp('(load) score prediction'); 142 | load(scorePath); 143 | end 144 | 145 | %% save results 146 | disp('save ranked results'); 147 | 148 | rstFold = CONF.rstFold; 149 | fold = fullfile(rstFold, 'attractive'); 150 | mkdirs(fold); 151 | [vs, ids] = sort(attractive_scores, 'descend'); 152 | 153 | for k = 1 : numel(ids) 154 | outPath = fullfile(fold, ... 155 | sprintf('top%3.3d_s%3.3f.jpg', k, vs(k))); 156 | if ~exist(outPath, 'file') 157 | imwrite(ims{ids(k)}, outPath); 158 | end 159 | end 160 | 161 | fold = fullfile(rstFold, 'serious'); 162 | mkdirs(fold); 163 | [vs, ids] = sort(serious_scores, 'descend'); 164 | 165 | for k = 1 : numel(ids) 166 | outPath = fullfile(fold, ... 
167 | sprintf('top%3.3d_s%3.3f.jpg', k, vs(k))); 168 | if ~exist(outPath, 'file') 169 | imwrite(ims{ids(k)}, outPath); 170 | end 171 | end -------------------------------------------------------------------------------- /SetConfig.m: -------------------------------------------------------------------------------- 1 | % author: Jun-Yan Zhu (UC Berkeley) 2 | % function: set paramters 3 | function CONF = SetConfig() 4 | % set the data folder 5 | CONF.tracker = 'IntraFace'; 6 | % IntraFace: please download intraface from http://www.humansensing.cs.cmu.edu/intraface/ 7 | % CLMWILD: please download CLM-WILD from https://sites.google.com/site/akshayasthana/clm-wild-code 8 | CONF.dataFold = 'data/demo/'; % data folder 9 | CONF.imgFold = fullfile(CONF.dataFold, 'imgs'); % image folder 10 | CONF.cacheFold = fullfile(CONF.dataFold, [CONF.tracker '_cache']); % cache folder 11 | CONF.rstFold = fullfile(CONF.dataFold, [CONF.tracker '_result']); % result folder 12 | CONF.modelAttractive = fullfile('models', [CONF.tracker '_attractive_model.mat']); % path to attractiveness model 13 | CONF.modelSerious = fullfile('models', [CONF.tracker '_serious_model.mat']); % path to seriousness model 14 | 15 | mkdirs({CONF.imgFold, CONF.cacheFold, CONF.rstFold}); 16 | 17 | % ignore bad images 18 | CONF.smallFace = 250; % ignore small face 19 | CONF.poseThres = 15; % ignore non-frontal face (e.g. 15 degree) 20 | CONF.trackConfThres = 0.5; % ignore tracking failure (e.g. confidence <0.5) 21 | CONF.alignErrorThres = 8; % ignore poor alignment (e.g. mean pixel error > 8) 22 | end -------------------------------------------------------------------------------- /SetPaths.m: -------------------------------------------------------------------------------- 1 | % Jun-Yan Zhu (junyanz at eecs dot berkeley dot edu) 2 | % University of California, Berkeley 3 | % add paths 4 | disp('add paths'); 5 | addpath('utils'); 6 | addpath('svm'); 7 | addpath('feature'); 8 | addpath('FaceWarp'); 9 | addpath('wrapper'); 10 | % face tracker package 11 | addpath(genpath('IntraFace')); 12 | addpath(genpath('CLMWILD')); -------------------------------------------------------------------------------- /data/demo/IntraFace_cache/detection.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/data/demo/IntraFace_cache/detection.mat -------------------------------------------------------------------------------- /demo_download.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/demo_download.mat -------------------------------------------------------------------------------- /feature/COPYING: -------------------------------------------------------------------------------- 1 | Copyright (C) 2008, 2009, 2010 Pedro Felzenszwalb, Ross Girshick 2 | Copyright (C) 2007 Pedro Felzenszwalb, Deva Ramanan 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining 5 | a copy of this software and associated documentation files (the 6 | "Software"), to deal in the Software without restriction, including 7 | without limitation the rights to use, copy, modify, merge, publish, 8 | distribute, sublicense, and/or sell copies of the Software, and to 9 | permit persons to whom the Software is furnished to do so, subject to 10 | the following conditions: 11 | 12 | The above copyright notice and this 
permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 17 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 19 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 20 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 21 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /feature/ExtractFeature.m: -------------------------------------------------------------------------------- 1 | function feat = ExtractFeature(im) 2 | rects{1} = [25, 20, 120, 170]; sizes{1} = [80, 64]; % faces: 8*6 3 | rects{2} = [28, 143, 114, 44]; sizes{2} = [32, 64]; % mouth: 2*6 4 | rects{3} = [28, 60, 44, 45]; sizes{3} = [48, 64]; % left eye: 4*6 5 | rects{4} = [98, 60, 44, 45]; sizes{4} = [48, 64]; % right eye: 4*6 6 | rects{5} = [50, 45, 75, 30]; sizes{5} = [32, 64]; % wrinkle: 2*6 7 | nParts = numel(rects); 8 | hogs = cell(nParts, 1); 9 | % disp = im; 10 | 11 | for n = 1 : nParts 12 | rect = rects{n}; 13 | rect(3:4) = rect(3:4)-1; 14 | patch = imcrop(im, rect); 15 | patch = imresize(patch, sizes{n}); 16 | hog = single(features31(im2double(patch), 8)); 17 | hogs{n} = hog(:); 18 | end 19 | % figure(1), imshow(disp); 20 | feat = cat(1, hogs{:}); 21 | end -------------------------------------------------------------------------------- /feature/features31.cc: -------------------------------------------------------------------------------- 1 | #include <math.h> 2 | #include "mex.h" 3 | 4 | // small value, used to avoid division by zero 5 | #define eps 0.0001 6 | 7 | // unit vectors used to compute gradient orientation 8 | double uu[9] = {1.0000, 9 | 0.9397, 10 | 0.7660, 11 | 0.500, 12 | 0.1736, 13 | -0.1736, 14 | -0.5000, 15 | -0.7660, 16 | -0.9397}; 17 | double vv[9] = {0.0000, 18 | 0.3420, 19 | 0.6428, 20 | 0.8660, 21 | 0.9848, 22 | 0.9848, 23 | 0.8660, 24 | 0.6428, 25 | 0.3420}; 26 | 27 | static inline double min(double x, double y) { return (x <= y ? x : y); } 28 | static inline double max(double x, double y) { return (x <= y ? y : x); } 29 | 30 | static inline int min(int x, int y) { return (x <= y ? x : y); } 31 | static inline int max(int x, int y) { return (x <= y ?
y : x); } 32 | 33 | // main function: 34 | // takes a double color image and a bin size 35 | // returns HOG features 36 | mxArray *process(const mxArray *mximage, const mxArray *mxsbin) { 37 | double *im = (double *)mxGetPr(mximage); 38 | const int *dims = mxGetDimensions(mximage); 39 | if (mxGetNumberOfDimensions(mximage) != 3 || 40 | dims[2] != 3 || 41 | mxGetClassID(mximage) != mxDOUBLE_CLASS) 42 | mexErrMsgTxt("Invalid input"); 43 | 44 | int sbin = (int)mxGetScalar(mxsbin); 45 | 46 | // memory for caching orientation histograms & their norms 47 | int blocks[2]; 48 | blocks[0] = (int)round((double)dims[0]/(double)sbin); 49 | blocks[1] = (int)round((double)dims[1]/(double)sbin); 50 | double *hist = (double *)mxCalloc(blocks[0]*blocks[1]*18, sizeof(double)); 51 | double *norm = (double *)mxCalloc(blocks[0]*blocks[1], sizeof(double)); 52 | 53 | // memory for HOG features 54 | int out[3]; 55 | out[0] = max(blocks[0]-2, 0); 56 | out[1] = max(blocks[1]-2, 0); 57 | out[2] = 27+4; 58 | mxArray *mxfeat = mxCreateNumericArray(3, out, mxDOUBLE_CLASS, mxREAL); 59 | double *feat = (double *)mxGetPr(mxfeat); 60 | 61 | int visible[2]; 62 | visible[0] = blocks[0]*sbin; 63 | visible[1] = blocks[1]*sbin; 64 | 65 | for (int x = 1; x < visible[1]-1; x++) { 66 | for (int y = 1; y < visible[0]-1; y++) { 67 | // first color channel 68 | double *s = im + min(x, dims[1]-2)*dims[0] + min(y, dims[0]-2); 69 | double dy = *(s+1) - *(s-1); 70 | double dx = *(s+dims[0]) - *(s-dims[0]); 71 | double v = dx*dx + dy*dy; 72 | 73 | // second color channel 74 | s += dims[0]*dims[1]; 75 | double dy2 = *(s+1) - *(s-1); 76 | double dx2 = *(s+dims[0]) - *(s-dims[0]); 77 | double v2 = dx2*dx2 + dy2*dy2; 78 | 79 | // third color channel 80 | s += dims[0]*dims[1]; 81 | double dy3 = *(s+1) - *(s-1); 82 | double dx3 = *(s+dims[0]) - *(s-dims[0]); 83 | double v3 = dx3*dx3 + dy3*dy3; 84 | 85 | // pick channel with strongest gradient 86 | if (v2 > v) { 87 | v = v2; 88 | dx = dx2; 89 | dy = dy2; 90 | } 91 | if (v3 > v) { 92 | v = v3; 93 | dx = dx3; 94 | dy = dy3; 95 | } 96 | 97 | // snap to one of 18 orientations 98 | double best_dot = 0; 99 | int best_o = 0; 100 | for (int o = 0; o < 9; o++) { 101 | double dot = uu[o]*dx + vv[o]*dy; 102 | if (dot > best_dot) { 103 | best_dot = dot; 104 | best_o = o; 105 | } else if (-dot > best_dot) { 106 | best_dot = -dot; 107 | best_o = o+9; 108 | } 109 | } 110 | 111 | // add to 4 histograms around pixel using linear interpolation 112 | double xp = ((double)x+0.5)/(double)sbin - 0.5; 113 | double yp = ((double)y+0.5)/(double)sbin - 0.5; 114 | int ixp = (int)floor(xp); 115 | int iyp = (int)floor(yp); 116 | double vx0 = xp-ixp; 117 | double vy0 = yp-iyp; 118 | double vx1 = 1.0-vx0; 119 | double vy1 = 1.0-vy0; 120 | v = sqrt(v); 121 | 122 | if (ixp >= 0 && iyp >= 0) { 123 | *(hist + ixp*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += 124 | vx1*vy1*v; 125 | } 126 | 127 | if (ixp+1 < blocks[1] && iyp >= 0) { 128 | *(hist + (ixp+1)*blocks[0] + iyp + best_o*blocks[0]*blocks[1]) += 129 | vx0*vy1*v; 130 | } 131 | 132 | if (ixp >= 0 && iyp+1 < blocks[0]) { 133 | *(hist + ixp*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += 134 | vx1*vy0*v; 135 | } 136 | 137 | if (ixp+1 < blocks[1] && iyp+1 < blocks[0]) { 138 | *(hist + (ixp+1)*blocks[0] + (iyp+1) + best_o*blocks[0]*blocks[1]) += 139 | vx0*vy0*v; 140 | } 141 | } 142 | } 143 | 144 | // compute energy in each block by summing over orientations 145 | for (int o = 0; o < 9; o++) { 146 | double *src1 = hist + o*blocks[0]*blocks[1]; 147 | double *src2 = hist + 
(o+9)*blocks[0]*blocks[1]; 148 | double *dst = norm; 149 | double *end = norm + blocks[1]*blocks[0]; 150 | while (dst < end) { 151 | *(dst++) += (*src1 + *src2) * (*src1 + *src2); 152 | src1++; 153 | src2++; 154 | } 155 | } 156 | 157 | // compute features 158 | for (int x = 0; x < out[1]; x++) { 159 | for (int y = 0; y < out[0]; y++) { 160 | double *dst = feat + x*out[0] + y; 161 | double *src, *p, n1, n2, n3, n4; 162 | 163 | p = norm + (x+1)*blocks[0] + y+1; 164 | n1 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); 165 | p = norm + (x+1)*blocks[0] + y; 166 | n2 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); 167 | p = norm + x*blocks[0] + y+1; 168 | n3 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); 169 | p = norm + x*blocks[0] + y; 170 | n4 = 1.0 / sqrt(*p + *(p+1) + *(p+blocks[0]) + *(p+blocks[0]+1) + eps); 171 | 172 | double t1 = 0; 173 | double t2 = 0; 174 | double t3 = 0; 175 | double t4 = 0; 176 | 177 | // contrast-sensitive features 178 | src = hist + (x+1)*blocks[0] + (y+1); 179 | for (int o = 0; o < 18; o++) { 180 | double h1 = min(*src * n1, 0.2); 181 | double h2 = min(*src * n2, 0.2); 182 | double h3 = min(*src * n3, 0.2); 183 | double h4 = min(*src * n4, 0.2); 184 | *dst = 0.5 * (h1 + h2 + h3 + h4); 185 | t1 += h1; 186 | t2 += h2; 187 | t3 += h3; 188 | t4 += h4; 189 | dst += out[0]*out[1]; 190 | src += blocks[0]*blocks[1]; 191 | } 192 | 193 | // contrast-insensitive features 194 | src = hist + (x+1)*blocks[0] + (y+1); 195 | for (int o = 0; o < 9; o++) { 196 | double sum = *src + *(src + 9*blocks[0]*blocks[1]); 197 | double h1 = min(sum * n1, 0.2); 198 | double h2 = min(sum * n2, 0.2); 199 | double h3 = min(sum * n3, 0.2); 200 | double h4 = min(sum * n4, 0.2); 201 | *dst = 0.5 * (h1 + h2 + h3 + h4); 202 | dst += out[0]*out[1]; 203 | src += blocks[0]*blocks[1]; 204 | } 205 | 206 | // texture features 207 | *dst = 0.2357 * t1; 208 | dst += out[0]*out[1]; 209 | *dst = 0.2357 * t2; 210 | dst += out[0]*out[1]; 211 | *dst = 0.2357 * t3; 212 | dst += out[0]*out[1]; 213 | *dst = 0.2357 * t4; 214 | } 215 | } 216 | 217 | mxFree(hist); 218 | mxFree(norm); 219 | return mxfeat; 220 | } 221 | 222 | // matlab entry point 223 | // F = features(image, bin) 224 | // image should be color with double values 225 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { 226 | if (nrhs != 2) 227 | mexErrMsgTxt("Wrong number of inputs"); 228 | if (nlhs != 1) 229 | mexErrMsgTxt("Wrong number of outputs"); 230 | plhs[0] = process(prhs[0], prhs[1]); 231 | } 232 | 233 | 234 | 235 | -------------------------------------------------------------------------------- /feature/features31.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/feature/features31.mexa64 -------------------------------------------------------------------------------- /feature/features31.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/feature/features31.mexw64 -------------------------------------------------------------------------------- /models/CLMWILD_attractive_model.mat: -------------------------------------------------------------------------------- 
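The four .mat files under models/ (linked below) each store a pre-trained SVM in a variable named model: one attractiveness and one seriousness model per supported tracker. SelectGoodFace.m loads the pair selected by CONF.tracker and scores the stacked HOG features with svm/TestModel.m; a rough sketch of that step, with variable names taken from the script:
load('models/IntraFace_attractive_model.mat');      % provides the variable model
feats = double(cat(2, features{:})');               % one row of HOG features per face
attractive_scores = TestModel(feats, model);        % wraps libsvm's svmpredict
Going by the comments in feature/ExtractFeature.m (8*6 + 2*6 + 4*6 + 4*6 + 2*6 = 120 HOG cells, 31 channels each), each row should be about 3720-dimensional.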
https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/models/CLMWILD_attractive_model.mat -------------------------------------------------------------------------------- /models/CLMWILD_serious_model.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/models/CLMWILD_serious_model.mat -------------------------------------------------------------------------------- /models/IntraFace_attractive_model.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/models/IntraFace_attractive_model.mat -------------------------------------------------------------------------------- /models/IntraFace_serious_model.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/models/IntraFace_serious_model.mat -------------------------------------------------------------------------------- /points.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/points.png -------------------------------------------------------------------------------- /svm/TestModel.m: -------------------------------------------------------------------------------- 1 | function [pred] = TestModel(data, model) 2 | weights = zeros([size(data, 1), 1]); 3 | pred = svmpredict(weights, data, model, '-q'); 4 | end 5 | 6 | -------------------------------------------------------------------------------- /svm/libsvm_COPYRIGHT: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | 3. Neither name of copyright holders nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR 25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /svm/svmpredict.mexa64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/svm/svmpredict.mexa64 -------------------------------------------------------------------------------- /svm/svmpredict.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junyanz/SelectGoodFace/801ef545f8a183d26fe017b00b8d3cbe626bd733/svm/svmpredict.mexw64 -------------------------------------------------------------------------------- /utils/ImCropPad.m: -------------------------------------------------------------------------------- 1 | function [crop] = ImCropPad(im, rect) 2 | % rect: [xmin, ymin, width, height] 3 | %% pad pixels 4 | xmin = rect(1); 5 | ymin = rect(2); 6 | xmax = rect(3)+rect(1)-1; 7 | ymax = rect(4)+rect(2)-1; 8 | xpad= max(xmax-size(im,2)+1, -xmin); 9 | ypad = max(ymax-size(im,1)+1, -ymin); 10 | pad = ceil(max(xpad, ypad)); 11 | 12 | if pad <= 0 13 | crop = imcrop(im, rect); 14 | else 15 | im_pad = padarray(im, [pad, pad], 0); 16 | %% imcrop 17 | rect(1:2) = rect(1:2) + pad; 18 | crop = imcrop(im_pad, rect); 19 | end 20 | 21 | end 22 | 23 | -------------------------------------------------------------------------------- /utils/LoadImageList.m: -------------------------------------------------------------------------------- 1 | %LOADIMAGELIST: list all the image files in the given directory. 2 | % Support image format: png, jpg, jpeg, bmp 3 | % Author: Jun-Yan Zhu (junyanz@eecs.berkeley.edu) 4 | % Input: 5 | % imgDir (string): the directory that stores the images. 6 | % Output: 7 | % imgList (a cell array of strings): each cell stores the name of a 8 | % image. The strings are sorted by alphabetical order. 9 | function [imgList] = LoadImageList(imgDir) 10 | if ispc 11 | formats = {'png', 'jpg', 'jpeg', 'bmp'}; 12 | else 13 | formats = {'png', 'jpg', 'jpeg', 'bmp', 'PNG', 'JPG', 'JPEG', 'BMP'}; 14 | end 15 | nFormats = numel(formats); 16 | imgLists = cell(nFormats, 1); 17 | for n = 1 : numel(formats) 18 | imgLists{n} = dir(fullfile(imgDir, ['*.' 
formats{n}])); 19 | end 20 | 21 | imgList = cat(1, imgLists{:}); 22 | [~, idx] = sort({imgList.name}); 23 | imgList = {imgList(idx).name}; 24 | end 25 | -------------------------------------------------------------------------------- /utils/mkdirs.m: -------------------------------------------------------------------------------- 1 | function [] = mkdirs(folders ) 2 | if iscell(folders) 3 | for n = 1 : numel(folders) 4 | if ~exist(folders{n}, 'dir') 5 | mkdir(folders{n}); 6 | end 7 | end 8 | else 9 | if ~exist(folders, 'dir') 10 | mkdir(folders); 11 | end 12 | end 13 | end 14 | 15 | -------------------------------------------------------------------------------- /wrapper/CLMWILDDetectPoints.m: -------------------------------------------------------------------------------- 1 | function [points, poses, confs] = CLMWILDDetectPoints(ims) 2 | clm_model = 'CLMWILD/model/DRMF_Model.mat'; 3 | fprintf('load clm model (%s)\n', clm_model); 4 | load(clm_model); 5 | nImgs = numel(ims); 6 | bbox_method = 0; 7 | visualize = 0; 8 | points = cell(nImgs, 1); 9 | poses = cell(nImgs, 1); 10 | confs = zeros(nImgs, 1); 11 | 12 | parfor n = 1 : nImgs 13 | facedata = []; 14 | facedata(1).name = sprintf('image_%4.4d', n); 15 | facedata(1).img = im2double(ims{n}); 16 | facedata(1).bbox = []; 17 | facedata(1).points = []; 18 | facedata(1).pose = []; 19 | facedata = DRMF(clm_model, facedata, bbox_method, visualize); 20 | if ~isempty(facedata(1).points) 21 | points{n} = GetPoints(facedata(1).points); 22 | end 23 | if ~isempty(facedata(1).pose) 24 | poses{n} = facedata(1).pose; 25 | end 26 | confs(n) = 1; 27 | end 28 | end 29 | 30 | function [pnts9] = GetPoints(pnts) 31 | % pnts9 = zeros(9, 2); 32 | map = [37, 40, 43, 46, 32, 34, 36, 49, 55]; 33 | pnts9 = pnts(map, :); 34 | % for k = 1 : numel(map); 35 | % pnts9(k, :) = pnts(map(k), :); 36 | % end 37 | end 38 | 39 | -------------------------------------------------------------------------------- /wrapper/IntraFaceDetectPoints.m: -------------------------------------------------------------------------------- 1 | function [points, poses, confs] = IntraFaceDetectPoints(ims) 2 | %% load IntraFace model (replace your face detection code here) 3 | disp('cd IntraFace'); 4 | cd('IntraFace'); 5 | [DM,TM,option] = xx_initialize; 6 | disp('cd ..'); 7 | cd('..'); 8 | 9 | %% detect points 10 | nImgs = numel(ims); 11 | points = cell(nImgs, 1); 12 | poses = cell(nImgs, 1); 13 | confs = zeros(nImgs, 1); 14 | 15 | for n = 1 : nImgs 16 | faces = DM{1}.fd_h.detect(ims{n}, 'MinNeighbors', option.min_neighbors,... 
17 | 'ScaleFactor', 1.2, 'MinSize', option.min_face_size); 18 | % select the largest face 19 | if isempty(faces) 20 | output= []; 21 | else 22 | tmp = cat(1, faces{:}); 23 | [~, face_id] = max(tmp(:,3) .* tmp(:,4)); 24 | face = faces{face_id}; 25 | output = xx_track_detect(DM, TM, ims{n}, face, option); 26 | end 27 | 28 | if ~isempty(output) && ~isempty(output.pose); 29 | pose = output.pose.angle; 30 | pose(3) = 180 - pose(3); 31 | poses{n} = pose; 32 | end 33 | 34 | if ~isempty(output) && ~isempty(output.pred) 35 | points{n} = IntraFaceGetNinePoints(double(output.pred)); 36 | end 37 | 38 | if ~isempty(output) && ~isempty(output.conf) 39 | confs(n) = output.conf; 40 | end 41 | end 42 | end 43 | 44 | function [pnts9] = IntraFaceGetNinePoints(pnts) 45 | % pnts9 = zeros(9, 2); 46 | map = [20,23, 26, 29, 15, 17, 19, 32, 38]; 47 | pnts9 = pnts(map, :); 48 | % for k = 1 : numel(map); 49 | % pnts9(k, :) = pnts(map(k), :); 50 | % end 51 | end 52 | --------------------------------------------------------------------------------
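For anyone adapting a different tracker (see "Notes on face trackers" in the README), here is a minimal wrapper skeleton in the same style as the two wrappers above. It is only a sketch: MyTrackerDetectPoints and my_tracker_detect are hypothetical placeholder names, and the only firm requirements are the output shapes that SelectGoodFace.m expects (a 9x2 point matrix per image in the order of points.png, a 3x1 pose or [] if unavailable, and a scalar confidence, defaulting to 1).

function [points, poses, confs] = MyTrackerDetectPoints(ims)
% Template wrapper for a generic face tracker; drop it in wrapper/ and add a
% branch for it where SelectGoodFace.m dispatches on CONF.tracker.
nImgs = numel(ims);
points = cell(nImgs, 1);     % each cell: [9x2 double], ordered as in points.png
poses  = cell(nImgs, 1);     % each cell: [3x1 double] (pitch, yaw, roll) or []
confs  = zeros(nImgs, 1);    % tracking confidence per image

for n = 1 : nImgs
    % my_tracker_detect is a placeholder for your tracker's detection call
    [pts, pose, conf] = my_tracker_detect(ims{n});
    if ~isempty(pts)
        points{n} = pts;     % an empty cell marks the face as a tracking failure
    end
    poses{n} = pose;         % leave [] to skip the pose-based filtering
    if isempty(conf)
        confs(n) = 1;        % default confidence when the tracker reports none
    else
        confs(n) = conf;
    end
end
end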