├── Evaluation ├── 300W │ ├── 300W-Test-All.pdf │ ├── 300W-Test-All.png │ ├── 300W-Test-Indoor.pdf │ ├── 300W-Test-Outdoor.pdf │ ├── 300W_test_all.mat │ ├── 300W_test_indoor.mat │ ├── 300W_test_outdoor.mat │ ├── plot_300W_all_results.m │ ├── plot_300W_indoor_results.m │ ├── plot_300W_outdoor_results.m │ ├── readme.md │ └── util │ │ ├── compute_eye_corner_error.m │ │ ├── distinguishable_colors.m │ │ ├── drawLandmarks.m │ │ ├── drawRect.m │ │ └── plot_ced.m └── Menpo2D │ ├── Menpo Profile Test Set.pdf │ ├── Menpo Profile Test Set.png │ ├── Menpo Semifrontal Test Set.pdf │ ├── Menpo Semifrontal Test Set.png │ ├── menpo_test_figure_profile.m │ ├── menpo_test_figure_semifrontal.m │ ├── profile.mat │ ├── readme.md │ ├── semifrontal.mat │ └── util │ ├── LoadPTS.m │ ├── SavePTS.m │ ├── compute_diag_error.m │ ├── drawLandmarks.m │ ├── drawRect.m │ └── plot_ced.m ├── README.md ├── menpo2D_landmarks.png └── menpo3D_landmarks.png /Evaluation/300W/300W-Test-All.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W-Test-All.pdf -------------------------------------------------------------------------------- /Evaluation/300W/300W-Test-All.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W-Test-All.png -------------------------------------------------------------------------------- /Evaluation/300W/300W-Test-Indoor.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W-Test-Indoor.pdf -------------------------------------------------------------------------------- /Evaluation/300W/300W-Test-Outdoor.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W-Test-Outdoor.pdf -------------------------------------------------------------------------------- /Evaluation/300W/300W_test_all.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W_test_all.mat -------------------------------------------------------------------------------- /Evaluation/300W/300W_test_indoor.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W_test_indoor.mat -------------------------------------------------------------------------------- /Evaluation/300W/300W_test_outdoor.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/300W/300W_test_outdoor.mat -------------------------------------------------------------------------------- /Evaluation/300W/plot_300W_all_results.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | clc; 3 | 4 | addpath('./util'); 5 | 6 | load('300W_test_all.mat'); 7 | 8 | title = '300W-Test-All'; 9 | 10 | num = 4; 11 | 12 | colors = lines(num); 13 | 14 | x_limit = 0.1; 15 | 16 | linewidth = 3; 17 | 18 | fontsize = 12; 19 | 20 | % Render curves 21 | plot_ced(x, y, methods, strrep(title,'_',' '), ... 
22 | x_limit, colors, linewidth, fontsize); 23 | 24 | ax = gca; 25 | outerpos = ax.OuterPosition; 26 | ti = ax.TightInset; 27 | left = outerpos(1) + ti(1); 28 | bottom = outerpos(2) + ti(2); 29 | ax_width = outerpos(3) - ti(1) - ti(3); 30 | ax_height = outerpos(4) - ti(2) - ti(4); 31 | ax.Position = [left bottom ax_width ax_height]; 32 | 33 | fig = gcf; 34 | fig.PaperPositionMode = 'auto'; 35 | fig_pos = fig.PaperPosition; 36 | fig.PaperSize = [fig_pos(3) fig_pos(4)]* 1.05; 37 | 38 | print(fig,'-dpdf',[title '.pdf']); 39 | -------------------------------------------------------------------------------- /Evaluation/300W/plot_300W_indoor_results.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | clc; 3 | 4 | addpath('./util'); 5 | 6 | load('300W_test_indoor.mat'); 7 | 8 | title = '300W-Test-Indoor'; 9 | 10 | num = 4; 11 | 12 | colors = lines(num); 13 | 14 | x_limit = 0.08; 15 | 16 | linewidth = 3; 17 | 18 | fontsize = 12; 19 | 20 | % Render curves 21 | plot_ced(x, y, methods, strrep(title,'_',' '), ... 
22 | x_limit, colors, linewidth, fontsize); 23 | 24 | ax = gca; 25 | outerpos = ax.OuterPosition; 26 | ti = ax.TightInset; 27 | left = outerpos(1) + ti(1); 28 | bottom = outerpos(2) + ti(2); 29 | ax_width = outerpos(3) - ti(1) - ti(3); 30 | ax_height = outerpos(4) - ti(2) - ti(4); 31 | ax.Position = [left bottom ax_width ax_height]; 32 | 33 | fig = gcf; 34 | fig.PaperPositionMode = 'auto'; 35 | fig_pos = fig.PaperPosition; 36 | fig.PaperSize = [fig_pos(3) fig_pos(4)]* 1.05; 37 | 38 | print(fig,'-dpdf',[title '.pdf']); 39 | 40 | 41 | -------------------------------------------------------------------------------- /Evaluation/300W/plot_300W_outdoor_results.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | clc; 3 | 4 | addpath('./util'); 5 | 6 | load('300W_test_outdoor.mat'); 7 | 8 | title = '300W-Test-Outdoor'; 9 | 10 | num = 4; 11 | 12 | colors = lines(num); 13 | 14 | x_limit = 0.08; 15 | 16 | linewidth = 3; 17 | 18 | fontsize = 12; 19 | 20 | % Render curves 21 | plot_ced(x, y, methods, strrep(title,'_',' '), ... 22 | x_limit, colors, linewidth, fontsize); 23 | 24 | ax = gca; 25 | outerpos = ax.OuterPosition; 26 | ti = ax.TightInset; 27 | left = outerpos(1) + ti(1); 28 | bottom = outerpos(2) + ti(2); 29 | ax_width = outerpos(3) - ti(1) - ti(3); 30 | ax_height = outerpos(4) - ti(2) - ti(4); 31 | ax.Position = [left bottom ax_width ax_height]; 32 | 33 | fig = gcf; 34 | fig.PaperPositionMode = 'auto'; 35 | fig_pos = fig.PaperPosition; 36 | fig.PaperSize = [fig_pos(3) fig_pos(4)]* 1.05; 37 | 38 | print(fig,'-dpdf',[title '.pdf']); 39 | -------------------------------------------------------------------------------- /Evaluation/300W/readme.md: -------------------------------------------------------------------------------- 1 | # Results: 2 | 3 | ![300W](https://github.com/jiankangdeng/MenpoBenchmark/blob/master/Evaluation/300W/300W-Test-All.png) 4 | 5 | # Notes: 6 | 7 | 1. 
In the 300W-2013 challenge, face boxes are provided. In the 300W-2014 challenge, face boxes are not provided. 8 | 9 | 2. The 300W challenges witnessed the performance difference between cascade regression based methods and deep convolutional neural network based methods. 10 | 11 | # References: 12 | 13 | 300W Datasets: 14 | 15 | ``` 16 | @inproceedings{sagonas2013300, 17 | title={300 faces in-the-wild challenge: The first facial landmark localization challenge}, 18 | author={Sagonas, Christos and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, 19 | booktitle={International Conference on Computer Vision Workshops}, 20 | year={2013} 21 | } 22 | 23 | @article{sagonas2016300, 24 | title={300 faces in-the-wild challenge: Database and results}, 25 | author={Sagonas, Christos and Antonakos, Epameinondas and Tzimiropoulos, Georgios and Zafeiriou, Stefanos and Pantic, Maja}, 26 | journal={Image and vision computing}, 27 | volume={47}, 28 | pages={3--18}, 29 | year={2016}, 30 | publisher={Elsevier} 31 | } 32 | ``` 33 | 34 | Top 2 participants (zhou and yan) in the 300W-2013 challenge and top 2 participants (fan and deng) in the 300W-2014 challenge: 35 | 36 | ``` 37 | @inproceedings{zhou2013extensive, 38 | title={Extensive facial landmark localization with coarse-to-fine convolutional network cascade}, 39 | author={Zhou, Erjin and Fan, Haoqiang and Cao, Zhimin and Jiang, Yuning and Yin, Qi}, 40 | booktitle={International Conference on Computer Vision Workshops}, 41 | year={2013} 42 | } 43 | 44 | @inproceedings{yan2013learn, 45 | title={Learn to combine multiple hypotheses for face alignment}, 46 | author={Yan, Junjie and Lei, Zhen and Yi, Dong and Li, Stan}, 47 | booktitle={International Conference on Computer Vision Workshops}, 48 | year={2013} 49 | } 50 | 51 | @article{fan2016approaching, 52 | title={Approaching human level facial landmark localization by deep learning}, 53 | author={Fan, Haoqiang and Zhou, Erjin}, 54 | journal={Image and Vision 
Computing}, 55 | volume={47}, 56 | pages={27--35}, 57 | year={2016}, 58 | publisher={Elsevier} 59 | } 60 | 61 | @article{deng2016m3, 62 | title={M3 CSR: Multi-view, multi-scale and multi-component cascade shape regression}, 63 | author={Deng, Jiankang and Liu, Qingshan and Yang, Jing and Tao, Dacheng}, 64 | journal={Image and Vision Computing}, 65 | volume={47}, 66 | pages={19--26}, 67 | year={2016}, 68 | publisher={Elsevier} 69 | } 70 | ``` 71 | 72 | 73 | -------------------------------------------------------------------------------- /Evaluation/300W/util/compute_eye_corner_error.m: -------------------------------------------------------------------------------- 1 | function [ error_per_image ] = compute_eye_corner_error( ground_truth_all, detected_points_all ) 2 | num_of_images = size(ground_truth_all,3); 3 | num_of_points = size(ground_truth_all,1); 4 | 5 | error_per_image = zeros(num_of_images,1); 6 | 7 | for i =1:num_of_images 8 | detected_points = detected_points_all(:,:,i); 9 | ground_truth_points = ground_truth_all(:,:,i); 10 | interocular_distance = norm(ground_truth_points(37,:)-ground_truth_points(46,:)); 11 | %interocular_distance = norm(mean(ground_truth_points(37:42,:))-mean(ground_truth_points(43:48,:))); 12 | sum=0; 13 | for j=1:num_of_points 14 | sum = sum+norm(detected_points(j,:)-ground_truth_points(j,:)); 15 | end 16 | error_per_image(i) = sum/(num_of_points*interocular_distance); 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /Evaluation/300W/util/distinguishable_colors.m: -------------------------------------------------------------------------------- 1 | function colors = distinguishable_colors(n_colors,bg,func) 2 | % DISTINGUISHABLE_COLORS: pick colors that are maximally perceptually distinct 3 | % 4 | % When plotting a set of lines, you may want to distinguish them by color. 
5 | % By default, Matlab chooses a small set of colors and cycles among them, 6 | % and so if you have more than a few lines there will be confusion about 7 | % which line is which. To fix this problem, one would want to be able to 8 | % pick a much larger set of distinct colors, where the number of colors 9 | % equals or exceeds the number of lines you want to plot. Because our 10 | % ability to distinguish among colors has limits, one should choose these 11 | % colors to be "maximally perceptually distinguishable." 12 | % 13 | % This function generates a set of colors which are distinguishable 14 | % by reference to the "Lab" color space, which more closely matches 15 | % human color perception than RGB. Given an initial large list of possible 16 | % colors, it iteratively chooses the entry in the list that is farthest (in 17 | % Lab space) from all previously-chosen entries. While this "greedy" 18 | % algorithm does not yield a global maximum, it is simple and efficient. 19 | % Moreover, the sequence of colors is consistent no matter how many you 20 | % request, which facilitates the users' ability to learn the color order 21 | % and avoids major changes in the appearance of plots when adding or 22 | % removing lines. 23 | % 24 | % Syntax: 25 | % colors = distinguishable_colors(n_colors) 26 | % Specify the number of colors you want as a scalar, n_colors. This will 27 | % generate an n_colors-by-3 matrix, each row representing an RGB 28 | % color triple. If you don't precisely know how many you will need in 29 | % advance, there is no harm (other than execution time) in specifying 30 | % slightly more than you think you will need. 31 | % 32 | % colors = distinguishable_colors(n_colors,bg) 33 | % This syntax allows you to specify the background color, to make sure that 34 | % your colors are also distinguishable from the background. Default value 35 | % is white. bg may be specified as an RGB triple or as one of the standard 36 | % "ColorSpec" strings. 
You can even specify multiple colors: 37 | % bg = {'w','k'} 38 | % or 39 | % bg = [1 1 1; 0 0 0] 40 | % will only produce colors that are distinguishable from both white and 41 | % black. 42 | % 43 | % colors = distinguishable_colors(n_colors,bg,rgb2labfunc) 44 | % By default, distinguishable_colors uses the image processing toolbox's 45 | % color conversion functions makecform and applycform. Alternatively, you 46 | % can supply your own color conversion function. 47 | % 48 | % Example: 49 | % c = distinguishable_colors(25); 50 | % figure 51 | % image(reshape(c,[1 size(c)])) 52 | % 53 | % Example using the file exchange's 'colorspace': 54 | % func = @(x) colorspace('RGB->Lab',x); 55 | % c = distinguishable_colors(25,'w',func); 56 | 57 | % Copyright 2010-2011 by Timothy E. Holy 58 | 59 | % Parse the inputs 60 | if (nargin < 2) 61 | bg = [1 1 1]; % default white background 62 | else 63 | if iscell(bg) 64 | % User specified a list of colors as a cell aray 65 | bgc = bg; 66 | for i = 1:length(bgc) 67 | bgc{i} = parsecolor(bgc{i}); 68 | end 69 | bg = cat(1,bgc{:}); 70 | else 71 | % User specified a numeric array of colors (n-by-3) 72 | bg = parsecolor(bg); 73 | end 74 | end 75 | 76 | % Generate a sizable number of RGB triples. This represents our space of 77 | % possible choices. By starting in RGB space, we ensure that all of the 78 | % colors can be generated by the monitor. 
79 | n_grid = 30; % number of grid divisions along each axis in RGB space 80 | x = linspace(0,1,n_grid); 81 | [R,G,B] = ndgrid(x,x,x); 82 | rgb = [R(:) G(:) B(:)]; 83 | if (n_colors > size(rgb,1)/3) 84 | error('You can''t readily distinguish that many colors'); 85 | end 86 | 87 | % Convert to Lab color space, which more closely represents human 88 | % perception 89 | if (nargin > 2) 90 | lab = func(rgb); 91 | bglab = func(bg); 92 | else 93 | C = makecform('srgb2lab'); 94 | lab = applycform(rgb,C); 95 | bglab = applycform(bg,C); 96 | end 97 | 98 | % If the user specified multiple background colors, compute distances 99 | % from the candidate colors to the background colors 100 | mindist2 = inf(size(rgb,1),1); 101 | for i = 1:size(bglab,1)-1 102 | dX = bsxfun(@minus,lab,bglab(i,:)); % displacement all colors from bg 103 | dist2 = sum(dX.^2,2); % square distance 104 | mindist2 = min(dist2,mindist2); % dist2 to closest previously-chosen color 105 | end 106 | 107 | % Iteratively pick the color that maximizes the distance to the nearest 108 | % already-picked color 109 | colors = zeros(n_colors,3); 110 | lastlab = bglab(end,:); % initialize by making the "previous" color equal to background 111 | for i = 1:n_colors 112 | dX = bsxfun(@minus,lab,lastlab); % displacement of last from all colors on list 113 | dist2 = sum(dX.^2,2); % square distance 114 | mindist2 = min(dist2,mindist2); % dist2 to closest previously-chosen color 115 | [~,index] = max(mindist2); % find the entry farthest from all previously-chosen colors 116 | colors(i,:) = rgb(index,:); % save for output 117 | lastlab = lab(index,:); % prepare for next iteration 118 | end 119 | end 120 | 121 | function c = parsecolor(s) 122 | if ischar(s) 123 | c = colorstr2rgb(s); 124 | elseif isnumeric(s) && size(s,2) == 3 125 | c = s; 126 | else 127 | error('MATLAB:InvalidColorSpec','Color specification cannot be parsed.'); 128 | end 129 | end 130 | 131 | function c = colorstr2rgb(c) 132 | % Convert a color string to an 
RGB value. 133 | % This is cribbed from Matlab's whitebg function. 134 | % Why don't they make this a stand-alone function? 135 | rgbspec = [1 0 0;0 1 0;0 0 1;1 1 1;0 1 1;1 0 1;1 1 0;0 0 0]; 136 | cspec = 'rgbwcmyk'; 137 | k = find(cspec==c(1)); 138 | if isempty(k) 139 | error('MATLAB:InvalidColorString','Unknown color string.'); 140 | end 141 | if k~=3 || length(c)==1, 142 | c = rgbspec(k,:); 143 | elseif length(c)>2, 144 | if strcmpi(c(1:3),'bla') 145 | c = [0 0 0]; 146 | elseif strcmpi(c(1:3),'blu') 147 | c = [0 0 1]; 148 | else 149 | error('MATLAB:UnknownColorString', 'Unknown color string.'); 150 | end 151 | end 152 | end 153 | -------------------------------------------------------------------------------- /Evaluation/300W/util/drawLandmarks.m: -------------------------------------------------------------------------------- 1 | function [ image ] = drawLandmarks( image , landmarks ,landmarkColor) 2 | green(1, 1, :) = landmarkColor; 3 | if size(image, 3) == 1 4 | image = repmat(image, [1 1 3]); 5 | end 6 | height=size(image,1); 7 | width=size(image,2); 8 | pointNum = size(landmarks,1); 9 | landmarks = round(landmarks) + 1; 10 | for i = 1 : pointNum 11 | p = landmarks(i,:); 12 | if p(1)<3 || p(1)>width-2 || p(2)<3 || p(2)>height-2 13 | continue; 14 | end 15 | image(p(2) - 1 : p(2) + 1, p(1) - 2 : p(1) + 2, :) = repmat(green, [3 5]); 16 | image([p(2) - 2 p(2) + 2], p(1) - 1 : p(1) + 1, :) = repmat(green, [2 3]); 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /Evaluation/300W/util/drawRect.m: -------------------------------------------------------------------------------- 1 | function [img] = drawRect (img , Rect) 2 | red(1, 1, :) = [255 0 0]; 3 | Rect=round(Rect); 4 | if size(img, 3) == 1 5 | img = repmat(img, [1 1 3]); 6 | end 7 | imgSize=size(img); 8 | Rect(1)=max(Rect(1),2); 9 | Rect(2)=max(Rect(2),2); 10 | Rect(3)=min(Rect(3),imgSize(2)-1); 11 | Rect(4)=min(Rect(4),imgSize(1)-1); 12 | 
width=Rect(3)-Rect(1)+1; 13 | height=Rect(4)-Rect(2)+1; 14 | 15 | img(Rect(2):Rect(4), Rect(1) - 1 : Rect(1) + 1, :) = repmat(red, [height 3]); 16 | img(Rect(2):Rect(4), Rect(3) - 1 : Rect(3) + 1, :) = repmat(red, [height 3]); 17 | 18 | img(Rect(2)-1:Rect(2)+1, Rect(1) : Rect(3), :) = repmat(red, [3 width]); 19 | img(Rect(4)-1:Rect(4)+1, Rect(1) : Rect(3), :) = repmat(red, [3 width]); 20 | end -------------------------------------------------------------------------------- /Evaluation/300W/util/plot_ced.m: -------------------------------------------------------------------------------- 1 | function plot_ced(bins, ced_values, legend_entries, title_str, x_limit, ... 2 | colors, linewidth, fontsize) 3 | % Check arguments 4 | numberOfCurves = size(ced_values, 2); 5 | if nargin < 5 6 | x_limit = 0.08; 7 | end 8 | if nargin < 6 9 | colors = colormap(lines(numberOfCurves)); 10 | end 11 | if nargin < 7 12 | linewidth = 2; 13 | end 14 | if nargin < 8 15 | fontsize = 12; 16 | end 17 | 18 | % Render curves 19 | figure; 20 | for k=1:numberOfCurves 21 | plot(bins, ced_values(:, k), ... 22 | 'color', colors(k, :), ... 23 | 'linewidth', linewidth, ... 24 | 'marker', 'none', ... 
25 | 'linestyle', '-'); 26 | hold on; 27 | end 28 | hold off; 29 | 30 | % Enable grid 31 | grid on; 32 | set(gca, 'gridlinestyle', '--'); 33 | set(gca, 'fontsize', fontsize); 34 | set(gca, 'ytick', 0:0.1:1); 35 | 36 | % Set labels, limits and legend 37 | ylabel('Images Proportion', 'fontsize', fontsize); 38 | xlabel('Point-to-point Normalized RMS Error', 'fontsize', fontsize); 39 | %title(title_str, 'fontsize', fontsize); 40 | xlim([0, x_limit]); 41 | ylim([0.0, 1.0]); 42 | legend(legend_entries, 'Location', 'NorthWest'); 43 | 44 | end 45 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/Menpo Profile Test Set.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/Menpo Profile Test Set.pdf -------------------------------------------------------------------------------- /Evaluation/Menpo2D/Menpo Profile Test Set.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/Menpo Profile Test Set.png -------------------------------------------------------------------------------- /Evaluation/Menpo2D/Menpo Semifrontal Test Set.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/Menpo Semifrontal Test Set.pdf -------------------------------------------------------------------------------- /Evaluation/Menpo2D/Menpo Semifrontal Test Set.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/Menpo Semifrontal Test Set.png 
-------------------------------------------------------------------------------- /Evaluation/Menpo2D/menpo_test_figure_profile.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | clc; 3 | 4 | addpath util; 5 | 6 | load('profile.mat'); 7 | 8 | count=length(profile); 9 | 10 | pts_num = 39; 11 | 12 | % for i=1:length(profile) 13 | % profile{i}.tmp=LoadPTS(sprintf('./profile/%s/%s.pts', profile{i}.status, profile{i}.name)); 14 | % end 15 | 16 | gt=zeros(pts_num,2,count); 17 | for i=1:length(profile) 18 | gt(:,:,i)=profile{i}.gt; 19 | end 20 | 21 | % num = 5; 22 | num = 4; 23 | 24 | spacing=0.0005; 25 | sampling = 0:spacing:0.35; 26 | x = sampling; 27 | y = repmat(x',1,num); 28 | profile_errs=cell(num,1); 29 | det=zeros(pts_num,2,count); 30 | 31 | % participants = {'yang','he','wu','deng','tmp'}; 32 | participants = {'yang','he','wu','deng'}; 33 | 34 | for iter = 1:num 35 | for i=1:length(profile) 36 | det(:,:,i)=profile{i}.(participants{iter}); 37 | end 38 | profile_errs{iter} = compute_diag_error( gt, det ); 39 | for i=1:numel(sampling) 40 | y(i,iter) = sum(profile_errs{iter} < sampling(i)) / numel(profile_errs{iter}); 41 | end 42 | end 43 | 44 | title = 'Menpo Profile Test Set'; 45 | 46 | % methods={'J. Yang et al','Z. He et al', 'W. Wu et al', 'J. Deng et al','tmp'}; 47 | methods={'J. Yang et al','Z. He et al', 'W. Wu et al', 'J. Deng et al'}; 48 | 49 | colors = lines(num); 50 | 51 | x_limit = 0.03; 52 | 53 | linewidth = 3; 54 | 55 | fontsize = 12; 56 | 57 | plot_ced(x, y, methods, strrep(title,'_',' '), ... 
58 | x_limit, colors, linewidth, fontsize); 59 | 60 | ax = gca; 61 | outerpos = ax.OuterPosition; 62 | ti = ax.TightInset; 63 | left = outerpos(1) + ti(1); 64 | bottom = outerpos(2) + ti(2); 65 | ax_width = outerpos(3) - ti(1) - ti(3); 66 | ax_height = outerpos(4) - ti(2) - ti(4); 67 | ax.Position = [left bottom ax_width ax_height]; 68 | 69 | fig = gcf; 70 | fig.PaperPositionMode = 'auto'; 71 | fig_pos = fig.PaperPosition; 72 | fig.PaperSize = [fig_pos(3) fig_pos(4)]* 1.05; 73 | 74 | print(fig,'-dpdf',[ title '.pdf']); -------------------------------------------------------------------------------- /Evaluation/Menpo2D/menpo_test_figure_semifrontal.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | clc; 3 | 4 | addpath util; 5 | 6 | load('semifrontal.mat'); 7 | count=length(semifrontal); 8 | pts_num = 68; 9 | 10 | % for i=1:length(semifrontal) 11 | % semifrontal{i}.tmp = LoadPTS(sprintf('./semifrontal/%s.pts',semifrontal{i}.name)); 12 | % end 13 | 14 | gt=zeros(pts_num,2,count); 15 | for i=1:length(semifrontal) 16 | gt(:,:,i)=semifrontal{i}.gt; 17 | end 18 | 19 | % num = 5; 20 | num = 4; 21 | 22 | spacing=0.0005; 23 | sampling = 0:spacing:0.35; 24 | x = sampling; 25 | y = repmat(x',1,num); 26 | semifrontal_errs=cell(num,1); 27 | det=zeros(pts_num,2,count); 28 | 29 | % participants = {'yang','he','wu','deng','tmp'}; 30 | participants = {'yang','he','wu','deng'}; 31 | 32 | for iter = 1:num 33 | for i=1:length(semifrontal) 34 | det(:,:,i)=semifrontal{i}.(participants{iter}); 35 | end 36 | semifrontal_errs{iter} = compute_diag_error( gt, det ); 37 | for i=1:numel(sampling) 38 | y(i,iter) = sum(semifrontal_errs{iter} < sampling(i)) / numel(semifrontal_errs{iter}); 39 | end 40 | end 41 | 42 | title = 'Menpo Semifrontal Test Set'; 43 | 44 | % methods={'J. Yang et al','Z. He et al', 'W. Wu et al', 'J. Deng et al','tmp'}; 45 | methods={'J. Yang et al','Z. He et al', 'W. Wu et al', 'J. 
Deng et al'}; 46 | 47 | colors = lines(num); 48 | 49 | x_limit = 0.03; 50 | 51 | linewidth = 3; 52 | 53 | fontsize = 12; 54 | 55 | % Render curves 56 | plot_ced(x, y, methods, strrep(title,'_',' '), ... 57 | x_limit, colors, linewidth, fontsize); 58 | 59 | ax = gca; 60 | outerpos = ax.OuterPosition; 61 | ti = ax.TightInset; 62 | left = outerpos(1) + ti(1); 63 | bottom = outerpos(2) + ti(2); 64 | ax_width = outerpos(3) - ti(1) - ti(3); 65 | ax_height = outerpos(4) - ti(2) - ti(4); 66 | ax.Position = [left bottom ax_width ax_height]; 67 | 68 | fig = gcf; 69 | fig.PaperPositionMode = 'auto'; 70 | fig_pos = fig.PaperPosition; 71 | fig.PaperSize = [fig_pos(3) fig_pos(4)]* 1.05; 72 | 73 | print(fig,'-dpdf',[ title '.pdf']); -------------------------------------------------------------------------------- /Evaluation/Menpo2D/profile.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/profile.mat -------------------------------------------------------------------------------- /Evaluation/Menpo2D/readme.md: -------------------------------------------------------------------------------- 1 | # Results: 2 | 3 | ![Menpo2D_Semifrontal_Test](https://github.com/jiankangdeng/MenpoBenchmark/blob/master/Evaluation/Menpo2D/Menpo%20Semifrontal%20Test%20Set.png) 4 | 5 | ![Menpo2D_Profile_Test](https://github.com/jiankangdeng/MenpoBenchmark/blob/master/Evaluation/Menpo2D/Menpo%20Profile%20Test%20Set.png) 6 | 7 | # Notes: 8 | 9 | 1. Please contact me (j.deng16@imperial.ac.uk), if you do not want to be included in this data set. 10 | We will remove related images by request. 11 | 12 | 2. If you find some wrong annotations (> 10 faces), please contact us (j.deng16@imperial.ac.uk) to refine the ground-truth. 13 | The ground-truth will keep on updating. 14 | 15 | 3. 
If you want your method to be a baseline, please send me (j.deng16@imperial.ac.uk) your results in the semifrontal/profile.mat or in the format of .pts. 16 | 17 | # References: 18 | 19 | Menpo2D Datasets: 20 | 21 | ``` 22 | @article{deng2018menpo, 23 | title={The Menpo benchmark for multi-pose 2D and 3D facial landmark localisation and tracking}, 24 | author={Deng, Jiankang and Roussos, Anastasios and Chrysos, Grigorios and Ververas, Evangelos and Kotsia, Irene and Shen, Jie and Zafeiriou, Stefanos}, 25 | journal={International Journal of Computer Vision}, 26 | pages={1--26}, 27 | year={2018}, 28 | publisher={Springer} 29 | } 30 | 31 | @inproceedings{zafeiriou2017menpo2d, 32 | title={The menpo facial landmark localisation challenge: A step towards the solution}, 33 | author={Zafeiriou, Stefanos and Trigeorgis, George and Chrysos, Grigorios and Deng, Jiankang and Shen, Jie}, 34 | booktitle={Computer Vision and Pattern Recognition (CVPR) Workshops}, 35 | year={2017} 36 | } 37 | ``` 38 | Top 3 participants (yang, he, wu) in the Menpo 2D challenge and our baseline (deng): 39 | 40 | ``` 41 | @inproceedings{yang2017stacked, 42 | title={Stacked hourglass network for robust facial landmark localisation}, 43 | author={Yang, Jing and Liu, Qingshan and Zhang, Kaihua}, 44 | booktitle={Computer Vision and Pattern Recognition Workshops (CVPRW)}, 45 | year={2017} 46 | } 47 | 48 | @inproceedings{he2017robust, 49 | title={Robust fec-cnn: A high accuracy facial landmark detection system}, 50 | author={He, Zhenliang and Zhang, Jie and Kan, Meina and Shan, Shiguang and Chen, Xilin}, 51 | booktitle={Computer Vision and Pattern Recognition Workshops (CVPRW)}, 52 | year={2017} 53 | } 54 | 55 | @inproceedings{wu2017leveraging, 56 | title={Leveraging intra and inter-dataset variations for robust face alignment}, 57 | author={Wu, Wenyan and Yang, Shuo}, 58 | booktitle={Computer Vision and Pattern Recognition Workshops (CVPRW)}, 59 | year={2017} 60 | } 61 | 62 | @article{deng2017joint, 63 | 
title={Joint multi-view face alignment in the wild}, 64 | author={Deng, Jiankang and Trigeorgis, George and Zhou, Yuxiang and Zafeiriou, Stefanos}, 65 | journal={arXiv:1708.06023}, 66 | year={2017} 67 | } 68 | ``` 69 | 70 | 71 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/semifrontal.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/Evaluation/Menpo2D/semifrontal.mat -------------------------------------------------------------------------------- /Evaluation/Menpo2D/util/LoadPTS.m: -------------------------------------------------------------------------------- 1 | function pts = LoadPTS(ptsPath) 2 | pts=[]; 3 | ptsFile=fopen(ptsPath); 4 | if ptsFile>0 5 | npoints=textscan(ptsFile,'%s %f',1, 'HeaderLines', 1); 6 | points=textscan(ptsFile, '%f %f',npoints{2},'MultipleDelimsAsOne',2,'Headerlines',2); 7 | pts=cell2mat(points); 8 | fclose(ptsFile); 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/util/SavePTS.m: -------------------------------------------------------------------------------- 1 | function SavePTS(pts, ptsPath) 2 | fPTS = fopen(ptsPath, 'w'); 3 | numPoints = size(pts, 1); 4 | fprintf(fPTS, 'version: 1.0\r\nn_points: %d\r\n{\r\n', numPoints); 5 | for j = 1 : numPoints 6 | fprintf(fPTS, '%f %f\r\n', pts(j, 1), pts(j, 2)); 7 | end 8 | fprintf(fPTS, '}\r\n'); 9 | fclose(fPTS); 10 | end 11 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/util/compute_diag_error.m: -------------------------------------------------------------------------------- 1 | function [ error_per_image ] = compute_diag_error( ground_truth_all, detected_points_all ) 2 | %compute_error 3 | % compute the average point-to-point Euclidean error normalized by the 4 | % diag 
bounding box diagonal (measured as the Euclidean distance between the 5 | % top-left and bottom-right corners of the ground-truth bounding box) 6 | % 7 | % Inputs: 8 | % ground_truth_all, size: num_of_points x 2 x num_of_images 9 | % detected_points_all, size: num_of_points x 2 x num_of_images 10 | % Output: 11 | % error_per_image, size: num_of_images x 1 12 | 13 | 14 | num_of_images = size(ground_truth_all,3); 15 | num_of_points = size(ground_truth_all,1); 16 | 17 | error_per_image = zeros(num_of_images,1); 18 | 19 | for i =1:num_of_images 20 | detected_points = detected_points_all(:,:,i); 21 | ground_truth_points = ground_truth_all(:,:,i); 22 | 23 | diag_distance = norm([min(ground_truth_points(:,1)) min(ground_truth_points(:,2))]-[max(ground_truth_points(:,1)) max(ground_truth_points(:,2))]); 24 | 25 | sum=0; 26 | for j=1:num_of_points 27 | sum = sum+norm(detected_points(j,:)-ground_truth_points(j,:)); 28 | end 29 | error_per_image(i) = sum/(num_of_points*diag_distance); 30 | end 31 | 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/util/drawLandmarks.m: -------------------------------------------------------------------------------- 1 | function [ image ] = drawLandmarks( image , landmarks ,landmarkColor) 2 | green(1, 1, :) = landmarkColor; 3 | if size(image, 3) == 1 4 | image = repmat(image, [1 1 3]); 5 | end 6 | height=size(image,1); 7 | width=size(image,2); 8 | pointNum = size(landmarks,1); 9 | landmarks = round(landmarks) + 1; 10 | for i = 1 : pointNum 11 | p = landmarks(i,:); 12 | if p(1)<3 || p(1)>width-2 || p(2)<3 || p(2)>height-2 13 | continue; 14 | end 15 | image(p(2) - 1 : p(2) + 1, p(1) - 2 : p(1) + 2, :) = repmat(green, [3 5]); 16 | image([p(2) - 2 p(2) + 2], p(1) - 1 : p(1) + 1, :) = repmat(green, [2 3]); 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /Evaluation/Menpo2D/util/drawRect.m: 
--------------------------------------------------------------------------------
function [img] = drawRect (img , Rect)
% drawRect  Draw a red rectangle outline (3 pixels thick) on an image.
%   img:  HxWx1 or HxWx3 image; always returned as HxWx3.
%   Rect: [x1 y1 x2 y2] corner coordinates; clamped to the image bounds.
red(1, 1, :) = [255 0 0];
Rect = round(Rect);
if size(img, 3) == 1
    img = repmat(img, [1 1 3]);   % promote grayscale to RGB
end
imgSize = size(img);
% Clamp so the 3-pixel-thick border stays within the image.
Rect(1) = max(Rect(1), 2);
Rect(2) = max(Rect(2), 2);
Rect(3) = min(Rect(3), imgSize(2) - 1);
Rect(4) = min(Rect(4), imgSize(1) - 1);
width  = Rect(3) - Rect(1) + 1;
height = Rect(4) - Rect(2) + 1;

% Vertical edges.
img(Rect(2):Rect(4), Rect(1) - 1 : Rect(1) + 1, :) = repmat(red, [height 3]);
img(Rect(2):Rect(4), Rect(3) - 1 : Rect(3) + 1, :) = repmat(red, [height 3]);

% Horizontal edges.
img(Rect(2)-1:Rect(2)+1, Rect(1) : Rect(3), :) = repmat(red, [3 width]);
img(Rect(4)-1:Rect(4)+1, Rect(1) : Rect(3), :) = repmat(red, [3 width]);
end
--------------------------------------------------------------------------------
/Evaluation/Menpo2D/util/plot_ced.m:
--------------------------------------------------------------------------------
function plot_ced(bins, ced_values, legend_entries, title_str, x_limit, ...
    colors, linewidth, fontsize)
% plot_ced  Render cumulative error distribution (CED) curves.
%   bins:           x-axis error values.
%   ced_values:     numel(bins) x numberOfCurves proportions in [0, 1].
%   legend_entries: cell array of method names, one per curve.
%   Remaining arguments are optional and default as set below.

% Check arguments
numberOfCurves = size(ced_values, 2);
if nargin < 5
    x_limit = 0.08;
end
if nargin < 6
    % Use lines() directly: calling colormap() here would create or modify
    % a figure as a side effect before the figure below is opened.
    colors = lines(numberOfCurves);
end
if nargin < 7
    linewidth = 2;
end
if nargin < 8
    fontsize = 12;
end

% Render curves
figure;
for k = 1:numberOfCurves
    plot(bins, ced_values(:, k), ...
        'color', colors(k, :), ...
        'linewidth', linewidth, ...
        'marker', 'none', ...
        'linestyle', '-');
    hold on;
end
hold off;

% Enable grid
grid on;
set(gca, 'gridlinestyle', '--');
set(gca, 'fontsize', fontsize);
set(gca, 'ytick', 0:0.1:1);

% Set labels, limits and legend
ylabel('Images Proportion', 'fontsize', fontsize);
xlabel('Face Diagonal Distance Normalized Point-to-point Error', 'fontsize', fontsize);
title(title_str, 'fontsize', fontsize);
xlim([0, x_limit]);
ylim([0.0, 1.0]);
legend(legend_entries, 'Location', 'NorthWest');

end
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# MenpoBenchmark
Multi-pose 2D and 3D Face Alignment and Tracking

The face boxes and five facial landmarks within the annotation files are predicted by our face detector ([RetinaFace](https://github.com/deepinsight/insightface/tree/master/detection/retinaface)), which achieves state-of-the-art performance on the WiderFace dataset.
We have released this face detector, thus the face alignment algorithms can be tested from scratch under in-the-wild environment.
6 | 7 | # 2D Face Alignment 8 | 9 | ## Dataset Download Links 10 | 11 | Menpo2D [Google Drive](https://drive.google.com/file/d/1CUqs0n135lye6J6RM5FQXT_DIT45dKvP/view?usp=sharing) 12 | 13 | 300W [Google Drive](https://drive.google.com/file/d/1VGT24gi5nd2TnGRLbHRtJkAGQbclkcJi/view?usp=sharing) 14 | 15 | COFW [Google Drive](https://drive.google.com/file/d/1mNVvmDlago54JwsqpP7aLoBP85Tuz5mA/view?usp=sharing) 16 | 17 | MultiPIE [Google Drive](https://drive.google.com/file/d/18JFjBTAZqthpORmEf2LuT14IuMYNyD_h/view?usp=sharing) 18 | 19 | XM2VTS [Google Drive](https://drive.google.com/file/d/1qdBlQhq9YEt5lzX1OGy5_AyjFL3vWxRs/view?usp=sharing) 20 | 21 | FRGC [Google Drive](https://drive.google.com/file/d/1T2Ux0tjd5CxI9PWZb5sXThuGvWH-oM5p/view?usp=sharing) 22 | 23 | ## Landmark Configuration 24 | 25 | 68/39 landmarks (The landmark configurations are from MultiPIE.) 26 | ![menpo2Dconfiguration](https://github.com/jiankangdeng/MenpoBenchmark/blob/master/menpo2D_landmarks.png) 27 | 28 | ## Image Training Datasets 29 | 30 | (1) 300W/Train (68; 3702) 31 | 32 | (2) **Menpo2D/Train/image/semifrontal (68; 6653)** 33 | 34 | (3) Menpo2D/Train/image/profile (39; 2290) 35 | 36 | ## Image Test Datasets 37 | 38 | (1) 300W/Validation (68; 135) 39 | 40 | (2) COFW (68; 507) 41 | 42 | (3) 300W/Test (68; 600) 43 | 44 | (4) **Menpo2D/Test/image/semifrontal (68; 5335)** 45 | 46 | (5) Menpo2D/Test/image/profile (39; 1946) 47 | 48 | ## Video Training Datasets 49 | 50 | (1) 300VW 51 | 52 | ## Video Test Datasets 53 | 54 | (1) 300VW 55 | 56 | # 3D Face Alignment 57 | 58 | ## Landmark Configuration 59 | 60 | 84 landmarks 61 | ![menpo3Dconfiguration](https://github.com/jiankangdeng/MenpoBenchmark/blob/master/menpo3D_landmarks.png) 62 | 63 | ## Image Training Datasets 64 | 65 | ## Image Test Datasets 66 | 67 | ## Video Training Datasets 68 | 69 | ## Video Test Datasets 70 | 71 | # Citation 72 | 73 | IBUG just provides the landmark annotations, but some face images are from other works. 
Please cite the original papers first and follow their data license. 74 | 75 | 76 | ``` 77 | 78 | @inproceedings{deng2020retinaface, 79 | title={Retinaface: Single-shot multi-level face localisation in the wild}, 80 | author={Deng, Jiankang and Guo, Jia and Ververas, Evangelos and Kotsia, Irene and Zafeiriou, Stefanos}, 81 | booktitle={Computer Vision and Pattern Recognition (CVPR)}, 82 | year={2020} 83 | } 84 | 85 | @article{deng2019menpo, 86 | title={The menpo benchmark for multi-pose 2d and 3d facial landmark localisation and tracking}, 87 | author={Deng, Jiankang and Roussos, Anastasios and Chrysos, Grigorios and Ververas, Evangelos and Kotsia, Irene and Shen, Jie and Zafeiriou, Stefanos}, 88 | journal={International Journal of Computer Vision}, 89 | volume={127}, 90 | number={6}, 91 | pages={599--624}, 92 | year={2019}, 93 | publisher={Springer} 94 | } 95 | 96 | @inproceedings{zafeiriou2017menpo2d, 97 | title={The menpo facial landmark localisation challenge: A step towards the solution}, 98 | author={Zafeiriou, Stefanos and Trigeorgis, George and Chrysos, Grigorios and Deng, Jiankang and Shen, Jie}, 99 | booktitle={Computer Vision and Pattern Recognition (CVPR) Workshops}, 100 | year={2017} 101 | } 102 | 103 | @inproceedings{zafeiriou2017menpo3d, 104 | title={The 3d menpo facial landmark tracking challenge}, 105 | author={Zafeiriou, Stefanos and Chrysos, Grigorios and Roussos, Anastasios and Ververas, Evangelos and Deng, Jiankang and Trigeorgis, George}, 106 | booktitle={International Conference on Computer Vision (ICCV) Workshops}, 107 | year={2017} 108 | } 109 | 110 | ``` 111 | 112 | -------------------------------------------------------------------------------- /menpo2D_landmarks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/menpo2D_landmarks.png 
-------------------------------------------------------------------------------- /menpo3D_landmarks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiankangdeng/MenpoBenchmark/7425b6d5571e3bda1943cb2d6838ed187c2c3a64/menpo3D_landmarks.png --------------------------------------------------------------------------------