├── .gitignore ├── LICENSE ├── README.md ├── pom.xml └── src └── main ├── java └── org │ └── imesha │ └── examples │ └── javacv │ ├── CNNAgeDetector.java │ ├── CNNGenderDetector.java │ ├── HaarFaceDetector.java │ ├── JavaCVExample.java │ └── util │ └── ImageUtils.java └── resources ├── caffe ├── age_net.caffemodel ├── deploy_agenet.prototxt ├── deploy_gendernet.prototxt └── gender_net.caffemodel ├── detection └── haarcascade_frontalface_alt.xml └── log4j2.xml /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### JetBrains template 3 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 4 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 5 | 6 | # User-specific stuff: 7 | .idea/workspace.xml 8 | .idea/tasks.xml 9 | 10 | # Sensitive or high-churn files: 11 | .idea/dataSources/ 12 | .idea/dataSources.ids 13 | .idea/dataSources.xml 14 | .idea/dataSources.local.xml 15 | .idea/sqlDataSources.xml 16 | .idea/dynamic.xml 17 | .idea/uiDesigner.xml 18 | 19 | # Gradle: 20 | .idea/gradle.xml 21 | .idea/libraries 22 | 23 | # Mongo Explorer plugin: 24 | .idea/mongoSettings.xml 25 | 26 | ## File-based project format: 27 | *.iws 28 | 29 | ## Plugin-specific files: 30 | 31 | # IntelliJ 32 | /out/ 33 | 34 | # mpeltonen/sbt-idea plugin 35 | .idea_modules/ 36 | 37 | # JIRA plugin 38 | atlassian-ide-plugin.xml 39 | 40 | # Crashlytics plugin (for Android Studio and IntelliJ) 41 | com_crashlytics_export_strings.xml 42 | crashlytics.properties 43 | crashlytics-build.properties 44 | fabric.properties 45 | ### Java template 46 | *.class 47 | 48 | # BlueJ files 49 | *.ctxt 50 | 51 | # Mobile Tools for Java (J2ME) 52 | .mtj.tmp/ 53 | 54 | # Package Files # 55 | *.jar 56 | *.war 57 | *.ear 58 | 59 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 60 | hs_err_pid* 61 | 62 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Imesha Sudasingha 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # JavaCV CNN (Convolutional Neural Networks) Example for Age and Gender Recognition
2 | 
3 | A sample repository demonstrating the use of JavaCV and CNNs for gender and age recognition. **Please refer to [Age and gender recognition with JavaCV and CNN](https://medium.com/@Imesha94/age-and-gender-recognition-with-javacv-and-cnn-fdebb3d436c0) for the step-by-step guide.**
4 | 
5 | This repository uses the CNNs trained by [Gil Levi and Tal Hassner in 2015](http://www.openu.ac.il/home/hassner/projects/cnn_agegender).
6 | 
7 | This simple program detects human faces and predicts the gender and age of each detected face.
8 | 
9 | ## Building the project
10 | 
11 | To build this project, run `mvn clean install` at the project root.
12 | 
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.imesha.examples</groupId>
    <artifactId>javacv-example</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.bytedeco</groupId>
            <artifactId>javacv-platform</artifactId>
            <version>1.5.1</version>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.22</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-slf4j-impl</artifactId>
            <version>2.7</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>2.17.1</version>
        </dependency>
        <dependency>
            <groupId>net.coobird</groupId>
            <artifactId>thumbnailator</artifactId>
            <version>0.4.8</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.7.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
--------------------------------------------------------------------------------
/src/main/java/org/imesha/examples/javacv/CNNAgeDetector.java:
--------------------------------------------------------------------------------
1 | package org.imesha.examples.javacv;
2 | 
3 | import org.bytedeco.javacpp.DoublePointer;
4 | import org.bytedeco.javacv.Frame;
5 | import org.bytedeco.opencv.opencv_core.Mat;
6 | import org.bytedeco.opencv.opencv_core.Point;
7 | import org.bytedeco.opencv.opencv_core.Size;
8 | import org.bytedeco.opencv.opencv_dnn.Net;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 | 
12 | import java.io.File;
13 | import java.net.URISyntaxException;
14 | 
15 | import static org.bytedeco.opencv.global.opencv_core.*;
16 | import static org.bytedeco.opencv.global.opencv_dnn.blobFromImage;
17 | import static org.bytedeco.opencv.global.opencv_dnn.readNetFromCaffe;
18 | import static org.bytedeco.opencv.global.opencv_imgproc.resize;
19 | 
20 | /**
21 |  * Age predictor using Convolutional Neural Networks
22 |  *
23 |  * @author Imesha Sudasingha
24 |  */
25 | public class CNNAgeDetector {
26 | 
27 |     private static final Logger logger = LoggerFactory.getLogger(CNNAgeDetector.class);
28 | 
29 |     private static final String[] AGES = new String[]{"0-2", "4-6", "8-13", "15-20", "25-32", "38-43", "48-53", "60-"};
30 | 
31 |     private Net ageNet;
32 | 
33 |     public CNNAgeDetector() {
34 |         try {
35 |             ageNet = new Net();
36 |             File protobuf = new File(getClass().getResource("/caffe/deploy_agenet.prototxt").toURI());
37 |             File caffeModel = new File(getClass().getResource("/caffe/age_net.caffemodel").toURI());
38 | 
39 |             ageNet = readNetFromCaffe(protobuf.getAbsolutePath(), caffeModel.getAbsolutePath());
40 |         } catch (URISyntaxException e) {
41 |             logger.error("Unable to load the caffe model", e);
42 |             throw new IllegalStateException("Unable to load the caffe model", e);
43 |         }
44 |     }
45 | 
46 |     /**
47 |      * Predicts the age of a {@link Mat} supplied to this method. The {@link Mat} is supposed to be the cropped face
48 |      * of a human whose age is to be predicted.
49 |      *
50 |      * @param face  cropped face
51 |      * @param frame whole frame where the target human is also present
52 |      * @return Predicted age range
53 |      */
54 |     public String predictAge(Mat face, Frame frame) {
55 |         try {
56 |             Mat resizedMat = new Mat();
57 |             resize(face, resizedMat, new Size(256, 256));
58 |             normalize(resizedMat, resizedMat, 0, Math.pow(2, frame.imageDepth), NORM_MINMAX, -1, null);
59 | 
60 |             Mat inputBlob = blobFromImage(resizedMat);
61 |             ageNet.setInput(inputBlob, "data", 1.0, null); //set the network input
62 | 
63 |             Mat prob = ageNet.forward("prob");
64 | 
65 |             DoublePointer pointer = new DoublePointer(new double[1]);
66 |             Point max = new Point();
67 |             minMaxLoc(prob, null, pointer, null, max, null);
68 |             return AGES[max.x()];
69 |         } catch (Exception e) {
70 |             logger.error("Error when predicting the age", e);
71 |         }
72 |         return null;
73 |     }
74 | }
75 | 
--------------------------------------------------------------------------------
/src/main/java/org/imesha/examples/javacv/CNNGenderDetector.java:
--------------------------------------------------------------------------------
1 | package org.imesha.examples.javacv;
2 | 
3 | import org.bytedeco.javacpp.indexer.Indexer;
4 | import org.bytedeco.javacv.Frame;
5 | import org.bytedeco.opencv.opencv_core.Mat;
6 | import org.bytedeco.opencv.opencv_core.Size;
7 | import org.bytedeco.opencv.opencv_dnn.Net;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | 
11 | import java.io.File;
12 | 
13 | import static org.bytedeco.opencv.global.opencv_core.NORM_MINMAX;
14 | import static org.bytedeco.opencv.global.opencv_core.normalize;
15 | import static org.bytedeco.opencv.global.opencv_dnn.blobFromImage;
16 | import static org.bytedeco.opencv.global.opencv_dnn.readNetFromCaffe;
17 | import static org.bytedeco.opencv.global.opencv_imgproc.resize;
18 | 
19 | /**
20 |  * The class responsible for recognizing gender. This class uses a CNN (Convolutional Neural Network) to
21 |  * identify the gender of a detected face.
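 * <p>
 * A minimal usage sketch, assuming {@code face} is one of the cropped face {@link Mat}s returned by
 * {@link HaarFaceDetector#detect} and {@code frame} is the grabbed {@link Frame}:
 * <pre>{@code
 * CNNGenderDetector genderDetector = new CNNGenderDetector();
 * CNNGenderDetector.Gender gender = genderDetector.predictGender(face, frame);
 * if (gender == CNNGenderDetector.Gender.MALE) {
 *     // a male face was detected
 * }
 * }</pre>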
22 | * 23 | * @author Imesha Sudasingha 24 | */ 25 | public class CNNGenderDetector { 26 | 27 | private static final Logger logger = LoggerFactory.getLogger(CNNGenderDetector.class); 28 | 29 | private Net genderNet; 30 | 31 | public CNNGenderDetector() { 32 | try { 33 | genderNet = new Net(); 34 | File protobuf = new File(getClass().getResource("/caffe/deploy_gendernet.prototxt").toURI()); 35 | File caffeModel = new File(getClass().getResource("/caffe/gender_net.caffemodel").toURI()); 36 | genderNet = readNetFromCaffe(protobuf.getAbsolutePath(), caffeModel.getAbsolutePath()); 37 | } catch (Exception e) { 38 | logger.error("Error reading prototxt", e); 39 | throw new IllegalStateException("Unable to start CNNGenderDetector", e); 40 | } 41 | } 42 | 43 | /** 44 | * Predicts gender of a given cropped face 45 | * 46 | * @param face the cropped face as a {@link Mat} 47 | * @param frame the original frame where the face was cropped from 48 | * @return Gender 49 | */ 50 | public Gender predictGender(Mat face, Frame frame) { 51 | try { 52 | Mat croppedMat = new Mat(); 53 | resize(face, croppedMat, new Size(256, 256)); 54 | normalize(croppedMat, croppedMat, 0, Math.pow(2, frame.imageDepth), NORM_MINMAX, -1, null); 55 | 56 | Mat inputBlob = blobFromImage(croppedMat); 57 | genderNet.setInput(inputBlob, "data", 1.0, null); //set the network input 58 | 59 | Mat prob = genderNet.forward("prob"); 60 | 61 | Indexer indexer = prob.createIndexer(); 62 | logger.debug("CNN results {},{}", indexer.getDouble(0, 0), indexer.getDouble(0, 1)); 63 | if (indexer.getDouble(0, 0) > indexer.getDouble(0, 1)) { 64 | logger.debug("Male detected"); 65 | return Gender.MALE; 66 | } else { 67 | logger.debug("Female detected"); 68 | return Gender.FEMALE; 69 | } 70 | } catch (Exception e) { 71 | logger.error("Error when processing gender", e); 72 | } 73 | return Gender.NOT_RECOGNIZED; 74 | } 75 | 76 | public enum Gender { 77 | MALE, 78 | FEMALE, 79 | NOT_RECOGNIZED 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/java/org/imesha/examples/javacv/HaarFaceDetector.java: -------------------------------------------------------------------------------- 1 | package org.imesha.examples.javacv; 2 | 3 | 4 | import org.bytedeco.javacv.Frame; 5 | import org.bytedeco.javacv.OpenCVFrameConverter; 6 | import org.bytedeco.opencv.opencv_core.CvMemStorage; 7 | import org.bytedeco.opencv.opencv_core.Mat; 8 | import org.bytedeco.opencv.opencv_core.Rect; 9 | import org.bytedeco.opencv.opencv_core.RectVector; 10 | import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier; 11 | import org.slf4j.Logger; 12 | import org.slf4j.LoggerFactory; 13 | 14 | import java.io.File; 15 | import java.util.HashMap; 16 | import java.util.Map; 17 | 18 | import static org.bytedeco.opencv.global.opencv_core.cvReleaseMemStorage; 19 | 20 | /** 21 | * Face detector using haar classifier cascades 22 | * 23 | * @author Imesha Sudasingha 24 | */ 25 | public class HaarFaceDetector { 26 | 27 | private static final Logger logger = LoggerFactory.getLogger(HaarFaceDetector.class); 28 | 29 | //private CvHaarClassifierCascade haarClassifierCascade; 30 | CascadeClassifier faceCascade; 31 | private CvMemStorage storage; 32 | private OpenCVFrameConverter.ToIplImage iplImageConverter; 33 | private OpenCVFrameConverter.ToMat toMatConverter; 34 | 35 | public HaarFaceDetector() { 36 | iplImageConverter = new OpenCVFrameConverter.ToIplImage(); 37 | toMatConverter = new OpenCVFrameConverter.ToMat(); 38 | 39 | try { 40 | File 
haarCascade = new File(this.getClass().getResource("/detection/haarcascade_frontalface_alt.xml").toURI()); 41 | logger.debug("Using Haar Cascade file located at : {}", haarCascade.getAbsolutePath()); 42 | //haarClassifierCascade = new CvHaarClassifierCascade(cvload(haarCascade.getAbsolutePath())); 43 | faceCascade = new CascadeClassifier(haarCascade.getCanonicalPath()); 44 | 45 | } catch (Exception e) { 46 | logger.error("Error when trying to get the haar cascade", e); 47 | throw new IllegalStateException("Error when trying to get the haar cascade", e); 48 | } 49 | storage = CvMemStorage.create(); 50 | } 51 | 52 | /** 53 | * Detects and returns a map of cropped faces from a given captured frame 54 | * 55 | * @param frame the frame captured by the {@link org.bytedeco.javacv.FrameGrabber} 56 | * @return A map of faces along with their coordinates in the frame 57 | */ 58 | public Map detect(Frame frame) { 59 | Map detectedFaces = new HashMap<>(); 60 | 61 | /* 62 | * return a CV Sequence (kind of a list) with coordinates of rectangle face area. 63 | * (returns coordinates of left top corner & right bottom corner) 64 | */ 65 | //CvSeq detectObjects = cvHaarDetectObjects(iplImage, haarClassifierCascade, storage, 1.5, 3, CV_HAAR_DO_CANNY_PRUNING); 66 | RectVector detectObjects = new RectVector(); 67 | 68 | Mat matImage = toMatConverter.convert(frame); 69 | faceCascade.detectMultiScale(matImage, detectObjects); 70 | 71 | long numberOfPeople = detectObjects.size(); 72 | for (int i = 0; i < numberOfPeople; i++) { 73 | Rect rect = detectObjects.get(i); 74 | Mat croppedMat = matImage.apply(new Rect(rect.x(), rect.y(), rect.width(), rect.height())); 75 | detectedFaces.put(rect, croppedMat); 76 | } 77 | 78 | return detectedFaces; 79 | } 80 | 81 | @Override 82 | public void finalize() { 83 | cvReleaseMemStorage(storage); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/main/java/org/imesha/examples/javacv/JavaCVExample.java: -------------------------------------------------------------------------------- 1 | package org.imesha.examples.javacv; 2 | 3 | import org.bytedeco.javacv.Frame; 4 | import org.bytedeco.javacv.FrameGrabber; 5 | import org.bytedeco.javacv.OpenCVFrameConverter; 6 | import org.bytedeco.javacv.OpenCVFrameGrabber; 7 | import org.bytedeco.opencv.opencv_core.Mat; 8 | import org.bytedeco.opencv.opencv_core.Point; 9 | import org.bytedeco.opencv.opencv_core.Rect; 10 | import org.bytedeco.opencv.opencv_core.Scalar; 11 | import org.imesha.examples.javacv.util.ImageUtils; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | 15 | import javax.swing.*; 16 | import java.awt.*; 17 | import java.awt.event.WindowAdapter; 18 | import java.awt.event.WindowEvent; 19 | import java.awt.image.BufferedImage; 20 | import java.util.Map; 21 | 22 | import static org.bytedeco.opencv.global.opencv_imgproc.*; 23 | 24 | /** 25 | * An example to demonstrate JavaCV's frame grabbing and other features 26 | * 27 | * @author Imesha Sudasingha 28 | */ 29 | public class JavaCVExample { 30 | 31 | private static final Logger logger = LoggerFactory.getLogger(JavaCVExample.class); 32 | 33 | private FrameGrabber frameGrabber; 34 | private OpenCVFrameConverter.ToMat toMatConverter = new OpenCVFrameConverter.ToMat(); 35 | private volatile boolean running = false; 36 | 37 | private HaarFaceDetector faceDetector = new HaarFaceDetector(); 38 | private CNNAgeDetector ageDetector = new CNNAgeDetector(); 39 | private CNNGenderDetector genderDetector = new 
CNNGenderDetector(); 40 | 41 | private JFrame window; 42 | private JPanel videoPanel; 43 | 44 | public JavaCVExample() { 45 | window = new JFrame(); 46 | videoPanel = new JPanel(); 47 | 48 | window.setLayout(new BorderLayout()); 49 | window.setSize(new Dimension(1280, 720)); 50 | window.add(videoPanel, BorderLayout.CENTER); 51 | window.addWindowListener(new WindowAdapter() { 52 | @Override 53 | public void windowClosing(WindowEvent e) { 54 | stop(); 55 | } 56 | }); 57 | } 58 | 59 | /** 60 | * Starts the frame grabbers and then the frame processing. Grabbed and processed frames will be displayed in the 61 | * {@link #videoPanel} 62 | */ 63 | public void start() { 64 | // frameGrabber = new FFmpegFrameGrabber("/dev/video0"); 65 | // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio), 66 | // DC1394FrameGrabber, FlyCapture2FrameGrabber, OpenKinectFrameGrabber, 67 | // PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber. 68 | frameGrabber = new OpenCVFrameGrabber(0); 69 | 70 | //frameGrabber.setFormat("mp4"); 71 | frameGrabber.setImageWidth(1280); 72 | frameGrabber.setImageHeight(720); 73 | 74 | logger.debug("Starting frame grabber"); 75 | try { 76 | frameGrabber.start(); 77 | logger.debug("Started frame grabber with image width-height : {}-{}", frameGrabber.getImageWidth(), frameGrabber.getImageHeight()); 78 | } catch (FrameGrabber.Exception e) { 79 | logger.error("Error when initializing the frame grabber", e); 80 | throw new RuntimeException("Unable to start the FrameGrabber", e); 81 | } 82 | 83 | SwingUtilities.invokeLater(() -> { 84 | window.setVisible(true); 85 | }); 86 | 87 | process(); 88 | 89 | logger.debug("Stopped frame grabbing."); 90 | } 91 | 92 | /** 93 | * Private method which will be called to star frame grabbing and carry on processing the grabbed frames 94 | */ 95 | private void process() { 96 | running = true; 97 | while (running) { 98 | try { 99 | // Here we grab frames from our camera 100 | final Frame frame = frameGrabber.grab(); 101 | 102 | Map detectedFaces = faceDetector.detect(frame); 103 | Mat mat = toMatConverter.convert(frame); 104 | 105 | detectedFaces.entrySet().forEach(rectMatEntry -> { 106 | String age = ageDetector.predictAge(rectMatEntry.getValue(), frame); 107 | CNNGenderDetector.Gender gender = genderDetector.predictGender(rectMatEntry.getValue(), frame); 108 | String caption = String.format("%s:[%s]", gender, age); 109 | logger.debug("Face's caption : {}", caption); 110 | 111 | rectangle(mat, new Point(rectMatEntry.getKey().x(), rectMatEntry.getKey().y()), 112 | new Point(rectMatEntry.getKey().width() + rectMatEntry.getKey().x(), rectMatEntry.getKey().height() + rectMatEntry.getKey().y()), 113 | Scalar.RED, 2, CV_AA, 0); 114 | 115 | int posX = Math.max(rectMatEntry.getKey().x() - 10, 0); 116 | int posY = Math.max(rectMatEntry.getKey().y() - 10, 0); 117 | putText(mat, caption, new Point(posX, posY), CV_FONT_HERSHEY_PLAIN, 1.0, 118 | new Scalar(255, 255, 255, 2.0)); 119 | }); 120 | 121 | // Show the processed mat in UI 122 | Frame processedFrame = toMatConverter.convert(mat); 123 | 124 | Graphics graphics = videoPanel.getGraphics(); 125 | BufferedImage resizedImage = ImageUtils.getResizedBufferedImage(processedFrame, videoPanel); 126 | SwingUtilities.invokeLater(() -> { 127 | graphics.drawImage(resizedImage, 0, 0, videoPanel); 128 | }); 129 | } catch (FrameGrabber.Exception e) { 130 | logger.error("Error when grabbing the frame", e); 131 | } catch (Exception e) { 132 | logger.error("Unexpected error occurred while 
grabbing and processing a frame", e);
133 |             }
134 |         }
135 |     }
136 | 
137 |     /**
138 |      * Stops frame processing and releases the resources attached to frame grabbing.
139 |      */
140 |     public void stop() {
141 |         running = false;
142 |         try {
143 |             logger.debug("Stopping and releasing FrameGrabber");
144 |             frameGrabber.stop();
145 |             frameGrabber.release();
146 |         } catch (FrameGrabber.Exception e) {
147 |             logger.error("Error occurred when stopping the FrameGrabber", e);
148 |         }
149 | 
150 |         window.dispose();
151 |     }
152 | 
153 |     public static void main(String[] args) {
154 |         JavaCVExample javaCVExample = new JavaCVExample();
155 | 
156 |         logger.info("Starting javacv example");
157 |         new Thread(javaCVExample::start).start();
158 | 
159 |         Runtime.getRuntime().addShutdownHook(new Thread(() -> {
160 |             logger.info("Stopping javacv example");
161 |             javaCVExample.stop();
162 |         }));
163 | 
164 |         try {
165 |             Thread.currentThread().join();
166 |         } catch (InterruptedException ignored) { }
167 |     }
168 | }
169 | 
--------------------------------------------------------------------------------
/src/main/java/org/imesha/examples/javacv/util/ImageUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * The MIT License (MIT)
3 |  * Copyright (c) 2016 Imesha Sudasingha
4 |  *
5 | * Permission is hereby granted, free of charge, 6 | * to any person obtaining a copy of this software and associated documentation files (the "Software"), 7 | * to deal in the Software without restriction, including without limitation the rights to use, copy, modify, 8 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, 9 | * and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 10 | *
11 | * The above copyright notice and this permission notice shall be included in all copies or 12 | * substantial portions of the Software. 13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", 15 | * WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 16 | * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 18 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 19 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | */ 22 | package org.imesha.examples.javacv.util; 23 | 24 | import net.coobird.thumbnailator.Thumbnails; 25 | import org.bytedeco.javacv.Frame; 26 | import org.bytedeco.javacv.Java2DFrameConverter; 27 | import org.bytedeco.javacv.OpenCVFrameConverter; 28 | import org.slf4j.Logger; 29 | import org.slf4j.LoggerFactory; 30 | 31 | import javax.swing.*; 32 | import java.awt.image.BufferedImage; 33 | import java.io.IOException; 34 | 35 | /** 36 | * Utils to be used for images related tasks 37 | * 38 | * @author erandi 39 | */ 40 | public class ImageUtils { 41 | 42 | private static final Logger logger = LoggerFactory.getLogger(ImageUtils.class); 43 | private static Java2DFrameConverter frameConverter = new Java2DFrameConverter(); 44 | private static OpenCVFrameConverter.ToMat matConverter = new OpenCVFrameConverter.ToMat(); 45 | 46 | 47 | /** 48 | * Method to get resized buffered image when user passes the relevant frame and video panel. 49 | * 50 | * @param frame frame to be converted to {@link BufferedImage} 51 | * @param videoPanel the {@link JPanel} which is to be used to obtain panel size 52 | * @return resized {@link BufferedImage} 53 | */ 54 | public static BufferedImage getResizedBufferedImage(Frame frame, JPanel videoPanel) { 55 | BufferedImage resizedImage = null; 56 | 57 | try { 58 | /* 59 | * We get notified about the frames that are being added. Then we pass each frame to BufferedImage. 
I have used 60 | * a library called Thumbnailator to achieve the resizing effect along with performance 61 | */ 62 | resizedImage = Thumbnails.of(frameConverter.getBufferedImage(frame)) 63 | .size(videoPanel.getWidth(), videoPanel.getHeight()) 64 | .asBufferedImage(); 65 | } catch (IOException e) { 66 | logger.error("Unable to convert the image to a buffered image", e); 67 | } 68 | 69 | return resizedImage; 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/main/resources/caffe/age_net.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IMS94/javacv-cnn-example/4f872ea7fee4429ae75534f86b1fed362242ea9e/src/main/resources/caffe/age_net.caffemodel -------------------------------------------------------------------------------- /src/main/resources/caffe/deploy_agenet.prototxt: -------------------------------------------------------------------------------- 1 | name: "CaffeNet" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 227 6 | input_dim: 227 7 | layers { 8 | name: "conv1" 9 | type: CONVOLUTION 10 | bottom: "data" 11 | top: "conv1" 12 | convolution_param { 13 | num_output: 96 14 | kernel_size: 7 15 | stride: 4 16 | } 17 | } 18 | layers { 19 | name: "relu1" 20 | type: RELU 21 | bottom: "conv1" 22 | top: "conv1" 23 | } 24 | layers { 25 | name: "pool1" 26 | type: POOLING 27 | bottom: "conv1" 28 | top: "pool1" 29 | pooling_param { 30 | pool: MAX 31 | kernel_size: 3 32 | stride: 2 33 | } 34 | } 35 | layers { 36 | name: "norm1" 37 | type: LRN 38 | bottom: "pool1" 39 | top: "norm1" 40 | lrn_param { 41 | local_size: 5 42 | alpha: 0.0001 43 | beta: 0.75 44 | } 45 | } 46 | layers { 47 | name: "conv2" 48 | type: CONVOLUTION 49 | bottom: "norm1" 50 | top: "conv2" 51 | convolution_param { 52 | num_output: 256 53 | pad: 2 54 | kernel_size: 5 55 | } 56 | } 57 | layers { 58 | name: "relu2" 59 | type: RELU 60 | bottom: "conv2" 61 | top: "conv2" 62 | } 63 | layers { 64 | name: "pool2" 65 | type: POOLING 66 | bottom: "conv2" 67 | top: "pool2" 68 | pooling_param { 69 | pool: MAX 70 | kernel_size: 3 71 | stride: 2 72 | } 73 | } 74 | layers { 75 | name: "norm2" 76 | type: LRN 77 | bottom: "pool2" 78 | top: "norm2" 79 | lrn_param { 80 | local_size: 5 81 | alpha: 0.0001 82 | beta: 0.75 83 | } 84 | } 85 | layers { 86 | name: "conv3" 87 | type: CONVOLUTION 88 | bottom: "norm2" 89 | top: "conv3" 90 | convolution_param { 91 | num_output: 384 92 | pad: 1 93 | kernel_size: 3 94 | } 95 | } 96 | layers{ 97 | name: "relu3" 98 | type: RELU 99 | bottom: "conv3" 100 | top: "conv3" 101 | } 102 | layers { 103 | name: "pool5" 104 | type: POOLING 105 | bottom: "conv3" 106 | top: "pool5" 107 | pooling_param { 108 | pool: MAX 109 | kernel_size: 3 110 | stride: 2 111 | } 112 | } 113 | layers { 114 | name: "fc6" 115 | type: INNER_PRODUCT 116 | bottom: "pool5" 117 | top: "fc6" 118 | inner_product_param { 119 | num_output: 512 120 | } 121 | } 122 | layers { 123 | name: "relu6" 124 | type: RELU 125 | bottom: "fc6" 126 | top: "fc6" 127 | } 128 | layers { 129 | name: "drop6" 130 | type: DROPOUT 131 | bottom: "fc6" 132 | top: "fc6" 133 | dropout_param { 134 | dropout_ratio: 0.5 135 | } 136 | } 137 | layers { 138 | name: "fc7" 139 | type: INNER_PRODUCT 140 | bottom: "fc6" 141 | top: "fc7" 142 | inner_product_param { 143 | num_output: 512 144 | } 145 | } 146 | layers { 147 | name: "relu7" 148 | type: RELU 149 | bottom: "fc7" 150 | top: "fc7" 151 | } 152 | layers { 153 | name: "drop7" 154 | type: 
DROPOUT 155 | bottom: "fc7" 156 | top: "fc7" 157 | dropout_param { 158 | dropout_ratio: 0.5 159 | } 160 | } 161 | layers { 162 | name: "fc8" 163 | type: INNER_PRODUCT 164 | bottom: "fc7" 165 | top: "fc8" 166 | inner_product_param { 167 | num_output: 8 168 | } 169 | } 170 | layers { 171 | name: "prob" 172 | type: SOFTMAX 173 | bottom: "fc8" 174 | top: "prob" 175 | } -------------------------------------------------------------------------------- /src/main/resources/caffe/deploy_gendernet.prototxt: -------------------------------------------------------------------------------- 1 | name: "CaffeNet" 2 | input: "data" 3 | input_dim: 10 4 | input_dim: 3 5 | input_dim: 227 6 | input_dim: 227 7 | layers { 8 | name: "conv1" 9 | type: CONVOLUTION 10 | bottom: "data" 11 | top: "conv1" 12 | convolution_param { 13 | num_output: 96 14 | kernel_size: 7 15 | stride: 4 16 | } 17 | } 18 | layers { 19 | name: "relu1" 20 | type: RELU 21 | bottom: "conv1" 22 | top: "conv1" 23 | } 24 | layers { 25 | name: "pool1" 26 | type: POOLING 27 | bottom: "conv1" 28 | top: "pool1" 29 | pooling_param { 30 | pool: MAX 31 | kernel_size: 3 32 | stride: 2 33 | } 34 | } 35 | layers { 36 | name: "norm1" 37 | type: LRN 38 | bottom: "pool1" 39 | top: "norm1" 40 | lrn_param { 41 | local_size: 5 42 | alpha: 0.0001 43 | beta: 0.75 44 | } 45 | } 46 | layers { 47 | name: "conv2" 48 | type: CONVOLUTION 49 | bottom: "norm1" 50 | top: "conv2" 51 | convolution_param { 52 | num_output: 256 53 | pad: 2 54 | kernel_size: 5 55 | } 56 | } 57 | layers { 58 | name: "relu2" 59 | type: RELU 60 | bottom: "conv2" 61 | top: "conv2" 62 | } 63 | layers { 64 | name: "pool2" 65 | type: POOLING 66 | bottom: "conv2" 67 | top: "pool2" 68 | pooling_param { 69 | pool: MAX 70 | kernel_size: 3 71 | stride: 2 72 | } 73 | } 74 | layers { 75 | name: "norm2" 76 | type: LRN 77 | bottom: "pool2" 78 | top: "norm2" 79 | lrn_param { 80 | local_size: 5 81 | alpha: 0.0001 82 | beta: 0.75 83 | } 84 | } 85 | layers { 86 | name: "conv3" 87 | type: CONVOLUTION 88 | bottom: "norm2" 89 | top: "conv3" 90 | convolution_param { 91 | num_output: 384 92 | pad: 1 93 | kernel_size: 3 94 | } 95 | } 96 | layers{ 97 | name: "relu3" 98 | type: RELU 99 | bottom: "conv3" 100 | top: "conv3" 101 | } 102 | layers { 103 | name: "pool5" 104 | type: POOLING 105 | bottom: "conv3" 106 | top: "pool5" 107 | pooling_param { 108 | pool: MAX 109 | kernel_size: 3 110 | stride: 2 111 | } 112 | } 113 | layers { 114 | name: "fc6" 115 | type: INNER_PRODUCT 116 | bottom: "pool5" 117 | top: "fc6" 118 | inner_product_param { 119 | num_output: 512 120 | } 121 | } 122 | layers { 123 | name: "relu6" 124 | type: RELU 125 | bottom: "fc6" 126 | top: "fc6" 127 | } 128 | layers { 129 | name: "drop6" 130 | type: DROPOUT 131 | bottom: "fc6" 132 | top: "fc6" 133 | dropout_param { 134 | dropout_ratio: 0.5 135 | } 136 | } 137 | layers { 138 | name: "fc7" 139 | type: INNER_PRODUCT 140 | bottom: "fc6" 141 | top: "fc7" 142 | inner_product_param { 143 | num_output: 512 144 | } 145 | } 146 | layers { 147 | name: "relu7" 148 | type: RELU 149 | bottom: "fc7" 150 | top: "fc7" 151 | } 152 | layers { 153 | name: "drop7" 154 | type: DROPOUT 155 | bottom: "fc7" 156 | top: "fc7" 157 | dropout_param { 158 | dropout_ratio: 0.5 159 | } 160 | } 161 | layers { 162 | name: "fc8" 163 | type: INNER_PRODUCT 164 | bottom: "fc7" 165 | top: "fc8" 166 | inner_product_param { 167 | num_output: 2 168 | } 169 | } 170 | layers { 171 | name: "prob" 172 | type: SOFTMAX 173 | bottom: "fc8" 174 | top: "prob" 175 | } 176 | 
--------------------------------------------------------------------------------
/src/main/resources/caffe/gender_net.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IMS94/javacv-cnn-example/4f872ea7fee4429ae75534f86b1fed362242ea9e/src/main/resources/caffe/gender_net.caffemodel
--------------------------------------------------------------------------------
/src/main/resources/log4j2.xml:
--------------------------------------------------------------------------------
(13-line Log4j2 XML configuration; the XML markup was not preserved in this dump)
--------------------------------------------------------------------------------
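Putting the classes above together: `HaarFaceDetector` finds face rectangles in a grabbed `Frame`, and each cropped face `Mat` is handed to `CNNAgeDetector` and `CNNGenderDetector`, which is what `JavaCVExample.process()` does in a loop while drawing the results on screen. The single-frame sketch below condenses that flow; the class name `SingleFrameDemo` is hypothetical, the class is assumed to live in the `org.imesha.examples.javacv` package so the detectors need no imports, and `detect()` is assumed to return a `Map<Rect, Mat>`, which is how `JavaCVExample` consumes it.

package org.imesha.examples.javacv;

import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.FrameGrabber;
import org.bytedeco.javacv.OpenCVFrameGrabber;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Rect;

import java.util.Map;

public class SingleFrameDemo {

    public static void main(String[] args) throws FrameGrabber.Exception {
        // Grab a single frame from the default camera (device index 0), as JavaCVExample does
        OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0);
        grabber.start();
        Frame frame = grabber.grab();

        HaarFaceDetector faceDetector = new HaarFaceDetector();
        CNNAgeDetector ageDetector = new CNNAgeDetector();
        CNNGenderDetector genderDetector = new CNNGenderDetector();

        // detect() maps each face rectangle to its cropped face Mat
        Map<Rect, Mat> faces = faceDetector.detect(frame);
        for (Map.Entry<Rect, Mat> entry : faces.entrySet()) {
            String age = ageDetector.predictAge(entry.getValue(), frame);
            CNNGenderDetector.Gender gender = genderDetector.predictGender(entry.getValue(), frame);
            System.out.printf("%s:[%s]%n", gender, age);
        }

        grabber.stop();
        grabber.release();
    }
}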