├── .github └── workflows │ └── build.yml ├── .gitignore ├── README.md ├── build.gradle ├── core-libs ├── core.jar ├── gluegen-rt.jar └── jogl-all.jar ├── data └── cascade-files │ ├── haarcascade_clock.xml │ ├── haarcascade_eye.xml │ ├── haarcascade_eye_tree_eyeglasses.xml │ ├── haarcascade_frontalface_alt.xml │ ├── haarcascade_frontalface_alt2.xml │ ├── haarcascade_frontalface_alt_tree.xml │ ├── haarcascade_frontalface_default.xml │ ├── haarcascade_fullbody.xml │ ├── haarcascade_lefteye_2splits.xml │ ├── haarcascade_lowerbody.xml │ ├── haarcascade_mcs_eyepair_big.xml │ ├── haarcascade_mcs_eyepair_small.xml │ ├── haarcascade_mcs_leftear.xml │ ├── haarcascade_mcs_lefteye.xml │ ├── haarcascade_mcs_mouth.xml │ ├── haarcascade_mcs_nose.xml │ ├── haarcascade_mcs_rightear.xml │ ├── haarcascade_mcs_righteye.xml │ ├── haarcascade_mcs_upperbody.xml │ ├── haarcascade_profileface.xml │ ├── haarcascade_righteye_2splits.xml │ ├── haarcascade_upperbody.xml │ ├── hogcascade_pedestrians.xml │ └── lbpcascade_frontalface.xml ├── examples ├── BackgroundSubtraction │ ├── BackgroundSubtraction.pde │ └── data │ │ └── street.mov ├── BrightestPoint │ ├── BrightestPoint.pde │ └── robot_light.jpg ├── BrightnessContrast │ ├── BrightnessContrast.pde │ └── test.jpg ├── CalibrationDemo │ ├── CalibrationDemo.pde │ └── data │ │ └── checkerboard.jpg ├── ColorChannels │ ├── ColorChannels.pde │ └── green_object.png ├── DepthFromStereo │ ├── DepthFromStereo.pde │ ├── scene_l.jpg │ └── scene_r.jpg ├── DilationAndErosion │ ├── DilationAndErosion.pde │ ├── line_drawing.jpg │ └── pen_sketch.jpg ├── FaceDetection │ ├── FaceDetection.pde │ └── data │ │ ├── test.jpg │ │ ├── test.png │ │ ├── testImage.png │ │ └── transparent_test.png ├── FilterImages │ ├── FilterImages.pde │ └── test.jpg ├── FindContours │ ├── FindContours.pde │ └── test.jpg ├── FindEdges │ ├── FindEdges.pde │ └── test.jpg ├── FindHistogram │ ├── FindHistogram.pde │ └── test.jpg ├── HSVColorTracking │ ├── HSVColorTracking.pde │ └── screenshots │ │ └── hsv_color_tracking.png ├── HistogramSkinDetection │ ├── HistogramSkinDetection.pde │ └── data │ │ ├── cb-cr.png │ │ └── test.jpg ├── HoughLineDetection │ ├── HoughLineDetection.pde │ └── film_scan.jpg ├── HueRangeSelection │ ├── HueRangeSelection.pde │ ├── colored_balls.jpg │ └── rainbow.jpg ├── ImageDiff │ ├── ImageDiff.pde │ ├── after.jpg │ └── before.jpg ├── ImageFiltering │ ├── ImageFiltering.pde │ └── screenshots │ │ ├── objects_basic_threshold.png │ │ └── touch_adaptive_threshold.png ├── ImageFilteringWithBlobPersistence │ ├── Blob.pde │ ├── ImageFilteringWithBlobPersistence.pde │ └── screenshots │ │ └── blob_persistence.png ├── LiveCamTest │ ├── LiveCamTest.pde │ └── data │ │ └── haarcascade_frontalface_alt.xml ├── LoadAndDisplayImage │ ├── LoadAndDisplayImage.pde │ └── data │ │ ├── test.jpg │ │ └── test.png ├── LumaVsGray │ ├── LumaVsGray.pde │ └── flashlight.jpg ├── MarkerDetection │ ├── MarkerDetection.pde │ └── marker_test.jpg ├── MorphologyOperations │ ├── MorphologyOperations.pde │ └── test.jpg ├── MultipleColorTracking │ ├── MultipleColorTracking.pde │ ├── screenshots │ │ └── multiple_color_tracking.png │ └── sketch.properties ├── OpticalFlow │ ├── OpticalFlow.pde │ └── data │ │ └── sample1.mov ├── RegionOfInterest │ ├── RegionOfInterest.pde │ └── test.jpg ├── WarpPerspective │ ├── WarpPerspective.pde │ └── cards.png ├── WhichFace │ ├── Face.pde │ ├── WhichFace.pde │ └── screenshots │ │ └── whichface.png └── WorkingWithColorImages │ ├── WorkingWithColorImages.pde │ └── test.jpg ├── gradle.properties ├── 
gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── library.properties ├── license.txt ├── processing-library.gradle ├── settings.gradle └── src ├── main └── java │ └── gab │ └── opencv │ ├── Contour.java │ ├── ContourComparator.java │ ├── Flow.java │ ├── Histogram.java │ ├── Line.java │ └── OpenCV.java └── test └── java ├── AlphaChannelTest.java ├── GrabImageTest.java └── OpticalFlowTest.java /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | publishReleases: 7 | description: 'Publish to Releases' 8 | required: false 9 | default: 'true' 10 | 11 | 12 | jobs: 13 | deploy: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v2 18 | - uses: actions/setup-java@v2 19 | with: 20 | distribution: 'temurin' 21 | java-version: '11' 22 | - uses: gradle/gradle-build-action@v2 23 | 24 | - name: Get Library Version 25 | id: get_version 26 | run: | 27 | lib_version="$(./gradlew properties | grep ^version: | cut -d' ' -f2)" 28 | echo "Library Version: $lib_version" 29 | echo "version=$lib_version" >> $GITHUB_OUTPUT 30 | echo "v_version=v$lib_version" >> $GITHUB_OUTPUT 31 | shell: bash 32 | 33 | - name: Build Release Package 34 | run: | 35 | ./gradlew releaseProcessingLib --no-daemon 36 | 37 | - name: Build Android Release Package 38 | run: | 39 | ./gradlew releaseProcessingLib -PoutputNamePostfix=-android -PjavacppPlatform=android-arm,android-arm64,android-x86,android-x86_64 --no-daemon 40 | 41 | - name: Build Bare Release Package 42 | run: | 43 | ./gradlew releaseProcessingLib -Pdisable-fatjar -Pbare -PoutputNamePostfix=-bare --no-daemon 44 | 45 | # upload dist 46 | - name: Upload binaries to release 47 | if: ${{ github.event.inputs.publishReleases == 'true' }} 48 | uses: svenstaro/upload-release-action@v2 49 | with: 50 | repo_token: ${{ secrets.GITHUB_TOKEN }} 51 | file: release/* 52 | tag: "${{ steps.get_version.outputs.v_version }}" 53 | release_name: "Version ${{ steps.get_version.outputs.version }}" 54 | body: "Prebuilt opencv-processing libs version ${{ steps.get_version.outputs.version }}." 55 | overwrite: true 56 | file_glob: true 57 | prerelease: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/** 2 | tmp/** 3 | tmp/**/* 4 | *.tmp 5 | *.bak 6 | *.swp 7 | *~.nib 8 | local.properties 9 | .settings/ 10 | .loadpath 11 | distribution/* 12 | lib/ 13 | 14 | # External tool builders 15 | .externalToolBuilders/ 16 | 17 | # Locally stored "Eclipse launch configurations" 18 | *.launch 19 | 20 | .DS_Store 21 | *.DS_Store 22 | /bin 23 | 24 | build 25 | .gradle 26 | .idea 27 | release/**/* -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## OpenCV for Processing 2 | 3 | **A Processing library for the [OpenCV](http://opencv.org/) computer vision library.** 4 | 5 | #### Why a new fork? 6 | This fork has been created to support more recent OpenCV versions (4.0 and newer) for Processing, as well as more platforms (ARM & ARMHF). 7 | 8 | #### Content 9 | 10 | OpenCV for Processing is based on OpenCV's official Java bindings. 
It attempts to provide convenient wrappers for common OpenCV functions that are friendly to beginners and feel familiar to the Processing environment. 11 | 12 | See the included examples below for an overview of what's possible and links to the relevant example code. Complete documentation is available here: 13 | 14 | **[OpenCV for Processing reference](http://atduskgreg.github.io/opencv-processing/reference/)** 15 | 16 | OpenCV for Processing is based on the officially supported [OpenCV Java API](http://docs.opencv.org/java/), currently at version `4.9.0`. In addition to using the wrapped functionality, you can import OpenCV modules and use any of its documented functions: [OpenCV javadocs](http://docs.opencv.org/java/). See the advanced examples (HistogramSkinDetection, DepthFromStereo, and MarkerDetection) below for details. (This style of API was inspired by Kyle McDonald's [ofxCv addon](https://github.com/kylemcdonald/ofxCv) for OpenFrameworks.) 17 | 18 | Contributions welcome. 19 | 20 | ### Build 21 | - Install JDK 8 for Processing 3, or JDK 11 for Processing 4 22 | 23 | Run Gradle to build a new release package under `/release/opencv-processing.zip`: 24 | 25 | ```bash 26 | # windows 27 | gradlew.bat releaseProcessingLib 28 | 29 | # mac / unix 30 | ./gradlew releaseProcessingLib 31 | ``` 32 | 33 | #### Platform Specific 34 | To build for specific platforms only, set the property `javacppPlatform` to a comma-separated list of targets: 35 | 36 | ```bash 37 | # builds with support for all listed platforms 38 | gradlew.bat releaseProcessingLib -PjavacppPlatform=linux-x86_64,macosx-x86_64,macosx-arm64,windows-x86_64,linux-armhf,linux-arm64 39 | ``` 40 | 41 | #### Build Options 42 | 43 | - `-Pdisable-fatjar` - Disable fat jar building (all dependencies as separate files) 44 | - `-Pbare` - Do not include dependencies (opencv, openblas). This allows other libraries to provide the opencv implementation. 45 | 46 | ### Installing 47 | 48 | OpenCV for Processing currently supports Mac OS X, 32-bit and 64-bit Windows, and 32- and 64-bit Linux. Android support is hopefully coming soon (pull requests welcome). 49 | 50 | _NB: When running on the Mac, make sure you have Processing set to 64-bit mode in the Preferences_ 51 | 52 | See [here](https://github.com/cansik/opencv-processing/releases) for the latest release. 53 | 54 | ### Examples 55 | 56 | #### LiveCamTest 57 | 58 | Access a live camera and do image processing on the result, specifically face detection. 59 | 60 | Code: [LiveCamTest.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/LiveCamTest/LiveCamTest.pde) 61 | 62 | _Note: There's a bug that prevents live camera access in current versions of Processing 2.0 on machines with a Retina display._ 63 | 64 | #### FaceDetection 65 | 66 | Detect faces in images. 67 | 68 | Screen Shot 2013-04-08 at 1.22.18 PM 69 | 70 | Code: [FaceDetection.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/FaceDetection/FaceDetection.pde) 71 | 72 | #### BrightnessContrast 73 | 74 | Adjust the brightness and contrast of color and gray images. 75 | 76 | brightness and contrast 77 | 78 | Code: [BrightnessContrast.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/BrightnessContrast/BrightnessContrast.pde) 79 | 80 | #### FilterImages 81 | 82 | Basic filtering operations on images: threshold, blur, and adaptive thresholds. 
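For reference, the wrapped calls follow this pattern (a condensed version of the FilterImages sketch linked below; the parameter values are illustrative):

```java
import gab.opencv.*;

OpenCV opencv;
PImage gray, thresh, blur, adaptive;

void setup() {
  PImage img = loadImage("test.jpg");
  size(1080, 720);

  opencv = new OpenCV(this, img);
  gray = opencv.getSnapshot();       // grayscale copy of the source

  opencv.threshold(80);              // basic binary threshold
  thresh = opencv.getSnapshot();

  opencv.loadImage(gray);
  opencv.blur(12);                   // blur with a 12px kernel
  blur = opencv.getSnapshot();

  opencv.loadImage(gray);
  opencv.adaptiveThreshold(591, 1);  // arguments: block size, constant
  adaptive = opencv.getSnapshot();
}
```

Each filter operates in place on the image currently loaded into the `OpenCV` object, so `getSnapshot()` is used to keep a copy of one result before loading the next input.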
83 | 84 | Screen Shot 2013-04-12 at 1.42.30 PM 85 | 86 | Code: [FilterImages.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/FilterImages/FilterImages.pde) 87 | 88 | #### FindContours 89 | 90 | Find contours in images and calculate polygon approximations of the contours (i.e., the closest straight-sided polygon that fits the contour). 91 | 92 | contours with polygon approximations 93 | 94 | Code: [FindContours.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/FindContours/FindContours.pde) 95 | 96 | #### FindEdges 97 | 98 | Three different edge-detection techniques: Canny, Scharr, and Sobel. 99 | 100 | Screen Shot 2013-04-10 at 2.03.59 AM 101 | 102 | Code: [FindEdges.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/FindEdges/FindEdges.pde) 103 | 104 | #### FindLines 105 | 106 | Find straight lines in the image using Hough line detection. 107 | 108 | Hough line detection 109 | 110 | Code: [HoughLineDetection.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/HoughLineDetection/HoughLineDetection.pde) 111 | 112 | #### BrightestPoint 113 | 114 | Find the brightest point in an image. 115 | 116 | finding the brightest point 117 | 118 | Code: [BrightestPoint.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/BrightestPoint/BrightestPoint.pde) 119 | 120 | #### RegionOfInterest 121 | 122 | Assign a sub-section (or Region of Interest) of the image to be processed. Video of this example in action here: [Region of Interest demo on Vimeo](https://vimeo.com/69009345). 123 | 124 | region of interest 125 | 126 | Code: [RegionOfInterest.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/RegionOfInterest/RegionOfInterest.pde) 127 | 128 | #### ImageDiff 129 | 130 | Find the difference between two images in order to subtract the background or detect a new object in a scene. 131 | 132 | Screen Shot 2013-04-11 at 2.10.35 PM 133 | 134 | Code: [ImageDiff.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/ImageDiff/ImageDiff.pde) 135 | 136 | #### DilationAndErosion 137 | 138 | Thin (erode) and expand (dilate) an image in order to close holes. These are known as "morphological" operations. 139 | 140 | dilation and erosion 141 | 142 | Code: [DilationAndErosion.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/DilationAndErosion/DilationAndErosion.pde) 143 | 144 | #### BackgroundSubtraction 145 | 146 | Detect moving objects in a scene. Use background subtraction to distinguish background from foreground and contour tracking to track the foreground objects. 147 | 148 | Background Subtraction 149 | 150 | Code: [BackgroundSubtraction.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/BackgroundSubtraction/BackgroundSubtraction.pde) 151 | 152 | 153 | #### WorkingWithColorImages 154 | 155 | Demonstration of what you can do to color images in OpenCV (threshold, blur, etc.) and what you can't (lots of other operations). 156 | 157 | color operations: threshold and blur 158 | 159 | Code: [WorkingWithColorImages.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/WorkingWithColorImages/WorkingWithColorImages.pde) 160 | 161 | #### ColorChannels 162 | 163 | Separate a color image into red, green, and blue or hue, saturation, and value channels in order to work with the channels individually. 
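The channel accessors return OpenCV `Mat` objects that can be converted back to a `PImage` with `getSnapshot()` (condensed from the ColorChannels sketch linked below):

```java
import gab.opencv.*;

OpenCV opencv;
PImage r, g, b, h, s, v;

void setup() {
  PImage src = loadImage("green_object.png");
  size(800, 600);
  opencv = new OpenCV(this, src);

  // RGB channels (the default color space)
  r = opencv.getSnapshot(opencv.getR());
  g = opencv.getSnapshot(opencv.getG());
  b = opencv.getSnapshot(opencv.getB());

  // switch the working color space to HSB
  // to access hue, saturation, and value
  opencv.useColor(HSB);
  h = opencv.getSnapshot(opencv.getH());
  s = opencv.getSnapshot(opencv.getS());
  v = opencv.getSnapshot(opencv.getV());
}
```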
164 | 165 | ColorChannels 166 | 167 | Code: [ColorChannels](https://github.com/atduskgreg/opencv-processing/blob/master/examples/ColorChannels/ColorChannels.pde) 168 | 169 | #### FindHistogram 170 | 171 | Demonstrates use of the findHistogram() function and the Histogram class to get and draw histograms for grayscale and individual color channels. 172 | 173 | gray, red, green, blue histograms 174 | 175 | Code: [FindHistogram.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/FindHistogram/FindHistogram.pde) 176 | 177 | #### HueRangeSelection 178 | 179 | Detect objects based on their color. Demonstrates the use of HSV color space as well as range-based image filtering. 180 | 181 | Hue-based color detection 182 | 183 | Code: [HueRangeSelection.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/HueRangeSelection/HueRangeSelection.pde) 184 | 185 | #### CalibrationDemo (in progress) 186 | 187 | An example of the process involved in calibrating a camera. Currently only detects the corners in a chessboard pattern. 188 | 189 | Screen Shot 2013-05-04 at 2.03.23 AM 190 | 191 | Code: [CalibrationDemo.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/CalibrationDemo/CalibrationDemo.pde) 192 | 193 | #### HistogramSkinDetection 194 | 195 | A more advanced example. Detecting skin in an image based on colors in a region of color space. Warning: uses un-wrapped OpenCV objects and functions. 196 | 197 | Screen Shot 2013-05-04 at 2.25.18 PM 198 | 199 | Code: [HistogramSkinDetection.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/HistogramSkinDetection/HistogramSkinDetection.pde) 200 | 201 | #### DepthFromStereo 202 | 203 | An advanced example. Calculates depth information from a pair of stereo images. Warning: uses un-wrapped OpenCV objects and functions. 204 | 205 | Screen Shot 2013-04-12 at 2.27.30 AM 206 | 207 | Code: [DepthFromStereo.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/DepthFromStereo/DepthFromStereo.pde) 208 | 209 | #### WarpPerspective (in progress) 210 | 211 | Un-distort an object that's in perspective. Coming to the real API soon. 212 | 213 | Warp Perspective 214 | 215 | Code: [WarpPerspective.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/WarpPerspective/WarpPerspective.pde) 216 | 217 | #### MarkerDetection 218 | 219 | An in-depth advanced example. Detect a CV marker in an image, warp perspective, and detect the number stored in the marker. Many steps in the code. Uses many un-wrapped OpenCV objects and functions. 220 | 221 | Screen Shot 2013-04-12 at 12.20.17 AM 222 | 223 | Code: [MarkerDetection.pde](https://github.com/atduskgreg/opencv-processing/blob/master/examples/MarkerDetection/MarkerDetection.pde) 224 | 225 | #### MorphologyOperations 226 | 227 | Open and close an image, or do more complicated morphological transformations. 
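An "open" is an erode followed by a dilate (it removes small specks); a "close" is the reverse (it fills small holes). One way to perform these in a single step is to call the unwrapped Java API directly, in the same style as the other advanced examples. A minimal sketch follows; the file name, kernel shape, and sizes are illustrative assumptions, not taken from the example code:

```java
import gab.opencv.*;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

OpenCV opencv;
PImage opened;

void setup() {
  PImage src = loadImage("test.jpg");  // assumed input image
  size(1080, 720);

  opencv = new OpenCV(this, src);
  opencv.gray();
  opencv.threshold(100);  // morphology is usually applied to a binary image

  // MORPH_OPEN removes specks; Imgproc.MORPH_CLOSE would fill holes instead
  Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
  Mat result = OpenCV.imitate(opencv.getGray());
  Imgproc.morphologyEx(opencv.getGray(), result, Imgproc.MORPH_OPEN, kernel);

  opened = createImage(result.width(), result.height(), RGB);
  opencv.toPImage(result, opened);
}

void draw() {
  image(opened, 0, 0);
}
```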
228 | 229 | Morphology operations 230 | 231 | Code: [MorphologyOperations.pde](examples/MorphologyOperations/MorphologyOperations.pde) 232 | -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | id 'java-library' 3 | id 'org.bytedeco.gradle-javacpp-platform' version "1.5.10" 4 | } 5 | 6 | group 'gab.opencv' 7 | version '0.8.0' 8 | 9 | def javaCvVersion = '1.5.10' 10 | 11 | // We can set this on the command line too this way: -PjavacppPlatform=linux-x86_64,macosx-x86_64,windows-x86_64,etc 12 | ext { 13 | javacppPlatform = 'linux-x86_64,macosx-x86_64,macosx-arm64,windows-x86_64,linux-armhf,linux-arm64' // defaults to Loader.getPlatform() 14 | } 15 | 16 | sourceCompatibility = 1.8 17 | 18 | repositories { 19 | mavenCentral() 20 | maven { url 'https://jitpack.io' } 21 | } 22 | 23 | configurations { 24 | jar.archiveName = outputName + '.jar' 25 | } 26 | 27 | javadoc { 28 | source = sourceSets.main.allJava 29 | } 30 | 31 | dependencies { 32 | // compile 33 | testImplementation group: 'junit', name: 'junit', version: '4.13.1' 34 | 35 | // opencv 36 | implementation group: 'org.bytedeco', name: 'opencv-platform', version: "4.9.0-$javaCvVersion" 37 | implementation group: 'org.bytedeco', name: 'openblas-platform', version: "0.3.26-$javaCvVersion" 38 | 39 | // processing 40 | implementation fileTree(include: ["core.jar", "jogl-all-main.jar", "gluegen-rt-main.jar"], dir: 'core-libs') 41 | } 42 | 43 | task fatJar(type: Jar) { 44 | archiveFileName = "$outputName-complete.jar" 45 | duplicatesStrategy = DuplicatesStrategy.EXCLUDE 46 | dependsOn configurations.runtimeClasspath 47 | from { 48 | (configurations.runtimeClasspath).filter( {! (it.name =~ /core.jar/ || 49 | it.name =~ /jogl-all-main.jar/ || 50 | it.name =~ /gluegen-rt-main.jar/)}).collect { 51 | it.isDirectory() ? 
it : zipTree(it) 52 | } 53 | } 54 | with jar 55 | } 56 | 57 | // add processing library support 58 | apply from: "processing-library.gradle" -------------------------------------------------------------------------------- /core-libs/core.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/core-libs/core.jar -------------------------------------------------------------------------------- /core-libs/gluegen-rt.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/core-libs/gluegen-rt.jar -------------------------------------------------------------------------------- /core-libs/jogl-all.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/core-libs/jogl-all.jar -------------------------------------------------------------------------------- /data/cascade-files/haarcascade_mcs_leftear.xml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/data/cascade-files/haarcascade_mcs_leftear.xml -------------------------------------------------------------------------------- /data/cascade-files/haarcascade_mcs_rightear.xml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/data/cascade-files/haarcascade_mcs_rightear.xml -------------------------------------------------------------------------------- /examples/BackgroundSubtraction/BackgroundSubtraction.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import processing.video.*; 3 | 4 | Movie video; 5 | OpenCV opencv; 6 | 7 | void setup() { 8 | size(720, 480); 9 | video = new Movie(this, "street.mov"); 10 | opencv = new OpenCV(this, 720, 480); 11 | 12 | opencv.startBackgroundSubtraction(5, 3, 0.5); 13 | 14 | video.loop(); 15 | video.play(); 16 | } 17 | 18 | void draw() { 19 | image(video, 0, 0); 20 | 21 | if (video.width == 0 || video.height == 0) 22 | return; 23 | 24 | opencv.loadImage(video); 25 | opencv.updateBackground(); 26 | 27 | opencv.dilate(); 28 | opencv.erode(); 29 | 30 | noFill(); 31 | stroke(255, 0, 0); 32 | strokeWeight(3); 33 | for (Contour contour : opencv.findContours()) { 34 | contour.draw(); 35 | } 36 | } 37 | 38 | void movieEvent(Movie m) { 39 | m.read(); 40 | } 41 | -------------------------------------------------------------------------------- /examples/BackgroundSubtraction/data/street.mov: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/BackgroundSubtraction/data/street.mov -------------------------------------------------------------------------------- /examples/BrightestPoint/BrightestPoint.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | 5 | void setup() { 6 | PImage src = loadImage("robot_light.jpg"); 7 | src.resize(800, 0); 8 | size(800, 533); 9 | 10 | opencv = new OpenCV(this, src); 11 
| } 12 | 13 | void draw() { 14 | image(opencv.getOutput(), 0, 0); 15 | PVector loc = opencv.max(); 16 | 17 | stroke(255, 0, 0); 18 | strokeWeight(4); 19 | noFill(); 20 | ellipse(loc.x, loc.y, 10, 10); 21 | } -------------------------------------------------------------------------------- /examples/BrightestPoint/robot_light.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/BrightestPoint/robot_light.jpg -------------------------------------------------------------------------------- /examples/BrightnessContrast/BrightnessContrast.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage img; 4 | OpenCV opencv; 5 | 6 | void setup(){ 7 | img = loadImage("test.jpg"); 8 | size(1080, 720); 9 | opencv = new OpenCV(this, img); 10 | } 11 | 12 | void draw(){ 13 | opencv.loadImage(img); 14 | opencv.brightness((int)map(mouseX, 0, width, -255, 255)); 15 | image(opencv.getOutput(),0,0); 16 | } -------------------------------------------------------------------------------- /examples/BrightnessContrast/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/BrightnessContrast/test.jpg -------------------------------------------------------------------------------- /examples/CalibrationDemo/CalibrationDemo.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage src; 4 | ArrayList<PVector> cornerPoints; 5 | OpenCV opencv; 6 | 7 | void setup() { 8 | src = loadImage("checkerboard.jpg"); 9 | src.resize(500, 0); 10 | size(500, 333); 11 | 12 | opencv = new OpenCV(this, src); 13 | opencv.gray(); 14 | 15 | cornerPoints = opencv.findChessboardCorners(9,6); 16 | } 17 | 18 | void draw() { 19 | image( opencv.getOutput(), 0, 0); 20 | fill(255,0,0); 21 | noStroke(); 22 | for(PVector p : cornerPoints){ 23 | ellipse(p.x, p.y, 5, 5); 24 | } 25 | } -------------------------------------------------------------------------------- /examples/CalibrationDemo/data/checkerboard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/CalibrationDemo/data/checkerboard.jpg -------------------------------------------------------------------------------- /examples/ColorChannels/ColorChannels.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | PImage src, r, g, b, h, s, v; 5 | 6 | int imgH, imgW; 7 | 8 | void setup() { 9 | src = loadImage("green_object.png"); 10 | src.resize(800,0); 11 | opencv = new OpenCV(this, src); 12 | size(1200, 672); 13 | 14 | imgH = src.height/2; 15 | imgW = src.width/2; 16 | 17 | r = opencv.getSnapshot(opencv.getR()); 18 | g = opencv.getSnapshot(opencv.getG()); 19 | b = opencv.getSnapshot(opencv.getB()); 20 | 21 | opencv.useColor(HSB); 22 | 23 | h = opencv.getSnapshot(opencv.getH()); 24 | s = opencv.getSnapshot(opencv.getS()); 25 | v = opencv.getSnapshot(opencv.getV()); 26 | } 27 | 28 | void draw() { 29 | background(0); 30 | noTint(); 31 | image(src, imgW,0, imgW, imgH); 32 | 33 | tint(255,0,0); 34 | image(r, 0, imgH, imgW, imgH); 35 | 36 | tint(0,255,0); 37 | image(g, imgW, 
imgH, imgW, imgH); 38 | 39 | tint(0,0,255); 40 | image(b, 2*imgW, imgH, imgW, imgH); 41 | 42 | noTint(); 43 | image(h, 0, 2*imgH, imgW, imgH); 44 | image(s, imgW, 2*imgH, imgW, imgH); 45 | image(v, 2*imgW, 2*imgH, imgW, imgH); 46 | } -------------------------------------------------------------------------------- /examples/ColorChannels/green_object.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ColorChannels/green_object.png -------------------------------------------------------------------------------- /examples/DepthFromStereo/DepthFromStereo.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import org.opencv.core.Mat; 3 | import org.opencv.calib3d.StereoBM; 4 | import org.opencv.core.CvType; 5 | import org.opencv.calib3d.StereoSGBM; 6 | 7 | OpenCV ocvL, ocvR; 8 | PImage imgL, imgR, depth1, depth2; 9 | 10 | void setup() { 11 | 12 | imgL = loadImage("scene_l.jpg"); 13 | imgR = loadImage("scene_r.jpg"); 14 | ocvL = new OpenCV(this, imgL); 15 | 16 | ocvR = new OpenCV(this, imgR); 17 | 18 | size(768, 576); 19 | 20 | ocvL.gray(); 21 | ocvR.gray(); 22 | Mat left = ocvL.getGray(); 23 | Mat right = ocvR.getGray(); 24 | 25 | Mat disparity = OpenCV.imitate(left); 26 | 27 | StereoSGBM stereo = StereoSGBM.create(0, 32, 3, 128, 256, 20, 16, 1, 100, 20, 1); 28 | stereo.compute(left, right, disparity ); 29 | 30 | Mat depthMat = OpenCV.imitate(left); 31 | disparity.convertTo(depthMat, depthMat.type()); 32 | 33 | depth1 = createImage(depthMat.width(), depthMat.height(), RGB); 34 | ocvL.toPImage(depthMat, depth1); 35 | 36 | StereoBM stereo2 = StereoBM.create(); 37 | stereo2.compute(left, right, disparity ); 38 | disparity.convertTo(depthMat, depthMat.type()); 39 | 40 | 41 | depth2 = createImage(depthMat.width(), depthMat.height(), RGB); 42 | ocvL.toPImage(depthMat, depth2); 43 | } 44 | 45 | void draw() { 46 | image(imgL, 0, 0); 47 | image(imgR, imgL.width, 0); 48 | 49 | image(depth1, 0, imgL.height); 50 | image(depth2, imgL.width, imgL.height); 51 | 52 | fill(255, 0, 0); 53 | text("left", 10, 20); 54 | text("right", 10 + imgL.width, 20); 55 | text("stereo SGBM", 10, imgL.height + 20); 56 | text("stereo BM", 10 + imgL.width, imgL.height+ 20); 57 | } 58 | -------------------------------------------------------------------------------- /examples/DepthFromStereo/scene_l.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/DepthFromStereo/scene_l.jpg -------------------------------------------------------------------------------- /examples/DepthFromStereo/scene_r.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/DepthFromStereo/scene_r.jpg -------------------------------------------------------------------------------- /examples/DilationAndErosion/DilationAndErosion.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage src, dilated, eroded, both; 4 | OpenCV opencv; 5 | 6 | void setup() { 7 | src = loadImage("pen_sketch.jpg"); 8 | src.resize(src.width/2, 0); 9 | size(800, 786); 10 | 11 | opencv = new OpenCV(this, src); 12 | 13 | // Dilate and Erode 
both need a binary image 14 | // So, we'll make it gray and threshold it. 15 | opencv.gray(); 16 | opencv.threshold(100); 17 | // We'll also invert so that erosion eats away the lines 18 | // and dilation expands them (rather than vice-versa) 19 | opencv.invert(); 20 | // save a snapshot to use in both operations 21 | src = opencv.getSnapshot(); 22 | 23 | // erode and save snapshot for display 24 | opencv.erode(); 25 | eroded = opencv.getSnapshot(); 26 | 27 | // reload un-eroded image and dilate it 28 | opencv.loadImage(src); 29 | opencv.dilate(); 30 | // save dilated version for display 31 | dilated = opencv.getSnapshot(); 32 | // now erode on top of dilated version to close holes 33 | opencv.erode(); 34 | both = opencv.getSnapshot(); 35 | 36 | noLoop(); 37 | } 38 | 39 | void draw() { 40 | image(src, 0, 0); 41 | image(eroded, src.width, 0); 42 | image(dilated, 0, src.height); 43 | image(both, src.width, src.height); 44 | 45 | fill(0, 255, 0); 46 | text("original", 20, 20); 47 | text("erode", src.width + 20, 20); 48 | text("dilate", 20, src.height+20); 49 | text("dilate then erode\n(close holes)", src.width+20, src.height+20); 50 | } -------------------------------------------------------------------------------- /examples/DilationAndErosion/line_drawing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/DilationAndErosion/line_drawing.jpg -------------------------------------------------------------------------------- /examples/DilationAndErosion/pen_sketch.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/DilationAndErosion/pen_sketch.jpg -------------------------------------------------------------------------------- /examples/FaceDetection/FaceDetection.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import java.awt.Rectangle; 3 | 4 | OpenCV opencv; 5 | Rectangle[] faces; 6 | 7 | void setup() { 8 | opencv = new OpenCV(this, "test.jpg"); 9 | size(1080, 720); 10 | 11 | opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE); 12 | faces = opencv.detect(); 13 | } 14 | 15 | void draw() { 16 | image(opencv.getInput(), 0, 0); 17 | 18 | noFill(); 19 | stroke(0, 255, 0); 20 | strokeWeight(3); 21 | for (int i = 0; i < faces.length; i++) { 22 | rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height); 23 | } 24 | } -------------------------------------------------------------------------------- /examples/FaceDetection/data/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FaceDetection/data/test.jpg -------------------------------------------------------------------------------- /examples/FaceDetection/data/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FaceDetection/data/test.png -------------------------------------------------------------------------------- /examples/FaceDetection/data/testImage.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FaceDetection/data/testImage.png -------------------------------------------------------------------------------- /examples/FaceDetection/data/transparent_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FaceDetection/data/transparent_test.png -------------------------------------------------------------------------------- /examples/FilterImages/FilterImages.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | PImage img, thresh, blur, adaptive; 5 | 6 | void setup() { 7 | img = loadImage("test.jpg"); 8 | size(1080, 720); 9 | 10 | opencv = new OpenCV(this, img); 11 | PImage gray = opencv.getSnapshot(); 12 | 13 | opencv.threshold(80); 14 | thresh = opencv.getSnapshot(); 15 | 16 | opencv.loadImage(gray); 17 | opencv.blur(12); 18 | blur = opencv.getSnapshot(); 19 | 20 | opencv.loadImage(gray); 21 | opencv.adaptiveThreshold(591, 1); 22 | adaptive = opencv.getSnapshot(); 23 | } 24 | 25 | void draw() { 26 | pushMatrix(); 27 | scale(0.5); 28 | image(img, 0, 0); 29 | image(thresh, img.width, 0); 30 | image(blur, 0, img.height); 31 | image(adaptive, img.width, img.height); 32 | popMatrix(); 33 | 34 | fill(0); 35 | text("source", img.width/2 - 100, 20 ); 36 | text("threshold", img.width - 100, 20 ); 37 | text("blur", img.width/2 - 100, img.height/2 + 20 ); 38 | text("adaptive threshold", img.width - 150, img.height/2 + 20 ); 39 | } -------------------------------------------------------------------------------- /examples/FilterImages/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FilterImages/test.jpg -------------------------------------------------------------------------------- /examples/FindContours/FindContours.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage src, dst; 4 | OpenCV opencv; 5 | 6 | ArrayList<Contour> contours; 7 | ArrayList<Contour> polygons; 8 | 9 | void setup() { 10 | src = loadImage("test.jpg"); 11 | size(1080, 360); 12 | opencv = new OpenCV(this, src); 13 | 14 | opencv.gray(); 15 | opencv.threshold(70); 16 | dst = opencv.getOutput(); 17 | 18 | contours = opencv.findContours(); 19 | println("found " + contours.size() + " contours"); 20 | } 21 | 22 | void draw() { 23 | scale(0.5); 24 | image(src, 0, 0); 25 | image(dst, src.width, 0); 26 | 27 | noFill(); 28 | strokeWeight(3); 29 | 30 | for (Contour contour : contours) { 31 | stroke(0, 255, 0); 32 | contour.draw(); 33 | 34 | stroke(255, 0, 0); 35 | beginShape(); 36 | for (PVector point : contour.getPolygonApproximation().getPoints()) { 37 | vertex(point.x, point.y); 38 | } 39 | endShape(); 40 | } 41 | } -------------------------------------------------------------------------------- /examples/FindContours/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FindContours/test.jpg -------------------------------------------------------------------------------- /examples/FindEdges/FindEdges.pde: 
-------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | PImage src, canny, scharr, sobel; 5 | 6 | void setup() { 7 | src = loadImage("test.jpg"); 8 | size(1080, 720); 9 | 10 | opencv = new OpenCV(this, src); 11 | opencv.findCannyEdges(20,75); 12 | canny = opencv.getSnapshot(); 13 | 14 | opencv.loadImage(src); 15 | opencv.findScharrEdges(OpenCV.HORIZONTAL); 16 | scharr = opencv.getSnapshot(); 17 | 18 | opencv.loadImage(src); 19 | opencv.findSobelEdges(1,0); 20 | sobel = opencv.getSnapshot(); 21 | } 22 | 23 | 24 | void draw() { 25 | pushMatrix(); 26 | scale(0.5); 27 | image(src, 0, 0); 28 | image(canny, src.width, 0); 29 | image(scharr, 0, src.height); 30 | image(sobel, src.width, src.height); 31 | popMatrix(); 32 | 33 | text("Source", 10, 25); 34 | text("Canny", src.width/2 + 10, 25); 35 | text("Scharr", 10, src.height/2 + 25); 36 | text("Sobel", src.width/2 + 10, src.height/2 + 25); 37 | } -------------------------------------------------------------------------------- /examples/FindEdges/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FindEdges/test.jpg -------------------------------------------------------------------------------- /examples/FindHistogram/FindHistogram.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | Histogram grayHist, rHist, gHist, bHist; 5 | 6 | PImage img; 7 | 8 | void setup() { 9 | size(640, 400); 10 | img = loadImage("test.jpg"); 11 | opencv = new OpenCV(this, img); 12 | 13 | grayHist = opencv.findHistogram(opencv.getGray(), 256); 14 | rHist = opencv.findHistogram(opencv.getR(), 256); 15 | gHist = opencv.findHistogram(opencv.getG(), 256); 16 | bHist = opencv.findHistogram(opencv.getB(), 256); 17 | } 18 | 19 | void draw() { 20 | background(0); 21 | image(img, 10, 0, 300, 200); 22 | 23 | stroke(125); noFill(); 24 | rect(320, 10, 310, 180); 25 | 26 | fill(125); noStroke(); 27 | grayHist.draw(320, 10, 310, 180); 28 | 29 | stroke(255, 0, 0); noFill(); 30 | rect(10, height - 190, 200, 180); 31 | 32 | fill(255, 0, 0); noStroke(); 33 | rHist.draw(10, height - 190, 200, 180); 34 | 35 | stroke(0, 255, 0); noFill(); 36 | rect(220, height - 190, 200, 180); 37 | 38 | fill(0, 255, 0); noStroke(); 39 | gHist.draw(220, height - 190, 200, 180); 40 | 41 | stroke(0, 0, 255); noFill(); 42 | rect(430, height - 190, 200, 180); 43 | 44 | fill(0, 0, 255); noStroke(); 45 | bHist.draw(430, height - 190, 200, 180); 46 | } 47 | 48 | -------------------------------------------------------------------------------- /examples/FindHistogram/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/FindHistogram/test.jpg -------------------------------------------------------------------------------- /examples/HSVColorTracking/HSVColorTracking.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * HSVColorTracking 3 | * Greg Borenstein 4 | * https://github.com/atduskgreg/opencv-processing-book/blob/master/code/hsv_color_tracking/HSVColorTracking/HSVColorTracking.pde 5 | * 6 | * Modified by Jordi Tost @jorditost (color selection) 7 | * 8 | * University of Applied Sciences Potsdam, 2014 9 | */ 10 | 11 | 
import gab.opencv.*; 12 | import processing.video.*; 13 | import java.awt.Rectangle; 14 | 15 | Capture video; 16 | OpenCV opencv; 17 | PImage src, colorFilteredImage; 18 | ArrayList<Contour> contours; 19 | 20 | // <1> Set the range of Hue values for our filter 21 | int rangeLow = 20; 22 | int rangeHigh = 35; 23 | 24 | void setup() { 25 | video = new Capture(this, "pipeline:autovideosrc"); 26 | video.start(); 27 | 28 | opencv = new OpenCV(this, video.width, video.height); 29 | contours = new ArrayList<Contour>(); 30 | 31 | size(1280, 480, P2D); 32 | } 33 | 34 | void draw() { 35 | 36 | // Read last captured frame 37 | if (video.available()) { 38 | video.read(); 39 | } 40 | 41 | // <2> Load the new frame of our camera into OpenCV 42 | opencv.loadImage(video); 43 | 44 | // Tell OpenCV to use color information 45 | opencv.useColor(); 46 | src = opencv.getSnapshot(); 47 | 48 | // <3> Tell OpenCV to work in HSV color space. 49 | opencv.useColor(HSB); 50 | 51 | // <4> Copy the Hue channel of our image into 52 | // the gray channel, which we process. 53 | opencv.setGray(opencv.getH().clone()); 54 | 55 | // <5> Filter the image based on the range of 56 | // hue values that match the object we want to track. 57 | opencv.inRange(rangeLow, rangeHigh); 58 | 59 | // <6> Get the processed image for reference. 60 | colorFilteredImage = opencv.getSnapshot(); 61 | 62 | /////////////////////////////////////////// 63 | // We could process our image here! 64 | // See ImageFiltering.pde 65 | /////////////////////////////////////////// 66 | 67 | // <7> Find contours in our range image. 68 | // Passing 'true' sorts them by descending area. 69 | contours = opencv.findContours(true, true); 70 | 71 | // <8> Display background images 72 | image(video, 0, 0); 73 | image(colorFilteredImage, src.width, 0); 74 | 75 | // <9> Check to make sure we've found any contours 76 | if (contours.size() > 0) { 77 | // <9> Get the first contour, which will be the largest one 78 | Contour biggestContour = contours.get(0); 79 | 80 | // <10> Find the bounding box of the largest contour, 81 | // and hence our object. 82 | Rectangle r = biggestContour.getBoundingBox(); 83 | 84 | // <11> Draw the bounding box of our object 85 | noFill(); 86 | strokeWeight(2); 87 | stroke(255, 0, 0); 88 | rect(r.x, r.y, r.width, r.height); 89 | 90 | // <12> Draw a dot in the middle of the bounding box, on the object. 
91 | noStroke(); 92 | fill(255, 0, 0); 93 | ellipse(r.x + r.width/2, r.y + r.height/2, 30, 30); 94 | } 95 | } 96 | 97 | void mousePressed() { 98 | 99 | color c = get(mouseX, mouseY); 100 | println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c)); 101 | 102 | int hue = int(map(hue(c), 0, 255, 0, 180)); 103 | println("hue to detect: " + hue); 104 | 105 | rangeLow = hue - 5; 106 | rangeHigh = hue + 5; 107 | } 108 | -------------------------------------------------------------------------------- /examples/HSVColorTracking/screenshots/hsv_color_tracking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HSVColorTracking/screenshots/hsv_color_tracking.png -------------------------------------------------------------------------------- /examples/HistogramSkinDetection/HistogramSkinDetection.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | import org.opencv.core.Core; 4 | import org.opencv.core.Mat; 5 | import org.opencv.core.Size; 6 | import org.opencv.core.Point; 7 | import org.opencv.core.Scalar; 8 | import org.opencv.core.CvType; 9 | import org.opencv.imgproc.Imgproc; 10 | 11 | OpenCV opencv; 12 | PImage src,dst, hist, histMask; 13 | 14 | Mat skinHistogram; 15 | 16 | void setup(){ 17 | src = loadImage("test.jpg"); 18 | src.resize(src.width/2, 0); 19 | size(1336, 360); 20 | // third argument is: useColor 21 | opencv = new OpenCV(this, src, true); 22 | 23 | skinHistogram = Mat.zeros(256, 256, CvType.CV_8UC1); 24 | Imgproc.ellipse(skinHistogram, new Point(113.0, 155.6), new Size(40.0, 25.2), 43.0, 0.0, 360.0, new Scalar(255, 255, 255), Core.FILLED); 25 | 26 | histMask = createImage(256,256, ARGB); 27 | opencv.toPImage(skinHistogram, histMask); 28 | hist = loadImage("cb-cr.png"); 29 | hist.blend(histMask, 0,0,256,256,0,0,256,256, ADD); 30 | 31 | dst = opencv.getOutput(); 32 | dst.loadPixels(); 33 | 34 | for(int i = 0; i < dst.pixels.length; i++){ 35 | 36 | Mat input = new Mat(new Size(1, 1), CvType.CV_8UC3); 37 | input.setTo(colorToScalar(dst.pixels[i])); 38 | Mat output = opencv.imitate(input); 39 | Imgproc.cvtColor(input, output, Imgproc.COLOR_BGR2YCrCb ); 40 | double[] inputComponents = output.get(0,0); 41 | if(skinHistogram.get((int)inputComponents[1], (int)inputComponents[2])[0] > 0){ 42 | dst.pixels[i] = color(255); 43 | } else { 44 | dst.pixels[i] = color(0); 45 | } 46 | } 47 | 48 | dst.updatePixels(); 49 | } 50 | 51 | // in BGR 52 | Scalar colorToScalar(color c){ 53 | return new Scalar(blue(c), green(c), red(c)); 54 | } 55 | 56 | 57 | void draw(){ 58 | image(src,0,0); 59 | image(dst, src.width, 0); 60 | image(hist, src.width*2, 0); 61 | } 62 | -------------------------------------------------------------------------------- /examples/HistogramSkinDetection/data/cb-cr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HistogramSkinDetection/data/cb-cr.png -------------------------------------------------------------------------------- /examples/HistogramSkinDetection/data/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HistogramSkinDetection/data/test.jpg 
-------------------------------------------------------------------------------- /examples/HoughLineDetection/HoughLineDetection.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | ArrayList<Line> lines; 5 | 6 | void setup() { 7 | PImage src = loadImage("film_scan.jpg"); 8 | src.resize(0, 800); 9 | size(796, 800); 10 | 11 | opencv = new OpenCV(this, src); 12 | opencv.findCannyEdges(20, 75); 13 | 14 | // Find lines with Hough line detection 15 | // Arguments are: threshold, minLineLength, maxLineGap 16 | lines = opencv.findLines(100, 30, 20); 17 | } 18 | 19 | void draw() { 20 | image(opencv.getOutput(), 0, 0); 21 | strokeWeight(3); 22 | 23 | for (Line line : lines) { 24 | // lines include angle in radians, measured in double precision 25 | // so we can select out vertical and horizontal lines 26 | // They also include "start" and "end" PVectors with the position 27 | if (line.angle >= radians(0) && line.angle < radians(1)) { 28 | stroke(0, 255, 0); 29 | line(line.start.x, line.start.y, line.end.x, line.end.y); 30 | } 31 | 32 | if (line.angle > radians(89) && line.angle < radians(91)) { 33 | stroke(255, 0, 0); 34 | line(line.start.x, line.start.y, line.end.x, line.end.y); 35 | } 36 | } 37 | } -------------------------------------------------------------------------------- /examples/HoughLineDetection/film_scan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HoughLineDetection/film_scan.jpg -------------------------------------------------------------------------------- /examples/HueRangeSelection/HueRangeSelection.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage img; 4 | OpenCV opencv; 5 | Histogram histogram; 6 | 7 | int lowerb = 50; 8 | int upperb = 100; 9 | 10 | void setup() { 11 | img = loadImage("colored_balls.jpg"); 12 | opencv = new OpenCV(this, img); 13 | size(1024, 768); 14 | opencv.useColor(HSB); 15 | } 16 | 17 | void draw() { 18 | opencv.loadImage(img); 19 | 20 | image(img, 0, 0); 21 | 22 | opencv.setGray(opencv.getH().clone()); 23 | opencv.inRange(lowerb, upperb); 24 | histogram = opencv.findHistogram(opencv.getH(), 255); 25 | 26 | image(opencv.getOutput(), 3*width/4, 3*height/4, width/4,height/4); 27 | 28 | noStroke(); fill(0); 29 | histogram.draw(10, height - 230, 400, 200); 30 | noFill(); stroke(0); 31 | line(10, height-30, 410, height-30); 32 | 33 | text("Hue", 10, height - (textAscent() + textDescent())); 34 | 35 | float lb = map(lowerb, 0, 255, 0, 400); 36 | float ub = map(upperb, 0, 255, 0, 400); 37 | 38 | stroke(255, 0, 0); fill(255, 0, 0); 39 | strokeWeight(2); 40 | line(lb + 10, height-30, ub +10, height-30); 41 | ellipse(lb+10, height-30, 3, 3 ); 42 | text(lowerb, lb-10, height-15); 43 | ellipse(ub+10, height-30, 3, 3 ); 44 | text(upperb, ub+10, height-15); 45 | } 46 | 47 | void mouseMoved() { 48 | if (keyPressed) { 49 | upperb += mouseX - pmouseX; 50 | } 51 | else { 52 | if (upperb < 255 || (mouseX - pmouseX) < 0) { 53 | lowerb += mouseX - pmouseX; 54 | } 55 | 56 | if (lowerb > 0 || (mouseX - pmouseX) > 0) { 57 | upperb += mouseX - pmouseX; 58 | } 59 | } 60 | 61 | upperb = constrain(upperb, lowerb, 255); 62 | lowerb = constrain(lowerb, 0, upperb-1); 63 | } -------------------------------------------------------------------------------- 
/examples/HueRangeSelection/colored_balls.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HueRangeSelection/colored_balls.jpg -------------------------------------------------------------------------------- /examples/HueRangeSelection/rainbow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/HueRangeSelection/rainbow.jpg -------------------------------------------------------------------------------- /examples/ImageDiff/ImageDiff.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | PImage before, after, grayDiff; 5 | //PImage colorDiff; 6 | void setup() { 7 | before = loadImage("before.jpg"); 8 | after = loadImage("after.jpg"); 9 | size(640, 480); 10 | 11 | opencv = new OpenCV(this, before); 12 | opencv.diff(after); 13 | grayDiff = opencv.getSnapshot(); 14 | 15 | // opencv.useColor(); 16 | // opencv.loadImage(after); 17 | // opencv.diff(after); 18 | // colorDiff = opencv.getSnapshot(); 19 | 20 | } 21 | 22 | void draw() { 23 | pushMatrix(); 24 | scale(0.5); 25 | image(before, 0, 0); 26 | image(after, before.width, 0); 27 | // image(colorDiff, 0, before.height); 28 | image(grayDiff, before.width, before.height); 29 | popMatrix(); 30 | 31 | fill(255); 32 | text("before", 10, 20); 33 | text("after", before.width/2 +10, 20); 34 | text("gray diff", before.width/2 + 10, before.height/2+ 20); 35 | 36 | // text("color diff", 10, before.height/2+ 20); 37 | } -------------------------------------------------------------------------------- /examples/ImageDiff/after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ImageDiff/after.jpg -------------------------------------------------------------------------------- /examples/ImageDiff/before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ImageDiff/before.jpg -------------------------------------------------------------------------------- /examples/ImageFiltering/ImageFiltering.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * Image Filtering 3 | * This sketch performs some image filtering (threshold, blur) and contour detection 4 | * 5 | * @author: Jordi Tost (@jorditost) 6 | * @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFiltering 7 | * 8 | * University of Applied Sciences Potsdam, 2014 9 | * 10 | * It requires the ControlP5 Processing library: 11 | * http://www.sojamo.de/libraries/controlP5/ 12 | */ 13 | 14 | import gab.opencv.*; 15 | import java.awt.Rectangle; 16 | import processing.video.*; 17 | import controlP5.*; 18 | 19 | OpenCV opencv; 20 | Capture video; 21 | PImage src, preProcessedImage, processedImage, contoursImage; 22 | ArrayList<Contour> contours; 23 | 24 | float contrast = 1.35; 25 | int brightness = 0; 26 | int threshold = 75; 27 | boolean useAdaptiveThreshold = false; // use basic thresholding 28 | int thresholdBlockSize = 489; 29 | int thresholdConstant = 45; 30 | int blobSizeThreshold = 20; 31 | int blurSize = 4; 
32 | 33 | // Control vars 34 | ControlP5 cp5; 35 | int buttonColor; 36 | int buttonBgColor; 37 | 38 | void setup() { 39 | frameRate(15); 40 | 41 | video = new Capture(this, "pipeline:autovideosrc"); 42 | video.start(); 43 | 44 | opencv = new OpenCV(this, 640, 480); 45 | contours = new ArrayList<Contour>(); 46 | 47 | size(840, 480, P2D); 48 | 49 | // Init Controls 50 | cp5 = new ControlP5(this); 51 | initControls(); 52 | 53 | // Set thresholding 54 | toggleAdaptiveThreshold(useAdaptiveThreshold); 55 | } 56 | 57 | void draw() { 58 | 59 | // Read last captured frame 60 | if (video.available()) { 61 | video.read(); 62 | } 63 | 64 | // Load the new frame of our camera into OpenCV 65 | opencv.loadImage(video); 66 | src = opencv.getSnapshot(); 67 | 68 | /////////////////////////////// 69 | // <1> PRE-PROCESS IMAGE 70 | // - Gray channel 71 | // - Brightness / Contrast 72 | /////////////////////////////// 73 | 74 | // Gray channel 75 | opencv.gray(); 76 | 77 | //opencv.brightness(brightness); 78 | opencv.contrast(contrast); 79 | 80 | // Save snapshot for display 81 | preProcessedImage = opencv.getSnapshot(); 82 | 83 | /////////////////////////////// 84 | // <2> PROCESS IMAGE 85 | // - Threshold 86 | // - Noise Suppression 87 | /////////////////////////////// 88 | 89 | // Adaptive threshold - Good when non-uniform illumination 90 | if (useAdaptiveThreshold) { 91 | 92 | // Block size must be odd and greater than 3 93 | if (thresholdBlockSize%2 == 0) thresholdBlockSize++; 94 | if (thresholdBlockSize < 3) thresholdBlockSize = 3; 95 | 96 | opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant); 97 | 98 | // Basic threshold - range [0, 255] 99 | } else { 100 | opencv.threshold(threshold); 101 | } 102 | 103 | // Invert (black bg, white blobs) 104 | opencv.invert(); 105 | 106 | // Reduce noise - Dilate and erode to close holes 107 | opencv.dilate(); 108 | opencv.erode(); 109 | 110 | // Blur 111 | opencv.blur(blurSize); 112 | 113 | // Save snapshot for display 114 | processedImage = opencv.getSnapshot(); 115 | 116 | /////////////////////////////// 117 | // <3> FIND CONTOURS 118 | /////////////////////////////// 119 | 120 | // Passing 'true' sorts them by descending area. 
121 | contours = opencv.findContours(true, true); 122 | 123 | // Save snapshot for display 124 | contoursImage = opencv.getSnapshot(); 125 | 126 | // Draw 127 | pushMatrix(); 128 | 129 | // Leave space for ControlP5 sliders 130 | translate(width-src.width, 0); 131 | 132 | // Display images 133 | displayImages(); 134 | 135 | // Display contours in the lower right window 136 | pushMatrix(); 137 | scale(0.5); 138 | translate(src.width, src.height); 139 | 140 | displayContours(); 141 | displayContoursBoundingBoxes(); 142 | 143 | popMatrix(); 144 | 145 | popMatrix(); 146 | } 147 | 148 | ///////////////////// 149 | // Display Methods 150 | ///////////////////// 151 | 152 | void displayImages() { 153 | 154 | pushMatrix(); 155 | scale(0.5); 156 | image(src, 0, 0); 157 | image(preProcessedImage, src.width, 0); 158 | image(processedImage, 0, src.height); 159 | image(src, src.width, src.height); 160 | popMatrix(); 161 | 162 | stroke(255); 163 | fill(255); 164 | text("Source", 10, 25); 165 | text("Pre-processed Image", src.width/2 + 10, 25); 166 | text("Processed Image", 10, src.height/2 + 25); 167 | text("Tracked Points", src.width/2 + 10, src.height/2 + 25); 168 | } 169 | 170 | void displayContours() { 171 | 172 | for (int i=0; i<contours.size(); i++) { 173 | Contour contour = contours.get(i); 174 | noFill(); 175 | stroke(0, 255, 0); 176 | strokeWeight(3); 177 | contour.draw(); 178 | } 179 | } 180 | 181 | void displayContoursBoundingBoxes() { 182 | 183 | for (int i=0; i<contours.size(); i++) { 184 | 185 | Contour contour = contours.get(i); 186 | Rectangle r = contour.getBoundingBox(); 187 | 188 | // Skip boxes that fill almost the whole frame or fall below the size threshold 189 | 190 | if ((r.width * r.height > 0.9 * src.width * src.height) || 191 | (r.width < blobSizeThreshold || r.height < blobSizeThreshold)) 192 | continue; 193 | 194 | stroke(255, 0, 0); 195 | fill(255, 0, 0, 150); 196 | strokeWeight(2); 197 | rect(r.x, r.y, r.width, r.height); 198 | } 199 | } 200 | 201 | ////////////////////////// 202 | // CONTROL P5 Functions 203 | ////////////////////////// 204 | 205 | void initControls() { 206 | // Slider for contrast 207 | cp5.addSlider("contrast") 208 | .setLabel("contrast") 209 | .setPosition(20, 50) 210 | .setRange(0.0, 6.0) 211 | ; 212 | 213 | // Slider for threshold 214 | cp5.addSlider("threshold") 215 | .setLabel("threshold") 216 | .setPosition(20, 110) 217 | .setRange(0, 255) 218 | ; 219 | 220 | // Toggle to activate adaptive threshold 221 | cp5.addToggle("toggleAdaptiveThreshold") 222 | .setLabel("use adaptive threshold") 223 | .setSize(10, 10) 224 | .setPosition(20, 144) 225 | ; 226 | 227 | // Slider for adaptive threshold block size 228 | cp5.addSlider("thresholdBlockSize") 229 | .setLabel("a.t. block size") 230 | .setPosition(20, 180) 231 | .setRange(1, 700) 232 | ; 233 | 234 | // Slider for adaptive threshold constant 235 | cp5.addSlider("thresholdConstant") 236 | .setLabel("a.t. 
constant") 237 | .setPosition(20, 200) 238 | .setRange(-100, 100) 239 | ; 240 | 241 | // Slider for blur size 242 | cp5.addSlider("blurSize") 243 | .setLabel("blur size") 244 | .setPosition(20, 260) 245 | .setRange(1, 20) 246 | ; 247 | 248 | // Slider for minimum blob size 249 | cp5.addSlider("blobSizeThreshold") 250 | .setLabel("min blob size") 251 | .setPosition(20, 290) 252 | .setRange(0, 60) 253 | ; 254 | 255 | // Store the default background color, we gonna need it later 256 | buttonColor = cp5.getController("contrast").getColor().getForeground(); 257 | buttonBgColor = cp5.getController("contrast").getColor().getBackground(); 258 | } 259 | 260 | void toggleAdaptiveThreshold(boolean theFlag) { 261 | 262 | useAdaptiveThreshold = theFlag; 263 | 264 | if (useAdaptiveThreshold) { 265 | 266 | // Lock basic threshold 267 | setLock(cp5.getController("threshold"), true); 268 | 269 | // Unlock adaptive threshold 270 | setLock(cp5.getController("thresholdBlockSize"), false); 271 | setLock(cp5.getController("thresholdConstant"), false); 272 | } else { 273 | 274 | // Unlock basic threshold 275 | setLock(cp5.getController("threshold"), false); 276 | 277 | // Lock adaptive threshold 278 | setLock(cp5.getController("thresholdBlockSize"), true); 279 | setLock(cp5.getController("thresholdConstant"), true); 280 | } 281 | } 282 | 283 | void setLock(Controller theController, boolean theValue) { 284 | 285 | theController.setLock(theValue); 286 | 287 | if (theValue) { 288 | theController.setColorBackground(color(150, 150)); 289 | theController.setColorForeground(color(100, 100)); 290 | } else { 291 | theController.setColorBackground(color(buttonBgColor)); 292 | theController.setColorForeground(color(buttonColor)); 293 | } 294 | } 295 | -------------------------------------------------------------------------------- /examples/ImageFiltering/screenshots/objects_basic_threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ImageFiltering/screenshots/objects_basic_threshold.png -------------------------------------------------------------------------------- /examples/ImageFiltering/screenshots/touch_adaptive_threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ImageFiltering/screenshots/touch_adaptive_threshold.png -------------------------------------------------------------------------------- /examples/ImageFilteringWithBlobPersistence/Blob.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * Blob Class 3 | * 4 | * Based on this example by Daniel Shiffman: 5 | * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/ 6 | * 7 | * @author: Jordi Tost (@jorditost) 8 | * 9 | * University of Applied Sciences Potsdam, 2014 10 | */ 11 | 12 | class Blob { 13 | 14 | private PApplet parent; 15 | 16 | // Contour 17 | public Contour contour; 18 | 19 | // Am I available to be matched? 20 | public boolean available; 21 | 22 | // Should I be deleted? 23 | public boolean delete; 24 | 25 | // How long should I live if I have disappeared? 
26 | private int initTimer = 5; //127; 27 | public int timer; 28 | 29 | // Unique ID for each blob 30 | int id; 31 | 32 | // Make me 33 | Blob(PApplet parent, int id, Contour c) { 34 | this.parent = parent; 35 | this.id = id; 36 | this.contour = new Contour(parent, c.pointMat); 37 | 38 | available = true; 39 | delete = false; 40 | 41 | timer = initTimer; 42 | } 43 | 44 | // Show me 45 | void display() { 46 | Rectangle r = contour.getBoundingBox(); 47 | 48 | float opacity = map(timer, 0, initTimer, 0, 127); 49 | fill(0,0,255,opacity); 50 | stroke(0,0,255); 51 | rect(r.x, r.y, r.width, r.height); 52 | fill(255,2*opacity); 53 | textSize(26); 54 | text(""+id, r.x+10, r.y+30); 55 | } 56 | 57 | // Give me a new contour for this blob (shape, points, location, size) 58 | // Oooh, it would be nice to lerp here! 59 | void update(Contour newC) { 60 | 61 | contour = new Contour(parent, newC.pointMat); 62 | 63 | // Is there a way to update the contour's points without creating a new one? 64 | /*ArrayList<PVector> newPoints = newC.getPoints(); 65 | Point[] inputPoints = new Point[newPoints.size()]; 66 | 67 | for(int i = 0; i < newPoints.size(); i++){ 68 | inputPoints[i] = new Point(newPoints.get(i).x, newPoints.get(i).y); 69 | } 70 | contour.loadPoints(inputPoints);*/ 71 | 72 | timer = initTimer; 73 | } 74 | 75 | // Count me down, I am gone 76 | void countDown() { 77 | timer--; 78 | } 79 | 80 | // I am dead, delete me 81 | boolean dead() { 82 | if (timer < 0) return true; 83 | return false; 84 | } 85 | 86 | public Rectangle getBoundingBox() { 87 | return contour.getBoundingBox(); 88 | } 89 | } 90 | 91 | -------------------------------------------------------------------------------- /examples/ImageFilteringWithBlobPersistence/ImageFilteringWithBlobPersistence.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * Image Filtering 3 | * This sketch will help us to adjust the filter values to optimize blob detection 4 | * 5 | * Persistence algorithm by Daniel Shiffman: 6 | * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/ 7 | * 8 | * @author: Jordi Tost (@jorditost) 9 | * @url: https://github.com/jorditost/ImageFiltering/tree/master/ImageFilteringWithBlobPersistence 10 | * 11 | * University of Applied Sciences Potsdam, 2014 12 | * 13 | * It requires the ControlP5 Processing library: 14 | * http://www.sojamo.de/libraries/controlP5/ 15 | */ 16 | 17 | import gab.opencv.*; 18 | import java.awt.Rectangle; 19 | import processing.video.*; 20 | import controlP5.*; 21 | 22 | OpenCV opencv; 23 | Capture video; 24 | PImage src, preProcessedImage, processedImage, contoursImage; 25 | 26 | ArrayList<Contour> contours; 27 | 28 | // List of detected contours parsed as blobs (every frame) 29 | ArrayList<Contour> newBlobContours; 30 | 31 | // List of my blob objects (persistent) 32 | ArrayList<Blob> blobList; 33 | 34 | 35 | // Number of blobs detected over all time. Used to set IDs. 
36 | int blobCount = 0; 37 | 38 | float contrast = 1.35; 39 | int brightness = 0; 40 | int threshold = 75; 41 | boolean useAdaptiveThreshold = false; // use basic thresholding 42 | int thresholdBlockSize = 489; 43 | int thresholdConstant = 45; 44 | int blobSizeThreshold = 20; 45 | int blurSize = 4; 46 | 47 | // Control vars 48 | ControlP5 cp5; 49 | int buttonColor; 50 | int buttonBgColor; 51 | 52 | void setup() { 53 | frameRate(15); 54 | 55 | video = new Capture(this, "pipeline:autovideosrc"); 56 | //video = new Capture(this, 640, 480, "USB2.0 PC CAMERA"); 57 | video.start(); 58 | 59 | opencv = new OpenCV(this, 640, 480); 60 | contours = new ArrayList<Contour>(); 61 | 62 | // Blobs list 63 | blobList = new ArrayList<Blob>(); 64 | 65 | size(840, 480, P2D); 66 | 67 | // Init Controls 68 | cp5 = new ControlP5(this); 69 | initControls(); 70 | 71 | // Set thresholding 72 | toggleAdaptiveThreshold(useAdaptiveThreshold); 73 | } 74 | 75 | void draw() { 76 | 77 | // Read last captured frame 78 | if (video.available()) { 79 | video.read(); 80 | } 81 | 82 | // Load the new frame from our camera into OpenCV 83 | opencv.loadImage(video); 84 | src = opencv.getSnapshot(); 85 | 86 | /////////////////////////////// 87 | // <1> PRE-PROCESS IMAGE 88 | // - Gray channel 89 | // - Brightness / Contrast 90 | /////////////////////////////// 91 | 92 | // Gray channel 93 | opencv.gray(); 94 | 95 | //opencv.brightness(brightness); 96 | opencv.contrast(contrast); 97 | 98 | // Save snapshot for display 99 | preProcessedImage = opencv.getSnapshot(); 100 | 101 | /////////////////////////////// 102 | // <2> PROCESS IMAGE 103 | // - Threshold 104 | // - Noise Suppression 105 | /////////////////////////////// 106 | 107 | // Adaptive threshold - good under non-uniform illumination 108 | if (useAdaptiveThreshold) { 109 | 110 | // Block size must be odd and at least 3 111 | if (thresholdBlockSize%2 == 0) thresholdBlockSize++; 112 | if (thresholdBlockSize < 3) thresholdBlockSize = 3; 113 | 114 | opencv.adaptiveThreshold(thresholdBlockSize, thresholdConstant); 115 | 116 | // Basic threshold - range [0, 255] 117 | } else { 118 | opencv.threshold(threshold); 119 | } 120 | 121 | // Invert (black bg, white blobs) 122 | opencv.invert(); 123 | 124 | // Reduce noise - Dilate and erode to close holes 125 | opencv.dilate(); 126 | opencv.erode(); 127 | 128 | // Blur 129 | opencv.blur(blurSize); 130 | 131 | // Save snapshot for display 132 | processedImage = opencv.getSnapshot(); 133 | 134 | /////////////////////////////// 135 | // <3> FIND CONTOURS 136 | /////////////////////////////// 137 | 138 | detectBlobs(); 139 | // Passing 'true' sorts them by descending area. 
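// (the call below stays commented out on purpose: detectBlobs() now performs the findContours() call itself and then matches the result against the persistent blob list)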
140 | //contours = opencv.findContours(true, true); 141 | 142 | // Save snapshot for display 143 | contoursImage = opencv.getSnapshot(); 144 | 145 | // Draw 146 | pushMatrix(); 147 | 148 | // Leave space for ControlP5 sliders 149 | translate(width-src.width, 0); 150 | 151 | // Display images 152 | displayImages(); 153 | 154 | // Display contours in the lower right window 155 | pushMatrix(); 156 | scale(0.5); 157 | translate(src.width, src.height); 158 | 159 | // Contours 160 | //displayContours(); 161 | //displayContoursBoundingBoxes(); 162 | 163 | // Blobs 164 | displayBlobs(); 165 | 166 | popMatrix(); 167 | 168 | popMatrix(); 169 | } 170 | 171 | /////////////////////// 172 | // Display Functions 173 | /////////////////////// 174 | 175 | void displayImages() { 176 | 177 | pushMatrix(); 178 | scale(0.5); 179 | image(src, 0, 0); 180 | image(preProcessedImage, src.width, 0); 181 | image(processedImage, 0, src.height); 182 | image(src, src.width, src.height); 183 | popMatrix(); 184 | 185 | stroke(255); 186 | fill(255); 187 | textSize(12); 188 | text("Source", 10, 25); 189 | text("Pre-processed Image", src.width/2 + 10, 25); 190 | text("Processed Image", 10, src.height/2 + 25); 191 | text("Tracked Points", src.width/2 + 10, src.height/2 + 25); 192 | } 193 | 194 | void displayBlobs() { 195 | 196 | for (Blob b : blobList) { 197 | strokeWeight(1); 198 | b.display(); 199 | } 200 | } 201 | 202 | void displayContours() { 203 | 204 | // Contours 205 | for (int i=0; i<contours.size(); i++) { 206 | 207 | Contour contour = contours.get(i); 208 | 209 | noFill(); 210 | stroke(0, 255, 0); 211 | strokeWeight(3); 212 | contour.draw(); 213 | } 214 | } 215 | 216 | void displayContoursBoundingBoxes() { 217 | 218 | for (int i=0; i<contours.size(); i++) { 219 | 220 | Contour contour = contours.get(i); 221 | Rectangle r = contour.getBoundingBox(); 222 | 223 | if (//(contour.area() > 0.9 * src.width * src.height) || 224 | (r.width < blobSizeThreshold || r.height < blobSizeThreshold)) 225 | continue; 226 | 227 | stroke(255, 0, 0); 228 | fill(255, 0, 0, 150); 229 | strokeWeight(2); 230 | rect(r.x, r.y, r.width, r.height); 231 | } 232 | } 233 | 234 | //////////////////// 235 | // Blob Detection 236 | //////////////////// 237 | 238 | void detectBlobs() { 239 | 240 | // Contours detected in this frame 241 | // Passing 'true' sorts them by descending area. 242 | contours = opencv.findContours(true, true); 243 | 244 | newBlobContours = getBlobsFromContours(contours); 245 | 246 | //println(contours.size()); 247 | 248 | // Check whether the detected blobs already exist, are new, or have disappeared. 
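// Matching strategy (greedy nearest neighbour): every existing blob claims the closest unclaimed new contour, with distance measured between the top-left corners of the bounding boxes; leftover contours become new blobs, and blobs that stay unmatched count down until they are deleted.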
249 | 250 | // SCENARIO 1 251 | // blobList is empty 252 | if (blobList.isEmpty()) { 253 | // Just make a Blob object for every detected contour 254 | for (int i = 0; i < newBlobContours.size(); i++) { 255 | println("+++ New blob detected with ID: " + blobCount); 256 | blobList.add(new Blob(this, blobCount, newBlobContours.get(i))); 257 | blobCount++; 258 | } 259 | 260 | // SCENARIO 2 261 | // We have fewer Blob objects than blob contours found from OpenCV in this frame 262 | } else if (blobList.size() <= newBlobContours.size()) { 263 | boolean[] used = new boolean[newBlobContours.size()]; 264 | // Match existing Blob objects with a contour 265 | for (Blob b : blobList) { 266 | // Find the new blob newBlobContours.get(index) that is closest to blob b 267 | // set used[index] to true so that it can't be used twice 268 | float record = 50000; 269 | int index = -1; 270 | for (int i = 0; i < newBlobContours.size(); i++) { 271 | float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y); 272 | //float d = dist(blobs[i].x, blobs[i].y, b.r.x, b.r.y); 273 | if (d < record && !used[i]) { 274 | record = d; 275 | index = i; 276 | } 277 | } 278 | // Update Blob object location 279 | used[index] = true; 280 | b.update(newBlobContours.get(index)); 281 | } 282 | // Add any unused blobs 283 | for (int i = 0; i < newBlobContours.size(); i++) { 284 | if (!used[i]) { 285 | println("+++ New blob detected with ID: " + blobCount); 286 | blobList.add(new Blob(this, blobCount, newBlobContours.get(i))); 287 | //blobList.add(new Blob(blobCount, blobs[i].x, blobs[i].y, blobs[i].width, blobs[i].height)); 288 | blobCount++; 289 | } 290 | } 291 | 292 | // SCENARIO 3 293 | // We have more Blob objects than blob contours found from OpenCV in this frame 294 | } else { 295 | // All Blob objects start out as available 296 | for (Blob b : blobList) { 297 | b.available = true; 298 | } 299 | // Match each contour with a Blob object 300 | for (int i = 0; i < newBlobContours.size(); i++) { 301 | // Find the blob object closest to the newBlobContours.get(i) Contour 302 | // set available to false 303 | float record = 50000; 304 | int index = -1; 305 | for (int j = 0; j < blobList.size(); j++) { 306 | Blob b = blobList.get(j); 307 | float d = dist(newBlobContours.get(i).getBoundingBox().x, newBlobContours.get(i).getBoundingBox().y, b.getBoundingBox().x, b.getBoundingBox().y); 308 | //float d = dist(blobs[i].x, blobs[i].y, b.r.x, b.r.y); 309 | if (d < record && b.available) { 310 | record = d; 311 | index = j; 312 | } 313 | } 314 | // Update Blob object location 315 | Blob b = blobList.get(index); 316 | b.available = false; 317 | b.update(newBlobContours.get(i)); 318 | } 319 | // Start to kill any left over Blob objects 320 | for (Blob b : blobList) { 321 | if (b.available) { 322 | b.countDown(); 323 | if (b.dead()) { 324 | b.delete = true; 325 | } 326 | } 327 | } 328 | } 329 | 330 | // Delete any blob that should be deleted 331 | for (int i = blobList.size()-1; i >= 0; i--) { 332 | Blob b = blobList.get(i); 333 | if (b.delete) { 334 | blobList.remove(i); 335 | } 336 | } 337 | } 338 | 339 | ArrayList<Contour> getBlobsFromContours(ArrayList<Contour> newContours) { 340 | 341 | ArrayList<Contour> newBlobs = new ArrayList<Contour>(); 342 | 343 | // Which of these contours are blobs? 
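// (a contour qualifies as a blob when its bounding box is at least blobSizeThreshold pixels in both dimensions; the commented-out area test below would additionally drop blobs covering almost the whole frame)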
344 | for (int i=0; i<newContours.size(); i++) { 345 | 346 | Contour contour = newContours.get(i); 347 | Rectangle r = contour.getBoundingBox(); 348 | 349 | if (//(contour.area() > 0.9 * src.width * src.height) || 350 | (r.width < blobSizeThreshold || r.height < blobSizeThreshold)) 351 | continue; 352 | 353 | newBlobs.add(contour); 354 | } 355 | 356 | return newBlobs; 357 | } 358 | 359 | ////////////////////////// 360 | // CONTROL P5 Functions 361 | ////////////////////////// 362 | 363 | void initControls() { 364 | // Slider for contrast 365 | cp5.addSlider("contrast") 366 | .setLabel("contrast") 367 | .setPosition(20,50) 368 | .setRange(0.0,6.0) 369 | ; 370 | 371 | // Slider for threshold 372 | cp5.addSlider("threshold") 373 | .setLabel("threshold") 374 | .setPosition(20,110) 375 | .setRange(0,255) 376 | ; 377 | 378 | // Toggle to activate adaptive threshold 379 | cp5.addToggle("toggleAdaptiveThreshold") 380 | .setLabel("use adaptive threshold") 381 | .setSize(10,10) 382 | .setPosition(20,144) 383 | ; 384 | 385 | // Slider for adaptive threshold block size 386 | cp5.addSlider("thresholdBlockSize") 387 | .setLabel("a.t. block size") 388 | .setPosition(20,180) 389 | .setRange(1,700) 390 | ; 391 | 392 | // Slider for adaptive threshold constant 393 | cp5.addSlider("thresholdConstant") 394 | .setLabel("a.t. constant") 395 | .setPosition(20,200) 396 | .setRange(-100,100) 397 | ; 398 | 399 | // Slider for blur size 400 | cp5.addSlider("blurSize") 401 | .setLabel("blur size") 402 | .setPosition(20,260) 403 | .setRange(1,20) 404 | ; 405 | 406 | // Slider for minimum blob size 407 | cp5.addSlider("blobSizeThreshold") 408 | .setLabel("min blob size") 409 | .setPosition(20,290) 410 | .setRange(0,60) 411 | ; 412 | 413 | // Store the default controller colors; we're going to need them later 414 | buttonColor = cp5.getController("contrast").getColor().getForeground(); 415 | buttonBgColor = cp5.getController("contrast").getColor().getBackground(); 416 | } 417 | 418 | void toggleAdaptiveThreshold(boolean theFlag) { 419 | 420 | useAdaptiveThreshold = theFlag; 421 | 422 | if (useAdaptiveThreshold) { 423 | 424 | // Lock basic threshold 425 | setLock(cp5.getController("threshold"), true); 426 | 427 | // Unlock adaptive threshold 428 | setLock(cp5.getController("thresholdBlockSize"), false); 429 | setLock(cp5.getController("thresholdConstant"), false); 430 | 431 | } else { 432 | 433 | // Unlock basic threshold 434 | setLock(cp5.getController("threshold"), false); 435 | 436 | // Lock adaptive threshold 437 | setLock(cp5.getController("thresholdBlockSize"), true); 438 | setLock(cp5.getController("thresholdConstant"), true); 439 | } 440 | } 441 | 442 | void setLock(Controller theController, boolean theValue) { 443 | 444 | theController.setLock(theValue); 445 | 446 | if (theValue) { 447 | theController.setColorBackground(color(150,150)); 448 | theController.setColorForeground(color(100,100)); 449 | 450 | } else { 451 | theController.setColorBackground(color(buttonBgColor)); 452 | theController.setColorForeground(color(buttonColor)); 453 | } 454 | } 455 | -------------------------------------------------------------------------------- /examples/ImageFilteringWithBlobPersistence/screenshots/blob_persistence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/ImageFilteringWithBlobPersistence/screenshots/blob_persistence.png -------------------------------------------------------------------------------- /examples/LiveCamTest/LiveCamTest.pde: -------------------------------------------------------------------------------- 1 | import 
gab.opencv.*; 2 | import processing.video.*; 3 | import java.awt.*; 4 | 5 | Capture video; 6 | OpenCV opencv; 7 | 8 | void setup() { 9 | size(640, 480); 10 | video = new Capture(this, "pipeline:autovideosrc"); 11 | opencv = new OpenCV(this, 640, 480); 12 | opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE); 13 | 14 | video.start(); 15 | } 16 | 17 | void draw() { 18 | opencv.loadImage(video); 19 | 20 | image(video, 0, 0 ); 21 | 22 | noFill(); 23 | stroke(0, 255, 0); 24 | strokeWeight(3); 25 | Rectangle[] faces = opencv.detect(); 26 | println(faces.length); 27 | 28 | for (int i = 0; i < faces.length; i++) { 29 | println(faces[i].x + "," + faces[i].y); 30 | rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height); 31 | } 32 | } 33 | 34 | void captureEvent(Capture c) { 35 | c.read(); 36 | } 37 | -------------------------------------------------------------------------------- /examples/LoadAndDisplayImage/LoadAndDisplayImage.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | 5 | void setup() { 6 | opencv = new OpenCV(this, "test.jpg"); 7 | size(1080, 720); 8 | } 9 | 10 | void draw() { 11 | image(opencv.getOutput(), 0, 0); 12 | } -------------------------------------------------------------------------------- /examples/LoadAndDisplayImage/data/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/LoadAndDisplayImage/data/test.jpg -------------------------------------------------------------------------------- /examples/LoadAndDisplayImage/data/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/LoadAndDisplayImage/data/test.png -------------------------------------------------------------------------------- /examples/LumaVsGray/LumaVsGray.pde: -------------------------------------------------------------------------------- 1 | /* 2 | Luma is a better measure of perceived brightness than 3 | the traditional grayscale created by averaging R, G, and B channels. 4 | This sketch demonstrates converting an image to LAB color space 5 | and accessing the Luma channel for comparison with the more common 6 | grayscale version. Uses the un-wrapped OpenCV cvtColor() function. 7 | 8 | */ 9 | 10 | import gab.opencv.*; 11 | // Import the OpenCV Imgproc class, 12 | // it has the cvtColor() function we need. 13 | import org.opencv.imgproc.Imgproc; 14 | 15 | OpenCV opencv; 16 | PImage colorImage, grayImage; 17 | 18 | void setup() { 19 | colorImage = loadImage("flashlight.jpg"); 20 | opencv = new OpenCV(this, colorImage); 21 | size(1080, 720); 22 | 23 | // Save the gray image so we can compare it to Luma 24 | grayImage = opencv.getSnapshot(); 25 | // Use the built-in OpenCV function to convert the color image from BGR to LAB color space. 
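// (cvtColor() here works in place on the library's color Mat; after the conversion the first channel no longer holds Blue but L - lightness, i.e. Luma - which is why getB(), which reads that first channel of the BGRA ordering, is used below to fetch it)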
26 | Imgproc.cvtColor(opencv.getColor(), opencv.getColor(), Imgproc.COLOR_BGR2Lab); 27 | // Since the channels start out in the order BGRA, 28 | // converting to LAB will put the Luma in the B channel 29 | opencv.setGray(opencv.getB()); 30 | } 31 | 32 | void draw() { 33 | background(0); 34 | pushMatrix(); 35 | scale(0.5); 36 | image(colorImage, colorImage.width/2, 0); 37 | image(grayImage, 0, colorImage.height); 38 | image(opencv.getOutput(), colorImage.width, colorImage.height); 39 | popMatrix(); 40 | 41 | fill(255); 42 | text("GRAY", 30, height -25); 43 | text("LUMA", width/2 + 30, height - 25); 44 | } -------------------------------------------------------------------------------- /examples/LumaVsGray/flashlight.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/LumaVsGray/flashlight.jpg -------------------------------------------------------------------------------- /examples/MarkerDetection/MarkerDetection.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import org.opencv.imgproc.Imgproc; 3 | import org.opencv.core.Core; 4 | 5 | import org.opencv.core.Mat; 6 | import org.opencv.core.MatOfPoint; 7 | import org.opencv.core.MatOfPoint2f; 8 | import org.opencv.core.MatOfPoint2f; 9 | import org.opencv.core.CvType; 10 | 11 | import org.opencv.core.Point; 12 | import org.opencv.core.Size; 13 | 14 | //import java.util.List; 15 | 16 | OpenCV opencv; 17 | PImage src, dst, markerImg; 18 | ArrayList<MatOfPoint> contours; 19 | ArrayList<MatOfPoint2f> approximations; 20 | ArrayList<MatOfPoint2f> markers; 21 | 22 | boolean[][] markerCells; 23 | 24 | void setup() { 25 | opencv = new OpenCV(this, "marker_test.jpg"); 26 | size(1000, 365); 27 | src = opencv.getInput(); 28 | 29 | // hold on to this for later, since adaptiveThreshold is destructive 30 | Mat gray = OpenCV.imitate(opencv.getGray()); 31 | opencv.getGray().copyTo(gray); 32 | 33 | Mat thresholdMat = OpenCV.imitate(opencv.getGray()); 34 | 35 | opencv.blur(5); 36 | 37 | Imgproc.adaptiveThreshold(opencv.getGray(), thresholdMat, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV, 451, -65); 38 | 39 | contours = new ArrayList<MatOfPoint>(); 40 | Imgproc.findContours(thresholdMat, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE); 41 | 42 | approximations = createPolygonApproximations(contours); 43 | 44 | markers = new ArrayList<MatOfPoint2f>(); 45 | markers = selectMarkers(approximations); 46 | 47 | //// Mat markerMat = gray.submat(); 48 | // Mat warped = OpenCV.imitate(gray); 49 | // 50 | MatOfPoint2f canonicalMarker = new MatOfPoint2f(); 51 | Point[] canonicalPoints = new Point[4]; 52 | canonicalPoints[0] = new Point(0, 350); 53 | canonicalPoints[1] = new Point(0, 0); 54 | canonicalPoints[2] = new Point(350, 0); 55 | canonicalPoints[3] = new Point(350, 350); 56 | canonicalMarker.fromArray(canonicalPoints); 57 | 58 | println("num points: " + markers.get(0).height()); 59 | 60 | Mat transform = Imgproc.getPerspectiveTransform(markers.get(0), canonicalMarker); 61 | Mat unWarpedMarker = new Mat(50, 50, CvType.CV_8UC1); 62 | Imgproc.warpPerspective(gray, unWarpedMarker, transform, new Size(350, 350)); 63 | 64 | 65 | Imgproc.threshold(unWarpedMarker, unWarpedMarker, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU); 66 | 67 | float cellSize = 350/7.0; 68 | 69 | markerCells = new boolean[7][7]; 70 | 71 | for (int row = 0; row < 7; row++) { 72 | for (int col = 0; col < 7; 
col++) { 73 | int cellX = int(col*cellSize); 74 | int cellY = int(row*cellSize); 75 | 76 | Mat cell = unWarpedMarker.submat(cellX, cellX +(int)cellSize, cellY, cellY+ (int)cellSize); 77 | markerCells[row][col] = (Core.countNonZero(cell) > (cellSize*cellSize)/2); 78 | } 79 | } 80 | 81 | for (int col = 0; col < 7; col++) { 82 | for (int row = 0; row < 7; row++) { 83 | if (markerCells[row][col]) { 84 | print(1); 85 | } 86 | else { 87 | print(0); 88 | } 89 | } 90 | println(); 91 | } 92 | 93 | dst = createImage(350, 350, RGB); 94 | opencv.toPImage(unWarpedMarker, dst); 95 | } 96 | 97 | 98 | 99 | ArrayList<MatOfPoint2f> selectMarkers(ArrayList<MatOfPoint2f> candidates) { 100 | float minAllowedContourSide = 50; 101 | minAllowedContourSide = minAllowedContourSide * minAllowedContourSide; 102 | 103 | ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>(); 104 | 105 | for (MatOfPoint2f candidate : candidates) { 106 | 107 | if (candidate.size().height != 4) { 108 | continue; 109 | } 110 | 111 | if (!Imgproc.isContourConvex(new MatOfPoint(candidate.toArray()))) { 112 | continue; 113 | } 114 | 115 | // eliminate markers where consecutive 116 | // points are too close together 117 | float minDist = src.width * src.width; 118 | Point[] points = candidate.toArray(); 119 | for (int i = 0; i < points.length; i++) { 120 | Point side = new Point(points[i].x - points[(i+1)%4].x, points[i].y - points[(i+1)%4].y); 121 | float squaredLength = (float)side.dot(side); 122 | // println("minDist: " + minDist + " squaredLength: " +squaredLength); 123 | minDist = min(minDist, squaredLength); 124 | } 125 | 126 | // println(minDist); 127 | 128 | 129 | if (minDist < minAllowedContourSide) { 130 | continue; 131 | } 132 | 133 | result.add(candidate); 134 | } 135 | 136 | return result; 137 | } 138 | 139 | ArrayList<MatOfPoint2f> createPolygonApproximations(ArrayList<MatOfPoint> cntrs) { 140 | ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>(); 141 | 142 | double epsilon = cntrs.get(0).size().height * 0.01; 143 | println(epsilon); 144 | 145 | for (MatOfPoint contour : cntrs) { 146 | MatOfPoint2f approx = new MatOfPoint2f(); 147 | Imgproc.approxPolyDP(new MatOfPoint2f(contour.toArray()), approx, epsilon, true); 148 | result.add(approx); 149 | } 150 | 151 | return result; 152 | } 153 | 154 | void drawContours(ArrayList<MatOfPoint> cntrs) { 155 | for (MatOfPoint contour : cntrs) { 156 | beginShape(); 157 | Point[] points = contour.toArray(); 158 | for (int i = 0; i < points.length; i++) { 159 | vertex((float)points[i].x, (float)points[i].y); 160 | } 161 | endShape(); 162 | } 163 | } 164 | 165 | void drawContours2f(ArrayList<MatOfPoint2f> cntrs) { 166 | for (MatOfPoint2f contour : cntrs) { 167 | beginShape(); 168 | Point[] points = contour.toArray(); 169 | 170 | for (int i = 0; i < points.length; i++) { 171 | vertex((float)points[i].x, (float)points[i].y); 172 | } 173 | endShape(CLOSE); 174 | } 175 | } 176 | 177 | void draw() { 178 | pushMatrix(); 179 | background(125); 180 | scale(0.5); 181 | image(src, 0, 0); 182 | 183 | noFill(); 184 | smooth(); 185 | strokeWeight(5); 186 | stroke(0, 255, 0); 187 | drawContours2f(markers); 188 | popMatrix(); 189 | 190 | pushMatrix(); 191 | translate(src.width/2, 0); 192 | strokeWeight(1); 193 | image(dst, 0, 0); 194 | 195 | float cellSize = dst.width/7.0; 196 | for (int col = 0; col < 7; col++) { 197 | for (int row = 0; row < 7; row++) { 198 | if(markerCells[row][col]){ 199 | fill(255); 200 | } else { 201 | fill(0); 202 | } 203 | stroke(0,255,0); 204 | rect(col*cellSize, row*cellSize, cellSize, cellSize); 205 | //line(i*cellSize, 0, i*cellSize, dst.width); 206 | //line(0, i*cellSize, dst.width, i*cellSize); 
207 | } 208 | } 209 | 210 | popMatrix(); 211 | } -------------------------------------------------------------------------------- /examples/MarkerDetection/marker_test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/MarkerDetection/marker_test.jpg -------------------------------------------------------------------------------- /examples/MorphologyOperations/MorphologyOperations.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | import org.opencv.imgproc.Imgproc; 4 | 5 | OpenCV opencv; 6 | PImage img, opened, closed, tophat; 7 | 8 | void setup() { 9 | img = loadImage("test.jpg"); 10 | size(1280, 720); 11 | 12 | opencv = new OpenCV(this, img); 13 | PImage snap = opencv.getSnapshot(); 14 | 15 | opencv.open(16); 16 | opened = opencv.getSnapshot(); 17 | 18 | opencv.loadImage(snap); 19 | opencv.close(16); 20 | closed = opencv.getSnapshot(); 21 | 22 | opencv.loadImage(snap); 23 | opencv.morphX(Imgproc.MORPH_TOPHAT, Imgproc.MORPH_CROSS, 8, 8); 24 | tophat = opencv.getSnapshot(); 25 | } 26 | 27 | void draw() { 28 | pushMatrix(); 29 | scale(0.5); 30 | image(img, 0, 0); 31 | image(opened, img.width, 0); 32 | image(closed, 0, img.height); 33 | image(tophat, img.width, img.height); 34 | popMatrix(); 35 | 36 | fill(0); 37 | text("source", img.width/2 - 100, 20 ); 38 | text("open(16)", img.width - 100, 20 ); 39 | text("close(16)", img.width/2 - 100, img.height/2 + 20 ); 40 | fill(255); 41 | text("tophat(cross, 8, 8)", img.width - 150, img.height/2 + 20 ); 42 | } -------------------------------------------------------------------------------- /examples/MorphologyOperations/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/MorphologyOperations/test.jpg -------------------------------------------------------------------------------- /examples/MultipleColorTracking/MultipleColorTracking.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * MultipleColorTracking 3 | * Select 4 colors to track them separately 4 | * 5 | * It uses the OpenCV for Processing library by Greg Borenstein 6 | * https://github.com/atduskgreg/opencv-processing 7 | * 8 | * @author: Jordi Tost (@jorditost) 9 | * @url: https://github.com/jorditost/ImageFiltering/tree/master/MultipleColorTracking 10 | * 11 | * University of Applied Sciences Potsdam, 2014 12 | * 13 | * Instructions: 14 | * Press one numerical key [1-4] and click on one color to track it 15 | */ 16 | 17 | import gab.opencv.*; 18 | import processing.video.*; 19 | import java.awt.Rectangle; 20 | 21 | Capture video; 22 | OpenCV opencv; 23 | PImage src; 24 | ArrayList<Contour> contours; 25 | 26 | // <1> Set the range of Hue values for our filter 27 | //ArrayList colors; 28 | int maxColors = 4; 29 | int[] hues; 30 | int[] colors; 31 | int rangeWidth = 10; 32 | 33 | PImage[] outputs; 34 | 35 | int colorToChange = -1; 36 | 37 | void setup() { 38 | video = new Capture(this, "pipeline:autovideosrc"); 39 | opencv = new OpenCV(this, video.width, video.height); 40 | contours = new ArrayList<Contour>(); 41 | 42 | size(830, 480, P2D); 43 | 44 | // Array for detection colors 45 | colors = new int[maxColors]; 46 | hues = new int[maxColors]; 47 | 48 | outputs = new PImage[maxColors]; 49 | 50 | 
video.start(); 51 | } 52 | 53 | void draw() { 54 | 55 | background(150); 56 | 57 | if (video.available()) { 58 | video.read(); 59 | } 60 | 61 | // <2> Load the new frame of our camera into OpenCV 62 | opencv.loadImage(video); 63 | 64 | // Tell OpenCV to use color information 65 | opencv.useColor(); 66 | src = opencv.getSnapshot(); 67 | 68 | // <3> Tell OpenCV to work in HSV color space. 69 | opencv.useColor(HSB); 70 | 71 | detectColors(); 72 | 73 | // Show images 74 | image(src, 0, 0); 75 | for (int i=0; i<outputs.length; i++) { 76 | if (outputs[i] != null) { 77 | image(outputs[i], width-src.width/4, i*src.height/4, src.width/4, src.height/4); 78 | 79 | noStroke(); 80 | fill(colors[i]); 81 | rect(src.width, i*src.height/4, 30, src.height/4); 82 | } 83 | } 84 | 85 | // Print text if new color expected 86 | textSize(20); 87 | stroke(255); 88 | fill(255); 89 | 90 | if (colorToChange > -1) { 91 | text("click to change color " + colorToChange, 10, 25); 92 | } else { 93 | text("press key [1-4] to select color", 10, 25); 94 | } 95 | 96 | displayContoursBoundingBoxes(); 97 | } 98 | 99 | ////////////////////// 100 | // Detect Functions 101 | ////////////////////// 102 | 103 | void detectColors() { 104 | 105 | for (int i=0; i<hues.length; i++) { 106 | 107 | if (hues[i] <= 0) continue; 108 | 109 | opencv.loadImage(src); 110 | opencv.useColor(HSB); 111 | 112 | // <4> Copy the Hue channel of our image into 113 | // the gray channel, which we process. 114 | opencv.setGray(opencv.getH().clone()); 115 | 116 | int hueToDetect = hues[i]; 117 | //println("index " + i + " - hue to detect: " + hueToDetect); 118 | 119 | // <5> Filter the image based on the range of 120 | // hue values that match the object we want to track. 121 | opencv.inRange(hueToDetect-rangeWidth/2, hueToDetect+rangeWidth/2); 122 | 123 | //opencv.dilate(); 124 | opencv.erode(); 125 | 126 | // TO DO: 127 | // Add here some image filtering to detect blobs better 128 | 129 | // <6> Save the processed image for reference. 130 | outputs[i] = opencv.getSnapshot(); 131 | } 132 | 133 | // <7> Find contours in our range image. 134 | // Passing 'true' sorts them by descending area. 135 | if (outputs[0] != null) { 136 | 137 | opencv.loadImage(outputs[0]); 138 | contours = opencv.findContours(true,true); 139 | } 140 | } 141 | 142 | void displayContoursBoundingBoxes() { 143 | 144 | for (int i=0; i<contours.size(); i++) { 145 | 146 | Contour contour = contours.get(i); 147 | Rectangle r = contour.getBoundingBox(); 148 | 149 | if (r.width < 20 || r.height < 20) 150 | continue; 151 | 152 | stroke(255, 0, 0); 153 | fill(255, 0, 0, 150); 154 | strokeWeight(2); 155 | rect(r.x, r.y, r.width, r.height); 156 | } 157 | } 158 | 159 | ////////////////////// 160 | // Keyboard / Mouse 161 | ////////////////////// 162 | 163 | void mousePressed() { 164 | 165 | if (colorToChange > -1) { 166 | 167 | color c = get(mouseX, mouseY); 168 | println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c)); 169 | 170 | int hue = int(map(hue(c), 0, 255, 0, 180)); 171 | 172 | colors[colorToChange-1] = c; 173 | hues[colorToChange-1] = hue; 174 | 175 | println("color index " + (colorToChange-1) + ", value: " + hue); 176 | } 177 | } 178 | 179 | void keyPressed() { 180 | 181 | if (key == '1') { 182 | colorToChange = 1; 183 | 184 | } else if (key == '2') { 185 | colorToChange = 2; 186 | 187 | } else if (key == '3') { 188 | colorToChange = 3; 189 | 190 | } else if (key == '4') { 191 | colorToChange = 4; 192 | } 193 | } 194 | 195 | void keyReleased() { 196 | colorToChange = -1; 197 | } 198 | -------------------------------------------------------------------------------- /examples/MultipleColorTracking/screenshots/multiple_color_tracking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/MultipleColorTracking/screenshots/multiple_color_tracking.png -------------------------------------------------------------------------------- /examples/MultipleColorTracking/sketch.properties: -------------------------------------------------------------------------------- 1 | mode.id=processing.mode.java.JavaMode 2 | mode=Java 3 | -------------------------------------------------------------------------------- /examples/OpticalFlow/OpticalFlow.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import processing.video.*; 3 | 4 | OpenCV opencv; 5 | Movie video; 6 | 7 | void setup() 
{ 8 | size(1136, 320); 9 | video = new Movie(this, "sample1.mov"); 10 | opencv = new OpenCV(this, 568, 320); 11 | video.loop(); 12 | video.play(); 13 | } 14 | 15 | void draw() { 16 | background(0); 17 | 18 | if (video.width == 0 || video.height == 0) 19 | return; 20 | 21 | opencv.loadImage(video); 22 | opencv.calculateOpticalFlow(); 23 | 24 | image(video, 0, 0); 25 | translate(video.width, 0); 26 | stroke(255, 0, 0); 27 | opencv.drawOpticalFlow(); 28 | 29 | PVector aveFlow = opencv.getAverageFlow(); 30 | int flowScale = 50; 31 | 32 | stroke(255); 33 | strokeWeight(2); 34 | line(video.width/2, video.height/2, video.width/2 + aveFlow.x*flowScale, video.height/2 + aveFlow.y*flowScale); 35 | } 36 | 37 | void movieEvent(Movie m) { 38 | m.read(); 39 | } 40 | -------------------------------------------------------------------------------- /examples/OpticalFlow/data/sample1.mov: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/OpticalFlow/data/sample1.mov -------------------------------------------------------------------------------- /examples/RegionOfInterest/RegionOfInterest.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | PImage src; 4 | OpenCV opencv; 5 | 6 | int roiWidth = 150; 7 | int roiHeight = 150; 8 | 9 | boolean useROI = true; 10 | 11 | void setup() { 12 | src = loadImage("test.jpg"); 13 | opencv = new OpenCV(this, src); 14 | size(1080, 720); 15 | println(opencv.width, opencv.height); 16 | } 17 | 18 | void draw() { 19 | opencv.loadImage(src); 20 | 21 | if (useROI) { 22 | opencv.setROI(mouseX, mouseY, roiWidth, roiHeight); 23 | } 24 | 25 | opencv.findCannyEdges(20,75); 26 | image(opencv.getOutput(), 0, 0); 27 | 28 | // if an ROI is in-use then getSnapshot() 29 | // will return an image with the dimensions 30 | // and content of the ROI 31 | if(useROI){ 32 | image(opencv.getSnapshot(), width-roiWidth,0); 33 | } 34 | } 35 | 36 | // toggle ROI on and off 37 | void keyPressed() { 38 | useROI = !useROI; 39 | 40 | if (!useROI) { 41 | opencv.releaseROI(); 42 | } 43 | } -------------------------------------------------------------------------------- /examples/RegionOfInterest/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/RegionOfInterest/test.jpg -------------------------------------------------------------------------------- /examples/WarpPerspective/WarpPerspective.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | import org.opencv.imgproc.Imgproc; 3 | import org.opencv.core.MatOfPoint2f; 4 | import org.opencv.core.Point; 5 | import org.opencv.core.Size; 6 | 7 | import org.opencv.core.Mat; 8 | import org.opencv.core.CvType; 9 | 10 | 11 | OpenCV opencv; 12 | PImage src; 13 | PImage card; 14 | int cardWidth = 250; 15 | int cardHeight = 350; 16 | 17 | Contour contour; 18 | 19 | void setup() { 20 | src = loadImage("cards.png"); 21 | size(950, 749); 22 | opencv = new OpenCV(this, src); 23 | 24 | opencv.blur(1); 25 | opencv.threshold(120); 26 | 27 | contour = opencv.findContours(false, true).get(0).getPolygonApproximation(); 28 | 29 | card = createImage(cardWidth, cardHeight, ARGB); 30 | opencv.toPImage(warpPerspective(contour.getPoints(), cardWidth, cardHeight), card); 31 | } 
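// What getPerspectiveTransformation() below computes is a 3x3 homography H that carries each of the four detected corner points onto the matching corner of the canonical cardWidth x cardHeight rectangle: (x', y', w') = H * (x, y, 1), with the destination pixel at (x'/w', y'/w'). warpPerspective() then resamples the whole image through H.
// A minimal helper sketch (an illustrative addition, not called anywhere in this example): push a single point through H by hand. getPerspectiveTransform() returns a CV_64F Mat, so Mat.get(row, col)[0] reads each coefficient.
PVector applyHomography(Mat H, PVector p) {
  double x = H.get(0, 0)[0]*p.x + H.get(0, 1)[0]*p.y + H.get(0, 2)[0];
  double y = H.get(1, 0)[0]*p.x + H.get(1, 1)[0]*p.y + H.get(1, 2)[0];
  double w = H.get(2, 0)[0]*p.x + H.get(2, 1)[0]*p.y + H.get(2, 2)[0];
  return new PVector((float)(x/w), (float)(y/w));
}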
32 | 33 | Mat getPerspectiveTransformation(ArrayList<PVector> inputPoints, int w, int h) { 34 | Point[] canonicalPoints = new Point[4]; 35 | canonicalPoints[0] = new Point(w, 0); 36 | canonicalPoints[1] = new Point(0, 0); 37 | canonicalPoints[2] = new Point(0, h); 38 | canonicalPoints[3] = new Point(w, h); 39 | 40 | MatOfPoint2f canonicalMarker = new MatOfPoint2f(); 41 | canonicalMarker.fromArray(canonicalPoints); 42 | 43 | Point[] points = new Point[4]; 44 | for (int i = 0; i < 4; i++) { 45 | points[i] = new Point(inputPoints.get(i).x, inputPoints.get(i).y); 46 | } 47 | MatOfPoint2f marker = new MatOfPoint2f(points); 48 | return Imgproc.getPerspectiveTransform(marker, canonicalMarker); 49 | } 50 | 51 | Mat warpPerspective(ArrayList<PVector> inputPoints, int w, int h) { 52 | Mat transform = getPerspectiveTransformation(inputPoints, w, h); 53 | Mat unWarpedMarker = new Mat(w, h, CvType.CV_8UC1); 54 | Imgproc.warpPerspective(opencv.getColor(), unWarpedMarker, transform, new Size(w, h)); 55 | return unWarpedMarker; 56 | } 57 | 58 | 59 | void draw() { 60 | image(src, 0, 0); 61 | noFill(); 62 | stroke(0, 255, 0); 63 | strokeWeight(4); 64 | contour.draw(); 65 | fill(255, 0); 66 | ArrayList<PVector> points = contour.getPoints(); 67 | for (int i = 0; i < points.size(); i++) { 68 | text(i, points.get(i).x, points.get(i).y); 69 | } 70 | 71 | pushMatrix(); 72 | translate(src.width, 0); 73 | image(card, 0, 0); 74 | popMatrix(); 75 | } -------------------------------------------------------------------------------- /examples/WarpPerspective/cards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/WarpPerspective/cards.png -------------------------------------------------------------------------------- /examples/WhichFace/Face.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * Which Face Is Which 3 | * Daniel Shiffman 4 | * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/ 5 | * 6 | * Modified by Jordi Tost (call the constructor specifying an ID) 7 | * @updated: 01/10/2014 8 | */ 9 | 10 | class Face { 11 | 12 | // A Rectangle 13 | Rectangle r; 14 | 15 | // Am I available to be matched? 16 | boolean available; 17 | 18 | // Should I be deleted? 19 | boolean delete; 20 | 21 | // How long should I live if I have disappeared? 22 | int timer = 127; 23 | 24 | // Assign a number to each face 25 | int id; 26 | 27 | // Make me 28 | Face(int newID, int x, int y, int w, int h) { 29 | r = new Rectangle(x,y,w,h); 30 | available = true; 31 | delete = false; 32 | id = newID; 33 | } 34 | 35 | // Show me 36 | void display() { 37 | fill(0,0,255,timer); 38 | stroke(0,0,255); 39 | rect(r.x,r.y,r.width, r.height); 40 | //rect(r.x*scl,r.y*scl,r.width*scl, r.height*scl); 41 | fill(255,timer*2); 42 | text(""+id,r.x+10,r.y+30); 43 | //text(""+id,r.x*scl+10,r.y*scl+30); 44 | //text(""+id,r.x*scl+10,r.y*scl+30); 45 | } 46 | 47 | // Give me a new location / size 48 | // Oooh, it would be nice to lerp here! 
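// (one way to do that lerp, as a sketch: r.x = (int) lerp(r.x, newR.x, 0.5); and the same for y, width and height, so the box eases toward its new position instead of jumping)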
49 | void update(Rectangle newR) { 50 | r = (Rectangle) newR.clone(); 51 | } 52 | 53 | // Count me down, I am gone 54 | void countDown() { 55 | timer--; 56 | } 57 | 58 | // I am dead, delete me 59 | boolean dead() { 60 | if (timer < 0) return true; 61 | return false; 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /examples/WhichFace/WhichFace.pde: -------------------------------------------------------------------------------- 1 | /** 2 | * WhichFace 3 | * Daniel Shiffman 4 | * http://shiffman.net/2011/04/26/opencv-matching-faces-over-time/ 5 | * 6 | * Modified by Jordi Tost (@jorditost) to work with the OpenCV library by Greg Borenstein: 7 | * https://github.com/atduskgreg/opencv-processing 8 | * 9 | * @url: https://github.com/jorditost/BlobPersistence/ 10 | * 11 | * University of Applied Sciences Potsdam, 2014 12 | */ 13 | 14 | import gab.opencv.*; 15 | import processing.video.*; 16 | import java.awt.*; 17 | 18 | Capture video; 19 | OpenCV opencv; 20 | 21 | // List of my Face objects (persistent) 22 | ArrayList<Face> faceList; 23 | 24 | // List of detected faces (every frame) 25 | Rectangle[] faces; 26 | 27 | // Number of faces detected over all time. Used to set IDs. 28 | int faceCount = 0; 29 | 30 | // Scaling down the video 31 | int scl = 2; 32 | 33 | void setup() { 34 | size(640, 480); 35 | video = new Capture(this, width/scl, height/scl); 36 | opencv = new OpenCV(this, width/scl, height/scl); 37 | opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE); 38 | 39 | faceList = new ArrayList<Face>(); 40 | 41 | video.start(); 42 | } 43 | 44 | void draw() { 45 | scale(scl); 46 | opencv.loadImage(video); 47 | 48 | image(video, 0, 0 ); 49 | 50 | detectFaces(); 51 | 52 | // Draw all the faces 53 | for (int i = 0; i < faces.length; i++) { 54 | noFill(); 55 | strokeWeight(5); 56 | stroke(255,0,0); 57 | //rect(faces[i].x*scl,faces[i].y*scl,faces[i].width*scl,faces[i].height*scl); 58 | rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height); 59 | } 60 | 61 | for (Face f : faceList) { 62 | strokeWeight(2); 63 | f.display(); 64 | } 65 | } 66 | 67 | void detectFaces() { 68 | 69 | // Faces detected in this frame 70 | faces = opencv.detect(); 71 | 72 | // Check whether the detected faces already exist, are new, or have disappeared. 
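// The matching below is the same greedy nearest-neighbour scheme used for the blobs in ImageFilteringWithBlobPersistence: each face keeps the ID of the closest rectangle across frames, which is cheap, but it also means two faces that cross paths can swap IDs.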
73 | 74 | // SCENARIO 1 75 | // faceList is empty 76 | if (faceList.isEmpty()) { 77 | // Just make a Face object for every face Rectangle 78 | for (int i = 0; i < faces.length; i++) { 79 | println("+++ New face detected with ID: " + faceCount); 80 | faceList.add(new Face(faceCount, faces[i].x,faces[i].y,faces[i].width,faces[i].height)); 81 | faceCount++; 82 | } 83 | 84 | // SCENARIO 2 85 | // We have fewer Face objects than face Rectangles found from OPENCV 86 | } else if (faceList.size() <= faces.length) { 87 | boolean[] used = new boolean[faces.length]; 88 | // Match existing Face objects with a Rectangle 89 | for (Face f : faceList) { 90 | // Find faces[index] that is closest to face f 91 | // set used[index] to true so that it can't be used twice 92 | float record = 50000; 93 | int index = -1; 94 | for (int i = 0; i < faces.length; i++) { 95 | float d = dist(faces[i].x,faces[i].y,f.r.x,f.r.y); 96 | if (d < record && !used[i]) { 97 | record = d; 98 | index = i; 99 | } 100 | } 101 | // Update Face object location 102 | used[index] = true; 103 | f.update(faces[index]); 104 | } 105 | // Add any unused faces 106 | for (int i = 0; i < faces.length; i++) { 107 | if (!used[i]) { 108 | println("+++ New face detected with ID: " + faceCount); 109 | faceList.add(new Face(faceCount, faces[i].x,faces[i].y,faces[i].width,faces[i].height)); 110 | faceCount++; 111 | } 112 | } 113 | 114 | // SCENARIO 3 115 | // We have more Face objects than face Rectangles found 116 | } else { 117 | // All Face objects start out as available 118 | for (Face f : faceList) { 119 | f.available = true; 120 | } 121 | // Match Rectangle with a Face object 122 | for (int i = 0; i < faces.length; i++) { 123 | // Find face object closest to faces[i] Rectangle 124 | // set available to false 125 | float record = 50000; 126 | int index = -1; 127 | for (int j = 0; j < faceList.size(); j++) { 128 | Face f = faceList.get(j); 129 | float d = dist(faces[i].x,faces[i].y,f.r.x,f.r.y); 130 | if (d < record && f.available) { 131 | record = d; 132 | index = j; 133 | } 134 | } 135 | // Update Face object location 136 | Face f = faceList.get(index); 137 | f.available = false; 138 | f.update(faces[i]); 139 | } 140 | // Start to kill any left over Face objects 141 | for (Face f : faceList) { 142 | if (f.available) { 143 | f.countDown(); 144 | if (f.dead()) { 145 | f.delete = true; 146 | } 147 | } 148 | } 149 | } 150 | 151 | // Delete any that should be deleted 152 | for (int i = faceList.size()-1; i >= 0; i--) { 153 | Face f = faceList.get(i); 154 | if (f.delete) { 155 | faceList.remove(i); 156 | } 157 | } 158 | } 159 | 160 | void captureEvent(Capture c) { 161 | c.read(); 162 | } 163 | -------------------------------------------------------------------------------- /examples/WhichFace/screenshots/whichface.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/WhichFace/screenshots/whichface.png -------------------------------------------------------------------------------- /examples/WorkingWithColorImages/WorkingWithColorImages.pde: -------------------------------------------------------------------------------- 1 | import gab.opencv.*; 2 | 3 | OpenCV opencv; 4 | PImage threshold, blur, adaptive, gray; 5 | 6 | void setup() { 7 | PImage img = loadImage("test.jpg"); 8 | size(1080, 720); 9 | 10 | // By default, OpenCV for Processing works with a gray 11 | // version of the source image 12 | opencv = new 
OpenCV(this, img); 13 | // but you can tell it explicitly to use color instead: 14 | opencv.useColor(); 15 | 16 | // A lot of OpenCV operations only work on grayscale images. 17 | // But some do work in color, like threshold, blur, findCannyEdges, findChessboardCorners, etc.: 18 | opencv.threshold(75); 19 | threshold = opencv.getSnapshot(); 20 | 21 | opencv.blur(30); 22 | blur = opencv.getSnapshot(); 23 | 24 | // If you try an operation that does not work in color 25 | // it will print out an error message and leave the image unaffected 26 | opencv.adaptiveThreshold(591, 1); 27 | adaptive = opencv.getSnapshot(); 28 | 29 | // if you convert the image to gray then you can 30 | // do gray-only operations 31 | opencv.gray(); 32 | opencv.adaptiveThreshold(591, 1); 33 | gray = opencv.getSnapshot(); 34 | } 35 | 36 | void draw() { 37 | scale(0.5); 38 | image(threshold, 0, 0); 39 | image(blur, threshold.width,0); 40 | image(adaptive, 0,threshold.height); 41 | image(gray, threshold.width, threshold.height); 42 | } -------------------------------------------------------------------------------- /examples/WorkingWithColorImages/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/examples/WorkingWithColorImages/test.jpg -------------------------------------------------------------------------------- /gradle.properties: -------------------------------------------------------------------------------- 1 | javacppPlatform=linux-x86_64,macosx-x86_64,linux-armhf,linux-arm64,macosx-arm64,windows-x86_64 2 | org.gradle.parallel=true 3 | 4 | outputName=opencv_processing 5 | outputNamePostfix= 6 | 7 | disable-fatjar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cansik/opencv-processing/97c9ab8119c06ce9a4f1a141baa18eafd0d12ccb/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.1-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright © 2015-2021 the original authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | # 21 | # Gradle start up script for POSIX generated by Gradle. 
22 | # 23 | # Important for running: 24 | # 25 | # (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is 26 | # noncompliant, but you have some other compliant shell such as ksh or 27 | # bash, then to run this script, type that shell name before the whole 28 | # command line, like: 29 | # 30 | # ksh Gradle 31 | # 32 | # Busybox and similar reduced shells will NOT work, because this script 33 | # requires all of these POSIX shell features: 34 | # * functions; 35 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}», 36 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»; 37 | # * compound commands having a testable exit status, especially «case»; 38 | # * various built-in commands including «command», «set», and «ulimit». 39 | # 40 | # Important for patching: 41 | # 42 | # (2) This script targets any POSIX shell, so it avoids extensions provided 43 | # by Bash, Ksh, etc; in particular arrays are avoided. 44 | # 45 | # The "traditional" practice of packing multiple parameters into a 46 | # space-separated string is a well documented source of bugs and security 47 | # problems, so this is (mostly) avoided, by progressively accumulating 48 | # options in "$@", and eventually passing that to Java. 49 | # 50 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 51 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 52 | # see the in-line comments for details. 53 | # 54 | # There are tweaks for specific operating systems such as AIX, CygWin, 55 | # Darwin, MinGW, and NonStop. 56 | # 57 | # (3) This script is generated from the Groovy template 58 | # https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 59 | # within the Gradle project. 60 | # 61 | # You can find Gradle at https://github.com/gradle/gradle/. 62 | # 63 | ############################################################################## 64 | 65 | # Attempt to set APP_HOME 66 | 67 | # Resolve links: $0 may be a link 68 | app_path=$0 69 | 70 | # Need this for daisy-chained symlinks. 71 | while 72 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 73 | [ -h "$app_path" ] 74 | do 75 | ls=$( ls -ld "$app_path" ) 76 | link=${ls#*' -> '} 77 | case $link in #( 78 | /*) app_path=$link ;; #( 79 | *) app_path=$APP_HOME$link ;; 80 | esac 81 | done 82 | 83 | APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit 84 | 85 | APP_NAME="Gradle" 86 | APP_BASE_NAME=${0##*/} 87 | 88 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 89 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 90 | 91 | # Use the maximum available, or set MAX_FD != -1 to use that value. 92 | MAX_FD=maximum 93 | 94 | warn () { 95 | echo "$*" 96 | } >&2 97 | 98 | die () { 99 | echo 100 | echo "$*" 101 | echo 102 | exit 1 103 | } >&2 104 | 105 | # OS specific support (must be 'true' or 'false'). 106 | cygwin=false 107 | msys=false 108 | darwin=false 109 | nonstop=false 110 | case "$( uname )" in #( 111 | CYGWIN* ) cygwin=true ;; #( 112 | Darwin* ) darwin=true ;; #( 113 | MSYS* | MINGW* ) msys=true ;; #( 114 | NONSTOP* ) nonstop=true ;; 115 | esac 116 | 117 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 118 | 119 | 120 | # Determine the Java command to use to start the JVM. 
121 | if [ -n "$JAVA_HOME" ] ; then 122 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 123 | # IBM's JDK on AIX uses strange locations for the executables 124 | JAVACMD=$JAVA_HOME/jre/sh/java 125 | else 126 | JAVACMD=$JAVA_HOME/bin/java 127 | fi 128 | if [ ! -x "$JAVACMD" ] ; then 129 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 130 | Please set the JAVA_HOME variable in your environment to match the 131 | location of your Java installation." 132 | fi 133 | else 134 | JAVACMD=java 135 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 136 | Please set the JAVA_HOME variable in your environment to match the 137 | location of your Java installation." 138 | fi 139 | 140 | # Increase the maximum file descriptors if we can. 141 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 142 | case $MAX_FD in #( 143 | max*) 144 | MAX_FD=$( ulimit -H -n ) || 145 | warn "Could not query maximum file descriptor limit" 146 | esac 147 | case $MAX_FD in #( 148 | '' | soft) :;; #( 149 | *) 150 | ulimit -n "$MAX_FD" || 151 | warn "Could not set maximum file descriptor limit to $MAX_FD" 152 | esac 153 | fi 154 | 155 | # Collect all arguments for the java command, stacking in reverse order: 156 | # * args from the command line 157 | # * the main class name 158 | # * -classpath 159 | # * -D...appname settings 160 | # * --module-path (only if needed) 161 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 162 | 163 | # For Cygwin or MSYS, switch paths to Windows format before running java 164 | if "$cygwin" || "$msys" ; then 165 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 166 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) 167 | 168 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 169 | 170 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 171 | for arg do 172 | if 173 | case $arg in #( 174 | -*) false ;; # don't mess with options #( 175 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 176 | [ -e "$t" ] ;; #( 177 | *) false ;; 178 | esac 179 | then 180 | arg=$( cygpath --path --ignore --mixed "$arg" ) 181 | fi 182 | # Roll the args list around exactly as many times as the number of 183 | # args, so each arg winds up back in the position where it started, but 184 | # possibly modified. 185 | # 186 | # NB: a `for` loop captures its iteration list before it begins, so 187 | # changing the positional parameters here affects neither the number of 188 | # iterations, nor the values presented in `arg`. 189 | shift # remove old arg 190 | set -- "$@" "$arg" # push replacement arg 191 | done 192 | fi 193 | 194 | # Collect all arguments for the java command; 195 | # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of 196 | # shell script including quotes and variable substitutions, so put them in 197 | # double quotes to make sure that they get re-expanded; and 198 | # * put everything else in single quotes, so that it's not re-expanded. 199 | 200 | set -- \ 201 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 202 | -classpath "$CLASSPATH" \ 203 | org.gradle.wrapper.GradleWrapperMain \ 204 | "$@" 205 | 206 | # Use "xargs" to parse quoted args. 207 | # 208 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
209 | # 210 | # In Bash we could simply go: 211 | # 212 | # readarray ARGS < <( xargs -n1 <<<"$var" ) && 213 | # set -- "${ARGS[@]}" "$@" 214 | # 215 | # but POSIX shell has neither arrays nor command substitution, so instead we 216 | # post-process each arg (as a line of input to sed) to backslash-escape any 217 | # character that might be a shell metacharacter, then use eval to reverse 218 | # that process (while maintaining the separation between arguments), and wrap 219 | # the whole thing up as a single "set" statement. 220 | # 221 | # This will of course break if any of these variables contains a newline or 222 | # an unmatched quote. 223 | # 224 | 225 | eval "set -- $( 226 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | 227 | xargs -n1 | 228 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | 229 | tr '\n' ' ' 230 | )" '"$@"' 231 | 232 | exec "$JAVACMD" "$@" -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 34 | 35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 37 | 38 | @rem Find java.exe 39 | if defined JAVA_HOME goto findJavaFromJavaHome 40 | 41 | set JAVA_EXE=java.exe 42 | %JAVA_EXE% -version >NUL 2>&1 43 | if "%ERRORLEVEL%" == "0" goto execute 44 | 45 | echo. 46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 47 | echo. 48 | echo Please set the JAVA_HOME variable in your environment to match the 49 | echo location of your Java installation. 50 | 51 | goto fail 52 | 53 | :findJavaFromJavaHome 54 | set JAVA_HOME=%JAVA_HOME:"=% 55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 56 | 57 | if exist "%JAVA_EXE%" goto execute 58 | 59 | echo. 60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 61 | echo. 62 | echo Please set the JAVA_HOME variable in your environment to match the 63 | echo location of your Java installation. 
64 | 65 | goto fail 66 | 67 | :execute 68 | @rem Setup the command line 69 | 70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 71 | 72 | 73 | @rem Execute Gradle 74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 75 | 76 | :end 77 | @rem End local scope for the variables with windows NT shell 78 | if "%ERRORLEVEL%"=="0" goto mainEnd 79 | 80 | :fail 81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 82 | rem the _cmd.exe /c_ return code! 83 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 84 | exit /b 1 85 | 86 | :mainEnd 87 | if "%OS%"=="Windows_NT" endlocal 88 | 89 | :omega -------------------------------------------------------------------------------- /library.properties: -------------------------------------------------------------------------------- 1 | name=OpenCV for Processing 2 | category=Video & Vision 3 | authors=[Greg Borenstein](http://gregborenstein.com) and [Florian Bruggisser](https://broox.ch/) 4 | url=https://github.com/cansik/opencv-processing 5 | sentence=Computer vision with OpenCV. 6 | paragraph=Based on the official OpenCV Java API. A nice Processing-style API for common tasks and access to the full power of the OpenCV API for the advanced stuff. 7 | version=${version} 8 | prettyVersion=${prettyVersion} 9 | lastUpdated=0 10 | minRevision=0 11 | maxRevision=0 12 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | A code template to build libraries for the Processing programming environment. 2 | 3 | Part of the Processing project - http://processing.org 4 | 5 | Copyright (c) 2011-12 Elie Zananiri 6 | Copyright (c) 2008-11 Andreas Schlegel 7 | 8 | This program is free software; you can redistribute it and/or 9 | modify it under the terms of the GNU General Public License 10 | as published by the Free Software Foundation; either version 2 11 | of the License, or (at your option) any later version. 12 | 13 | This program is distributed in the hope that it will be useful, 14 | but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | GNU General Public License for more details. 17 | 18 | You should have received a copy of the GNU General Public License 19 | along with this program; if not, write to the Free Software 20 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -------------------------------------------------------------------------------- /processing-library.gradle: -------------------------------------------------------------------------------- 1 | // This script adds processing library build support to gradle. 
2 | def releaseRoot = "$rootDir/release" 3 | 4 | def releaseName = "$outputName$outputNamePostfix-$version" 5 | def releaseDirectory = "$releaseRoot/$releaseName" 6 | def processingVersion = generateProcessingVersion("$version").toString() 7 | 8 | static def generateProcessingVersion(String version) { 9 | def tokens = version.split('\\.') 10 | def sum = 0 11 | def count = 1 12 | 13 | for (String token : tokens.reverse()) { 14 | try { 15 | def value = token as Double 16 | sum += value * count 17 | count *= 100 18 | } catch (ignored) { 19 | } 20 | } 21 | 22 | return sum.toInteger() 23 | } 24 | 25 | task releaseProcessingLib { 26 | dependsOn { 27 | clean 28 | build 29 | javadoc 30 | if (project.hasProperty("disable-fatjar")) { 31 | jar 32 | } else { 33 | fatJar 34 | } 35 | } 36 | 37 | finalizedBy 'packageRelease', 'renameRelease' 38 | 39 | doFirst { 40 | println "releasing library $outputName..." 41 | 42 | // printing current jvm 43 | def currentJvm = org.gradle.internal.jvm.Jvm.current() 44 | println currentJvm 45 | 46 | println("cleaning release...") 47 | project.delete(files( 48 | "$releaseDirectory", 49 | "$releaseRoot/${releaseName}.zip", 50 | "$releaseRoot/${releaseName}.txt" 51 | )) 52 | } 53 | 54 | doLast { 55 | println "creating package..." 56 | 57 | if (project.hasProperty("disable-fatjar")) { 58 | println "copy library..." 59 | project.copy { 60 | from "$buildDir/libs/${outputName}.jar" 61 | into "$releaseDirectory/library" 62 | } 63 | 64 | if (!project.hasProperty("bare")) { 65 | println "copy dependencies..." 66 | project.copy { 67 | from configurations.runtimeClasspath 68 | into "$releaseDirectory/library" 69 | } 70 | } 71 | 72 | // Need to delete the Processing jar explicitly, at least when including them as fileTree dependencies 73 | delete "$releaseDirectory/library/core.jar" 74 | delete "$releaseDirectory/library/jogl-all-main.jar" 75 | delete "$releaseDirectory/library/gluegen-rt-main.jar" 76 | } else { 77 | println "copy fatjar library..." 78 | project.copy { 79 | from "$buildDir/libs/$outputName-complete.jar" 80 | into "$releaseDirectory/library" 81 | rename "$outputName-complete.jar", "${outputName}.jar" 82 | } 83 | } 84 | 85 | 86 | println "copy assets (only if necessary)..." 87 | project.copy { 88 | from("$rootDir") { 89 | include "shaders/**", 90 | "native/**" 91 | } 92 | into "$releaseDirectory/library" 93 | exclude "*.DS_Store" 94 | } 95 | 96 | println "copy data..." 97 | project.copy { 98 | from "data" 99 | into "$releaseDirectory/library" 100 | } 101 | 102 | println "copy javadoc..." 103 | project.copy { 104 | from "$buildDir/docs/javadoc" 105 | into "$releaseDirectory/reference" 106 | } 107 | 108 | println "copy additional files..." 109 | project.copy { 110 | from("$rootDir") { 111 | include "README.md", 112 | "readme/**", 113 | "examples/**", 114 | "src/**" 115 | } 116 | into "$releaseDirectory" 117 | exclude "*.DS_Store", "**/networks/**" 118 | } 119 | 120 | println "copy library properties..." 121 | project.copy { 122 | from("$rootDir") { 123 | include "library.properties" 124 | } 125 | into "$releaseDirectory" 126 | exclude "*.DS_Store" 127 | filter { 128 | it.replace('${version}', "$processingVersion") 129 | .replace('${prettyVersion}', "$project.version") 130 | } 131 | } 132 | 133 | println "copy repository library.txt..." 
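(Editor's note: the `generateProcessingVersion` helper defined near the top of this script folds the dotted version string into the single integer that Processing's `library.properties` expects, weighting each dotted field by increasing powers of 100, least-significant field first: "0.5.4" becomes 4 + 5*100 + 0*10000 = 504. A rough standalone Java port, shown only to illustrate the mapping; the build itself uses the Groovy version above, and the class/method names here are hypothetical.)

```java
// Illustrative Java port of the Groovy generateProcessingVersion helper.
public class VersionDemo {
    static int generateProcessingVersion(String version) {
        String[] tokens = version.split("\\.");
        int sum = 0;
        int count = 1;
        // walk the dotted fields in reverse, weighting by powers of 100
        for (int i = tokens.length - 1; i >= 0; i--) {
            try {
                sum += Integer.parseInt(tokens[i]) * count;
                count *= 100;
            } catch (NumberFormatException ignored) {
                // non-numeric tokens (e.g. "beta") are skipped, as in the Groovy code
            }
        }
        return sum;
    }

    public static void main(String[] args) {
        System.out.println(generateProcessingVersion("0.5.4")); // 4 + 5*100 + 0*10000 = 504
    }
}
```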
134 | 			project.copy {
135 | 				from "$releaseDirectory/library.properties"
136 | 				into "$releaseRoot"
137 | 				rename "library.properties", "${outputName}.txt"
138 | 			}
139 | 		}
140 | }
141 | 
142 | task packageRelease(type: Zip) {
143 |     mustRunAfter releaseProcessingLib
144 | 
145 |     archiveFileName = "${outputName}${outputNamePostfix}.zip"
146 |     from "$releaseDirectory"
147 |     into "$outputName"
148 |     destinationDirectory = file("$releaseRoot")
149 |     exclude "**/*.DS_Store"
150 | }
151 | 
152 | task renameRelease {
153 |     mustRunAfter packageRelease
154 | }
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'OpenCVProcessing'
2 | 
3 | 
--------------------------------------------------------------------------------
/src/main/java/gab/opencv/Contour.java:
--------------------------------------------------------------------------------
1 | package gab.opencv;
2 | 
3 | import java.util.ArrayList;
4 | import processing.core.*;
5 | 
6 | import org.opencv.core.MatOfPoint;
7 | import org.opencv.core.MatOfPoint2f;
8 | import org.opencv.imgproc.Imgproc;
9 | import org.opencv.core.Rect;
10 | import org.opencv.core.Mat;
11 | import org.opencv.core.MatOfInt;
12 | import org.opencv.core.Point;
13 | 
14 | import java.awt.Rectangle;
15 | 
16 | 
17 | public class Contour {
18 | 	private ArrayList<PVector> points;
19 | 	private Point[] inputPoints;
20 | 	private double polygonApproximationFactor;
21 | 	private PApplet parent;
22 | 	private Rectangle boundingBox;
23 | 	public MatOfPoint pointMat;
24 | 	
25 | 	public Contour(PApplet parent, MatOfPoint mat){
26 | 		polygonApproximationFactor = mat.size().height * 0.01;
27 | 		this.parent = parent;
28 | 		this.pointMat = mat;
29 | 		
30 | 		Rect r = Imgproc.boundingRect(mat);
31 | 		boundingBox = new Rectangle(r.x, r.y, r.width, r.height);
32 | 		loadPoints(mat.toArray());
33 | 	}
34 | 	
35 | 	public Contour(PApplet parent, MatOfPoint2f mat){
36 | 		polygonApproximationFactor = mat.size().height * 0.01;
37 | 		this.parent = parent;
38 | 		this.pointMat = new MatOfPoint(mat.toArray());
39 | 		
40 | 		Rect r = Imgproc.minAreaRect(mat).boundingRect();
41 | 		boundingBox = new Rectangle(r.x, r.y, r.width, r.height);
42 | 		loadPoints(mat.toArray());
43 | 	}
44 | 	
45 | 	public void loadPoints(Point[] pts){
46 | 		points = new ArrayList<PVector>();
47 | 		inputPoints = pts;
48 | 		
49 | 		for(int i = 0; i < inputPoints.length; i++){
50 | 			points.add(new PVector((float)inputPoints[i].x, (float)inputPoints[i].y));
51 | 		}
52 | 	}
53 | 	
54 | 	/**
55 | 	 * Check if the Contour contains a given x-y point.
56 | 	 * Particularly useful for interaction via mouseX and mouseY.
57 | 	 * 
58 | 	 * @param x
59 | 	 * @param y
60 | 	 * @return boolean
61 | 	 */
62 | 	public boolean containsPoint(int x, int y){
63 | 		Point p = new Point(x,y);
64 | 		MatOfPoint2f m = new MatOfPoint2f(pointMat.toArray());
65 | 		
66 | 		double r = Imgproc.pointPolygonTest(m,p, false);
67 | 		return r == 1;
68 | 	}
69 | 	
70 | 	/**
71 | 	 * The polygonApproximationFactor is used to determine
72 | 	 * how strictly to follow a curvy polygon when converting
73 | 	 * it into a simpler polygon with getPolygonApproximation().
74 | 	 * For advanced use only. Set to a sane value by default.
75 | 	 * 
76 | 	 * @param polygonApproximationFactor a double
77 | 	 */
78 | 	public void setPolygonApproximationFactor(double polygonApproximationFactor){
79 | 		this.polygonApproximationFactor = polygonApproximationFactor;
80 | 	}
81 | 	
82 | 	/**
83 | 	 * Access the current polygonApproximationFactor. The polygonApproximationFactor
84 | 	 * is used to determine how strictly to follow a curvy polygon when converting
85 | 	 * it into a simpler polygon with getPolygonApproximation().
86 | 	 * 
87 | 	 * @return polygonApproximationFactor, a double
88 | 	 */
89 | 	public double getPolygonApproximationFactor(){
90 | 		return polygonApproximationFactor;
91 | 	}
92 | 	
93 | 	/**
94 | 	 * Get a new Contour that results from calculating
95 | 	 * the polygon approximation of the current Contour.
96 | 	 * The tightness of the approximation is set by the polygonApproximationFactor.
97 | 	 * See setPolygonApproximationFactor() and getPolygonApproximationFactor().
98 | 	 * 
99 | 	 * @return A new, simplified Contour
100 | 	 */
101 | 	public Contour getPolygonApproximation(){
102 | 		MatOfPoint2f approx = new MatOfPoint2f();
103 | 		Imgproc.approxPolyDP(new MatOfPoint2f(inputPoints), approx, polygonApproximationFactor, true);
104 | 		return new Contour(parent, approx);
105 | 	}
106 | 	
107 | 	/**
108 | 	 * Calculate a convex hull from the current Contour.
109 | 	 * Returns a new Contour representing the convex hull.
110 | 	 * 
111 | 	 * @return Contour
112 | 	 */
113 | 	public Contour getConvexHull(){
114 | 		MatOfInt hull = new MatOfInt();
115 | 		MatOfPoint points = new MatOfPoint(pointMat);
116 | 		
117 | 		
118 | 		Imgproc.convexHull(points, hull);
119 | 		Point[] hp = new Point[hull.height()];
120 | 		
121 | 		for(int i = 0; i < hull.height(); i++){
122 | 			int index = (int)hull.get(i,0)[0];
123 | 			hp[i] = new Point(pointMat.get(index,0));
124 | 		}
125 | 		MatOfPoint hullPoints = new MatOfPoint();
126 | 		hullPoints.fromArray(hp);
127 | 		
128 | 		return new Contour(parent, hullPoints);
129 | 	}
130 | 	
131 | 	/**
132 | 	 * Draw the Contour as a closed shape with one vertex per point.
133 | 	 * 
134 | 	 */
135 | 	public void draw(){
136 | 		parent.beginShape();
137 | 		for (PVector p : points) {
138 | 			parent.vertex(p.x, p.y);
139 | 		}
140 | 		parent.endShape(PConstants.CLOSE);
141 | 		
142 | 	}
143 | 	/**
144 | 	 * Get the points that make up the Contour.
145 | 	 * 
146 | 	 * @return {@literal ArrayList<PVector>} points
147 | 	 */
148 | 	public ArrayList<PVector> getPoints(){
149 | 		return points;
150 | 	}
151 | 	
152 | 	/**
153 | 	 * The number of points in the Contour.
154 | 	 * 
155 | 	 * @return int
156 | 	 */
157 | 	public int numPoints(){
158 | 		return points.size();
159 | 	}
160 | 	
161 | 	/**
162 | 	 * Get the bounding box for the Contour.
163 | 	 * 
164 | 	 * @return A java.awt.Rectangle
165 | 	 */
166 | 	public Rectangle getBoundingBox(){
167 | 		return boundingBox;
168 | 	}
169 | 	
170 | 	/**
171 | 	 * The area of the Contour's bounding box. In most cases, this is a good approximation for the Contour's area.
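(Editor's note: a minimal Processing sketch exercising the Contour API above, in the spirit of the bundled FindContours example. The image name and the threshold value are placeholders, not taken from this repository.)

```java
// Find contours in a thresholded image, then draw each raw contour
// and its polygon approximation.
import gab.opencv.*;

OpenCV opencv;
ArrayList<Contour> contours;

void setup() {
  size(640, 480);
  opencv = new OpenCV(this, loadImage("test.jpg")); // grayscale by default
  opencv.threshold(70);                             // findContours() needs a binary image
  contours = opencv.findContours();
}

void draw() {
  image(opencv.getOutput(), 0, 0);
  noFill();
  for (Contour contour : contours) {
    stroke(0, 255, 0);
    contour.draw();                               // raw contour
    stroke(255, 0, 0);
    contour.getPolygonApproximation().draw();     // simplified polygon
  }
}
```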
172 | 	 * 
173 | 	 * @return float area
174 | 	 */
175 | 	public float area(){
176 | 		return (boundingBox.width * boundingBox.height);
177 | 	}
178 | }
179 | 
180 | 
181 | 
182 | 
--------------------------------------------------------------------------------
/src/main/java/gab/opencv/ContourComparator.java:
--------------------------------------------------------------------------------
1 | package gab.opencv;
2 | 
3 | import java.util.Comparator;
4 | 
5 | public class ContourComparator implements Comparator<Contour> {
6 | 	public int compare(Contour c1, Contour c2) {
7 | 		if(c1.area() == c2.area()){
8 | 			return 0;
9 | 		}
10 | 		else if (c1.area() > c2.area()) {
11 | 			return -1;
12 | 		} else{
13 | 			return 1;
14 | 		}
15 | 	}
16 | }
--------------------------------------------------------------------------------
/src/main/java/gab/opencv/Flow.java:
--------------------------------------------------------------------------------
1 | package gab.opencv;
2 | 
3 | import processing.core.*;
4 | import java.awt.Rectangle;
5 | 
6 | import org.opencv.video.Video;
7 | import org.opencv.core.Mat;
8 | import org.opencv.core.CvType;
9 | import org.opencv.core.Core;
10 | import org.opencv.core.Scalar;
11 | 
12 | public class Flow {
13 | 
14 | 	private Mat prev;
15 | 	private Mat flow;
16 | 	private boolean hasFlow = false;
17 | 	private double pyramidScale = 0.5;
18 | 	private int nLevels = 4;
19 | 	private int windowSize = 8;
20 | 	private int nIterations = 2;
21 | 	private int polyN = 7;
22 | 	private double polySigma = 1.5;
23 | 	private int runningFlags = Video.OPTFLOW_FARNEBACK_GAUSSIAN;
24 | 	private PApplet parent;
25 | 
26 | 	public Flow(PApplet parent) {
27 | 		flow = new Mat();
28 | 		this.parent = parent;
29 | 	}
30 | 
31 | 	public int width(){
32 | 		return flow.width();
33 | 	}
34 | 
35 | 	public int height(){
36 | 		return flow.height();
37 | 	}
38 | 
39 | 	public boolean hasFlow(){
40 | 		return hasFlow;
41 | 	}
42 | 
43 | 	public Mat getFlowMat(){
44 | 		return flow;
45 | 	}
46 | 
47 | 	public void calculateOpticalFlow(Mat m) {
48 | 		int flags = runningFlags;
49 | 		if (!hasFlow) {
50 | 			prev = m.clone();
51 | 			// no initial flow has been provided so this flag is not needed?
=> crashes in 4.5.5 52 | // https://docs.opencv.org/4.5.5/dc/d6b/group__video__track.html#gga1d74616b51e7bc4f312a6978690c98b4a9d4430ac75199af0cf6fcdefba30eafe 53 | // flags = Video.OPTFLOW_USE_INITIAL_FLOW; 54 | hasFlow = true; 55 | } 56 | Video.calcOpticalFlowFarneback(prev, m, flow, pyramidScale, nLevels, windowSize, nIterations, polyN, polySigma, flags); 57 | prev = m.clone(); 58 | } 59 | 60 | public PVector getTotalFlowInRegion(int x, int y, int w, int h) { 61 | Mat region = flow.submat(y, y+h, x, x+w); 62 | Scalar total = Core.sumElems(region); 63 | return new PVector((float)total.val[0], (float)total.val[1]); 64 | } 65 | 66 | public PVector getAverageFlowInRegion(int x, int y, int w, int h) { 67 | PVector total = getTotalFlowInRegion(x, y, w, h); 68 | return new PVector(total.x/(w*h), total.y/(w*h)); 69 | } 70 | 71 | public PVector getTotalFlow() { 72 | return getTotalFlowInRegion(0, 0, flow.width(), flow.height()); 73 | } 74 | 75 | public PVector getAverageFlow() { 76 | return getAverageFlowInRegion(0, 0, flow.width(), flow.height()); 77 | } 78 | 79 | public PVector getFlowAt(int x, int y){ 80 | return new PVector((float)flow.get(y, x)[0], (float)flow.get(y, x)[1]); 81 | } 82 | 83 | public void draw() { 84 | int stepSize = 4; 85 | 86 | for (int y = 0; y < flow.height(); y+=stepSize) { 87 | for (int x = 0; x < flow.width(); x+=stepSize) { 88 | PVector flowVec = getFlowAt(x,y); 89 | parent.line(x, y, x+flowVec.x, y+flowVec.y); 90 | } 91 | } 92 | } 93 | 94 | public void setPyramidScale(double v){ 95 | pyramidScale = v; 96 | } 97 | 98 | public double getPyramidScale(){ 99 | return pyramidScale; 100 | } 101 | 102 | public void setLevels(int n){ 103 | nLevels = n; 104 | } 105 | 106 | public int getLevels(){ 107 | return nLevels; 108 | } 109 | 110 | public void setWindowSize(int s){ 111 | windowSize = s; 112 | } 113 | 114 | public int getWindowSize(){ 115 | return windowSize; 116 | } 117 | 118 | public void setIterations(int i){ 119 | nIterations = i; 120 | } 121 | 122 | public int getIterations(){ 123 | return nIterations; 124 | } 125 | 126 | public void setPolyN(int n){ 127 | polyN = n; 128 | } 129 | 130 | public int getPolyN(){ 131 | return polyN; 132 | } 133 | 134 | public void setPolySigma(double s){ 135 | polySigma = s; 136 | } 137 | 138 | public double getPolySigma(){ 139 | return polySigma; 140 | } 141 | } -------------------------------------------------------------------------------- /src/main/java/gab/opencv/Histogram.java: -------------------------------------------------------------------------------- 1 | package gab.opencv; 2 | 3 | import processing.core.*; 4 | import org.opencv.core.Mat; 5 | 6 | public class Histogram { 7 | private Mat mat; 8 | private PApplet parent; 9 | 10 | public Histogram(PApplet parent, Mat mat){ 11 | this.mat = mat; 12 | this.parent = parent; 13 | } 14 | 15 | public void draw(int x, int y, int w, int h) { 16 | parent.pushMatrix(); 17 | parent.translate(x, y); 18 | int numBins = mat.height(); 19 | float binWidth = w/(float)numBins; 20 | 21 | for (int i = 0; i < numBins; i++) { 22 | float v = (float)mat.get(i, 0)[0]; 23 | parent.rect(i*binWidth, h, binWidth, -h*v); 24 | } 25 | parent.popMatrix(); 26 | } 27 | 28 | public Mat getMat(){ 29 | return mat; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/gab/opencv/Line.java: -------------------------------------------------------------------------------- 1 | package gab.opencv; 2 | 3 | import processing.core.*; 4 | 5 | public class Line { 6 | 
public PVector start, end; 7 | public double angle; 8 | public double x1, y1, x2, y2; 9 | 10 | public Line(double x1, double y1, double x2, double y2){ 11 | this.x1 = x1; 12 | this.y1 = y1; 13 | this.x2 = x2; 14 | this.y2 = y2; 15 | 16 | start = new PVector((float)x1, (float)y1); 17 | end = new PVector((float)x2, (float)y2); 18 | 19 | // measure the angle between this line 20 | // and a vertical line oriented up 21 | angle = angleBetween(x1, y1, x2, y2, 0, 0, 0, -1); 22 | } 23 | 24 | public double angleFrom(Line other){ 25 | return angleBetween(x1, y1, x2, y2, other.x1, other.y1, other.x2, other.y2); 26 | } 27 | 28 | 29 | public static double angleBetween(double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4){ 30 | double angle1 = Math.atan2(y1 - y2, x1 - x2); 31 | double angle2 = Math.atan2(y3 - y4, x3 - x4); 32 | return angle1-angle2; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/gab/opencv/OpenCV.java: -------------------------------------------------------------------------------- 1 | /* 2 | ##library.name## 3 | ##library.sentence## 4 | ##library.url## 5 | 6 | Copyright ##copyright## ##author## 7 | 8 | This library is free software; you can redistribute it and/or 9 | modify it under the terms of the GNU Lesser General Public 10 | License as published by the Free Software Foundation; either 11 | version 2.1 of the License, or (at your option) any later version. 12 | 13 | This library is distributed in the hope that it will be useful, 14 | but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 | Lesser General Public License for more details. 17 | 18 | You should have received a copy of the GNU Lesser General 19 | Public License along with this library; if not, write to the 20 | Free Software Foundation, Inc., 59 Temple Place, Suite 330, 21 | Boston, MA 02111-1307 USA 22 | 23 | @author ##author## 24 | * @modified ##date## 25 | * @version ##library.prettyVersion## (##library.version##) 26 | */ 27 | 28 | 29 | 30 | package gab.opencv; 31 | 32 | import gab.opencv.Contour; 33 | import gab.opencv.ContourComparator; 34 | import gab.opencv.Histogram; 35 | import gab.opencv.Line; 36 | import gab.opencv.Flow; 37 | 38 | import java.awt.Rectangle; 39 | import java.awt.image.BufferedImage; 40 | import java.awt.image.DataBufferInt; 41 | import java.nio.ByteBuffer; 42 | import java.nio.ByteOrder; 43 | import java.nio.IntBuffer; 44 | import java.io.File; 45 | import java.lang.reflect.Field; 46 | import java.net.URL; 47 | import java.util.ArrayList; 48 | import java.util.Collections; 49 | 50 | import org.bytedeco.javacpp.Loader; 51 | import org.bytedeco.opencv.opencv_java; 52 | import org.opencv.bgsegm.BackgroundSubtractorMOG; 53 | import org.opencv.core.Core; 54 | import org.opencv.core.CvType; 55 | import org.opencv.core.Mat; 56 | import org.opencv.core.MatOfRect; 57 | import org.opencv.core.MatOfPoint; 58 | import org.opencv.core.MatOfPoint2f; 59 | import org.opencv.core.MatOfInt; 60 | import org.opencv.core.MatOfFloat; 61 | import org.opencv.core.Rect; 62 | import org.opencv.core.Scalar; 63 | import org.opencv.core.Size; 64 | import org.opencv.core.Point; 65 | import org.opencv.calib3d.Calib3d; 66 | import org.opencv.core.CvException; 67 | import org.opencv.core.Core.MinMaxLocResult; 68 | import org.opencv.objdetect.CascadeClassifier; 69 | import org.opencv.imgproc.Imgproc; 70 | 71 | import processing.core.*; 72 | 73 | 
import static org.opencv.bgsegm.Bgsegm.createBackgroundSubtractorMOG; 74 | 75 | /** 76 | * OpenCV is the main class for using OpenCV for Processing. Most of the documentation is found here. 77 | * 78 | * OpenCV for Processing is a computer vision library for the Processing creative coding toolkit. 79 | * It's based on OpenCV, which is widely used throughout industry and academic research. OpenCV for 80 | * Processing provides friendly, Processing-style functions for doing all of the most common tasks 81 | * in computer vision: loading images, filtering them, detecting faces, finding contours, background 82 | * subtraction, optical flow, calculating histograms etc. OpenCV also provides access to all native 83 | * OpenCV data types and functions. So advanced users can do anything described in the OpenCV java 84 | * documentation: http://docs.opencv.org/java/ 85 | * 86 | * A text is also underway to provide a narrative introduction to computer vision for beginners using 87 | * OpenCV for Processing: https://github.com/atduskgreg/opencv-processing-book/blob/master/book/toc.md 88 | * 89 | */ 90 | 91 | public class OpenCV { 92 | 93 | PApplet parent; 94 | 95 | public int width; 96 | public int height; 97 | 98 | private int roiWidth; 99 | private int roiHeight; 100 | 101 | public Mat matBGRA; 102 | public Mat matR, matG, matB, matA; 103 | public Mat matHSV; 104 | public Mat matH, matS, matV; 105 | public Mat matGray; 106 | public Mat matROI; 107 | public Mat nonROImat; // so that releaseROI() can return to color/gray as appropriate 108 | 109 | private boolean useColor; 110 | private boolean useROI; 111 | public int colorSpace; 112 | 113 | private PImage outputImage; 114 | private PImage inputImage; 115 | 116 | private boolean nativeLoaded; 117 | private boolean isArm = false; 118 | 119 | public CascadeClassifier classifier; 120 | BackgroundSubtractorMOG backgroundSubtractor; 121 | public Flow flow; 122 | 123 | public final static String VERSION = "##library.prettyVersion##"; 124 | public final static String CASCADE_FRONTALFACE = "haarcascade_frontalface_alt.xml"; 125 | public final static String CASCADE_PEDESTRIANS = "hogcascade_pedestrians.xml"; 126 | public final static String CASCADE_EYE = "haarcascade_eye.xml"; 127 | public final static String CASCADE_CLOCK = "haarcascade_clock.xml"; 128 | public final static String CASCADE_NOSE = "haarcascade_mcs_nose.xml"; 129 | public final static String CASCADE_MOUTH = "haarcascade_mcs_mouth.xml"; 130 | public final static String CASCADE_UPPERBODY = "haarcascade_upperbody.xml"; 131 | public final static String CASCADE_LOWERBODY = "haarcascade_lowerbody.xml"; 132 | public final static String CASCADE_FULLBODY = "haarcascade_fullbody.xml"; 133 | public final static String CASCADE_PEDESTRIAN = "hogcascade_pedestrians.xml"; 134 | 135 | public final static String CASCADE_RIGHT_EAR = "haarcascade_mcs_rightear.xml"; 136 | public final static String CASCADE_PROFILEFACE = "haarcascade_profileface.xml"; 137 | 138 | // used for both Scharr edge detection orientation 139 | // and flip(). Values are set for flip, arbitrary from POV of Scharr 140 | public final static int HORIZONTAL = 1; 141 | public final static int VERTICAL = 0; 142 | public final static int BOTH = -1; 143 | 144 | 145 | 146 | /** 147 | * Initialize OpenCV with the path to an image. 148 | * The image will be loaded and prepared for processing. 
149 | 	 *
150 | 	 * @param theParent - A PApplet representing the user sketch, i.e. "this"
151 | 	 * @param pathToImg - A String with a path to the image to be loaded
152 | 	 */
153 | 	public OpenCV(PApplet theParent, String pathToImg){
154 | 		initNative();
155 | 		useColor = false;
156 | 		loadFromString(theParent, pathToImg);
157 | 	}
158 | 	
159 | 	/**
160 | 	 * Initialize OpenCV with the path to an image.
161 | 	 * The image will be loaded and prepared for processing.
162 | 	 * 
163 | 	 * @param theParent - A PApplet representing the user sketch, i.e. "this"
164 | 	 * @param pathToImg - A String with a path to the image to be loaded
165 | 	 * @param useColor - (Optional) Set to true if you want to use the color version of the image for processing.
166 | 	 */
167 | 	public OpenCV(PApplet theParent, String pathToImg, boolean useColor){
168 | 		initNative();
169 | 		this.useColor = useColor;
170 | 		if(useColor){
171 | 			useColor(); // have to set the color space.
172 | 		}
173 | 		loadFromString(theParent, pathToImg);
174 | 	}
175 | 	
176 | 	private void loadFromString(PApplet theParent, String pathToImg){
177 | 		parent = theParent;
178 | 		PImage imageToLoad = parent.loadImage(pathToImg);
179 | 		init(imageToLoad.width, imageToLoad.height);
180 | 		loadImage(imageToLoad);
181 | 	}
182 | 	
183 | 	/**
184 | 	 * Initialize OpenCV with an image.
185 | 	 * The image's pixels will be copied and prepared for processing.
186 | 	 * 
187 | 	 * @param theParent
188 | 	 * 			A PApplet representing the user sketch, i.e. "this"
189 | 	 * @param img
190 | 	 * 			A PImage to be loaded
191 | 	 */
192 | 	public OpenCV(PApplet theParent, PImage img){
193 | 		initNative();
194 | 		useColor = false;
195 | 		loadFromPImage(theParent, img);
196 | 	}
197 | 	
198 | 	/**
199 | 	 * Initialize OpenCV with an image.
200 | 	 * The image's pixels will be copied and prepared for processing.
201 | 	 * 
202 | 	 * @param theParent
203 | 	 * 			A PApplet representing the user sketch, i.e. "this"
204 | 	 * @param img
205 | 	 * 			A PImage to be loaded
206 | 	 * @param useColor
207 | 	 * 			(Optional) Set to true if you want to use the color version of the image for processing.
208 | 	 */
209 | 	public OpenCV(PApplet theParent, PImage img, boolean useColor){
210 | 		initNative();
211 | 		this.useColor = useColor;
212 | 		if(useColor){
213 | 			useColor();
214 | 		}
215 | 		loadFromPImage(theParent, img);
216 | 	}
217 | 	
218 | 	private void loadFromPImage(PApplet theParent, PImage img){
219 | 		parent = theParent;
220 | 		init(img.width, img.height);
221 | 		loadImage(img);
222 | 	}
223 | 	
224 | 	/**
225 | 	 * 
226 | 	 * Apply subsequent image processing to
227 | 	 * the color version of the loaded image.
228 | 	 * 
229 | 	 * Note: Many OpenCV functions require a grayscale
230 | 	 * image. Those functions will raise an exception
231 | 	 * if attempted on a color image.
232 | 	 * 
233 | 	 */
234 | 	public void useColor(){
235 | 		useColor(PApplet.RGB);
236 | 	}
237 | 	
238 | 	/**
239 | 	 * 
240 | 	 * Get the colorSpace of the current color image. Will be either RGB or HSB.
241 | 	 * 
242 | 	 * @return 
243 | 	 * 
244 | 	 * The color space of the color mats. Either PApplet.RGB or PApplet.HSB
245 | 	 */
246 | 	public int getColorSpace(){
247 | 		return colorSpace;
248 | 	}
249 | 	
250 | 	/**
251 | 	 * 
252 | 	 * Set the main working image to be the color version of the imported image.
253 | 	 * Subsequent image-processing functions will be applied to the color version
254 | 	 * of the image. The image is assumed to be HSB or RGB based on the argument.
255 | 	 * 
256 | 	 * 
257 | 	 * @param colorSpace
258 | 	 * 			The color space of the image to be processed. Either RGB or HSB.
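(Editor's note: a hedged sketch of the color-space workflow documented above; the image name is a placeholder. It shows the color constructor plus the HSB switch, after which the per-channel mats become available.)

```java
// Load an image in color, switch the working color space to HSB,
// and grab the hue channel for e.g. inRange()-based color tracking.
import gab.opencv.*;
import org.opencv.core.Mat;

OpenCV opencv;

void setup() {
  size(640, 480);
  opencv = new OpenCV(this, loadImage("colored_balls.jpg"), true); // keep color
  opencv.useColor(HSB);       // splits the image into H, S, V mats
  Mat hue = opencv.getH();    // valid once the color space is HSB
  image(opencv.getOutput(), 0, 0);
}
```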
259 | 	 */
260 | 	public void useColor(int colorSpace){
261 | 		useColor = true;
262 | 		if(colorSpace != PApplet.RGB && colorSpace != PApplet.HSB){
263 | 			PApplet.println("ERROR: color space must be either RGB or HSB");
264 | 		} else {
265 | 			this.colorSpace = colorSpace;
266 | 		}
267 | 		
268 | 		if(this.colorSpace == PApplet.HSB){
269 | 			populateHSV();
270 | 		}
271 | 	}
272 | 	
273 | 	private void populateHSV(){
274 | 		matHSV = imitate(matBGRA);
275 | 		Imgproc.cvtColor(matBGRA, matHSV, Imgproc.COLOR_BGR2HSV);
276 | 		ArrayList<Mat> channels = new ArrayList<Mat>();
277 | 		Core.split(matHSV, channels);
278 | 		
279 | 		matH = channels.get(0);
280 | 		matS = channels.get(1);
281 | 		matV = channels.get(2);
282 | 	}
283 | 	
284 | 	private void populateBGRA(){
285 | 		ArrayList<Mat> channels = new ArrayList<Mat>();
286 | 		Core.split(matBGRA, channels);
287 | 		matB = channels.get(0);
288 | 		matG = channels.get(1);
289 | 		matR = channels.get(2);
290 | 		matA = channels.get(3);
291 | 	}
292 | 	
293 | 	/**
294 | 	 * 
295 | 	 * Set OpenCV to do image processing on the grayscale version
296 | 	 * of the loaded image.
297 | 	 * 
298 | 	 */
299 | 	public void useGray(){
300 | 		useColor = false;
301 | 	}
302 | 	
303 | 	/**
304 | 	 * 
305 | 	 * Checks whether OpenCV is currently using the color version of the image
306 | 	 * or the grayscale version.
307 | 	 * 
308 | 	 * @return
309 | 	 * 		True if OpenCV is currently using the color version of the image.
310 | 	 */
311 | 	public boolean getUseColor(){
312 | 		return useColor;
313 | 	}
314 | 	
315 | 	private Mat getCurrentMat(){
316 | 		if(useROI){
317 | 			return matROI;
318 | 			
319 | 		} else{
320 | 		
321 | 			if(useColor){
322 | 				return matBGRA;
323 | 			} else{
324 | 				return matGray;
325 | 			}
326 | 		}
327 | 	}
328 | 	
329 | 	/**
330 | 	 * Initialize OpenCV with a width and height.
331 | 	 * You will need to load an image in before processing.
332 | 	 * See loadImage(PImage img).
333 | 	 * 
334 | 	 * @param theParent
335 | 	 * 			A PApplet representing the user sketch, i.e. "this"
336 | 	 * @param width
337 | 	 * 			int
338 | 	 * @param height
339 | 	 * 			int
340 | 	 */
341 | 	public OpenCV(PApplet theParent, int width, int height) {
342 | 		initNative();
343 | 		parent = theParent;
344 | 		init(width, height);
345 | 	}
346 | 	
347 | 	private void init(int w, int h){
348 | 		width = w;
349 | 		height = h;
350 | 		welcome();
351 | 		setupWorkingImages();
352 | 		setupFlow();
353 | 		
354 | 		matR = new Mat(height, width, CvType.CV_8UC1);
355 | 		matG = new Mat(height, width, CvType.CV_8UC1);
356 | 		matB = new Mat(height, width, CvType.CV_8UC1);
357 | 		matA = new Mat(height, width, CvType.CV_8UC1);
358 | 		matGray = new Mat(height, width, CvType.CV_8UC1);
359 | 		
360 | 		matBGRA = new Mat(height, width, CvType.CV_8UC4);
361 | 	}
362 | 	
363 | 	private void setupFlow(){
364 | 		flow = new Flow(parent);
365 | 	}
366 | 	
367 | 	private void setupWorkingImages(){
368 | 		outputImage = parent.createImage(width,height, PConstants.ARGB);
369 | 	}
370 | 	
371 | 	private String getLibPath() {
372 | 		URL url = this.getClass().getResource("OpenCV.class");
373 | 		if (url != null) {
374 | 			// Convert URL to string, taking care of spaces represented by the "%20"
375 | 			// string.
376 | 			String path = url.toString().replace("%20", " ");
377 | 			int n0 = path.indexOf('/');
378 | 
379 | 			int n1 = -1;
380 | 			
381 | 
382 | 			n1 = path.indexOf("opencv_processing.jar");
383 | 			if (PApplet.platform == PConstants.WINDOWS) { //platform Windows
384 | 				// In Windows, path string starts with "jar file/C:/..."
385 | 				// so the substring up to the first / is removed.
386 | n0++; 387 | } 388 | 389 | 390 | if ((-1 < n0) && (-1 < n1)) { 391 | return path.substring(n0, n1); 392 | } else { 393 | return ""; 394 | } 395 | } 396 | return ""; 397 | } 398 | 399 | private void initNative(){ 400 | if(nativeLoaded) return; 401 | Loader.load(opencv_java.class); 402 | nativeLoaded = true; 403 | } 404 | 405 | /** 406 | * Load a cascade file for face or object detection. 407 | * Expects one of: 408 | * 409 | *
 410 | 	 * OpenCV.CASCADE_FRONTALFACE
 411 | 	 * OpenCV.CASCADE_PEDESTRIANS
 412 | 	 * OpenCV.CASCADE_EYE			
 413 | 	 * OpenCV.CASCADE_CLOCK		
 414 | 	 * OpenCV.CASCADE_NOSE 		
 415 | 	 * OpenCV.CASCADE_MOUTH		
 416 | 	 * OpenCV.CASCADE_UPPERBODY 	
 417 | 	 * OpenCV.CASCADE_LOWERBODY 	
 418 | 	 * OpenCV.CASCADE_FULLBODY 	
419 | 	 * OpenCV.CASCADE_PEDESTRIAN
 420 | 	 * OpenCV.CASCADE_RIGHT_EAR 	
 421 | 	 * OpenCV.CASCADE_PROFILEFACE
 422 | 	 * 
423 | * 424 | * To pass your own cascade file, provide an absolute path and a second 425 | * argument of true, thusly: 426 | * 427 | *
 428 | 	 * opencv.loadCascade("/path/to/my/custom/cascade.xml", true)
 429 | 	 * 
430 | 	 * 
431 | 	 * (NB: ant build scripts copy the data folder outside of the
432 | 	 * jar so that this will work.)
433 | 	 * 
434 | 	 * @param cascadeFileName
435 | 	 * 		The name of the cascade file to be loaded from within OpenCV for Processing.
436 | 	 * 		Must be one of the constants provided by this library.
437 | 	 */
438 | 	public void loadCascade(String cascadeFileName){
439 | 
440 | 		// localize path to cascade file to point at the library's data folder
441 | 		String relativePath = "cascade-files/" + cascadeFileName;
442 | 		String cascadePath = getLibPath();
443 | 		cascadePath += relativePath;
444 | 		
445 | 		PApplet.println("Load cascade from: " + cascadePath);
446 | 
447 | 		classifier = new CascadeClassifier(cascadePath);
448 | 		
449 | 		if(classifier.empty()){
450 | 			PApplet.println("Cascade failed to load"); // raise exception here?
451 | 		} else {
452 | 			PApplet.println("Cascade loaded: " + cascadeFileName);
453 | 		}
454 | 	}
455 | 	
456 | 	/**
457 | 	 * Load a cascade file for face or object detection.
458 | 	 * If absolute is true, cascadeFilePath must be an
459 | 	 * absolute path to a cascade xml file. If it is false
460 | 	 * then cascadeFilePath must be one of the options provided
461 | 	 * by OpenCV for Processing as in the single-argument
462 | 	 * version of this function.
463 | 	 * 
464 | 	 * @param cascadeFilePath
465 | 	 * 		A string. Either an absolute path to a cascade XML file or
466 | 	 * 		one of the constants provided by this library.
467 | 	 * @param absolute
468 | 	 * 		Whether or not the cascadeFilePath is an absolute path to an XML file.
469 | 	 */
470 | 	public void loadCascade(String cascadeFilePath, boolean absolute){
471 | 		if(absolute){
472 | 			classifier = new CascadeClassifier(cascadeFilePath);
473 | 			
474 | 			if(classifier.empty()){
475 | 				PApplet.println("Cascade failed to load"); // raise exception here?
476 | 			} else {
477 | 				PApplet.println("Cascade loaded from absolute path: " + cascadeFilePath);
478 | 			}
479 | 		} else {
480 | 			loadCascade(cascadeFilePath);
481 | 		}
482 | 	}
483 | 	
484 | 	/**
485 | 	 * Convert an array of OpenCV Rect objects into
486 | 	 * an array of java.awt.Rectangle rectangles.
487 | 	 * Especially useful when working with
488 | 	 * classifier.detectMultiScale().
489 | 	 * 
490 | 	 * @param rects Rectangles
491 | 	 *
492 | 	 * @return 
493 | 	 * 		A Rectangle[] of java.awt.Rectangle
494 | 	 */
495 | 	public static Rectangle[] toProcessing(Rect[] rects){
496 | 		Rectangle[] results = new Rectangle[rects.length];
497 | 		for(int i = 0; i < rects.length; i++){
498 | 			results[i] = new Rectangle(rects[i].x, rects[i].y, rects[i].width, rects[i].height);
499 | 		}
500 | 		return results;
501 | 	}
502 | 	
503 | 	/**
504 | 	 * Detect objects using the cascade classifier. loadCascade() must already
505 | 	 * have been called to set up the classifier. See the OpenCV documentation
506 | 	 * for details on the arguments: http://docs.opencv.org/java/org/opencv/objdetect/CascadeClassifier.html#detectMultiScale(org.opencv.core.Mat, org.opencv.core.MatOfRect, double, int, int, org.opencv.core.Size, org.opencv.core.Size)
507 | 	 * 
508 | 	 * A simpler version of detect() that doesn't need these arguments is also available.
509 | 	 * 
510 | 	 * @param scaleFactor
511 | 	 * @param minNeighbors
512 | 	 * @param flags
513 | 	 * @param minSize
514 | 	 * @param maxSize
515 | 	 * @return
516 | 	 * 		An array of java.awt.Rectangle objects with the location, width, and height of each detected object.
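(Editor's note: a hedged face-detection sketch built from loadCascade() and the detect() methods documented here, mirroring the bundled FaceDetection example in spirit; the image name is illustrative.)

```java
// Load the bundled frontal-face cascade and draw a box around each detection.
import gab.opencv.*;
import java.awt.Rectangle;

OpenCV opencv;

void setup() {
  size(640, 480);
  opencv = new OpenCV(this, loadImage("test.jpg"));
  opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);

  image(opencv.getInput(), 0, 0);
  noFill();
  stroke(0, 255, 0);
  for (Rectangle face : opencv.detect()) {
    rect(face.x, face.y, face.width, face.height);
  }
}
```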
517 | 	 */
518 | 	public Rectangle[] detect(double scaleFactor, int minNeighbors, int flags, int minSize, int maxSize){
519 | 		Size minS = new Size(minSize, minSize);
520 | 		Size maxS = new Size(maxSize, maxSize);
521 | 		
522 | 		MatOfRect detections = new MatOfRect();
523 | 		classifier.detectMultiScale(getCurrentMat(), detections, scaleFactor, minNeighbors, flags, minS, maxS);
524 | 		
525 | 		return OpenCV.toProcessing(detections.toArray());
526 | 	}
527 | 	
528 | 	/**
529 | 	 * Detect objects using the cascade classifier. loadCascade() must already
530 | 	 * have been called to set up the classifier.
531 | 	 * 
532 | 	 * @return
533 | 	 * 		An array of java.awt.Rectangle objects with the location, width, and height of each detected object.
534 | 	 */
535 | 	public Rectangle[] detect(){
536 | 		MatOfRect detections = new MatOfRect();
537 | 		classifier.detectMultiScale(getCurrentMat(), detections);
538 | 		
539 | 		return OpenCV.toProcessing(detections.toArray());
540 | 	}
541 | 	
542 | 	/**
543 | 	 * Set up background subtraction. After calling this function,
544 | 	 * updateBackground() must be called with each new frame
545 | 	 * you want to add to the running background subtraction calculation.
546 | 	 * 
547 | 	 * For details on the arguments, see:
548 | 	 * http://docs.opencv.org/java/org/opencv/video/BackgroundSubtractorMOG.html#BackgroundSubtractorMOG(int, int, double)
549 | 	 * 
550 | 	 * @param history
551 | 	 * @param nMixtures
552 | 	 * @param backgroundRatio
553 | 	 */
554 | 	public void startBackgroundSubtraction(int history, int nMixtures, double backgroundRatio){
555 | 		backgroundSubtractor = createBackgroundSubtractorMOG(history, nMixtures, backgroundRatio);
556 | 	}
557 | 	
558 | 	/**
559 | 	 * Update the running background for background subtraction based on
560 | 	 * the current image loaded into OpenCV. startBackgroundSubtraction()
561 | 	 * must have been called before this to set up the background subtractor.
562 | 	 * 
563 | 	 */
564 | 	public void updateBackground(){
565 | 		Mat foreground = imitate(getCurrentMat());
566 | 		backgroundSubtractor.apply(getCurrentMat(), foreground, 0.05);
567 | 		setGray(foreground);
568 | 	}
569 | 	
570 | 	/**
571 | 	 * Calculate the optical flow of the current image relative
572 | 	 * to a running series of images (typically frames from video).
573 | 	 * Optical flow is useful for detecting what parts of the image
574 | 	 * are moving and in what direction.
575 | 	 * 
576 | 	 */
577 | 	public void calculateOpticalFlow(){
578 | 		flow.calculateOpticalFlow(getCurrentMat());
579 | 	}
580 | 	
581 | 	/*
582 | 	 * Get the total optical flow within a region of the image.
583 | 	 * Be sure to call calculateOpticalFlow() first.
584 | 	 * 
585 | 	 */
586 | 	public PVector getTotalFlowInRegion(int x, int y, int w, int h) {
587 | 		return flow.getTotalFlowInRegion(x, y, w, h);
588 | 	}
589 | 	
590 | 	/*
591 | 	 * Get the average optical flow within a region of the image.
592 | 	 * Be sure to call calculateOpticalFlow() first.
593 | 	 * 
594 | 	 */
595 | 	public PVector getAverageFlowInRegion(int x, int y, int w, int h) {
596 | 		return flow.getAverageFlowInRegion(x,y,w,h);
597 | 	}
598 | 	
599 | 	/*
600 | 	 * Get the total optical flow for the entire image.
601 | 	 * Be sure to call calculateOpticalFlow() first.
602 | 	 */
603 | 	public PVector getTotalFlow() {
604 | 		return flow.getTotalFlow();
605 | 	}
606 | 	
607 | 	/*
608 | 	 * Get the average optical flow for the entire image.
609 | 	 * Be sure to call calculateOpticalFlow() first.
610 | 	 */
611 | 	public PVector getAverageFlow() {
612 | 		return flow.getAverageFlow();
613 | 	}
614 | 	
615 | 	/*
616 | 	 * Get the optical flow at a single point in the image.
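(Editor's note: a hedged background-subtraction loop per startBackgroundSubtraction()/updateBackground() above, movie-based like the bundled BackgroundSubtraction example; the file name and parameter values are illustrative.)

```java
// Feed each video frame into the running MOG background model;
// updateBackground() leaves the foreground mask as the working gray image.
import gab.opencv.*;
import processing.video.*;

OpenCV opencv;
Movie video;

void setup() {
  size(720, 480);
  video = new Movie(this, "street.mov");
  opencv = new OpenCV(this, 720, 480);
  opencv.startBackgroundSubtraction(5, 3, 0.5); // history, nMixtures, backgroundRatio
  video.loop();
}

void draw() {
  opencv.loadImage(video);
  opencv.updateBackground();
  image(opencv.getOutput(), 0, 0); // white = moving foreground
}

void movieEvent(Movie m) {
  m.read();
}
```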
617 | 	 * Be sure to call calculateOpticalFlow() first.
618 | 	 */
619 | 	public PVector getFlowAt(int x, int y){
620 | 		return flow.getFlowAt(x,y);
621 | 	}
622 | 	
623 | 	/*
624 | 	 * Draw the optical flow.
625 | 	 * Be sure to call calculateOpticalFlow() first.
626 | 	 */
627 | 	public void drawOpticalFlow(){
628 | 		flow.draw();
629 | 	}
630 | 
631 | 	/**
632 | 	 * Flip the current image.
633 | 	 * 
634 | 	 * @param direction
635 | 	 * 		One of: OpenCV.HORIZONTAL, OpenCV.VERTICAL, or OpenCV.BOTH
636 | 	 */
637 | 	public void flip(int direction){
638 | 		Core.flip(getCurrentMat(), getCurrentMat(), direction);
639 | 	}
640 | 	
641 | 	/**
642 | 	 * 
643 | 	 * Adjust the contrast of the image. Works on color or black and white images.
644 | 	 * 
645 | 	 * @param amt
646 | 	 * 		Amount of contrast to apply. 0-1.0 reduces contrast. Above 1.0 increases contrast.
647 | 	 * 
648 | 	 **/
649 | 	public void contrast(float amt){
650 | 		Scalar modifier;
651 | 		if(useColor){
652 | 			modifier = new Scalar(amt,amt,amt,1);
653 | 
654 | 		} else{
655 | 			modifier = new Scalar(amt);
656 | 		}
657 | 		
658 | 		Core.multiply(getCurrentMat(), modifier, getCurrentMat());
659 | 	}
660 | 	
661 | 	/**
662 | 	 * Get the x-y location of the maximum value in the current image.
663 | 	 * 
664 | 	 * @return 
665 | 	 * 		A PVector with the location of the maximum value.
666 | 	 */
667 | 	public PVector max(){
668 | 		MinMaxLocResult r = Core.minMaxLoc(getCurrentMat());
669 | 		return OpenCV.pointToPVector(r.maxLoc);
670 | 	}
671 | 	
672 | 	/**
673 | 	 * Get the x-y location of the minimum value in the current image.
674 | 	 * 
675 | 	 * @return 
676 | 	 * 		A PVector with the location of the minimum value.
677 | 	 */
678 | 	public PVector min(){
679 | 		MinMaxLocResult r = Core.minMaxLoc(getCurrentMat());
680 | 		return OpenCV.pointToPVector(r.minLoc);
681 | 	}
682 | 	
683 | 	/**
684 | 	 * Helper function to convert an OpenCV Point into a Processing PVector
685 | 	 * 
686 | 	 * @param p
687 | 	 * 		A Point
688 | 	 * @return 
689 | 	 * 		A PVector
690 | 	 */
691 | 	public static PVector pointToPVector(Point p){
692 | 		return new PVector((float)p.x, (float)p.y);
693 | 	}
694 | 	
695 | 	
696 | 	/**
697 | 	 * Adjust the brightness of the image. Works on color or black and white images.
698 | 	 * 
699 | 	 * @param amt
700 | 	 * 		The amount to brighten the image. Ranges -255 to 255.
701 | 	 * 
702 | 	 **/
703 | 	public void brightness(int amt){
704 | 		Scalar modifier;
705 | 		if(useColor){
706 | 			modifier = new Scalar(amt,amt,amt, 1);
707 | 
708 | 		} else{
709 | 			modifier = new Scalar(amt);
710 | 		}
711 | 		
712 | 		Core.add(getCurrentMat(), modifier, getCurrentMat());
713 | 	}
714 | 	
715 | 	/**
716 | 	 * Helper to create a new OpenCV Mat whose channels and
717 | 	 * bit-depth match an existing Mat.
718 | 	 * 
719 | 	 * @param m
720 | 	 * 		The Mat to match
721 | 	 * @return
722 | 	 * 		A new Mat
723 | 	 */
724 | 	public static Mat imitate(Mat m){
725 | 		return new Mat(m.height(), m.width(), m.type());
726 | 	}
727 | 	
728 | 	/**
729 | 	 * Calculate the difference between the current image
730 | 	 * loaded into OpenCV and a second image. The result is stored
731 | 	 * in the loaded image in OpenCV. Works on both color and grayscale
732 | 	 * images.
733 | 	 * 
734 | 	 * @param img
735 | 	 * 		A PImage to diff against.
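(Editor's note: a hedged frame-differencing sketch using diff() as documented above; the image names and threshold value are placeholders. threshold() is documented just below.)

```java
// Compute |before - after| and keep only strong changes.
import gab.opencv.*;

void setup() {
  size(640, 480);
  PImage before = loadImage("before.jpg");
  PImage after  = loadImage("after.jpg");

  OpenCV opencv = new OpenCV(this, before); // grayscale working image
  opencv.diff(after);                       // absolute difference, stored in place
  opencv.threshold(40);                     // binarize: white where frames differ
  image(opencv.getOutput(), 0, 0);
}
```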
736 | */ 737 | public void diff(PImage img){ 738 | Mat imgMat = imitate(getColor()); 739 | toCv(img, imgMat); 740 | 741 | Mat dst = imitate(getCurrentMat()); 742 | 743 | if(useColor){ 744 | ARGBtoBGRA(imgMat, imgMat); 745 | Core.absdiff(getCurrentMat(), imgMat, dst); 746 | } else { 747 | Core.absdiff(getCurrentMat(), OpenCV.gray(imgMat), dst); 748 | } 749 | 750 | dst.assignTo(getCurrentMat()); 751 | } 752 | 753 | /** 754 | * A helper function that diffs two Mats using absdiff. 755 | * Places the result back into mat1 756 | * 757 | * @param mat1 758 | * The destination Mat 759 | * @param mat2 760 | * The Mat to diff against 761 | */ 762 | public static void diff(Mat mat1, Mat mat2){ 763 | Mat dst = imitate(mat1); 764 | Core.absdiff(mat1, mat2, dst); 765 | dst.assignTo(mat1); 766 | } 767 | 768 | /** 769 | * Apply a global threshold to an image. Produces a binary image 770 | * with white pixels where the original image was above the threshold 771 | * and black where it was below. 772 | * 773 | * @param threshold 774 | * An int from 0-255. 775 | */ 776 | public void threshold(int threshold){ 777 | Imgproc.threshold(getCurrentMat(), getCurrentMat(), threshold, 255, Imgproc.THRESH_BINARY); 778 | } 779 | 780 | /** 781 | * Apply a global threshold to the image. The threshold is determined by Otsu's method, which 782 | * attempts to divide the image at a threshold which minimizes the variance of pixels in the black 783 | * and white regions. 784 | * 785 | * See: https://en.wikipedia.org/wiki/Otsu's_method 786 | */ 787 | public void threshold() { 788 | Imgproc.threshold(getCurrentMat(), getCurrentMat(), 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU); 789 | } 790 | 791 | /** 792 | * Apply an adaptive threshold to an image. Produces a binary image 793 | * with white pixels where the original image was above the threshold 794 | * and black where it was below. 795 | * 796 | * See: 797 | * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#adaptiveThreshold(org.opencv.core.Mat, org.opencv.core.Mat, double, int, int, int, double) 798 | * 799 | * @param blockSize 800 | * The size of the pixel neighborhood to use. 801 | * @param c 802 | * A constant subtracted from the mean of each neighborhood. 803 | */ 804 | public void adaptiveThreshold(int blockSize, int c){ 805 | try{ 806 | Imgproc.adaptiveThreshold(getCurrentMat(), getCurrentMat(), 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, blockSize, c); 807 | } catch(CvException e){ 808 | PApplet.println("ERROR: adaptiveThreshold function only works on gray images."); 809 | } 810 | } 811 | 812 | /** 813 | * Normalize the histogram of the image. This will spread the image's color 814 | * spectrum over the full 0-255 range. Only works on grayscale images. 815 | * 816 | * 817 | * See: http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#equalizeHist(org.opencv.core.Mat, org.opencv.core.Mat) 818 | * 819 | */ 820 | public void equalizeHistogram(){ 821 | try{ 822 | Imgproc.equalizeHist(getCurrentMat(), getCurrentMat()); 823 | } catch(CvException e){ 824 | PApplet.println("ERROR: equalizeHistogram only works on a gray image."); 825 | } 826 | } 827 | 828 | /** 829 | * Invert the image. 830 | * See: http://docs.opencv.org/java/org/opencv/core/Core.html#bitwise_not(org.opencv.core.Mat, org.opencv.core.Mat) 831 | * 832 | */ 833 | public void invert(){ 834 | Core.bitwise_not(getCurrentMat(),getCurrentMat()); 835 | } 836 | 837 | /** 838 | * Dilate the image. Dilation is a morphological operation (i.e. 
it affects the shape) often used to
839 | 	 * close holes in contours. It expands white areas of the image.
840 | 	 * 
841 | 	 * See:
842 | 	 * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#dilate(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)
843 | 	 * 
844 | 	 */
845 | 	public void dilate(){
846 | 		Imgproc.dilate(getCurrentMat(), getCurrentMat(), new Mat());
847 | 	}
848 | 	
849 | 	/**
850 | 	 * Erode the image. Erosion is a morphological operation (i.e. it affects the shape) often used to
851 | 	 * remove small specks of noise. It contracts white areas of the image.
852 | 	 * 
853 | 	 * See:
854 | 	 * http://docs.opencv.org/java/org/opencv/imgproc/Imgproc.html#erode(org.opencv.core.Mat, org.opencv.core.Mat, org.opencv.core.Mat)
855 | 	 * 
856 | 	 */
857 | 	public void erode(){
858 | 		Imgproc.erode(getCurrentMat(), getCurrentMat(), new Mat());
859 | 	}
860 | 	
861 | 	/**
862 | 	 * Apply a morphological operation (e.g., opening, closing) to the image with a given kernel element.
863 | 	 * 
864 | 	 * See:
865 | 	 * http://docs.opencv.org/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.html
866 | 	 * 
867 | 	 * @param operation
868 | 	 * 		The morphological operation to apply: Imgproc.MORPH_CLOSE, MORPH_OPEN,
869 | 	 * 		MORPH_TOPHAT, MORPH_BLACKHAT, MORPH_GRADIENT.
870 | 	 * @param kernelElement
871 | 	 * 		The shape to apply the operation with: Imgproc.MORPH_RECT, MORPH_CROSS, or MORPH_ELLIPSE.
872 | 	 * @param width
873 | 	 * 		Width of the shape.
874 | 	 * @param height
875 | 	 * 		Height of the shape.
876 | 	 */
877 | 	public void morphX(int operation, int kernelElement, int width, int height) {
878 | 		Mat kernel = Imgproc.getStructuringElement(kernelElement, new Size(width, height));
879 | 		Imgproc.morphologyEx(getCurrentMat(), getCurrentMat(), operation, kernel);
880 | 	}
881 | 	
882 | 	/**
883 | 	 * Close the image with a circle of a given size.
884 | 	 * 
885 | 	 * See:
886 | 	 * http://docs.opencv.org/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.html#closing
887 | 	 * 
888 | 	 * @param size
889 | 	 * 		Radius of the circle to close with.
890 | 	 */
891 | 	public void close(int size) {
892 | 		Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(size, size));
893 | 		Imgproc.morphologyEx(getCurrentMat(), getCurrentMat(), Imgproc.MORPH_CLOSE, kernel);
894 | 	}
895 | 	
896 | 	/**
897 | 	 * Open the image with a circle of a given size.
898 | 	 * 
899 | 	 * See:
900 | 	 * http://docs.opencv.org/doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.html#opening
901 | 	 * 
902 | 	 * @param size
903 | 	 * 		Radius of the circle to open with.
904 | 	 */
905 | 	public void open(int size) {
906 | 		Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(size, size));
907 | 		Imgproc.morphologyEx(getCurrentMat(), getCurrentMat(), Imgproc.MORPH_OPEN, kernel);
908 | 	}
909 | 	
910 | 	/**
911 | 	 * Blur an image symmetrically by a given number of pixels.
912 | 	 * 
913 | 	 * @param blurSize
914 | 	 * 		int - the amount to blur by in x- and y-directions.
915 | 	 */
916 | 	public void blur(int blurSize){
917 | 		Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurSize, blurSize));
918 | 	}
919 | 
920 | 	/**
921 | 	 * Blur an image asymmetrically by a different number of pixels in x- and y-directions.
922 | 	 * 
923 | 	 * @param blurW
924 | 	 * 		amount to blur in the x-direction
925 | 	 * @param blurH
926 | 	 * 		amount to blur in the y-direction
927 | 	 */
928 | 	public void blur(int blurW, int blurH){
929 | 		Imgproc.blur(getCurrentMat(), getCurrentMat(), new Size(blurW, blurH));
930 | 	}
931 | 	
932 | 	/**
933 | 	 * Find edges in the image using Canny edge detection.
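(Editor's note: a hedged cleanup chain combining the morphology operations above; the image name, threshold, and kernel sizes are illustrative, not taken from the bundled examples.)

```java
// Threshold, then clean the binary image: open removes specks,
// close fills holes, dilate grows the surviving blobs.
import gab.opencv.*;

void setup() {
  size(640, 480);
  OpenCV opencv = new OpenCV(this, loadImage("test.jpg"));
  opencv.threshold(100);
  opencv.open(3);   // erode-then-dilate: removes small white specks
  opencv.close(7);  // dilate-then-erode: fills small holes in white regions
  opencv.dilate();  // one extra growth step with the default kernel
  image(opencv.getOutput(), 0, 0);
}
```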
934 | 	 * 
935 | 	 * @param lowThreshold
936 | 	 * @param highThreshold
937 | 	 */
938 | 	public void findCannyEdges(int lowThreshold, int highThreshold){
939 | 		Imgproc.Canny(getCurrentMat(), getCurrentMat(), lowThreshold, highThreshold);
940 | 	}
941 | 	
942 | 	public void findSobelEdges(int dx, int dy){
943 | 		Mat sobeled = new Mat(getCurrentMat().height(), getCurrentMat().width(), CvType.CV_32F);
944 | 		Imgproc.Sobel(getCurrentMat(), sobeled, CvType.CV_32F, dx, dy);
945 | 		sobeled.convertTo(getCurrentMat(), getCurrentMat().type());
946 | 	}
947 | 	
948 | 	public void findScharrEdges(int direction){
949 | 		if(direction == HORIZONTAL){
950 | 			Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 1, 0);
951 | 		}
952 | 		
953 | 		if(direction == VERTICAL){
954 | 			Imgproc.Scharr(getCurrentMat(), getCurrentMat(), -1, 0, 1);
955 | 		}
956 | 		
957 | 		if(direction == BOTH){
958 | 			Mat hMat = imitate(getCurrentMat());
959 | 			Mat vMat = imitate(getCurrentMat());
960 | 			Imgproc.Scharr(getCurrentMat(), hMat, -1, 1, 0);
961 | 			Imgproc.Scharr(getCurrentMat(), vMat, -1, 0, 1);
962 | 			Core.add(vMat,hMat, getCurrentMat());
963 | 		}
964 | 	}
965 | 	
966 | 	public ArrayList<Contour> findContours(){
967 | 		return findContours(true, false);
968 | 	}
969 | 	
970 | 	public ArrayList<Contour> findContours(boolean findHoles, boolean sort){
971 | 		ArrayList<Contour> result = new ArrayList<Contour>();
972 | 		
973 | 		ArrayList<MatOfPoint> contourMat = new ArrayList<MatOfPoint>();
974 | 		try{
975 | 			int contourFindingMode = (findHoles ? Imgproc.RETR_LIST : Imgproc.RETR_EXTERNAL);
976 | 			
977 | 			Imgproc.findContours(getCurrentMat(), contourMat, new Mat(), contourFindingMode, Imgproc.CHAIN_APPROX_NONE);
978 | 		} catch(CvException e){
979 | 			PApplet.println("ERROR: findContours only works with a gray image.");
980 | 		}
981 | 		for (MatOfPoint c : contourMat) {
982 | 			result.add(new Contour(parent, c));
983 | 		}
984 | 		
985 | 		if(sort){
986 | 			Collections.sort(result, new ContourComparator());
987 | 		}
988 | 		
989 | 		return result;
990 | 	}
991 | 	
992 | 	public ArrayList<Line> findLines(int threshold, double minLineLength, double maxLineGap){
993 | 		ArrayList<Line> result = new ArrayList<Line>();
994 | 		
995 | 		Mat lineMat = new Mat();
996 | 		Imgproc.HoughLinesP(getCurrentMat(), lineMat, 1, PConstants.PI/180.0, threshold, minLineLength, maxLineGap);
997 | 		for (int i = 0; i < lineMat.width(); i++) {
998 | 			double[] coords = lineMat.get(0, i);
999 | 			result.add(new Line(coords[0], coords[1], coords[2], coords[3]));
1000 | 		}
1001 | 		
1002 | 		return result;
1003 | 	}
1004 | 	
1005 | 	public ArrayList<PVector> findChessboardCorners(int patternWidth, int patternHeight){
1006 | 		MatOfPoint2f corners = new MatOfPoint2f();
1007 | 		Calib3d.findChessboardCorners(getCurrentMat(), new Size(patternWidth,patternHeight), corners);
1008 | 		return matToPVectors(corners);
1009 | 	}
1010 | 	
1011 | 	/**
1012 | 	 * 
1013 | 	 * @param mat
1014 | 	 * 		The mat from which to calculate the histogram. Get this from getGray(), getR(), getG(), getB(), etc.
1015 | 	 * 		By default this will normalize the histogram (scale the values to 0.0-1.0). Pass false as the third argument (in the three-argument version) to keep values unnormalized.
1016 | 	 * @param numBins
1017 | 	 * 		The number of bins into which the histogram should be divided.
1018 | 	 * @return
1019 | 	 * 		A Histogram object that you can call draw() on.
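(Editor's note: a hedged histogram sketch per findHistogram() above, in the spirit of the bundled FindHistogram example; the image name and drawing coordinates are illustrative.)

```java
// Compute a 256-bin normalized histogram of the gray channel and draw it.
import gab.opencv.*;

OpenCV opencv;
Histogram grayHist;

void setup() {
  size(640, 400);
  opencv = new OpenCV(this, loadImage("test.jpg"));
  grayHist = opencv.findHistogram(opencv.getGray(), 256); // values scaled to 0.0-1.0
}

void draw() {
  background(0);
  image(opencv.getOutput(), 0, 0);
  fill(255);
  noStroke();
  grayHist.draw(10, 280, 300, 100); // bars rise upward from the bottom of a 300x100 box
}
```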
1020 | 	 */
1021 | 	public Histogram findHistogram(Mat mat, int numBins){
1022 | 		return findHistogram(mat, numBins, true);
1023 | 	}
1024 | 	
1025 | 	
1026 | 	public Histogram findHistogram(Mat mat, int numBins, boolean normalize){
1027 | 		
1028 | 		MatOfInt channels = new MatOfInt(0);
1029 | 		MatOfInt histSize = new MatOfInt(numBins);
1030 | 		float[] r = {0f, 256f};
1031 | 		MatOfFloat ranges = new MatOfFloat(r);
1032 | 		Mat hist = new Mat();
1033 | 		
1034 | 		ArrayList<Mat> images = new ArrayList<Mat>();
1035 | 		images.add(mat);
1036 | 		
1037 | 		Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges);
1038 | 		
1039 | 		if(normalize){
1040 | 			Core.normalize(hist, hist);
1041 | 		}
1042 | 		
1043 | 		return new Histogram(parent, hist);
1044 | 	}
1045 | 	
1046 | 	/**
1047 | 	 * 
1048 | 	 * Filter the image for values between a lower and upper bound.
1049 | 	 * Converts the current image into a binary image with white where pixel
1050 | 	 * values were within bounds and black elsewhere.
1051 | 	 * 
1052 | 	 * @param lowerBound
1053 | 	 * @param upperBound
1054 | 	 */
1055 | 	public void inRange(int lowerBound, int upperBound){
1056 | 		Core.inRange(getCurrentMat(), new Scalar(lowerBound), new Scalar(upperBound), getCurrentMat());
1057 | 	}
1058 | 	
1059 | 	/**
1060 | 	 * 
1061 | 	 * @param src
1062 | 	 * 		A Mat of type 8UC4 with channels arranged as BGRA.
1063 | 	 * @return
1064 | 	 * 		A Mat of type 8UC1 in grayscale.
1065 | 	 */
1066 | 	public static Mat gray(Mat src){
1067 | 		Mat result = new Mat(src.height(), src.width(), CvType.CV_8UC1);
1068 | 		Imgproc.cvtColor(src, result, Imgproc.COLOR_BGRA2GRAY);
1069 | 		
1070 | 		return result;
1071 | 	}
1072 | 	
1073 | 	public void gray(){
1074 | 		matGray = gray(matBGRA);
1075 | 		useGray(); //???
1076 | 	}
1077 | 	
1078 | 	/**
1079 | 	 * Set a Region of Interest within the image. Subsequent image processing
1080 | 	 * functions will apply to this ROI rather than the full image.
1081 | 	 * The full image will still be displayed in output.
1082 | 	 * 
1083 | 	 * @return
1084 | 	 * 		False if the requested ROI exceeds the bounds of the working image.
1085 | 	 * 		True if the ROI was successfully set.
1086 | 	 */
1087 | 	public boolean setROI(int x, int y, int w, int h){
1088 | 		if(x < 0 ||
1089 | 		   x + w > width ||
1090 | 		   y < 0 ||
1091 | 		   y + h > height){
1092 | 			return false;
1093 | 		} else{
1094 | 			roiWidth = w;
1095 | 			roiHeight = h;
1096 | 			
1097 | 			if(useColor){
1098 | 				nonROImat = matBGRA;
1099 | 				matROI = new Mat(matBGRA, new Rect(x, y, w, h));
1100 | 			} else {
1101 | 				nonROImat = matGray;
1102 | 				matROI = new Mat(matGray, new Rect(x, y, w, h));
1103 | 			}
1104 | 			useROI = true;
1105 | 			
1106 | 			return true;
1107 | 		}
1108 | 	}
1109 | 	
1110 | 	public void releaseROI(){
1111 | 		useROI = false;
1112 | 	}
1113 | 	
1114 | 	/**
1115 | 	 * Load an image from a path.
1116 | 	 * 
1117 | 	 * @param imgPath
1118 | 	 * 		String with the path to the image
1119 | 	 */
1120 | 	public void loadImage(String imgPath){
1121 | 		loadImage(parent.loadImage(imgPath));
1122 | 	}
1123 | 	
1124 | 	// NOTE: We're not handling the signed/unsigned
1125 | 	// conversion. Is that any issue?
1126 | 	public void loadImage(PImage img){
1127 | 		// FIXME: is there a better way to hold onto
1128 | 		// this?
1129 | 		inputImage = img;
1130 | 		
1131 | 		toCv(img, matBGRA);
1132 | 		ARGBtoBGRA(matBGRA,matBGRA);
1133 | 		populateBGRA();
1134 | 		
1135 | 		if(useColor){
1136 | 			useColor(this.colorSpace);
1137 | 		} else {
1138 | 			gray();
1139 | 		}
1140 | 		
1141 | 	}
1142 | 	
1143 | 	public static void ARGBtoBGRA(Mat rgba, Mat bgra){
1144 | 		ArrayList<Mat> channels = new ArrayList<Mat>();
1145 | 		Core.split(rgba, channels);
1146 | 		
1147 | 		ArrayList<Mat> reordered = new ArrayList<Mat>();
1148 | 		// Starts as ARGB.
1149 | // Make into BGRA.
1150 | 
1151 | reordered.add(channels.get(3));
1152 | reordered.add(channels.get(2));
1153 | reordered.add(channels.get(1));
1154 | reordered.add(channels.get(0));
1155 | 
1156 | Core.merge(reordered, bgra);
1157 | }
1158 | 
1159 | 
1160 | public int getSize(){
1161 | return width * height;
1162 | }
1163 | 
1164 | /**
1165 | *
1166 | * Convert a 4 channel OpenCV Mat object into
1167 | * pixels to be packed into a 4 channel ARGB PImage's
1168 | * pixel array.
1169 | *
1170 | * @param m
1171 | * A 4 channel Mat in BGRA order; read little-endian, its bytes form ARGB ints
1172 | * @return
1173 | * An int[] formatted to be the pixels of a PImage
1174 | */
1175 | public int[] matToARGBPixels(Mat m){
1176 | int pImageChannels = 4;
1177 | int numPixels = m.width() * m.height();
1178 | int[] intPixels = new int[numPixels];
1179 | byte[] matPixels = new byte[numPixels * pImageChannels];
1180 | 
1181 | m.get(0, 0, matPixels);
1182 | ByteBuffer.wrap(matPixels).order(ByteOrder.LITTLE_ENDIAN).asIntBuffer().get(intPixels);
1183 | return intPixels;
1184 | }
1185 | 
1186 | 
1187 | /**
1188 | * Convert an OpenCV Mat object into a PImage
1189 | * to be used in other Processing code.
1190 | * Copies the Mat's pixel data into the PImage's pixel array.
1191 | * The copy is done in bulk, but it still touches every pixel, so it can be expensive for large images.
1192 | *
1193 | * (Mainly used internally by OpenCV. Inspired by toOf()
1194 | * from Kyle McDonald's ofxCv.)
1195 | *
1196 | * @param m
1197 | * A Mat you want converted
1198 | * @param img
1199 | * The PImage you want the Mat converted into.
1200 | */
1201 | public void toPImage(Mat m, PImage img){
1202 | img.loadPixels();
1203 | 
1204 | if(m.channels() == 3){
1205 | Mat m2 = new Mat();
1206 | Imgproc.cvtColor(m, m2, Imgproc.COLOR_RGB2RGBA);
1207 | img.pixels = matToARGBPixels(m2);
1208 | } else if(m.channels() == 1){
1209 | Mat m2 = new Mat();
1210 | Imgproc.cvtColor(m, m2, Imgproc.COLOR_GRAY2RGBA);
1211 | img.pixels = matToARGBPixels(m2);
1212 | } else if(m.channels() == 4){
1213 | img.pixels = matToARGBPixels(m);
1214 | }
1215 | 
1216 | img.updatePixels();
1217 | }
1218 | 
1219 | /**
1220 | * Convert a Processing PImage to an OpenCV Mat.
1221 | * (Inspired by toCv() from Kyle McDonald's ofxCv.)
1222 | *
1223 | * @param img
1224 | * The PImage to convert.
1225 | * @param m
1226 | * The Mat to receive the image data.
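 *
 * A minimal usage sketch, mirroring the library's own tests (assumes
 * an ARGB PImage named img; the destination Mat must be pre-allocated
 * as CV_8UC4 at the image's dimensions):
 *
 *   Mat m = new Mat(img.height, img.width, CvType.CV_8UC4);
 *   OpenCV.toCv(img, m);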
1227 | */
1228 | public static void toCv(PImage img, Mat m){
1229 | BufferedImage image = (BufferedImage)img.getNative();
1230 | int[] matPixels = ((DataBufferInt)image.getRaster().getDataBuffer()).getData();
1231 | 
1232 | ByteBuffer bb = ByteBuffer.allocate(matPixels.length * 4);
1233 | IntBuffer ib = bb.asIntBuffer();
1234 | ib.put(matPixels);
1235 | 
1236 | byte[] bvals = bb.array();
1237 | 
1238 | m.put(0, 0, bvals);
1239 | }
1240 | 
1241 | public static ArrayList<PVector> matToPVectors(MatOfPoint mat){
1242 | ArrayList<PVector> result = new ArrayList<PVector>();
1243 | Point[] points = mat.toArray();
1244 | for(int i = 0; i < points.length; i++){
1245 | result.add(new PVector((float)points[i].x, (float)points[i].y));
1246 | }
1247 | 
1248 | return result;
1249 | }
1250 | 
1251 | public static ArrayList<PVector> matToPVectors(MatOfPoint2f mat){
1252 | ArrayList<PVector> result = new ArrayList<PVector>();
1253 | Point[] points = mat.toArray();
1254 | for(int i = 0; i < points.length; i++){
1255 | result.add(new PVector((float)points[i].x, (float)points[i].y));
1256 | }
1257 | 
1258 | return result;
1259 | }
1260 | 
1261 | public String matToS(Mat mat){
1262 | return CvType.typeToString(mat.type()); // human-readable type string, e.g. "CV_8UC4"
1263 | }
1264 | 
1265 | public PImage getInput(){
1266 | return inputImage;
1267 | }
1268 | 
1269 | public PImage getOutput(){
1270 | if(useColor){
1271 | toPImage(matBGRA, outputImage);
1272 | } else {
1273 | toPImage(matGray, outputImage);
1274 | }
1275 | 
1276 | return outputImage;
1277 | }
1278 | 
1279 | public PImage getSnapshot(){
1280 | PImage result;
1281 | 
1282 | if(useROI){
1283 | result = getSnapshot(matROI);
1284 | } else {
1285 | if(useColor){
1286 | if(colorSpace == PApplet.HSB){
1287 | result = getSnapshot(matHSV);
1288 | } else {
1289 | result = getSnapshot(matBGRA);
1290 | }
1291 | } else {
1292 | result = getSnapshot(matGray);
1293 | }
1294 | }
1295 | return result;
1296 | }
1297 | 
1298 | public PImage getSnapshot(Mat m){
1299 | PImage result = parent.createImage(m.width(), m.height(), PApplet.ARGB);
1300 | toPImage(m, result);
1301 | return result;
1302 | }
1303 | 
1304 | public Mat getR(){
1305 | return matR;
1306 | }
1307 | 
1308 | public Mat getG(){
1309 | return matG;
1310 | }
1311 | 
1312 | public Mat getB(){
1313 | return matB;
1314 | }
1315 | 
1316 | public Mat getA(){
1317 | return matA;
1318 | }
1319 | 
1320 | public Mat getH(){
1321 | return matH;
1322 | }
1323 | 
1324 | public Mat getS(){
1325 | return matS;
1326 | }
1327 | 
1328 | public Mat getV(){
1329 | return matV;
1330 | }
1331 | 
1332 | public Mat getGray(){
1333 | return matGray;
1334 | }
1335 | 
1336 | public void setGray(Mat m){
1337 | matGray = m;
1338 | useColor = false;
1339 | }
1340 | 
1341 | public void setColor(Mat m){
1342 | matBGRA = m;
1343 | useColor = true;
1344 | }
1345 | 
1346 | public Mat getColor(){
1347 | return matBGRA;
1348 | }
1349 | 
1350 | public Mat getROI(){
1351 | return matROI;
1352 | }
1353 | 
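// A minimal sketch of the snapshot/ROI workflow above (hypothetical
// values; assumes an OpenCV instance named opencv inside a sketch):
//
//   opencv.loadImage("test.jpg");
//   if (opencv.setROI(10, 10, 100, 100)) {
//     PImage roiView = opencv.getSnapshot(); // copies just the ROI
//   }
//   opencv.releaseROI();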
1354 | private void welcome() {
1355 | System.out.println("Java OpenCV " + Core.VERSION);
1356 | }
1357 | 
1358 | /**
1359 | * Return the version of the library.
1360 | *
1361 | * @return the library version as a String
1362 | */
1363 | public static String version() {
1364 | return VERSION;
1365 | }
1366 | }
1367 | 
1368 | 
--------------------------------------------------------------------------------
/src/test/java/AlphaChannelTest.java:
--------------------------------------------------------------------------------
1 | import processing.core.PApplet;
2 | import processing.core.PImage;
3 | 
4 | import gab.opencv.*;
5 | import org.opencv.core.*;
6 | 
7 | public class AlphaChannelTest extends PApplet {
8 | 
9 | public static void main(String... args) {
10 | AlphaChannelTest sketch = new AlphaChannelTest();
11 | sketch.runSketch();
12 | }
13 | 
14 | public void settings() {
15 | size(800, 300);
16 | }
17 | 
18 | OpenCV opencv;
19 | 
20 | PImage input, output;
21 | 
22 | public void setup() {
23 | opencv = new OpenCV(this, 0, 0);
24 | input = createImage(400, 300, ARGB);
25 | 
26 | // fill red (note: this fill is replaced by the image loaded below)
27 | for (int y = 0; y < input.height; y++) {
28 | for (int x = 0; x < input.width; x++) {
29 | input.set(x, y, color(255, 0, 0));
30 | }
31 | }
32 | 
33 | input = loadImage("https://github.com/pjreddie/darknet/raw/master/data/dog.jpg");
34 | input.resize(400, 300);
35 | 
36 | Mat mat = new Mat(input.height, input.width, CvType.CV_8UC4);
37 | OpenCV.toCv(input, mat);
38 | 
39 | output = createImage(input.width, input.height, RGB);
40 | opencv.toPImage(mat, output);
41 | 
42 | println("first pixel of input: " + binary(input.pixels[0]));
43 | println("first pixel of mat:");
44 | println(mat.get(0, 0));
45 | println("first pixel of output: " + binary(output.pixels[0]));
46 | }
47 | 
48 | public void draw() {
49 | image(input, 0, 0);
50 | image(output, input.width, 0);
51 | }
52 | }
--------------------------------------------------------------------------------
/src/test/java/GrabImageTest.java:
--------------------------------------------------------------------------------
1 | import gab.opencv.OpenCV;
2 | import processing.core.PApplet;
3 | import processing.core.PImage;
4 | 
5 | public class GrabImageTest extends PApplet {
6 | 
7 | public static void main(String... args) {
8 | GrabImageTest sketch = new GrabImageTest();
9 | sketch.runSketch();
10 | }
11 | 
12 | public void settings() {
13 | size(640, 480);
14 | }
15 | 
16 | PImage testImage;
17 | OpenCV opencv;
18 | 
19 | public void setup() {
20 | testImage = loadImage(sketchPath("examples/BrightestPoint/robot_light.jpg"));
21 | opencv = new OpenCV(this, testImage.width, testImage.height);
22 | noLoop();
23 | }
24 | 
25 | public void draw() {
26 | opencv.useColor();
27 | opencv.loadImage(testImage);
28 | PImage result = opencv.getSnapshot();
29 | image(result, 0, 0);
30 | }
31 | }
--------------------------------------------------------------------------------
/src/test/java/OpticalFlowTest.java:
--------------------------------------------------------------------------------
1 | import gab.opencv.OpenCV;
2 | import processing.core.PApplet;
3 | import processing.core.PImage;
4 | 
5 | public class OpticalFlowTest extends PApplet {
6 | 
7 | public static void main(String... args) {
8 | OpticalFlowTest sketch = new OpticalFlowTest();
9 | sketch.runSketch();
10 | }
11 | 
12 | public void settings() {
13 | size(640, 480);
14 | }
15 | 
16 | PImage testImage;
17 | OpenCV opencv;
18 | 
19 | public void setup() {
20 | testImage = loadImage(sketchPath("examples/BrightestPoint/robot_light.jpg"));
21 | opencv = new OpenCV(this, testImage.width, testImage.height);
22 | }
23 | 
24 | public void draw() {
25 | opencv.loadImage(testImage);
26 | opencv.calculateOpticalFlow();
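// In a real sketch the flow field would typically be drawn after it is
// computed; a sketch, assuming the library's drawOpticalFlow() helper:
//   image(testImage, 0, 0);
//   opencv.drawOpticalFlow();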
27 | println("Optical flow was calculated");
28 | }
29 | }
--------------------------------------------------------------------------------