├── .gitignore ├── README.md ├── python ├── libs │ └── opencv_python_rpi3-3.1.0+39f5a48-cp34-cp34m-linux_armv7l.whl ├── README.md ├── files │ └── GripRunner.py └── samples │ └── frc_find_red_areas │ ├── frc_find_red_areas.py │ └── grip.py ├── java ├── src │ └── main │ │ └── java │ │ ├── edu │ │ └── wpi │ │ │ └── first │ │ │ └── wpilibj │ │ │ └── vision │ │ │ ├── VisionPipeline.java │ │ │ ├── VideoViewer.java │ │ │ └── GripRunner.java │ │ └── grip │ │ └── sample1 │ │ ├── FindRedAreasApp.java │ │ ├── find_red_areas.grip │ │ └── FindRedAreas.java ├── build.xml └── README.md ├── LICENSE.txt └── sample-pipelines └── find_red_areas.grip /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | *.pyc 3 | */__pycache__/* 4 | /.project 5 | /.classpath 6 | /.settings/ 7 | /java/target/ 8 | /java/.gradle/ 9 | /java/build/ 10 | /java/bin/ 11 | .DS_Store 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GRIP-code-generation 2 | ![Join the chat at https://gitter.im/WPIRoboticsProjects/GRIP](https://badges.gitter.im/Join%20Chat.svg) 3 | 4 | Examples and downloads for using generated GRIP pipelines 5 | -------------------------------------------------------------------------------- /python/libs/opencv_python_rpi3-3.1.0+39f5a48-cp34-cp34m-linux_armv7l.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WPIRoboticsProjects/GRIP-code-generation/HEAD/python/libs/opencv_python_rpi3-3.1.0+39f5a48-cp34-cp34m-linux_armv7l.whl -------------------------------------------------------------------------------- /java/src/main/java/edu/wpi/first/wpilibj/vision/VisionPipeline.java: -------------------------------------------------------------------------------- 1 | /*----------------------------------------------------------------------------*/ 2 | /* Copyright (c) FIRST 2016-2017. All Rights Reserved. */ 3 | /* Open Source Software - may be modified and shared by FRC teams. The code */ 4 | /* must be accompanied by the FIRST BSD license file in the root directory of */ 5 | /* the project. */ 6 | /*----------------------------------------------------------------------------*/ 7 | 8 | package edu.wpi.first.wpilibj.vision; 9 | 10 | import org.opencv.core.Mat; 11 | 12 | /** 13 | * A vision pipeline is responsible for running a group of 14 | * OpenCV algorithms to extract data from an image. 15 | * 16 | * @see VisionRunner 17 | * @see VisionThread 18 | */ 19 | public interface VisionPipeline { 20 | 21 | /** 22 | * Processes the image input and sets the result objects. 23 | * Implementations should make these objects accessible. 24 | */ 25 | void process(Mat image); 26 | 27 | } 28 | -------------------------------------------------------------------------------- /java/src/main/java/grip/sample1/FindRedAreasApp.java: -------------------------------------------------------------------------------- 1 | package grip.sample1; 2 | 3 | import static edu.wpi.first.wpilibj.vision.GripRunner.makeCamera; 4 | import static edu.wpi.first.wpilibj.vision.GripRunner.makeWindow; 5 | 6 | import edu.wpi.first.wpilibj.vision.GripRunner; 7 | import edu.wpi.first.wpilibj.vision.GripRunner.Listener; 8 | import edu.wpi.first.wpilibj.vision.VideoViewer; 9 | 10 | public class FindRedAreasApp { 11 | 12 | static final int IMG_WIDTH = 320; 13 | static final int IMG_HEIGHT = 240; 14 | 15 | final VideoViewer window; 16 | final Listener listener; 17 | final GripRunner gripRunner; 18 | 19 | public FindRedAreasApp() { 20 | this.window = makeWindow("GRIP", IMG_WIDTH, IMG_HEIGHT); 21 | this.listener = (this.window!=null) ? (processor -> { window.imshow(processor.rgbThresholdOutput());}) : null; 22 | this.gripRunner = new GripRunner<>( 23 | makeCamera(0, IMG_WIDTH, IMG_HEIGHT, -1.0), 24 | new FindRedAreas(), 25 | listener); 26 | } 27 | 28 | public static void main(String[] args) { 29 | FindRedAreasApp app = new FindRedAreasApp(); 30 | app.gripRunner.runForever(); 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /python/README.md: -------------------------------------------------------------------------------- 1 | # Python samples and code 2 | 3 | The `files` directory contains skeleton code that can be modified to run a GRIP-generated pipeline. 4 | 5 | The `samples` directory has sample programs that can be run directly. Just clone this repository and run the main python files. 6 | 7 | ## Using the samples 8 | 9 | The samples use Python 3 and require three libraries: 10 | 11 | 1. OpenCV 12 | 2. numpy (required by OpenCV) 13 | 3. pynetworktables 14 | 15 | These can all be installed with pip3. 16 | 17 | ```bash 18 | $ pip3 install numpy opencv-python pynetworktables 19 | ``` 20 | 21 | (if your default python version is at least 3+, the normal `pip` command should work) 22 | 23 | 24 | Then `cd` into the directory of the sample you want to run and use the `python3` command to run the sample, eg 25 | 26 | ```bash 27 | $ cd samples/frc_find_red_areas 28 | $ python3 frc_find_red_areas.py 29 | ``` 30 | 31 | (Again, if your default python version is at least 3+, just the `python` command should work) 32 | 33 | 34 | ## Binaries 35 | 36 | Since the Raspberry Pi 3 is a cheap ($35) and easy to use co-processor, we've created a Python 3 package for OpenCV for it and is available in the libs directory. To install it, you'll need to have the python `wheel` tool 37 | 38 | ```bash 39 | $ pip3 install wheel 40 | $ python3 -m wheel install opencv_python_rpi3-...whl 41 | ``` 42 | -------------------------------------------------------------------------------- /python/files/GripRunner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Simple skeleton program for running an OpenCV pipeline generated by GRIP and using NetworkTables to send data. 5 | 6 | Users need to: 7 | 8 | 1. Import the generated GRIP pipeline, which should be generated in the same directory as this file. 9 | 2. Set the network table server IP. This is usually the robots address (roborio-TEAM-frc.local) or localhost 10 | 3. Handle putting the generated code into NetworkTables 11 | """ 12 | 13 | import cv2 14 | from networktables import NetworkTable 15 | from grip import GripPipeline # TODO change the default module and class, if needed 16 | 17 | 18 | def extra_processing(pipeline: GripPipeline): 19 | """ 20 | Performs extra processing on the pipeline's outputs and publishes data to NetworkTables. 21 | :param pipeline: the pipeline that just processed an image 22 | :return: None 23 | """ 24 | # TODO: Users need to implement this. 25 | # Useful for converting OpenCV objects (e.g. contours) to something NetworkTables can understand. 26 | pass 27 | 28 | 29 | def main(): 30 | NetworkTable.setTeam('0000') # TODO set your team number 31 | NetworkTable.initialize() 32 | cap = cv2.VideoCapture(0) 33 | pipeline = GripPipeline() 34 | while True: 35 | ret, frame = cap.read() 36 | if ret: 37 | pipeline.process(frame) # TODO add extra parameters if the pipeline takes more than just a single image 38 | extra_processing(pipeline) 39 | 40 | 41 | if __name__ == '__main__': 42 | main() 43 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | * Copyright (c) 2016 WPI 2 | * All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions are met: 6 | * * Redistributions of source code must retain the above copyright 7 | * notice, this list of conditions and the following disclaimer. 8 | * * Redistributions in binary form must reproduce the above copyright 9 | * notice, this list of conditions and the following disclaimer in the 10 | * documentation and/or other materials provided with the distribution. 11 | * * Neither the name of WPI nor the 12 | * names of its contributors may be used to endorse or promote products 13 | * derived from this software without specific prior written permission. 14 | * 15 | * THIS SOFTWARE IS PROVIDED BY WPI AND CONTRIBUTORS "AS IS" AND ANY 16 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | * WARRANTIES OF MERCHANTABILITY NONINFRINGEMENT AND FITNESS FOR A PARTICULAR 18 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FIRST OR CONTRIBUTORS BE LIABLE FOR 19 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /python/samples/frc_find_red_areas/frc_find_red_areas.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Sample program that uses a generated GRIP pipeline to detect red areas in an image and publish them to NetworkTables. 5 | """ 6 | 7 | import cv2 8 | from networktables import NetworkTables 9 | from grip import GripPipeline 10 | 11 | 12 | def extra_processing(pipeline): 13 | """ 14 | Performs extra processing on the pipeline's outputs and publishes data to NetworkTables. 15 | :param pipeline: the pipeline that just processed an image 16 | :return: None 17 | """ 18 | center_x_positions = [] 19 | center_y_positions = [] 20 | widths = [] 21 | heights = [] 22 | 23 | # Find the bounding boxes of the contours to get x, y, width, and height 24 | for contour in pipeline.filter_contours_output: 25 | x, y, w, h = cv2.boundingRect(contour) 26 | center_x_positions.append(x + w / 2) # X and Y are coordinates of the top-left corner of the bounding box 27 | center_y_positions.append(y + h / 2) 28 | widths.append(w) 29 | heights.append(h) 30 | 31 | # Publish to the '/vision/red_areas' network table 32 | table = NetworkTables.getTable('/vision/red_areas') 33 | table.putNumberArray('x', center_x_positions) 34 | table.putNumberArray('y', center_y_positions) 35 | table.putNumberArray('width', widths) 36 | table.putNumberArray('height', heights) 37 | 38 | 39 | def main(): 40 | print('Initializing NetworkTables') 41 | NetworkTables.setClientMode() 42 | NetworkTables.setIPAddress('localhost') 43 | NetworkTables.initialize() 44 | 45 | print('Creating video capture') 46 | cap = cv2.VideoCapture(0) 47 | 48 | print('Creating pipeline') 49 | pipeline = GripPipeline() 50 | 51 | print('Running pipeline') 52 | while cap.isOpened(): 53 | have_frame, frame = cap.read() 54 | if have_frame: 55 | pipeline.process(frame) 56 | extra_processing(pipeline) 57 | 58 | print('Capture closed') 59 | 60 | 61 | if __name__ == '__main__': 62 | main() 63 | -------------------------------------------------------------------------------- /java/build.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /java/src/main/java/edu/wpi/first/wpilibj/vision/VideoViewer.java: -------------------------------------------------------------------------------- 1 | package edu.wpi.first.wpilibj.vision; 2 | 3 | import java.awt.BorderLayout; 4 | import java.awt.Dimension; 5 | import java.awt.Image; 6 | import java.awt.event.KeyListener; 7 | import java.awt.event.MouseListener; 8 | import java.awt.image.BufferedImage; 9 | import java.awt.image.DataBufferByte; 10 | 11 | import javax.swing.ImageIcon; 12 | import javax.swing.JFrame; 13 | import javax.swing.JLabel; 14 | import javax.swing.JScrollPane; 15 | import javax.swing.UIManager; 16 | import javax.swing.UnsupportedLookAndFeelException; 17 | import javax.swing.WindowConstants; 18 | 19 | import org.opencv.core.Mat; 20 | 21 | /** 22 | * Window for displaying images. 23 | * C++ developers can display images with the imshow function in 24 | * the HighGUI package. This class provides similar functionality 25 | * in Java. 26 | */ 27 | public class VideoViewer { 28 | private final JFrame frame; 29 | private final JLabel imageView; 30 | 31 | public VideoViewer() { 32 | this("OpenCV", 640, 480); 33 | } 34 | 35 | public VideoViewer(String windowName, int width, int height) { 36 | setSystemLookAndFeel(); 37 | 38 | frame = new JFrame(windowName); 39 | imageView = new JLabel(); 40 | final JScrollPane imageScrollPane = new JScrollPane(imageView); 41 | imageScrollPane.setPreferredSize(new Dimension(width, height)); 42 | frame.add(imageScrollPane, BorderLayout.CENTER); 43 | frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); 44 | frame.setLocationRelativeTo(null); 45 | frame.setVisible(true); 46 | } 47 | 48 | public void imshow(Mat image) { 49 | if (image.empty()) { 50 | System.err.println("VideoViewer: empty image"); 51 | return; 52 | } 53 | Image loadedImage = toBufferedImage(image); 54 | imageView.setIcon(new ImageIcon(loadedImage)); 55 | frame.pack(); 56 | 57 | } 58 | 59 | private void setSystemLookAndFeel() { 60 | try { 61 | UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName()); 62 | } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | UnsupportedLookAndFeelException e) { 63 | e.printStackTrace(); 64 | } 65 | } 66 | 67 | protected Image toBufferedImage(Mat matrix) { 68 | int type = BufferedImage.TYPE_BYTE_GRAY; 69 | if (matrix.channels() > 1) { 70 | type = BufferedImage.TYPE_3BYTE_BGR; 71 | } 72 | int bufferSize = matrix.channels() * matrix.cols() * matrix.rows(); 73 | byte[] buffer = new byte[bufferSize]; 74 | matrix.get(0, 0, buffer); // get all the pixels 75 | BufferedImage image = new BufferedImage(matrix.cols(), matrix.rows(), type); 76 | final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData(); 77 | System.arraycopy(buffer, 0, targetPixels, 0, buffer.length); 78 | return image; 79 | } 80 | 81 | public void addKeyListener(KeyListener listener) { 82 | frame.addKeyListener(listener); 83 | } 84 | 85 | public void addMouseListener(MouseListener listener) { 86 | imageView.addMouseListener(listener); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /java/README.md: -------------------------------------------------------------------------------- 1 | # Java samples and code 2 | 3 | To get this sample to run, you must [install OpenCV](http://opencv.org/downloads.html) on your local machine. You will need [Apache Ant](http://ant.apache.org) to build the executable JAR file app. 4 | 5 | Your environment should define the following environment variables: 6 | 7 | * JAVA_HOME = the directory containing your JDK 8 | * ANT_HOME = the directory in which Apache Ant is installed 9 | * OPENCV_HOME = the directory in which all of OpenCV is installed 10 | * OPENCV_LIB = the directory containing all native JNI libraries 11 | * OPENCV_JAR = the path to the JAR file containing the java interface to OpenCV (typically named something like "opencv-320.jar" ) 12 | 13 | Execute the code by changing to the directory for GRIP-code-generation/java and then executing "ant run". Or, can can run the application by executing: 14 | 15 | `java -Djava.library.path=${OPENCV_LIB} -jar build/grip_sample1.jar` 16 | 17 | ## Installing OpenCV using the WPIRoboticsProjects opencv-installer 18 | 19 | To get OpenCV on your machine, you should use the [prebuild intaller](https://github.com/WPIRoboticsProjects/opencv-installer) from WPIRoboticsProjects. 20 | 21 | This tool allows you to define where the "java" and "jni" code will be stored. 22 | * OPENCV\_JAR should point to the JAR file copied into your "java" directory 23 | * OPENCV\_LIB should point to the directory in which your "jni" artifacts have been copied. 24 | 25 | If this tool doesn't work for your deployment setup (or if you want to use a version of OpenCV not hosted by FIRST), you may install OpenCV as detailed below: 26 | 27 | ## Installing OpenCV on Windows 28 | 29 | Windows is the easiest install, because the OpenCV site has everything precompiled. 30 | 31 | Download and unzip [OpenCV for Windows](http://opencv.org/downloads.html). 32 | * OPENCV\_JAR will be at `${OPENCV_HOME}/build/java/opencv-320.jar` 33 | * OPENCV\_LIB will be at `${OPENCV_HOME}/build/java/x86` 34 | 35 | ## Installing OpenCV on Linux, including Raspberry Pi 36 | 37 | For Linux operating systems, you'll need to [build everything](http://docs.opencv.org/2.4/doc/tutorials/introduction/desktop_java/java_dev_intro.html). 38 | 39 | The build process takes a long time, especially on a Raspberry Pi. Follow the instructions carefully and you'll be OK. You can start by [cloning the github repository](https://github.com/opencv/opencv) or you can start by downloading the [OpenCV for Linux/Mac](http://opencv.org/downloads.html) ZIP file. 40 | * OPENCV\_JAR will be at `${OPENCV_HOME}/build/bin/opencv-320.jar` 41 | * OPENCV\_LIB will be at `${OPENCV_HOME}/build/lib` 42 | 43 | The GRIP wiki contains [additional instructions](https://github.com/WPIRoboticsProjects/GRIP/wiki/Running-GRIP-on-a-Raspberry-Pi-2) on running pipelines on the Raspberry pi. 44 | 45 | ## Installing OpenCV on MacOS 46 | 47 | A Macintosh install is similar to building on Linux, except you may need to take extra steps to [get the build tools](https://blogs.wcode.org/2014/10/howto-install-build-and-use-opencv-macosx-10-10/). You will need the Mac version of [cmake](https://cmake.org/download/) and you'll need to install [Apache Ant](http://stacktips.com/how-to/how-to-install-ant-in-your-mac-os-x). 48 | 49 | Just like in Linux, the environment variables defined in your `.bash_profile` should be: 50 | * OPENCV\_JAR will be at `${OPENCV_HOME}/build/bin/opencv-320.jar` 51 | * OPENCV\_LIB will be at `${OPENCV_HOME}/build/lib` 52 | 53 | ## Using OpenCV in Eclipse 54 | 55 | In Eclipse you should [define a "User Library" for OpenCV](http://docs.opencv.org/2.4/doc/tutorials/introduction/java_eclipse/java_eclipse.html). Open the Eclipse preferences dialog and go to Java / Build Path / User Libraries. Create a new User Library named "opencv-3.2.0". To this library, attach 'opencv-320.jar' as an external JAR. Then, expand that JAR reference and specify the Native Library Location to be your OPENCV\_LIB directory. 56 | 57 | To use OpenCV in a Eclipse Java project, open the project's properties dialog and add the new user library to the Java Build Path Libraries. 58 | -------------------------------------------------------------------------------- /sample-pipelines/find_red_areas.grip: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 320.0 12 | 13 | 14 | 240.0 15 | 16 | 17 | CUBIC 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 91.72661870503596 26 | 255.0 27 | 28 | 29 | 30 | 31 | 0.0 32 | 53.684210526315795 33 | 34 | 35 | 36 | 37 | 0.0 38 | 73.16638370118847 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | true 47 | 48 | 49 | 50 | 51 | 52 | 53 | 20.0 54 | 55 | 56 | 0 57 | 58 | 59 | 0 60 | 61 | 62 | 1000 63 | 64 | 65 | 0 66 | 67 | 68 | 1000 69 | 70 | 71 | 72 | 0 73 | 100 74 | 75 | 76 | 77 | 1000000 78 | 79 | 80 | 0 81 | 82 | 83 | 0 84 | 85 | 86 | 1000 87 | 88 | 89 | 90 | 91 | 92 | 93 | red_areas 94 | 95 | 96 | true 97 | 98 | 99 | true 100 | 101 | 102 | true 103 | 104 | 105 | true 106 | 107 | 108 | true 109 | 110 | 111 | true 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 0 139 | roboRIO-0-FRC.local 140 | roboRIO-0-FRC.local 141 | /home/lvuser 142 | lvuser 143 | /usr/local/frc/JRE/ 144 | -Xmx50m -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:MaxNewSize=16m 145 | 146 | 147 | Python 148 | GripPipeline 149 | false 150 | /home/sam/code/grip-code-generation/python/samples 151 | 152 | grip 153 | 154 | -------------------------------------------------------------------------------- /java/src/main/java/grip/sample1/find_red_areas.grip: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 320.0 12 | 13 | 14 | 240.0 15 | 16 | 17 | CUBIC 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 91.72661870503596 26 | 255.0 27 | 28 | 29 | 30 | 31 | 0.0 32 | 53.684210526315795 33 | 34 | 35 | 36 | 37 | 0.0 38 | 73.16638370118847 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | true 47 | 48 | 49 | 50 | 51 | 52 | 53 | 20.0 54 | 55 | 56 | 0.0 57 | 58 | 59 | 0.0 60 | 61 | 62 | 1000.0 63 | 64 | 65 | 0.0 66 | 67 | 68 | 1000.0 69 | 70 | 71 | 72 | 0 73 | 100 74 | 75 | 76 | 77 | 1000000.0 78 | 79 | 80 | 0.0 81 | 82 | 83 | 0.0 84 | 85 | 86 | 1000.0 87 | 88 | 89 | 90 | 91 | 92 | 93 | red_areas 94 | 95 | 96 | true 97 | 98 | 99 | true 100 | 101 | 102 | true 103 | 104 | 105 | true 106 | 107 | 108 | true 109 | 110 | 111 | true 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 0 139 | roboRIO-0-FRC.local 140 | roboRIO-0-FRC.local 141 | /home/lvuser 142 | lvuser 143 | /usr/local/frc/JRE/ 144 | -Xmx50m -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:MaxNewSize=16m 145 | 146 | 147 | Java 148 | FindRedAreas 149 | true 150 | /Users/keith/Documents/workspace_opencv/GRIP-code-generation/java/src/grip/samples 151 | grip.sample1 152 | grip 153 | 154 | -------------------------------------------------------------------------------- /java/src/main/java/edu/wpi/first/wpilibj/vision/GripRunner.java: -------------------------------------------------------------------------------- 1 | /*----------------------------------------------------------------------------*/ 2 | /* Copyright (c) FIRST 2016-2017. All Rights Reserved. */ 3 | /* Open Source Software - may be modified and shared by FRC teams. The code */ 4 | /* must be accompanied by the FIRST BSD license file in the root directory of */ 5 | /* the project. */ 6 | /*----------------------------------------------------------------------------*/ 7 | 8 | package edu.wpi.first.wpilibj.vision; 9 | 10 | import org.opencv.videoio.VideoCapture; 11 | import org.opencv.videoio.Videoio; 12 | import org.opencv.core.Core; 13 | import org.opencv.core.Mat; 14 | 15 | /** 16 | * A GRIP runner is a convenient wrapper object to make it easy to run vision 17 | * pipelines from robot code. 18 | * 19 | * @see VisionPipeline 20 | */ 21 | public class GripRunner

{ 22 | 23 | static { 24 | System.loadLibrary(Core.NATIVE_LIBRARY_NAME); 25 | } 26 | 27 | public static final boolean DEBUG = Boolean.valueOf(System.getProperty("debug", "false")); 28 | public static final boolean HEADLESS = Boolean.valueOf(System.getProperty("headless", "false")); 29 | final static int FRAME_COUNT_SPAN = 3; 30 | 31 | private final VideoCapture m_cvSink; 32 | private final P m_pipeline; 33 | private final Mat m_image = new Mat(); 34 | private final Listener m_listener; 35 | private int frameCount = 0; 36 | private long frameCountTimeout = System.currentTimeMillis() + 1000L * FRAME_COUNT_SPAN; 37 | 38 | /** 39 | * Listener interface for a callback that should run after a pipeline has 40 | * processed its input. 41 | * 42 | * @param

the type of the pipeline this listener is for 43 | */ 44 | @FunctionalInterface 45 | public interface Listener

{ 46 | 47 | /** 48 | * Called when the pipeline has run. This shouldn't take much time to 49 | * run because it will delay later calls to the pipeline's 50 | * {@link VisionPipeline#process process} method. Copying the outputs 51 | * and code that uses the copies should be synchronized on the 52 | * same mutex to prevent multiple threads from reading and writing to 53 | * the same memory at the same time. 54 | * 55 | * @param pipeline 56 | * the vision pipeline that ran 57 | */ 58 | void copyPipelineOutputs(P pipeline); 59 | 60 | } 61 | 62 | /** 63 | * Creates a new vision runner. It will take images from the 64 | * {@code videoSource}, send them to the {@code pipeline}, and call the 65 | * {@code listener} when the pipeline has finished to alert user code when 66 | * it is safe to access the pipeline's outputs. 67 | * 68 | * @param camera the video source to use to supply images for the pipeline 69 | * @param pipeline the vision pipeline to run 70 | * @param listener a function to call after the pipeline has finished running 71 | */ 72 | public GripRunner(VideoCapture camera, P pipeline, Listener listener) { 73 | this.m_pipeline = pipeline; 74 | this.m_listener = listener; 75 | m_cvSink = camera; 76 | } 77 | 78 | /** 79 | * Runs the pipeline one time, giving it the next image from the video 80 | * source specified in the constructor. This will block until the source 81 | * either has an image or throws an error. If the source successfully 82 | * supplied a frame, the pipeline's image input will be set, the pipeline 83 | * will run, and the listener specified in the constructor will be called to 84 | * notify it that the pipeline ran. 85 | * 86 | *

87 | * This method is exposed to allow teams to add additional functionality or 88 | * have their own ways to run the pipeline. Most teams, however, should just 89 | * use {@link #runForever} in its own thread using a {@link VisionThread}. 90 | *

91 | */ 92 | public void runOnce() { 93 | m_cvSink.read(m_image); 94 | m_pipeline.process(m_image); 95 | if (m_listener != null) { 96 | m_listener.copyPipelineOutputs(m_pipeline); 97 | } 98 | } 99 | 100 | /** 101 | * A convenience method that calls {@link #runOnce()} in an infinite loop. 102 | * This must be run in a dedicated thread, and cannot be used in the main 103 | * robot thread because it will freeze the robot program. 104 | * 105 | * @see VisionThread 106 | */ 107 | public void runForever() { 108 | while (!Thread.currentThread().isInterrupted()) { 109 | runOnce(); 110 | if (DEBUG) { 111 | frameCount++; 112 | if (System.currentTimeMillis() > frameCountTimeout) { 113 | System.out.println("fps: " + (frameCount / FRAME_COUNT_SPAN)); 114 | frameCount = 0; 115 | frameCountTimeout = System.currentTimeMillis() + 1000L * FRAME_COUNT_SPAN; 116 | } 117 | } 118 | } 119 | } 120 | 121 | /** 122 | * Make a connection to a camera. 123 | * 124 | * @param device Camera number. 125 | * @param width Window width in pixels. 126 | * @param height Window height in pixels. 127 | * @param exposure Relative exposure. 128 | * @return 129 | */ 130 | public static VideoCapture makeCamera(int device, int width, int height, double exposure) { 131 | VideoCapture camera = new VideoCapture(0); 132 | camera.set(Videoio.CAP_PROP_FRAME_WIDTH, width); 133 | camera.set(Videoio.CAP_PROP_FRAME_HEIGHT, height); 134 | if (exposure > -1.0) { 135 | System.out.println("\t" + exposure); 136 | camera.set(Videoio.CAP_PROP_AUTO_EXPOSURE, 0); 137 | camera.set(Videoio.CAP_PROP_EXPOSURE, exposure); 138 | } 139 | if (!camera.isOpened()) { 140 | throw new RuntimeException("Camera will not open"); 141 | } 142 | return camera; 143 | } 144 | 145 | /** 146 | * Make a GUI window on which to display a {@link Mat} image. 147 | * 148 | * @param title Title on the window. 149 | * @param width Window width in pixels. 150 | * @param height Window height in pixels. 151 | * @return a new {@link VideoViewer} 152 | */ 153 | public static VideoViewer makeWindow(String title, int width, int height) { 154 | if (HEADLESS) { return null; } 155 | return new VideoViewer("GRIP", width, height); 156 | } 157 | 158 | } 159 | -------------------------------------------------------------------------------- /python/samples/frc_find_red_areas/grip.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy 3 | import math 4 | from enum import Enum 5 | 6 | class GripPipeline: 7 | """ 8 | An OpenCV pipeline generated by GRIP. 9 | """ 10 | 11 | def __init__(self): 12 | """initializes all values to presets or None if need to be set 13 | """ 14 | 15 | self.__resize_image_width = 320.0 16 | self.__resize_image_height = 240.0 17 | self.__resize_image_interpolation = cv2.INTER_CUBIC 18 | 19 | self.resize_image_output = None 20 | 21 | self.__rgb_threshold_input = self.resize_image_output 22 | self.__rgb_threshold_red = [91.72661870503596, 255.0] 23 | self.__rgb_threshold_green = [0.0, 53.684210526315795] 24 | self.__rgb_threshold_blue = [0.0, 73.16638370118847] 25 | 26 | self.rgb_threshold_output = None 27 | 28 | self.__find_contours_input = self.rgb_threshold_output 29 | self.__find_contours_external_only = True 30 | 31 | self.find_contours_output = None 32 | 33 | self.__filter_contours_contours = self.find_contours_output 34 | self.__filter_contours_min_area = 20.0 35 | self.__filter_contours_min_perimeter = 0 36 | self.__filter_contours_min_width = 0 37 | self.__filter_contours_max_width = 1000 38 | self.__filter_contours_min_height = 0 39 | self.__filter_contours_max_height = 1000 40 | self.__filter_contours_solidity = [0, 100] 41 | self.__filter_contours_max_vertices = 1000000 42 | self.__filter_contours_min_vertices = 0 43 | self.__filter_contours_min_ratio = 0 44 | self.__filter_contours_max_ratio = 1000 45 | 46 | self.filter_contours_output = None 47 | 48 | 49 | def process(self, source0): 50 | """ 51 | Runs the pipeline and sets all outputs to new values. 52 | """ 53 | # Step Resize_Image0: 54 | self.__resize_image_input = source0 55 | (self.resize_image_output) = self.__resize_image(self.__resize_image_input, self.__resize_image_width, self.__resize_image_height, self.__resize_image_interpolation) 56 | 57 | # Step RGB_Threshold0: 58 | self.__rgb_threshold_input = self.resize_image_output 59 | (self.rgb_threshold_output) = self.__rgb_threshold(self.__rgb_threshold_input, self.__rgb_threshold_red, self.__rgb_threshold_green, self.__rgb_threshold_blue) 60 | 61 | # Step Find_Contours0: 62 | self.__find_contours_input = self.rgb_threshold_output 63 | (self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only) 64 | 65 | # Step Filter_Contours0: 66 | self.__filter_contours_contours = self.find_contours_output 67 | (self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio) 68 | 69 | 70 | @staticmethod 71 | def __resize_image(input, width, height, interpolation): 72 | """Scales and image to an exact size. 73 | Args: 74 | input: A numpy.ndarray. 75 | Width: The desired width in pixels. 76 | Height: The desired height in pixels. 77 | interpolation: Opencv enum for the type fo interpolation. 78 | Returns: 79 | A numpy.ndarray of the new size. 80 | """ 81 | return cv2.resize(input, ((int)(width), (int)(height)), 0, 0, interpolation) 82 | 83 | @staticmethod 84 | def __rgb_threshold(input, red, green, blue): 85 | """Segment an image based on color ranges. 86 | Args: 87 | input: A BGR numpy.ndarray. 88 | red: A list of two numbers the are the min and max red. 89 | green: A list of two numbers the are the min and max green. 90 | blue: A list of two numbers the are the min and max blue. 91 | Returns: 92 | A black and white numpy.ndarray. 93 | """ 94 | out = cv2.cvtColor(input, cv2.COLOR_BGR2RGB) 95 | return cv2.inRange(out, (red[0], green[0], blue[0]), (red[1], green[1], blue[1])) 96 | 97 | @staticmethod 98 | def __find_contours(input, external_only): 99 | """Sets the values of pixels in a binary image to their distance to the nearest black pixel. 100 | Args: 101 | input: A numpy.ndarray. 102 | external_only: A boolean. If true only external contours are found. 103 | Return: 104 | A list of numpy.ndarray where each one represents a contour. 105 | """ 106 | if(external_only): 107 | mode = cv2.RETR_EXTERNAL 108 | else: 109 | mode = cv2.RETR_LIST 110 | method = cv2.CHAIN_APPROX_SIMPLE 111 | im2, contours, hierarchy =cv2.findContours(input, mode=mode, method=method) 112 | return contours 113 | 114 | @staticmethod 115 | def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width, 116 | min_height, max_height, solidity, max_vertex_count, min_vertex_count, 117 | min_ratio, max_ratio): 118 | """Filters out contours that do not meet certain criteria. 119 | Args: 120 | input_contours: Contours as a list of numpy.ndarray. 121 | min_area: The minimum area of a contour that will be kept. 122 | min_perimeter: The minimum perimeter of a contour that will be kept. 123 | min_width: Minimum width of a contour. 124 | max_width: MaxWidth maximum width. 125 | min_height: Minimum height. 126 | max_height: Maximimum height. 127 | solidity: The minimum and maximum solidity of a contour. 128 | min_vertex_count: Minimum vertex Count of the contours. 129 | max_vertex_count: Maximum vertex Count. 130 | min_ratio: Minimum ratio of width to height. 131 | max_ratio: Maximum ratio of width to height. 132 | Returns: 133 | Contours as a list of numpy.ndarray. 134 | """ 135 | output = [] 136 | for contour in input_contours: 137 | x,y,w,h = cv2.boundingRect(contour) 138 | if (w < min_width or w > max_width): 139 | continue 140 | if (h < min_height or h > max_height): 141 | continue 142 | area = cv2.contourArea(contour) 143 | if (area < min_area): 144 | continue 145 | if (cv2.arcLength(contour, True) < min_perimeter): 146 | continue 147 | hull = cv2.convexHull(contour) 148 | solid = 100 * area / cv2.contourArea(hull) 149 | if (solid < solidity[0] or solid > solidity[1]): 150 | continue 151 | if (len(contour) < min_vertex_count or len(contour) > max_vertex_count): 152 | continue 153 | ratio = (float)(w) / h 154 | if (ratio < min_ratio or ratio > max_ratio): 155 | continue 156 | output.append(contour) 157 | return output 158 | 159 | 160 | 161 | -------------------------------------------------------------------------------- /java/src/main/java/grip/sample1/FindRedAreas.java: -------------------------------------------------------------------------------- 1 | package grip.sample1; 2 | 3 | import java.io.File; 4 | import java.io.FileWriter; 5 | import java.io.IOException; 6 | import java.util.ArrayList; 7 | import java.util.List; 8 | import java.util.Map; 9 | import java.util.stream.Collectors; 10 | import java.util.HashMap; 11 | 12 | import edu.wpi.first.wpilibj.vision.VisionPipeline; 13 | 14 | import org.opencv.core.*; 15 | import org.opencv.core.Core.*; 16 | import org.opencv.features2d.FeatureDetector; 17 | import org.opencv.imgcodecs.Imgcodecs; 18 | import org.opencv.imgproc.*; 19 | import org.opencv.objdetect.*; 20 | 21 | /** 22 | * FindRedAreas class. 23 | * 24 | *

An OpenCV pipeline generated by GRIP. 25 | * 26 | * @author GRIP 27 | */ 28 | public class FindRedAreas implements VisionPipeline { 29 | 30 | //Outputs 31 | private Mat resizeImageOutput = new Mat(); 32 | private Mat rgbThresholdOutput = new Mat(); 33 | private ArrayList findContoursOutput = new ArrayList(); 34 | private ArrayList filterContoursOutput = new ArrayList(); 35 | 36 | static { 37 | System.loadLibrary(Core.NATIVE_LIBRARY_NAME); 38 | } 39 | 40 | /** 41 | * This is the primary method that runs the entire pipeline and updates the outputs. 42 | */ 43 | @Override public void process(Mat source0) { 44 | // Step Resize_Image0: 45 | Mat resizeImageInput = source0; 46 | double resizeImageWidth = 320.0; 47 | double resizeImageHeight = 240.0; 48 | int resizeImageInterpolation = Imgproc.INTER_CUBIC; 49 | resizeImage(resizeImageInput, resizeImageWidth, resizeImageHeight, resizeImageInterpolation, resizeImageOutput); 50 | 51 | // Step RGB_Threshold0: 52 | Mat rgbThresholdInput = resizeImageOutput; 53 | double[] rgbThresholdRed = {91.72661870503596, 255.0}; 54 | double[] rgbThresholdGreen = {0.0, 53.684210526315795}; 55 | double[] rgbThresholdBlue = {0.0, 73.16638370118847}; 56 | rgbThreshold(rgbThresholdInput, rgbThresholdRed, rgbThresholdGreen, rgbThresholdBlue, rgbThresholdOutput); 57 | 58 | // Step Find_Contours0: 59 | Mat findContoursInput = rgbThresholdOutput; 60 | boolean findContoursExternalOnly = true; 61 | findContours(findContoursInput, findContoursExternalOnly, findContoursOutput); 62 | 63 | // Step Filter_Contours0: 64 | ArrayList filterContoursContours = findContoursOutput; 65 | double filterContoursMinArea = 20.0; 66 | double filterContoursMinPerimeter = 0.0; 67 | double filterContoursMinWidth = 0.0; 68 | double filterContoursMaxWidth = 1000.0; 69 | double filterContoursMinHeight = 0.0; 70 | double filterContoursMaxHeight = 1000.0; 71 | double[] filterContoursSolidity = {0, 100}; 72 | double filterContoursMaxVertices = 1000000.0; 73 | double filterContoursMinVertices = 0.0; 74 | double filterContoursMinRatio = 0.0; 75 | double filterContoursMaxRatio = 1000.0; 76 | filterContours(filterContoursContours, filterContoursMinArea, filterContoursMinPerimeter, filterContoursMinWidth, filterContoursMaxWidth, filterContoursMinHeight, filterContoursMaxHeight, filterContoursSolidity, filterContoursMaxVertices, filterContoursMinVertices, filterContoursMinRatio, filterContoursMaxRatio, filterContoursOutput); 77 | 78 | } 79 | 80 | /** 81 | * This method is a generated getter for the output of a Resize_Image. 82 | * @return Mat output from Resize_Image. 83 | */ 84 | public Mat resizeImageOutput() { 85 | return resizeImageOutput; 86 | } 87 | 88 | /** 89 | * This method is a generated getter for the output of a RGB_Threshold. 90 | * @return Mat output from RGB_Threshold. 91 | */ 92 | public Mat rgbThresholdOutput() { 93 | return rgbThresholdOutput; 94 | } 95 | 96 | /** 97 | * This method is a generated getter for the output of a Find_Contours. 98 | * @return ArrayList output from Find_Contours. 99 | */ 100 | public ArrayList findContoursOutput() { 101 | return findContoursOutput; 102 | } 103 | 104 | /** 105 | * This method is a generated getter for the output of a Filter_Contours. 106 | * @return ArrayList output from Filter_Contours. 107 | */ 108 | public ArrayList filterContoursOutput() { 109 | return filterContoursOutput; 110 | } 111 | 112 | 113 | /** 114 | * Scales and image to an exact size. 115 | * @param input The image on which to perform the Resize. 116 | * @param width The width of the output in pixels. 117 | * @param height The height of the output in pixels. 118 | * @param interpolation The type of interpolation. 119 | * @param output The image in which to store the output. 120 | */ 121 | private void resizeImage(Mat input, double width, double height, 122 | int interpolation, Mat output) { 123 | Imgproc.resize(input, output, new Size(width, height), 0.0, 0.0, interpolation); 124 | } 125 | 126 | /** 127 | * Segment an image based on color ranges. 128 | * @param input The image on which to perform the RGB threshold. 129 | * @param red The min and max red. 130 | * @param green The min and max green. 131 | * @param blue The min and max blue. 132 | * @param output The image in which to store the output. 133 | */ 134 | private void rgbThreshold(Mat input, double[] red, double[] green, double[] blue, 135 | Mat out) { 136 | Imgproc.cvtColor(input, out, Imgproc.COLOR_BGR2RGB); 137 | Core.inRange(out, new Scalar(red[0], green[0], blue[0]), 138 | new Scalar(red[1], green[1], blue[1]), out); 139 | } 140 | 141 | /** 142 | * Sets the values of pixels in a binary image to their distance to the nearest black pixel. 143 | * @param input The image on which to perform the Distance Transform. 144 | * @param type The Transform. 145 | * @param maskSize the size of the mask. 146 | * @param output The image in which to store the output. 147 | */ 148 | private void findContours(Mat input, boolean externalOnly, 149 | List contours) { 150 | Mat hierarchy = new Mat(); 151 | contours.clear(); 152 | int mode; 153 | if (externalOnly) { 154 | mode = Imgproc.RETR_EXTERNAL; 155 | } 156 | else { 157 | mode = Imgproc.RETR_LIST; 158 | } 159 | int method = Imgproc.CHAIN_APPROX_SIMPLE; 160 | Imgproc.findContours(input, contours, hierarchy, mode, method); 161 | } 162 | 163 | 164 | /** 165 | * Filters out contours that do not meet certain criteria. 166 | * @param inputContours is the input list of contours 167 | * @param output is the the output list of contours 168 | * @param minArea is the minimum area of a contour that will be kept 169 | * @param minPerimeter is the minimum perimeter of a contour that will be kept 170 | * @param minWidth minimum width of a contour 171 | * @param maxWidth maximum width 172 | * @param minHeight minimum height 173 | * @param maxHeight maximimum height 174 | * @param Solidity the minimum and maximum solidity of a contour 175 | * @param minVertexCount minimum vertex Count of the contours 176 | * @param maxVertexCount maximum vertex Count 177 | * @param minRatio minimum ratio of width to height 178 | * @param maxRatio maximum ratio of width to height 179 | */ 180 | private void filterContours(List inputContours, double minArea, 181 | double minPerimeter, double minWidth, double maxWidth, double minHeight, double 182 | maxHeight, double[] solidity, double maxVertexCount, double minVertexCount, double 183 | minRatio, double maxRatio, List output) { 184 | final MatOfInt hull = new MatOfInt(); 185 | output.clear(); 186 | //operation 187 | for (int i = 0; i < inputContours.size(); i++) { 188 | final MatOfPoint contour = inputContours.get(i); 189 | final Rect bb = Imgproc.boundingRect(contour); 190 | if (bb.width < minWidth || bb.width > maxWidth) continue; 191 | if (bb.height < minHeight || bb.height > maxHeight) continue; 192 | final double area = Imgproc.contourArea(contour); 193 | if (area < minArea) continue; 194 | if (Imgproc.arcLength(new MatOfPoint2f(contour.toArray()), true) < minPerimeter) continue; 195 | Imgproc.convexHull(contour, hull); 196 | MatOfPoint mopHull = new MatOfPoint(); 197 | mopHull.create((int) hull.size().height, 1, CvType.CV_32SC2); 198 | for (int j = 0; j < hull.size().height; j++) { 199 | int index = (int)hull.get(j, 0)[0]; 200 | double[] point = new double[] { contour.get(index, 0)[0], contour.get(index, 0)[1]}; 201 | mopHull.put(j, 0, point); 202 | } 203 | final double solid = 100 * area / Imgproc.contourArea(mopHull); 204 | if (solid < solidity[0] || solid > solidity[1]) continue; 205 | if (contour.rows() < minVertexCount || contour.rows() > maxVertexCount) continue; 206 | final double ratio = bb.width / (double)bb.height; 207 | if (ratio < minRatio || ratio > maxRatio) continue; 208 | output.add(contour); 209 | } 210 | } 211 | 212 | 213 | 214 | 215 | } 216 | 217 | --------------------------------------------------------------------------------