├── hello.js ├── jetcolorbar.jpg ├── NDVIcolormap.jpg ├── Image Processing ├── AddsAxisToImage.py ├── roi and crops image.py ├── RGBquad.py ├── YUVQuad.py ├── HSV.py ├── histogram3.py ├── takeHDRpictures.py ├── 3dRGBmeshgrid.py ├── HDRimaging.py ├── HSVcolortrackbar.py ├── realtimelineRGBintensitydisplay.py ├── NDVIvideogainoptimization.py └── CVquaddisplay -colorandsobel .py ├── Pi Camera ├── picameravidwithimagecapture.py ├── picamerawithimagecaptureopencv.py ├── PiCameraEffectsShow.py ├── PiCameraEffectsVid.py ├── exposuremosaic.py ├── videooverlayfinal.py └── picmaeramancontrol.py ├── USB Camera ├── USBTakesPic+Annotates.py └── USBVidWithImageCapture.py ├── computer vision course.md ├── README.md └── NDVIvideogainoptimization.py /hello.js: -------------------------------------------------------------------------------- 1 | alert("Hello, world!"); 2 | dfdsfdfsf 3 | -------------------------------------------------------------------------------- /jetcolorbar.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MargaretAN9/Peggy/HEAD/jetcolorbar.jpg -------------------------------------------------------------------------------- /NDVIcolormap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MargaretAN9/Peggy/HEAD/NDVIcolormap.jpg -------------------------------------------------------------------------------- /Image Processing/AddsAxisToImage.py: -------------------------------------------------------------------------------- 1 | 2 | #Displaying a Matplotlib Scale Image 3 | #Imports a file and displays labels (x,y, title} and resolution tick marks 4 | #requires matplotlib 5 | 6 | 7 | import matplotlib.pyplot as plt 8 | import matplotlib.image as mpimg 9 | 10 | #Enter input file name 11 | filein = "/home/pi/Desktop/testimage1.jpg" 12 | 13 | 14 | image = mpimg.imread(filein) 15 | plt.imshow(image) 16 | 17 | #plt.axis ("off") 18 | 
19 | plt.xlabel('Horizontal') 20 | plt.ylabel('Vertical') 21 | 22 | 23 | plt.title('Public Lab Raspberry Pi Test') 24 | 25 | plt.show() 26 | -------------------------------------------------------------------------------- /Image Processing/roi and crops image.py: -------------------------------------------------------------------------------- 1 | 2 | """"" 3 | crops a portion of image 4 | 5 | mouse click to draw rectangle 6 | 7 | press keyboard to show crop 8 | 9 | """"" 10 | 11 | 12 | 13 | 14 | 15 | import cv2 16 | from matplotlib import pyplot as plt 17 | 18 | import matplotlib.pyplot as plt 19 | import matplotlib.image as mpimg 20 | import matplotlib.cm as cm 21 | 22 | import numpy as np 23 | 24 | #parameters select file 25 | 26 | FILEIN = "/home/pi/Desktop/microscope1.jpg" 27 | 28 | 29 | 30 | if __name__ == '__main__' : 31 | 32 | # Read image 33 | im = mpimg.imread(FILEIN) 34 | 35 | # Select ROI 36 | r = cv2.selectROI(im) 37 | 38 | 39 | 40 | # Crop image 41 | imCrop = im[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])] 42 | 43 | 44 | 45 | 46 | # Display cropped image 47 | cv2.imshow("Image", imCrop) 48 | print (r) 49 | 50 | cv2.waitKey(0) 51 | -------------------------------------------------------------------------------- /Pi Camera/picameravidwithimagecapture.py: -------------------------------------------------------------------------------- 1 | #test camera program -tested with Raspberrty Pi camera v2.1 2 | #program provides xx sec alignment preview and records jpg image 3 | # application: align spectrometer or focus microscope 4 | # annotates with filename and datetime 5 | 6 | from picamera import PiCamera,Color 7 | from time import sleep 8 | import datetime as dt 9 | 10 | #set filename/resolution/video time (sec) 11 | #resolution size 4:3 options: (3280,2464),(1920,1080),(1640,1232),(640,480) 12 | 13 | 14 | filename = '/home/pi/Desktop/testimage1.jpg' 15 | SIZE = (3280,2464) 16 | vidtime = 40 17 | 18 | camera = PiCamera() 19 | 20 | 
camera.start_preview(alpha=255) 21 | 22 | #camera.annotate_background = picamera.Color('black') 23 | camera.annotate_background = Color('blue') 24 | camera.annotate_foreground = Color('yellow') 25 | camera.annotate_text = filename + " " + dt.datetime.now().strftime ('%Y-%m-%d %H:%M:%S') 26 | 27 | 28 | #camera.start_preview() 29 | sleep(vidtime) 30 | 31 | 32 | camera.resolution = (SIZE) 33 | 34 | camera.capture(filename) 35 | 36 | camera.stop_preview() 37 | 38 | 39 | -------------------------------------------------------------------------------- /USB Camera/USBTakesPic+Annotates.py: -------------------------------------------------------------------------------- 1 | 2 | #this python program collects a picture from a usb camera and provides a resolution scale 3 | #tested with raspberry pi (stretch) and public lab usb camera 4 | # program requires fswebcam which can be downloaded by using sudo apt-get install fswebcam 5 | # see https://www.raspberrypi.org/documentation/usage/webcams/README.md for more info 6 | # program uses matplotlib 7 | # resolution set max resolution of the USB 2.0, PC camera from Public lab 8 | 9 | import datetime 10 | import time 11 | import os 12 | import matplotlib.pyplot as plt 13 | import matplotlib.image as mpimg 14 | 15 | 16 | filename = "/home/pi/Desktop/testimage1.jpg" 17 | 18 | 19 | 20 | # date time display options 21 | t = datetime.datetime.now() 22 | 23 | #DATE=$(date +"%Y-%m-%d_%H%M%S") 24 | 25 | time.sleep(.5) 26 | 27 | os.system('fswebcam --skip 2 -r 640x480 --no-banner --jpeg 100 filename') 28 | 29 | 30 | image = mpimg.imread("filename") 31 | plt.imshow(image) 32 | 33 | plt.xlabel(t) 34 | plt.ylabel('Resolution (640x480)') 35 | 36 | 37 | plt.title('Public Lab Test') 38 | 39 | plt.show() 40 | -------------------------------------------------------------------------------- /Image Processing/RGBquad.py: -------------------------------------------------------------------------------- 1 | #imports file and shows RGB quad picture 2 | # 
uses matplotilib w/numpy 3 | 4 | 5 | 6 | import matplotlib.pyplot as plt 7 | import matplotlib.image as mpimg 8 | import matplotlib.cm as cm 9 | 10 | import numpy as np 11 | 12 | #parameters 13 | 14 | FILEIN = "/home/pi/Desktop/testimage1.jpg" 15 | 16 | 17 | img = mpimg.imread(FILEIN) 18 | 19 | # MATPLOTLIB IS RGB 20 | B = img[:,:,2] 21 | G = img[:,:,1] 22 | R = img[:,:,0] 23 | 24 | f, axarr = plt.subplots(2, 2) 25 | axarr[0,0].imshow(img, cmap = cm.Greys_r) 26 | axarr[0,0].set_title("RGB") 27 | axarr[0,0].axis('on') 28 | 29 | axarr[0,1].imshow(B, cmap = cm.Greys_r) 30 | axarr[0,1].set_title("Blue") 31 | axarr[0,1].axis('on') 32 | 33 | axarr[1,0].imshow(G, cmap = cm.Greys_r) 34 | axarr[1,0].set_title("Green") 35 | axarr[1,0].axis('on') 36 | 37 | axarr[1,1].imshow(R, cmap = cm.Greys_r) 38 | axarr[1,1].set_title("Red") 39 | axarr[1,1].axis('on') 40 | 41 | 42 | # Fine-tune figure; hide x ticks for top plots and y ticks for right plots 43 | plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False) 44 | plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False) 45 | 46 | 47 | plt.tight_layout() 48 | 49 | plt.show() 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /Image Processing/YUVQuad.py: -------------------------------------------------------------------------------- 1 | 2 | #imports file and shows YUV quad picture 3 | # uses matplotilib,numpy,opencv 4 | 5 | 6 | 7 | import matplotlib.pyplot as plt 8 | import matplotlib.image as mpimg 9 | import matplotlib.cm as cm 10 | 11 | import numpy as np 12 | import cv2 13 | 14 | #parameters 15 | 16 | FILEIN = "/home/pi/Desktop/testimage1.jpg" 17 | 18 | 19 | 20 | img_in= mpimg.imread(FILEIN) 21 | 22 | img_out = cv2.cvtColor(img_in, cv2.COLOR_BGR2YUV) 23 | 24 | 25 | Y= img_out[:,:,0] 26 | U= img_out[:,:,1] 27 | V= img_out[:,:,2] 28 | 29 | 30 | 31 | f, axarr = plt.subplots(2, 2) 32 | axarr[0,0].imshow(img_in, cmap = cm.Greys_r) 33 | 
axarr[0,0].set_title("YUV") 34 | axarr[0,0].axis('on') 35 | 36 | axarr[0,1].imshow(Y, cmap = cm.Greys_r) 37 | axarr[0,1].set_title("Y") 38 | axarr[0,1].axis('on') 39 | 40 | axarr[1,0].imshow(U, cmap = cm.Greys_r) 41 | axarr[1,0].set_title("U") 42 | axarr[1,0].axis('on') 43 | 44 | axarr[1,1].imshow(V, cmap = cm.Greys_r) 45 | axarr[1,1].set_title("V") 46 | axarr[1,1].axis('on') 47 | 48 | 49 | # Fine-tune figure; hide x ticks for top plots and y ticks for right plots 50 | plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False) 51 | plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False) 52 | 53 | 54 | plt.tight_layout() 55 | 56 | plt.show() 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /Image Processing/HSV.py: -------------------------------------------------------------------------------- 1 | 2 | #imports file and shows HSV quad picture 3 | # uses matplotilib,numpy,opencv 4 | 5 | 6 | 7 | import matplotlib.pyplot as plt 8 | import matplotlib.image as mpimg 9 | import matplotlib.cm as cm 10 | 11 | import numpy as np 12 | import cv2 13 | 14 | #parameters 15 | 16 | FILEIN = "/home/pi/Desktop/spectrsal star2.jpg" 17 | 18 | 19 | 20 | img_in= mpimg.imread(FILEIN) 21 | 22 | img_out = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV) 23 | 24 | 25 | H= img_out[:,:,0] 26 | S= img_out[:,:,1] 27 | V= img_out[:,:,2] 28 | 29 | 30 | 31 | f, axarr = plt.subplots(2, 2) 32 | axarr[0,0].imshow(img_in, cmap = cm.Greys_r) 33 | axarr[0,0].set_title("HSV") 34 | axarr[0,0].axis('on') 35 | 36 | axarr[0,1].imshow(H, cmap = cm.Greys_r) 37 | axarr[0,1].set_title("H") 38 | axarr[0,1].axis('on') 39 | 40 | axarr[1,0].imshow(S, cmap = cm.Greys_r) 41 | axarr[1,0].set_title("S") 42 | axarr[1,0].axis('on') 43 | 44 | axarr[1,1].imshow(V, cmap = cm.Greys_r) 45 | axarr[1,1].set_title("V") 46 | axarr[1,1].axis('on') 47 | 48 | 49 | # Fine-tune figure; hide x ticks for top plots and y ticks for right plots 50 | 
plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False) 51 | plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False) 52 | 53 | 54 | plt.tight_layout() 55 | 56 | plt.show() 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /USB Camera/USBVidWithImageCapture.py: -------------------------------------------------------------------------------- 1 | # displays video and records picture from USB camera 2 | #tested with Public Lab USB camera on Raspberry PI (stretch) June 2018 3 | # use 's' key to stop video and record image 4 | # code originally from https://gist.github.com/snim2/255151 5 | # uses pygame library 6 | # potential application - focus microscope with video and then store image by pressing 's' 7 | 8 | 9 | import pygame 10 | import pygame.camera 11 | from pygame.locals import * 12 | 13 | # set parameters 14 | 15 | DEVICE = '/dev/video0' 16 | SIZE = (640, 480) 17 | 18 | FILENAME = '/home/pi/Desktop/USBtestimage1.jpg' 19 | 20 | 21 | 22 | def camstream(): 23 | pygame.init() 24 | pygame.camera.init() 25 | display = pygame.display.set_mode(SIZE, 0) 26 | camera = pygame.camera.Camera(DEVICE, SIZE) 27 | camera.start() 28 | screen = pygame.surface.Surface(SIZE, 0, display) 29 | capture = True 30 | while capture: 31 | screen = camera.get_image(screen) 32 | display.blit(screen, (0,0)) 33 | pygame.display.flip() 34 | for event in pygame.event.get(): 35 | if event.type == QUIT: 36 | capture = False 37 | elif event.type == KEYDOWN and event.key == K_s: 38 | pygame.image.save(screen, FILENAME) 39 | camera.stop() 40 | pygame.quit() 41 | return 42 | 43 | camera.stop() 44 | pygame.quit() 45 | return 46 | 47 | if __name__ == '__main__': 48 | camstream() 49 | -------------------------------------------------------------------------------- /Pi Camera/picamerawithimagecaptureopencv.py: -------------------------------------------------------------------------------- 1 | # shows video and captures image using 
picmaera and opencv 2 | # from https://www.pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python/ 3 | # 4 | # press q to quit 5 | 6 | from picamera.array import PiRGBArray 7 | from picamera import PiCamera 8 | import time 9 | import cv2 10 | 11 | #set filename/resolution 12 | #resolution size 4:3 options: (1920,1088),(1640,1232),(640,480) 13 | # note (3280,2464) provides 'out of resources' 14 | 15 | SIZE = (1640,1232) 16 | FILEOUT = '/home/pi/Desktop/testimage1.jpg' 17 | #cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN) 18 | #cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN) 19 | 20 | # initialize the camera and grab a reference to the raw camera capture 21 | camera = PiCamera() 22 | camera.resolution = (SIZE) 23 | camera.framerate = 5 24 | rawCapture = PiRGBArray(camera, size=(SIZE)) 25 | 26 | 27 | # allow the camera to warmup 28 | time.sleep(0.1) 29 | 30 | # capture frames from the camera 31 | for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): 32 | # grab the raw NumPy array representing the image, then initialize the timestamp 33 | # and occupied/unoccupied text 34 | image = frame.array 35 | 36 | #resize to fit screen 37 | resized = cv2.resize(image,None,fx=.7,fy=.7,interpolation = cv2.INTER_CUBIC) 38 | # show the frame 39 | cv2.imshow("window",resized) 40 | key = cv2.waitKey(1) & 0xFF 41 | # clear the stream in preparation for the next frame 42 | rawCapture.truncate(0) 43 | 44 | # if the `q` key was pressed, break from the loop 45 | if key == ord("q"): 46 | break 47 | 48 | cv2.imwrite(FILEOUT,image) 49 | camera.close() 50 | cv2.waitKey(0) 51 | cv2.destroyAllWindows() 52 | -------------------------------------------------------------------------------- /Image Processing/histogram3.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | #plot histograms 4 | #calculates grayscale/color and 2d histograms 5 | #see 
https://lmcaraig.com/image-histograms-histograms-equalization-and-histograms-comparison/ 6 | 7 | 8 | import matplotlib.pyplot as plt 9 | import matplotlib.image as mpimg 10 | import matplotlib.cm as cm 11 | import cv2 12 | import numpy as np 13 | from matplotlib import ticker 14 | 15 | #Load paaramters 16 | image = mpimg.imread("/home/pi/Desktop/testimage6.jpg") 17 | 18 | 19 | 20 | 21 | def draw_image_histogram(image, channels, color='k'): 22 | hist = cv2.calcHist([image], channels, None, [256], [0, 256]) 23 | plt.plot(hist, color=color) 24 | plt.xlim([0, 256]) 25 | 26 | 27 | def show_grayscale_histogram(image): 28 | grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 29 | draw_image_histogram(grayscale_image, [0]) 30 | plt.show() 31 | 32 | def show_color_histogram(image): 33 | for i, col in enumerate(['b', 'g', 'r']): 34 | draw_image_histogram(image, [i], color=col) 35 | plt.show() 36 | 37 | 38 | 39 | 40 | def show_image_histogram_2d(image, bins=32, tick_spacing=5): 41 | fig, axes = plt.subplots(1, 3, figsize=(12, 5)) 42 | channels_mapping = {0: 'B', 1: 'G', 2: 'R'} 43 | for i, channels in enumerate([[0, 1], [0, 2], [1, 2]]): 44 | hist = cv2.calcHist( 45 | [image], channels, None, [bins] * 2, [0, 256] * 2) 46 | 47 | channel_x = channels_mapping[channels[0]] 48 | channel_y = channels_mapping[channels[1]] 49 | 50 | ax = axes[i] 51 | ax.set_xlim([0, bins - 1]) 52 | ax.set_ylim([0, bins - 1]) 53 | 54 | ax.set_xlabel('Channel {channel_x}') 55 | ax.set_ylabel('Channel {channel_y}') 56 | ax.set_title('2D Color Histogram for {channel_x} and {channel_y}') 57 | 58 | ax.yaxis.set_major_locator(ticker.MultipleLocator(tick_spacing)) 59 | ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing)) 60 | 61 | im = ax.imshow(hist) 62 | 63 | fig.colorbar(im, ax=axes.ravel().tolist(), orientation='orizontal') 64 | fig.suptitle('2D Color Histograms with {bins} bins', fontsize=16) 65 | plt.show() 66 | 67 | #call functions 68 | 69 | show_image_histogram_2d(image,32,5) 70 | 
71 | show_color_histogram(image) 72 | 73 | show_grayscale_histogram(image) 74 | -------------------------------------------------------------------------------- /Image Processing/takeHDRpictures.py: -------------------------------------------------------------------------------- 1 | 2 | #HDR collection program 3 | #record 4 pictures and exposure data for HDR processing 4 | #shows all pictures to check image quality include auto exp + four exposures 5 | 6 | 7 | 8 | 9 | from picamera import PiCamera 10 | from time import sleep 11 | from fractions import Fraction 12 | import cv2 13 | import numpy as np 14 | import matplotlib.pyplot as plt 15 | import matplotlib.image as mpimg 16 | 17 | #set parameters 18 | 19 | font = cv2.FONT_HERSHEY_SIMPLEX 20 | #camera = PiCamera(resolution=(1280,720 )) 21 | #camera = PiCamera(resolution=(1280, 720),framerate=(Fraction (1,8)),sensor_mode=3) 22 | camera = PiCamera(resolution=(1280, 720),framerate=(1),sensor_mode=3) 23 | 24 | #set filenane to transfer exposure data 25 | 26 | filename="expfile" 27 | 28 | #set wait time should be min 5 sec 29 | 30 | waitime = 5 31 | 32 | # xrange = how amny pictures 33 | xrange = 5 34 | 35 | #Raspberry pi exptime is in microseconds , program desigend to accept time in sec and convert to microseconds 36 | # enter times in sec 37 | 38 | 39 | exposure_times = np.array([0.005, .01, .1, .3], dtype=np.float32) 40 | np.save(filename, exposure_times) 41 | 42 | 43 | 44 | 45 | #capture seed picture 46 | 47 | 48 | camera.shutter_speed = 0 49 | camera.iso = 0 50 | sleep(waitime) 51 | camera.capture('/home/pi/Desktop/testexposure.jpg') 52 | 53 | 54 | img = cv2.imread('/home/pi/Desktop/testexposure.jpg') 55 | #cv2.putText(img,"{0},{1}".format(x,y),(5,50), font, 2, (0,255,0), 9, cv2.LINE_AA) 56 | 57 | img = cv2.resize(img, (0, 0), None, .25, .25) 58 | 59 | 60 | 61 | print (exposure_times) 62 | print (" number exp time (sec) exp time (microsceoned) ") 63 | 64 | for i in range (1,xrange): 65 | 66 | sleep(waitime) 67 | 
camera.shutter_speed = int(1000000*(exposure_times[i-1])) 68 | print ("{0} {1:.4} {2}".format(i,exposure_times[i-1],((camera.shutter_speed)))) 69 | camera.iso = 100 70 | 71 | 72 | camera.capture('/home/pi/Desktop/img{0:02d}.jpg'.format(i)) 73 | img1 = cv2.imread('/home/pi/Desktop/img{0:02d}.jpg'.format(i)) 74 | img1 = cv2.resize(img1, (0, 0), None, .25, .25) 75 | img = np.concatenate((img,img1), axis=1) 76 | 77 | 78 | 79 | cv2.imshow("img", img) 80 | 81 | 82 | 83 | sleep(1) 84 | 85 | 86 | cv2.waitKey(0) 87 | 88 | cv2.destroyAllWindows() 89 | camera.framerate = 30 90 | sleep(.3) 91 | 92 | 93 | camera.close() 94 | 95 | -------------------------------------------------------------------------------- /Pi Camera/PiCameraEffectsShow.py: -------------------------------------------------------------------------------- 1 | 2 | #The program displays differnet processing modes from a Raspberry Pi camera 3 | #program tested on raspberry pi (strectch) with v2 camera (June 2018) 4 | #Image is displayed at default settings between modes for comparison. 
Over 40 different settings are displayed 5 | # see https://projects.raspberrypi.org/en/projects/getting-started-with-picamera for more info on picamera 6 | # application: program is useful to see preset processing options available with picamera 7 | # see example demo videos at: https://www.youtube.com/watch?v=MCXqdq1Xw9A 8 | 9 | 10 | 11 | from picamera import PiCamera 12 | from time import sleep 13 | from picamera import PiCamera, Color 14 | from picamera import PiCamera, Color 15 | import datetime as dt 16 | 17 | # set parameters 18 | 19 | 20 | camera = PiCamera() 21 | 22 | camera.annotate_background = Color('blue') 23 | camera.annotate_foreground = Color('yellow') 24 | sleep(1) 25 | camera.start_preview() 26 | 27 | camera.annotate_text = " Image displayed at default settings " 28 | sleep(5) 29 | 30 | 31 | for i in range(100): 32 | camera.annotate_text = " Brightness : %s " % i 33 | camera.brightness = i 34 | sleep(0.1) 35 | 36 | camera.brightness = 50 37 | camera.annotate_text = " Image displayed at default settings " 38 | sleep(3) 39 | 40 | 41 | for i in range(100): 42 | camera.annotate_text = " Contrast: %s " % i 43 | camera.contrast = i 44 | sleep(0.1) 45 | 46 | camera.contrast = 0 47 | camera.annotate_text = " Image displayed at default settings " 48 | sleep(3) 49 | 50 | 51 | for effect in camera.AWB_MODES: 52 | camera.awb_mode = effect 53 | camera.annotate_text = " AWB Effect: %s " % effect 54 | sleep(5) 55 | 56 | 57 | camera.awb_mode = 'auto' 58 | camera.annotate_text = " Image displayed at default settings " 59 | sleep(3) 60 | 61 | 62 | 63 | for effect in camera.IMAGE_EFFECTS: 64 | camera.image_effect = effect 65 | camera.annotate_text = " IMAGE Effect: %s " % effect 66 | sleep(5) 67 | 68 | camera.image_effect = 'none' 69 | camera.annotate_text = " Image displayed at default settings " 70 | sleep(3) 71 | 72 | 73 | 74 | for effect in camera.EXPOSURE_MODES: 75 | camera.exposure_mode = effect 76 | camera.annotate_text = " EXPOSURE MODES Effect: %s " % effect 
77 | sleep(5) 78 | 79 | 80 | camera.exposure_mode = 'auto' 81 | sleep(2) 82 | camera.annotate_text = " Image displayed at default settings " 83 | sleep(3) 84 | 85 | 86 | 87 | 88 | camera.stop_preview() 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /Pi Camera/PiCameraEffectsVid.py: -------------------------------------------------------------------------------- 1 | 2 | #The program records a video demonstrating differnet processing modes from a Raspberry Pi camera 3 | #program tested on raspberry pi (strectch) with v2 camera (June 2018) 4 | #Image is displayed at default settings between modes for comparison. Over 40 different settings are displayed 5 | # see https://projects.raspberrypi.org/en/projects/getting-started-with-picamera for more info on picamera 6 | # video is recorded in h264 format, use omxplayer FILENAME.h264 to see video on raspberry pi (use terminal) 7 | # application: program is useful to see preset processing options available with picamera 8 | # see example demo videos at: https://www.youtube.com/watch?v=MCXqdq1Xw9A 9 | 10 | 11 | 12 | from picamera import PiCamera 13 | from time import sleep 14 | from picamera import PiCamera, Color 15 | from picamera import PiCamera, Color 16 | import datetime as dt 17 | 18 | # set parameters 19 | FILENAME = '/home/pi/videodemo.h264' 20 | 21 | camera = PiCamera() 22 | 23 | camera.annotate_background = Color('blue') 24 | camera.annotate_foreground = Color('yellow') 25 | sleep(1) 26 | camera.start_preview() 27 | 28 | camera.start_recording(FILENAME) 29 | 30 | 31 | camera.annotate_text = " Image displayed at default settings " 32 | sleep(5) 33 | 34 | 35 | for i in range(100): 36 | camera.annotate_text = " Brightness : %s " % i 37 | camera.brightness = i 38 | sleep(0.1) 39 | 40 | camera.brightness = 50 41 | camera.annotate_text = " Image displayed at default settings " 42 | sleep(3) 43 | 44 | 45 | for i in range(100): 46 | 
camera.annotate_text = " Contrast: %s " % i 47 | camera.contrast = i 48 | sleep(0.1) 49 | 50 | camera.contrast = 0 51 | camera.annotate_text = " Image displayed at default settings " 52 | sleep(3) 53 | 54 | 55 | for effect in camera.AWB_MODES: 56 | camera.awb_mode = effect 57 | camera.annotate_text = " AWB Effect: %s " % effect 58 | sleep(5) 59 | 60 | 61 | camera.awb_mode = 'auto' 62 | camera.annotate_text = " Image displayed at default settings " 63 | sleep(3) 64 | 65 | 66 | 67 | for effect in camera.IMAGE_EFFECTS: 68 | camera.image_effect = effect 69 | camera.annotate_text = " IMAGE Effect: %s " % effect 70 | sleep(5) 71 | 72 | camera.image_effect = 'none' 73 | camera.annotate_text = " Image displayed at default settings " 74 | sleep(3) 75 | 76 | 77 | 78 | for effect in camera.EXPOSURE_MODES: 79 | camera.exposure_mode = effect 80 | camera.annotate_text = " EXPOSURE MODES Effect: %s " % effect 81 | sleep(5) 82 | 83 | 84 | camera.exposure_mode = 'auto' 85 | sleep(2) 86 | camera.annotate_text = " Image displayed at default settings " 87 | sleep(3) 88 | 89 | 90 | 91 | camera.stop_recording() 92 | camera.stop_preview() 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /Image Processing/3dRGBmeshgrid.py: -------------------------------------------------------------------------------- 1 | 2 | #creates 3d RGB plot/meshgrid 3 | #shows both RGB quad and meshgrids 4 | 5 | 6 | import matplotlib.pyplot as plt 7 | import matplotlib.image as mpimg 8 | import matplotlib.cm as cm 9 | import matplotlib.gridspec as gridspec 10 | import numpy as np 11 | import cv2 12 | from mpl_toolkits.mplot3d import Axes3D 13 | 14 | 15 | #set parameters/load image 16 | 17 | fig = plt.figure(figsize=(16,10)) 18 | 19 | img = mpimg.imread("/home/pi/Desktop/testimage1.jpg") 20 | 21 | #set values of x,y lines for 3d mesh 22 | 23 | y1=1175 24 | y2=1487 25 | 26 | x1=1045 27 | x2=1547 28 | 29 | 30 | #set matrix 31 | 32 | B = 
img[:,:,2] 33 | G = img[:,:,1] 34 | R = img[:,:,0] 35 | 36 | 37 | 38 | pixels = B[y1:y2, x1:x2] 39 | 40 | Y = np.arange (y1,y2) 41 | X= np.arange (x1,x2) 42 | X,Y = np.meshgrid(X,Y) 43 | 44 | XX,YY = np.meshgrid(X, Y, sparse=True) 45 | Z = pixels 46 | 47 | 48 | # Plot the surface 1 49 | 50 | fig.subplots_adjust(hspace=0.3) 51 | ax1 = fig.add_subplot(3,1,1, projection='3d') 52 | ax1.set_zlabel('Blue') 53 | ax1.tick_params(axis='x',colors="blue") 54 | 55 | 56 | surf1 = ax1.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=0, antialiased=False) 57 | ay1= plt.ylim (y2,y1) 58 | ax1.tick_params(axis='y',colors="blue") 59 | #ax1.set_zticks([0,1,2,4,8,16,32,64,128,256]) 60 | #fig.colorbar(1) 61 | 62 | # Plot the surface 2 63 | ax2 = fig.add_subplot(3,1,2, projection='3d') 64 | Z = G[y1:y2, x1:x2] 65 | ay2= plt.ylim (y2,y1) 66 | 67 | ax2.set_zlabel('Green') 68 | 69 | 70 | ax2.tick_params(axis='x',colors="green") 71 | ax2.tick_params(axis='y',colors="green") 72 | 73 | surf2 = ax2.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=0, antialiased=False) 74 | 75 | # Plot the surface 3 76 | 77 | Z = R[y1:y2, x1:x2] 78 | ax3 = fig.add_subplot(3,1,3,projection='3d') 79 | 80 | 81 | ay3= plt.ylim (y2,y1) 82 | 83 | ax3.set_zlabel('Red') 84 | ax3.set_xlabel('Horizontal - portion displayed out of 3280 ') 85 | ax3.set_ylabel('Vertical -portion displayed out of 2464') 86 | 87 | ax3.tick_params(axis='x',colors="red") 88 | ax3.tick_params(axis='y',colors="red") 89 | 90 | ax3.xaxis.labelpad =30 91 | ax3.yaxis.labelpad =20 92 | 93 | surf3 = ax3.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=0, antialiased=False) 94 | 95 | 96 | 97 | 98 | plt.tight_layout() 99 | 100 | 101 | 102 | 103 | plt.savefig("/home/pi/Desktop/3dtestsmall section.png") 104 | 105 | 106 | 107 | #plt RGB quad 108 | 109 | 110 | 111 | f, axarr = plt.subplots(2, 2) 112 | axarr[0,0].imshow(img, cmap = cm.Greys_r) 113 | axarr[0,0].set_title("RGB") 114 | axarr[0,0].axis('on') 115 | 116 | axarr[0,1].imshow(B, cmap = cm.Greys_r) 117 
| axarr[0,1].set_title("Blue") 118 | axarr[0,1].axis('on') 119 | 120 | axarr[1,0].imshow(G, cmap = cm.Greys_r) 121 | axarr[1,0].set_title("Green") 122 | axarr[1,0].axis('on') 123 | 124 | axarr[1,1].imshow(R, cmap = cm.Greys_r) 125 | axarr[1,1].set_title("Red") 126 | axarr[1,1].axis('on') 127 | 128 | 129 | # Fine-tune figure; hide x ticks for top plots and y ticks for right plots 130 | plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False) 131 | plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False) 132 | 133 | 134 | plt.tight_layout() 135 | 136 | plt.show() 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /Image Processing/HDRimaging.py: -------------------------------------------------------------------------------- 1 | # Process HDR pictures 2 | # files and exp data from takeHDRpictures.py 3 | #plots crf function 4 | # creates HDR and tonemapped images:Robertson,Debevek,fusion_mertens 5 | # see:https://docs.opencv.org/3.1.0/d2/df0/tutorial_py_hdr.html 6 | 7 | 8 | 9 | 10 | import cv2 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | import matplotlib.image as mping 14 | 15 | # Loading exposure images into a list 16 | img_fn = ["/home/pi/Desktop/img01.jpg", "/home/pi/Desktop/img02.jpg", "/home/pi/Desktop/img03.jpg", "/home/pi/Desktop/img04.jpg"] 17 | 18 | filename="expfile.npy" 19 | 20 | img_list = [cv2.imread(fn) for fn in img_fn] 21 | exposure_times=np.load(filename) 22 | 23 | #exposure_times = np.array([0.1, 1.0, 4.0, 8.0], dtype=np.float32) 24 | print (exposure_times) 25 | 26 | 27 | # Merge exposures to HDR image 28 | merge_debvec = cv2.createMergeDebevec() 29 | hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy()) 30 | merge_robertson = cv2.createMergeRobertson() 31 | hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy()) 32 | 33 | dimensions=hdr_debvec.shape 34 | print (dimensions) 35 | 36 | # Tonemap HDR image 37 | 
tonemap1 = cv2.createTonemapDurand(gamma=2.2) 38 | res_debvec = tonemap1.process(hdr_debvec.copy()) 39 | tonemap2 = cv2.createTonemapDurand(gamma=1.3) 40 | res_robertson = tonemap2.process(hdr_robertson.copy()) 41 | print ("step2") 42 | 43 | 44 | # Exposure fusion using Mertens 45 | merge_mertens = cv2.createMergeMertens() 46 | res_mertens = merge_mertens.process(img_list) 47 | 48 | 49 | 50 | print ("step3") 51 | 52 | 53 | # Convert datatype to 8-bit and save 54 | res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8') 55 | res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8') 56 | res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8') 57 | 58 | cv2.imwrite("/home/pi/Desktop/ldr_debvec.jpg", res_debvec_8bit) 59 | cv2.imwrite("/home/pi/Desktop/ldr_robertson.jpg", res_robertson_8bit) 60 | cv2.imwrite("/home/pi/Desktop/fusion_mertens.jpg", res_mertens_8bit) 61 | 62 | cv2.imwrite("/home/pi/Desktop/hdr_debvec.jpg", hdr_debvec) 63 | cv2.imwrite("/home/pi/Desktop/hdr_robertson.jpg", hdr_robertson) 64 | 65 | 66 | 67 | cal_debvec = cv2.createCalibrateDebevec() 68 | crf_debvec = cal_debvec.process(img_list, times=exposure_times) 69 | hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy(), response=crf_debvec.copy()) 70 | cal_robertson = cv2.createCalibrateRobertson() 71 | crf_robertson = cal_robertson.process(img_list, times=exposure_times) 72 | hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy(), response=crf_robertson.copy()) 73 | 74 | # Obtain Camera Response Function (CRF) 75 | gr = crf_debvec [:,:,0] 76 | gb = crf_debvec [:,:,1] 77 | gg = crf_debvec[:,:,2] 78 | 79 | 80 | 81 | plt.figure(figsize=(10,10)) 82 | plt.ylim((0,14)) 83 | plt.xlim((0,256)) 84 | 85 | 86 | plt.plot(range(256),gr, color = "red" ,linestyle = "-") 87 | 88 | plt.plot(range(256),gg, color = "green" ,linestyle = "-") 89 | plt.plot(range(256),gb, color = "blue" ,linestyle = "-") 90 | 91 | plt.ylabel('Calibrated 
Intensity') 92 | plt.xlabel('Measured Intensity') 93 | 94 | plt.show() 95 | 96 | 97 | 98 | 99 | calibrateDebevec = cv2.createCalibrateDebevec() 100 | responseDebevec = calibrateDebevec.process(img_list, exposure_times) 101 | 102 | dimensions=responseDebevec.shape 103 | print (dimensions) 104 | 105 | 106 | cv2.imwrite("/home/pi/Desktop/hdr_debvec.jpg", hdr_debvec) 107 | cv2.imwrite("/home/pi/Desktop/hdr_robertson.jpg", hdr_robertson) 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /Image Processing/HSVcolortrackbar.py: -------------------------------------------------------------------------------- 1 | # sets up trackbars to ananlyze image in HSV colorspace 2 | #shows trackbar mask, input and result 3 | # see https://www.learnopencv.com/color-spaces-in-opencv-cpp-python/ for more info on colorspace 4 | #esc to quit 5 | #red typical H: 156-179, S:117-255 , V: 98-255 6 | #green typical H: 40-85, S:255-255 , V: 19-255 7 | #blue typical H: 100-127, S:107-255 , V: 152-255 8 | #yellow typical H: 15-35, S:77-255 , V: 165-255 9 | 10 | import cv2 11 | import numpy as np 12 | from matplotlib import pyplot as plt 13 | 14 | def nothing(x): 15 | pass 16 | 17 | #setparameters input /output file/display size 18 | 19 | filein = '/home/pi/Desktop/starpattern1.jpg' 20 | #mask output 21 | FILEOUT1 = '/home/pi/Desktop/result.png' 22 | #result output 23 | FILEOUT2 = '/home/pi/Desktop/mask.png' 24 | 25 | 26 | #rsize parameter , smaller < 1 27 | #resizes for display .2 for 3280x2464, 1 for 640x480 28 | rsize =.2 29 | 30 | #read file in color 31 | 32 | img = cv2.imread(filein, 1) 33 | 34 | #establish initial range 35 | 36 | Hstart,Sstart,Vstart = 0,0,0 37 | Hend,Send,Vend = 179,255,255 38 | 39 | #convert into HSV colorspace 40 | 41 | hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) 42 | 43 | lower_range = np.array([Hstart,Sstart,Vstart], dtype=np.uint8) 44 | upper_range = np.array([Hend,Send,Vend], dtype=np.uint8) 45 | 46 | 47 | mask = 
cv2.inRange(hsv, lower_range, upper_range) 48 | 49 | 50 | maskr = cv2.resize(mask,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC) 51 | imgr = cv2.resize(img,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC) 52 | 53 | cv2.imshow('mask',maskr) 54 | cv2.imshow('image', imgr) 55 | 56 | cv2.createTrackbar('Hstart','mask',0,179,nothing) 57 | cv2.createTrackbar('Hend','mask',0,179,nothing) 58 | 59 | cv2.createTrackbar('Sstart','mask',0,255,nothing) 60 | cv2.createTrackbar('Send','mask',0,255,nothing) 61 | 62 | cv2.createTrackbar('Vstart','mask',0,255,nothing) 63 | cv2.createTrackbar('Vend','mask',0,255,nothing) 64 | 65 | 66 | while(1): 67 | 68 | 69 | 70 | 71 | maskr = cv2.resize(mask,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC) 72 | cv2.imshow('mask',maskr) 73 | 74 | imgr = cv2.resize(img,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC) 75 | cv2.imshow('image', imgr) 76 | 77 | Hstart = cv2.getTrackbarPos('Hstart', 'mask') 78 | Hend = cv2.getTrackbarPos('Hend', 'mask') 79 | 80 | Sstart = cv2.getTrackbarPos('Sstart', 'mask') 81 | Send = cv2.getTrackbarPos('Send', 'mask') 82 | 83 | Vstart = cv2.getTrackbarPos('Vstart', 'mask') 84 | Vend = cv2.getTrackbarPos('Vend', 'mask') 85 | 86 | lower_range = np.array([Hstart,Sstart,Vstart], dtype=np.uint8) 87 | upper_range = np.array([Hend,Send,Vend], dtype=np.uint8) 88 | 89 | mask = cv2.inRange(hsv, lower_range, upper_range) 90 | 91 | result = cv2.bitwise_and(img,img,mask = mask) 92 | imgr = cv2.resize(result,None,fx=rsize, fy=rsize, interpolation = cv2.INTER_CUBIC) 93 | 94 | cv2.imshow('result',imgr) 95 | 96 | 97 | # press escape to exit 98 | k = cv2.waitKey(37) 99 | if k == 27: 100 | break 101 | 102 | 103 | cv2.imwrite(FILEOUT1,result) 104 | cv2.imwrite(FILEOUT2,mask) 105 | 106 | cv2.destroyAllWindows() 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | """ 123 | 124 | 125 | img = cv2.blur(img_noblur, (7,7)) 126 | canny_edge = cv2.Canny(img, 0, 0) 
127 | 128 | 129 | imgr = cv2.resize(img,None,fx=.2, fy=.2, interpolation = cv2.INTER_CUBIC) 130 | cv2.imshow('image', imgr) 131 | 132 | res = cv2.resize(canny_edge,None,fx=.2, fy=.2, interpolation = cv2.INTER_CUBIC) 133 | cv2.imshow('canny_edge', canny_edge) 134 | 135 | cv2.createTrackbar('min_value','canny_edge',0,500,nothing) 136 | cv2.createTrackbar('max_value','canny_edge',0,500,nothing) 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | while(1): 152 | imgr = cv2.resize(img,None,fx=.2, fy=.2, interpolation = cv2.INTER_CUBIC) 153 | cv2.imshow('image', imgr) 154 | 155 | #cv2.imshow('image', img) 156 | 157 | 158 | res = cv2.resize(canny_edge,None,fx=.2, fy=.2, interpolation = cv2.INTER_CUBIC) 159 | cv2.imshow('canny_edge', res) 160 | 161 | min_value = cv2.getTrackbarPos('min_value', 'canny_edge') 162 | max_value = cv2.getTrackbarPos('max_value', 'canny_edge') 163 | 164 | canny_edge = cv2.Canny(img, min_value, max_value) 165 | 166 | k = cv2.waitKey(37) 167 | if k == 27: 168 | break 169 | 170 | """ 171 | -------------------------------------------------------------------------------- /computer vision course.md: -------------------------------------------------------------------------------- 1 | ## Computer Vision courses with video lectures 2 | 3 | 4 | ------------------------- 5 | 6 | 7 | ------- 8 | 9 | ### Computer Graphics 10 | 11 | - [CS184 - Computer Graphics, Fall 2012 - UC Berkeley](http://inst.eecs.berkeley.edu/~cs184/fa12/onlinelectures.html) 12 | - [ECS 175 - Computer Graphics, Fall 2009 - UC Davis](https://itunes.apple.com/us/itunes-u/computer-graphics-fall-2009/id457893733?mt=10) 13 | - [Introduction to Computer Graphics - IIT Delhi](http://nptel.ac.in/courses/106102065/) 14 | - [Computer Graphics - IIT Madras](http://nptel.ac.in/courses/106106090/) 15 | - [Computer Graphics 2012, Wolfgang Huerst, Utrecht University](https://www.youtube.com/playlist?list=PLDFA8FCF0017504DE) 16 | - [CS 5630/6630 - Visualization, 
Fall 2016, University of Utah](http://dataviscourse.net/2016/index.html) ([Lectures - Youtube](https://www.youtube.com/playlist?list=PLbuogVdPnkCpQY3miQpTJtnHgCLze2lr0)) 17 | - [Advanced Visualization UC Davis](https://www.youtube.com/playlist?list=PLslgisHe5tBNnySlj9TlL-n-4jNEK-xgi) 18 | - [CSCI E-234 - Introduction to Computer Graphics and GPU Programming, Harvard Extension School](https://itunes.apple.com/us/itunes-u/csci-e-234-introduction-to/id429428034?mt=10) 19 | - [Computer Graphics Fall 2011, Barbara Hecker](https://www.youtube.com/playlist?list=PL9C949E9F19381E61) 20 | - [Introduction to Graphics Architecture](https://www.youtube.com/playlist?list=PL4A8BA1C3B38CFCA0) 21 | - [Ray Tracing for Global Illumination, UCDavis](https://www.youtube.com/playlist?list=PLslgisHe5tBPckSYyKoU3jEA4bqiFmNBJ) 22 | - [Rendering / Ray Tracing Course, SS 2015 - TU Wien](https://www.youtube.com/playlist?list=PLujxSBD-JXgnGmsn7gEyN28P1DnRZG7qi) 23 | - [ECS 178 Introduction to Geometric Modeling, Fall 2012, UC Davis](http://graphics.cs.ucdavis.edu/~joy/ecs178/Units.html) ([iTunes](https://itunes.apple.com/us/itunes-u/computer-science-introduction/id389259246)) 24 | - [Computational Geometry - IIT Delhi](http://nptel.ac.in/courses/106102011/) 25 | - [CS 468 - Differential Geometry for Computer Science - Stanford University](http://graphics.stanford.edu/courses/cs468-13-spring/schedule.html) 26 | 27 | ------- 28 | 29 | ### Image Processing and Computer Vision 30 | 31 | - [MOOC - Digital Image procesing - Duke/Coursera](https://www.youtube.com/playlist?list=PLZ9qNFMHZ-A79y1StvUUqgyL-O0fZh2rs) 32 | - [Computer Vision 2011 - EPFL, Switzerland](http://www.klewel.com/conferences/epfl-computer-vision/) 33 | - [Digital Image Processing - IIT Kharagpur](http://nptel.ac.in/courses/117105079/) 34 | - [Image Processing and Analysis - UC Davis](https://www.youtube.com/playlist?list=PLA64AFAE28B8DD0FD) 35 | - [CAP 5415 - Computer Vision - University of Central 
Florida](http://crcv.ucf.edu/courses/CAP5415/Fall2014/index.php) 36 | - [EE225B - Digital Image Processing, Spring 2014 - UC Berkeley](https://inst.eecs.berkeley.edu/~ee225b/sp14/) ([Videos - Spring 2006](http://www-video.eecs.berkeley.edu/~avz/video_225b.html)) 37 | - [EE637 - Digital Image Processing I - Purdue University](https://engineering.purdue.edu/~bouman/ece637/) ([Videos - Sp 2011 ](https://www.youtube.com/user/ModelBasedImaging),[Videos - Sp 2007](https://engineering.purdue.edu/~bouman/ece637/lectures/lectures07/)) 38 | - [Computer Vision I: Variational Methods - TU München](https://vision.in.tum.de/teaching/ws2015/vmcv2015) ([YouTube](https://www.youtube.com/playlist?list=PLTBdjV_4f-EJ7A2iIH5L5ztqqrWYjP2RI)) 39 | - [Computer Vision II: Multiple View Geometry (IN2228), SS 2016 - TU München](http://vision.in.tum.de/teaching/ss2016/mvg2016) ([YouTube](https://www.youtube.com/playlist?list=PLTBdjV_4f-EJn6udZ34tht9EVIW7lbeo4)) 40 | - [EGGN 510 - Image and Multidimensional Signal Processing - Colorado School of Mines](http://inside.mines.edu/~whoff/courses/EENG510/lectures/) 41 | - [EENG 512/CSCI 512 - Computer Vision - Colorado School of Mines](http://inside.mines.edu/~whoff/courses/EENG512/lectures/) 42 | - [Computer Vision for Visual Effects - RPI](https://www.ecse.rpi.edu/~rjradke/cvfxcourse.html) ([YouTube](https://www.youtube.com/playlist?list=PLuh62Q4Sv7BUJlKlt84HFqSWfW36MDd5a)) 43 | - [Introduction to Image Processing - RPI](https://www.ecse.rpi.edu/~rjradke/improccourse.html) ([YouTube](https://www.youtube.com/playlist?list=PLuh62Q4Sv7BUf60vkjePfcOQc8sHxmnDX)) 44 | - [Digital Signal Processing - RPI](https://www.ecse.rpi.edu/~rjradke/dspcourse.html) 45 | - [Advanced Vision 2014 - University of Edinburgh](http://homepages.inf.ed.ac.uk/rbf/AVINVERTED/main_av.htm) 46 | - [Photogrammetry Course - 2015/16 - University of Bonn, Germany](https://www.youtube.com/playlist?list=PLgnQpQtFTOGRsi5vzy9PiQpNWHjq-bKN1) 47 | - [MOOC - Introduction to Computer Vision 
- Udacity](https://www.youtube.com/playlist?list=PLAwxTw4SYaPnbDacyrK_kB_RUkuxQBlCm) 48 | - [Biometrics - IIT Kanpur](http://nptel.ac.in/courses/106104119/) 49 | 50 | 51 | - 52 | -------------------------------------------------------------------------------- /Pi Camera/exposuremosaic.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | 4 | Creates exposure matrix 5 | 6 | Raspberry camera settings: Manual Raspberry Pi cameras settings are described in https://picamera.readthedocs.io/en/release-1.13/fov.html. 7 | Some ot the major exposure control settings for the V2 camera are listed below: 8 | --shutter_speed - controls exposure times, max length is 10 sec. Related to frame rate 9 | --ISO - ISO controls sensitivity of the camera (by adjusting the analog_gain and digital_gain). Values are between 0 (auto) and 1600. The actual value used when iso is explicitly set will be one of the following values (whichever is closest): 100, 200, 320, 400, 500, 640, 800. 10 | --AWB - Auto white balance controls (red, blue) gains and ‘balances’ the color. 
11 | 12 | 13 | matrix set for exposure time vs ISO 14 | """ 15 | 16 | 17 | 18 | from picamera import PiCamera 19 | from time import sleep 20 | from fractions import Fraction 21 | import cv2 22 | import numpy as np 23 | import numpy as np 24 | import matplotlib.pyplot as plt 25 | import matplotlib.image as mpimg 26 | 27 | 28 | 29 | #set parameters/resolution and framerate 30 | #note Maximum framerate is determined by the minimum exposure time (INVERSE RELATED) 31 | camera = PiCamera(resolution=(1280, 720),framerate=(.2),sensor_mode=3) 32 | 33 | 34 | font = cv2.FONT_HERSHEY_SIMPLEX 35 | 36 | 37 | 38 | #set wait time should be min 5 sec 39 | 40 | waitime = 3 41 | 42 | 43 | 44 | 45 | #TEST PICTURE 46 | print ("test picture ") 47 | sleep(waitime) 48 | camera.capture('/home/pi/Desktop/testexposure1.jpg') 49 | 50 | #set X AXIS /horizontal # 51 | #camera.shutter_speed =EXPOSURE TIME/time in microseconds 52 | xrange = 5 53 | etime = 100000 54 | #set Y AXIS/number of ISO values 55 | #ISO values between 0(auto),100,200,300,400,500,600,700,800 56 | yrange = 5 57 | 58 | #awb gains between 0.0 and 8.0 (typical gains between 0.9 and 1.9) 59 | print ("Manual mode") 60 | print("X Y Time(microsecs) ISO") 61 | 62 | for y in range (1,yrange): 63 | 64 | for x in range (1,xrange): 65 | 66 | 67 | if x == 1: 68 | #capture seed picture 69 | camera.shutter_speed = etime*x 70 | camera.iso = 100*y 71 | sleep(waitime) 72 | camera.capture('/home/pi/Desktop/testexposure.jpg') 73 | 74 | 75 | img = cv2.imread('/home/pi/Desktop/testexposure.jpg') 76 | cv2.putText(img,"{0},{1}".format(x,y),(5,50), font, 2, (0,255,0), 9, cv2.LINE_AA) 77 | 78 | img = cv2.resize(img, (0, 0), None, .25, .25) 79 | # print ((x),(y),"{0:10s}".format(str(camera.shutter_speed/10000000)),(camera.iso)) 80 | print ((x),(y),"{0:15s}".format(str(round(camera.shutter_speed))),(camera.iso)) 81 | else: 82 | 83 | 84 | camera.shutter_speed = etime*x 85 | camera.iso = 100*y 86 | sleep(waitime) 87 | camera.capture('img1.jpg') 88 | img1 
= cv2.imread('img1.jpg') 89 | cv2.putText(img1,"{0},{1}".format(x,y),(5,50), font, 2, (0,255,0), 9, cv2.LINE_AA) 90 | img1 = cv2.resize(img1, (0, 0), None, .25, .25) 91 | img = np.concatenate((img, img1), axis=1) 92 | # print ((x),(y),"{0:10s}".format(str(camera.shutter_speed/10000000)),(camera.iso)) 93 | print ((x),(y),"{0:15s}".format(str(round(camera.shutter_speed))),(camera.iso)) 94 | 95 | 96 | if y==1: 97 | img3=img 98 | else: 99 | 100 | img3= np.concatenate((img3, img), axis=0) 101 | 102 | #colect auto mode last row 103 | 104 | for x in range (1,xrange): 105 | 106 | if x == 1: 107 | #capture seed picture 108 | camera.shutter_speed = 0 109 | camera.iso = 0 110 | sleep(waitime) 111 | camera.capture('/home/pi/Desktop/testexposure3.jpg') 112 | print ("AUTO MODE") 113 | 114 | img4 = cv2.imread('/home/pi/Desktop/testexposure3.jpg') 115 | cv2.putText(img4,"auto",(5,50), font, 2, (0,255,0), 9, cv2.LINE_AA) 116 | 117 | img4 = cv2.resize(img4, (0, 0), None, .25, .25) 118 | 119 | else: 120 | 121 | camera.shutter_speed = 0 122 | camera.iso = 0 123 | sleep(waitime) 124 | camera.capture('img5.jpg') 125 | img5 = cv2.imread('img5.jpg') 126 | cv2.putText(img5,"auto",(5,50), font, 2, (0,255,0), 9, cv2.LINE_AA) 127 | img5 = cv2.resize(img5, (0, 0), None, .25, .25) 128 | img4 = np.concatenate((img4, img5), axis=1) 129 | 130 | print ((x),(y),"{0:15s}".format(str(round(camera.shutter_speed))),(camera.iso)) 131 | 132 | 133 | img3 = np.concatenate((img3, img4), axis=0) 134 | 135 | cv2.imwrite("/home/pi/Desktop/expmatrix.jpg", img3) 136 | sleep(.1) 137 | 138 | 139 | cv2.imshow("img3", img3) 140 | 141 | #shut down steps else camera may lock/freeze 142 | 143 | cv2.waitKey() 144 | cv2.destroyAllWindows() 145 | camera.framerate = 30 146 | sleep(.3) 147 | camera.close() 148 | -------------------------------------------------------------------------------- /Image Processing/realtimelineRGBintensitydisplay.py: -------------------------------------------------------------------------------- 
1 | # code display video image and matplotlib graph. a trackbar is used to select a vertical video line 2 | #The plot displays the image (line) RGB components and is updated at video frame rates 3 | # code was modified from https://nrsyed.com/2018/02/08/real-time-video-histograms-with-opencv-and-python/ tutorial 4 | # apapted for picamera, line intensity analysis and trackbar 5 | # press q to quit 6 | 7 | from picamera.array import PiRGBArray 8 | from picamera import PiCamera 9 | import time 10 | import argparse 11 | import cv2 12 | import matplotlib.pyplot as plt 13 | import numpy as np 14 | 15 | 16 | def nothing(x): 17 | pass 18 | 19 | 20 | 21 | # set windows for graph and trackbar 22 | cv2.namedWindow("RGB") 23 | cv2.createTrackbar ('verticalline#',"RGB",0,479,nothing) 24 | 25 | 26 | parser = argparse.ArgumentParser() 27 | parser.add_argument('-f', '--file', 28 | help='Path to video file (if not using camera)') 29 | parser.add_argument('-c', '--color', type=str, default='rgb', 30 | help='Color space: "gray" (default) or "rgb"') 31 | parser.add_argument('-b', '--bins', type=int, default=640, 32 | help='Number of bins per channel (default 16)') 33 | parser.add_argument('-w', '--width', type=int, default=0, 34 | help='Resize video to specified width in pixels (maintains aspect)') 35 | args = vars(parser.parse_args()) 36 | 37 | #set file parameters 38 | color = args['color'] 39 | bins = args['bins'] 40 | resizeWidth = args['width'] 41 | 42 | 43 | font = cv2.FONT_HERSHEY_COMPLEX 44 | SIZE = (640,480) 45 | FILEOUT = '/home/pi/Desktop/testimage1.jpg' 46 | 47 | 48 | # Configure VideoCapture class instance for using camera or file input. 
49 | if not args.get('file', False): 50 | capture = cv2.VideoCapture(0) 51 | else: 52 | capture = cv2.VideoCapture(args['file']) 53 | 54 | 55 | # initialize the camera and grab a reference to the raw camera capture 56 | camera = PiCamera() 57 | 58 | #set filename/resolution 59 | #resolution size 4:3 options: (1920,1088),(1640,1232),(640,480) 60 | # note (3280,2464) provides 'out of resources' 61 | 62 | camera.resolution = (SIZE) 63 | camera.framerate = 30 64 | rawCapture = PiRGBArray(camera, size=(SIZE)) 65 | 66 | 67 | # allow the camera to warmup 68 | time.sleep(0.1) 69 | 70 | 71 | # Initialize plot. 72 | fig, ax = plt.subplots() 73 | if color == 'rgb': 74 | ax.set_title('Line intensity ') 75 | else: 76 | ax.set_title('Line Intensity(grayscale)') 77 | ax.set_xlabel('horizontal line #') 78 | ax.set_ylabel('Intensity') 79 | 80 | # Initialize plot line object(s). Turn on interactive plotting and show plot. 81 | lw = 3 82 | alpha = 0.5 83 | if color == 'rgb': 84 | lineR, = ax.plot(np.arange(bins), np.zeros((bins,)), c='r', lw=lw, alpha=alpha) 85 | lineG, = ax.plot(np.arange(bins), np.zeros((bins,)), c='g', lw=lw, alpha=alpha) 86 | lineB, = ax.plot(np.arange(bins), np.zeros((bins,)), c='b', lw=lw, alpha=alpha) 87 | 88 | 89 | else: 90 | lineGray, = ax.plot(np.arange(bins), np.zeros((bins,1)), c='k', lw=lw) 91 | ax.set_xlim(0, bins-1) 92 | ax.set_ylim(0, 256) 93 | plt.ion() 94 | plt.show() 95 | 96 | 97 | 98 | # capture frames from the camera 99 | for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): 100 | # grab the raw NumPy array representing the image, then initialize the timestamp 101 | # and occupied/unoccupied text 102 | image = frame.array 103 | image1= image 104 | 105 | y1 =cv2.getTrackbarPos ('verticalline#',"RGB") 106 | 107 | # Resize frame to width, if specified. 
108 | if resizeWidth > 0: 109 | (height, width) = image.shape[:2] 110 | resizeHeight = int(float(resizeWidth / width) * height) 111 | image = cv2.resize(image, (resizeWidth, resizeHeight),interpolation=cv2.INTER_AREA) 112 | 113 | # Normalize histograms based on number of pixels per frame. 114 | numPixels = np.prod(image.shape[:2]) 115 | if color == 'rgb': 116 | 117 | # split imge the fast way 118 | 119 | B1 = image[:,:,0] 120 | G1 = image[:,:,1] 121 | R1 = image[:,:,2] 122 | 123 | pixelsB = B1[y1,] 124 | pixelsG = G1[y1,] 125 | pixelsR = R1[y1,] 126 | 127 | 128 | # update plot 129 | 130 | lineR.set_ydata(pixelsR) 131 | lineG.set_ydata(pixelsG) 132 | lineB.set_ydata(pixelsB) 133 | 134 | 135 | #placeholder for grey scale (not working) 136 | else: 137 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 138 | cv2.imshow('Grayscale', gray) 139 | pixelsgray = image [y1:] 140 | lineGray.set_ydata(pixelsgray) 141 | 142 | 143 | 144 | #create trackbar 145 | 146 | 147 | # cv2.putText(image1,str(y1), (250,250),font,4,(0,0,255)) 148 | fig.canvas.draw() 149 | cv2.line(image1,(1,y1),(640,y1),(0,255,0),1) 150 | cv2.imshow('RGB', image) 151 | 152 | 153 | # clear the stream in preparation for the next frame 154 | #press q to quit (several times) 155 | 156 | rawCapture.truncate(0) 157 | 158 | if cv2.waitKey(1) & 0xFF == ord('q'): 159 | break 160 | 161 | capture.release() 162 | cv2.destroyAllWindows() 163 | 164 | 165 | cv2.imwrite(FILEOUT,image) 166 | camera.close() 167 | cv2.waitKey(0) 168 | -------------------------------------------------------------------------------- /Pi Camera/videooverlayfinal.py: -------------------------------------------------------------------------------- 1 | ''' 2 | -calibration overlay program 3 | -loads file, request user input and then creates transparent grid video overlay 4 | -user input is two mouse clicks and integer input (number of ruler divisions) 5 | -requires picamera, opencv, matplotlib 6 | -press q to exit from video 7 | 8 | ''' 9 | from 
picamera.array import PiRGBArray 10 | from picamera import PiCamera 11 | import time 12 | import cv2 13 | import sys 14 | import matplotlib.pyplot as plt 15 | import numpy as np 16 | import os 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | #set parameters 26 | #set filename/resolution 27 | #resolution size 4:3 options: (1920,1088),(1640,1232),(640,480) 28 | # note (3280,2464) provides 'out of resources' 29 | SIZE = (640,480) 30 | 31 | # provide calibration file /Output file address 32 | 33 | calimagefilename = "/home/pi/Desktop/microscope1.jpg" 34 | FILEOUT = '/home/pi/Desktop/testimage1.jpg' 35 | 36 | 37 | # display parameters 38 | 39 | font = cv2.FONT_HERSHEY_SIMPLEX 40 | 41 | opacity = .5 # sets grid transparency (.1 most transparent, 1 =no transparency) 42 | 43 | 44 | # losd cslibration file 45 | 46 | 47 | 48 | try: 49 | img=cv2.imread(calimagefilename, 1) 50 | except: 51 | print ('faild to load %s' % imagefilename) 52 | quit() 53 | 54 | usage='left click to draw a circle.\nright click to draw a rectangle.\n' 55 | usage='left click to draw a circle.\ndraw line by two left clicks along straight edge.\nright click to draw a rectangle.\n' 56 | usage=usage+'press any key to exit.' 57 | print(usage) 58 | 59 | a=np.array([0,0]) 60 | 61 | 62 | # request mouse clicks 63 | 64 | windowName="mouse" 65 | cv2.namedWindow(windowName) 66 | 67 | 68 | global dist 69 | dist=0 70 | 71 | def onMouse(event, x, y, flags, param): 72 | """ 73 | Mouse event callback function. 
74 | left click -> draw circle 75 | right click -> draw rectangle 76 | """ 77 | 78 | global a 79 | global dist 80 | 81 | if event == cv2.EVENT_MOUSEMOVE:return 82 | 83 | if event == cv2.EVENT_LBUTTONDOWN: 84 | center=(x,y) 85 | point1 = center 86 | radius=10 87 | color=(255,255,0) 88 | cv2.circle(img,center,radius,color) 89 | a=np.vstack([a,np.hstack([x,y])]) 90 | b=a[1:,:] 91 | 92 | 93 | if len(b) >=2: 94 | element = b[-2] 95 | elemenet2 = b[-1] 96 | 97 | cv2.line(img,(element[0],element[1]),(elemenet2[0],elemenet2[1]),(255,0,0),5) 98 | 99 | dist = cv2.norm(element,elemenet2) 100 | 101 | # right click for square marker (this could be used if you wanted to select region of interest) 102 | if event == cv2.EVENT_RBUTTONDOWN: 103 | rect_start=(x-10,y-10) 104 | rect_end=(x+10,y+10) 105 | color=(100,255,100) 106 | cv2.rectangle(img,rect_start,rect_end,color) 107 | 108 | cv2.imshow(windowName,img) 109 | 110 | 111 | #setMouseCallback(...) 112 | # setMouseCallback(windowName, onMouse [, param]) -> None 113 | cv2.setMouseCallback(windowName,onMouse) 114 | 115 | 116 | 117 | cv2.imshow(windowName,img) 118 | cv2.waitKey(0) 119 | cv2.destroyAllWindows() 120 | 121 | print ("pixel distance=", dist) 122 | 123 | 124 | 125 | #look at ruler and count divisions , typical value is 50 126 | num = int(input ("enter number of divisions and press enter")) 127 | print ("prepare for new measurement/ video turn on/press q to quit ") 128 | 129 | """ 130 | #begin video capture 131 | # initialize the camera and grab a reference to the raw camera capture 132 | # press q to quit 133 | 134 | """ 135 | camera = PiCamera() 136 | camera.resolution = (SIZE) 137 | camera.framerate = 5 138 | rawCapture = PiRGBArray(camera, size=(SIZE)) 139 | 140 | 141 | 142 | # allow the camera to warmup 143 | time.sleep(0.1) 144 | 145 | # set scale using pixel distance form caibration picture 146 | scale = dist/(num/10) 147 | 148 | # capture frames from the camera 149 | for frame in camera.capture_continuous(rawCapture, 
format="bgr", use_video_port=True): 150 | # grab the raw NumPy array representing the image, then initialize the timestamp 151 | # and occupied/unoccupied text 152 | image = frame.array 153 | 154 | 155 | # show the frame/create overlay 156 | 157 | overlay = image.copy() 158 | start = 0 159 | while start < 640: 160 | cv2.line(overlay, (0, int(start)), (640, int(start)), (255, 255, 0), 1) 161 | cv2.line(overlay, (int(start),0), (int(start),640), (255, 255, 0), 1) 162 | start += scale 163 | 164 | 165 | # blend with the original: 166 | 167 | cv2.addWeighted(overlay, opacity, image, 1 - opacity, 0, image) 168 | 169 | # SET Text/manual input 170 | 171 | #cv2.putText(image,'Grid Scale = 100 microns',(225,470), font, .5, (255,255,255), 1, cv2.LINE_AA) 172 | #cv2.putText(image,'4X Objective',(510, 470), font, .5, (200,255,255), 1, cv2.LINE_AA) 173 | cv2.putText(image,'Public Lab Overlay Test',(5,470), font, .5, (255,255,255), 1, cv2.LINE_AA) 174 | cv2.imshow("Frame", image) 175 | 176 | key = cv2.waitKey(1) & 0xFF 177 | 178 | 179 | # clear the stream in preparation for the next frame 180 | rawCapture.truncate(0) 181 | 182 | # if the `q` key was pressed, break from the loop 183 | if key == ord("q"): 184 | break 185 | 186 | 187 | cv2.imwrite(FILEOUT,image) 188 | 189 | cv2.destroyAllWindows() 190 | 191 | camera.close() 192 | cv2.waitKey(0) 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Project Title: Computer Vision enhancements for Raspberry Pi based Public Lab Science Projects 3 | 4 | ## Program Description: 5 | The project developed a series of modular python programs that support different Public Lab (https://publiclab.org/) imaging and spectrum measurement toolkits. The programs enable near real time OpenCV computer vision (CV) measurements of images or spectra. 
The CV measurements can be used to improve instrument performance (feedback that controls lighting amplitude or camera array exposure times) or assist calibration. Program was supported by the google Summer of Code 2018. 6 | 7 | * Complete Google Summer of Code proposal available at : https://publiclab.org/notes/MaggPi/03-20-2018/gsoc-proposal-computer-vision-enhancements-for-raspberry-pi-based-public-lab-science-projects 8 | 9 | * Code demonstrations can be seen at https://publiclab.org/profile/MaggPi. 10 | 11 | * Video demos at : https://www.youtube.com/channel/UCbyyYOlNo87CXJ39h3wqXZA 12 | 13 | * All code is available at: https://github.com/MargaretAN9/Peggy 14 | 15 | 16 | ## Development Environment: 17 | All programs were tested with a Raspberry Pi 3B+ (stretch), OpenCV2, Raspberry Pi NoIR camera (or webcam) and Python 3.5 18 | 19 | ## Program organization: 20 | Programs are divided into the three categories (Picamera, USB and Image Processing) that are based on different ways the image is acquired. Multiple software routines have been posted to accommodate different levels of experience. For example, a beginner will probably just want to capture an image, a slightly more experienced user will want to capture and annotate the image, and an experienced programmer will want to capture, annotate and process the image(with opencv). 
21 | ## Prerequisites 22 | Program requirements are listed on the import section of the program but here is a complete list of download instructions or install resources: 23 | 24 | * Picamera - https://picamera.readthedocs.io/en/release-1.13/install.html 25 | * OpenCV2 https://www.pyimagesearch.com/2017/09/04/raspbian-stretch-install-opencv-3-python-on-your-raspberry-pi/ 26 | * Matplotlib: sudo apt-get build-dep python-matplotlib 27 | * Omxplayer (to display h264 videos of raspberry pi): sudo apt-get install omxplayer 28 | * SciPy : sudo apt-get install python3-scipy 29 | * fswebcam: USB camera driver -sudo apt-get install fswebcam 30 | * Also don’t forget to update before downloading: sudo apt-get update and sudo apt-get dist-upgrade 31 | 32 | 33 | ## References/Acknowledgments 34 | * picamera documentation - Must read for anybody working with Raspberry Pi camera: 35 | https://picamera.readthedocs.io/en/release-1.13/ 36 | 37 | * OpenCV tutorial sites- Great online tutorials, most with step by step instructions. 38 | * Adrian Rosebrock - https://www.pyimagesearch.com/author/adrian/ 39 | * Satya Mallick - https://www.learnopencv.com/about/ 40 | * Sergio Canu - https://pysource.com/ 41 | * Dr. Robert Wilson’s git hub page https://github.com/robintw/RPiNDVI has super python coding about a host of environmental applications. 42 | * Katherine Scott (SimpleCV) and Ladyada(ADAfruit) – Their videos gave me confidence I could do this stuff. 43 | * Public Lab mentors: @amirberAgain, @warren, @icarito @cfastie @liz 44 | * Alex Norton (keep it simple) 45 | 46 | ## Software programs: 47 | 48 | #### -3dRGBmeshgrid.py 49 | Creates 3d RGB plot/meshgrid. Shows both RGB quad and meshgrids. 50 | 51 | ### Image Processing: Programs that process digital images in various ways 52 | 53 | #### -AddsAxisTolmage.py 54 | Displaying a Matplotlib Scale Image. Imports a file and displays labels (x,y, title} and resolution tick marks. Requires matplotlib. 
55 | 56 | #### -CVquaddisplay -colorandsobel .py 57 | Quad video for real time image processing. Sets up 3 windows -trackbar windows and video quad display showing camera settings, color filter and sobel edge detection. Trackbar also sets up blob detection see Satya Mallick https://www.learnopencv.com/tag/blob-detector/ 58 | Records video by setting file name, videowriter format and enable 'out.write(combined)' and 'write video' commands 59 | 60 | #### -HDRimaging.py 61 | Processes HDR pictures. Use takeHDRpictures.py and capture pictures for input. Plots crf function. Creates HDR and tonemapped images:Robertson,Debevek,fusion_mertens. See:https://docs.opencv.org/3.1.0/d2/df0/tutorial_py_hdr.html for more info 62 | 63 | #### -histogram3.py 64 | Plot histograms #calculates grayscale/color and 2d histograms. See https://lmcaraig.com/image-histograms-histograms-equalization-and-histograms-comparison/. 65 | 66 | #### -HSV.py 67 | Imports file and shows HSV quad picture. Uses matplotilib,numpy,opencv. 68 | 69 | #### -HSVcolortrachbar.py 70 | Sets up trackbars to analyze image in HSV colorspace. Shows trackbar mask, input and result. Esc to quit #red typical H: 156-179, S:117-255, V: 98-255 #green typical H: 40-85, S:255-255, V: 19-255. Blue typical H: 100-127, S:107-255, V: 152-255. Yellow typical H: 15-35, S:77-255, V: 165-255 71 | 72 | #### -NDVI Red/Gain optimization program 73 | Program displays (and records) an RGB//B/NDVI(fastie)/NDVI(Jet) quad video. Tested with a Raspberry Pi NoIR camera with blue filter. Trackbars select gain settings. Program opens at zero gain so need to move red/blue gain to .5/.5 to see first images. NDVI equations from https://github.com/robintw/RPiNDVI/blob/master/ndvi.py 74 | Program requires loading colorbars (jetcolorbar.jpg and NDVIcolormap.jpg) posted at https://github.com/MargaretAN9/Peggy 75 | 76 | #### -realtimelineRGBintensitydisplay.py 77 | Displays video image and matplotlib graph. 
a trackbar is used to select a vertical video line. 78 | The plot displays the image (line) RGB components and is updated at video frame rates 79 | Code was modified from https://nrsyed.com/2018/02/08/real-time-video-histograms-with-opencv-and-python/ tutorial 80 | and adapted for picamera with line intensity analysis and trackbar. Press q to quit. 81 | 82 | #### -RGBquad.py 83 | Imports file and shows RGB quad picture. Uses matplotlib w/numpy. 84 | 85 | #### -roi and crops image.py 86 | Crops a portion of image. Mouse click to draw rectangle. Press keyboard to show crop. 87 | 88 | #### -takeHDRpictures.py 89 | HDR collection program. Record 4 pictures and exposure data for HDR processing. Shows all pictures to check image quality include auto exp + four exposures. (Use with HDRimaging.py) 90 | 91 | #### -YUVQuad.py 92 | Imports file and shows YUV quad picture. Uses matplotlib,numpy,opencv. 93 | 94 | 95 | 96 | 97 | ## Pi Camera: Programs that take videos or pictures with a Raspberry Pi Camera. 98 | 99 | 100 | #### -exposuremosaic.py 101 | Creates exposure matrix of two different Raspberry Pi camera settings: Manual Raspberry Pi cameras settings are described in https://picamera.readthedocs.io/en/release-1.13/fov.html. Some of the major exposure control settings for the V2 camera are listed below: 102 | --shutter_speed - controls exposure times, max length is 10 sec. Related to frame rate. 103 | --ISO - ISO controls sensitivity of the camera (by adjusting the analog_gain and digital_gain). Values are between 0 (auto) and 1600. The actual value used when iso is explicitly set will be one of the following values (whichever is closest): 100, 200, 320, 400, 500, 640, 800. 104 | --AWB - Auto white balance controls (red, blue) gains and ‘balances’ the color. 105 | Matrix set for exposure time vs ISO. 
106 | 107 | #### -PiCameraEffectsShow.py 108 | The program displays different processing modes from a Raspberry Pi camera #program tested on raspberry pi (stretch) with v2 camera (June 2018). Image is displayed at default settings between modes for comparison. Over 40 different settings are displayed. See https://projects.raspberrypi.org/en/projects/getting-started-with-picamera for more info on picamera. Application: program is useful to see preset processing options available with picamera. See example demo videos at: https://www.youtube.com/watch?v=MCXqdq1Xw9A. 109 | 110 | #### -PiCameraEffectsVid.py 111 | The program records a video demonstrating different processing modes from a Raspberry Pi camera #program tested on raspberry pi (stretch) with v2 camera (June 2018). Image is displayed at default settings between modes for comparison. Over 40 different settings are displayed. See https://projects.raspberrypi.org/en/projects/getting-started-with-picamera for more info on picamera. Video is recorded in h264 format, use omxplayer FILENAME.h264 to see video on raspberry pi (use terminal). Application: program is useful to see preset processing options available with picamera. See example demo videos at: https://www.youtube.com/watch?v=MCXqdq1Xw9A 112 | 113 | #### -picameravidwithimagecapture.py 114 | Program provides xx sec alignment preview and records jpg image. Application: align spectrometer or focus microscope. Annotates with filename and datetime. 115 | 116 | #### -picameravidwithimagecaptureopencv.py 117 | Shows video and captures image using picamera and opencv. From https://www.pyimagesearch.com/2015/03/30/accessing-the-raspberry-pi-camera-with-opencv-and-python/. Press q to quit 118 | 119 | #### -picmaeramancontrol.py 120 | Trackbar Picamera manual control program. Sets up 2 windows -trackbar window and video quad display showing camera settings and R, B and RGB components. 
Records video by setting file name, videowriter format and enabling 'out.write(combined)'. 121 | Requires opencv2 and picamera, ESC to quit. 122 | 123 | #### -videooverlayfinal.py 124 | Calibration overlay program. Loads file, requests user input and then creates transparent grid video overlay. User input is two mouse clicks and integer input (number of ruler divisions). Requires picamera, opencv, matplotlib. Press q to exit from video. 125 | 126 | 127 | ### USB Camera: Programs that take videos or pictures with a USB Camera. 128 | 129 | #### -USBTakesPic+Annotates.py 130 | Uses the connected USB Camera to take a photo and annotate said image with a resolution scale via matplotlib. Requires fswebcam which can be downloaded by using sudo apt-get install fswebcam. 131 | 132 | #### -USBVidWithImageCapture.py 133 | Displays video and records picture from USB camera #tested with Public Lab USB camera on Raspberry PI (stretch) June 2018. Use 's' key to stop video and record image. Code originally from https://gist.github.com/snim2/255151. Uses pygame library. Potential application - focus microscope with video and then store image by pressing 's'.
134 | -------------------------------------------------------------------------------- /Pi Camera/picmaeramancontrol.py: -------------------------------------------------------------------------------- 1 | #Trackbar Picamera manual control program 2 | #program tested with Rasspberry PI 3B+.v2 NoIR camera 3 | #sets up 2 windows -trackbar window and video quad display showing cmera settings and R, B and RGB components 4 | #record video by setting file name, videowriter formet and enabling 'out.write(combined)' 5 | #requires opencv2 and picamera 6 | #ESC to quit 7 | 8 | 9 | 10 | import time 11 | import numpy as np 12 | import cv2 13 | import picamera 14 | import picamera.array 15 | from fractions import Fraction 16 | 17 | def nothing(x): 18 | pass 19 | 20 | 21 | ISO_number = [100,200,320,400,500,640,800] 22 | exposure_number = ['auto','off','night', 'nightpreview', 'backlight','spotlight', 'sports','snow','beach','verylong','fixedfps','antishake','fireworks'] 23 | effect_number = ['none','negative','solarize', 'colorswap','washedout','colorbalance','cartoon','sketch', 'denoise','emboss', 'oilpaint','hatch','gpen','pastel','watercolor','film','blur'] 24 | 25 | 26 | font=cv2.FONT_HERSHEY_SIMPLEX 27 | 28 | 29 | cv2.namedWindow("Public Lab") 30 | blankimg= np.zeros((400,544,3),np.uint8) 31 | 32 | 33 | cv2.namedWindow("Trackbars", cv2.WINDOW_NORMAL) 34 | #cv2.resizeWindow("Trackbars",512,512) 35 | 36 | 37 | cv2.createTrackbar ('Exposure Comp',"Trackbars",25,50,nothing) 38 | cv2.createTrackbar ('Red Gain',"Trackbars",10,80,nothing) 39 | cv2.createTrackbar ('Blue Gain',"Trackbars",10,80,nothing) 40 | cv2.createTrackbar ('Frame Rate',"Trackbars",25,60,nothing) 41 | cv2.createTrackbar ('Contrast',"Trackbars",100,200,nothing) 42 | cv2.createTrackbar ('Brightness',"Trackbars",50,100,nothing) 43 | cv2.createTrackbar ('Saturation',"Trackbars",100,200,nothing) 44 | cv2.createTrackbar ('Sharpness',"Trackbars",100,200,nothing) 45 | cv2.createTrackbar 
('Exposure',"Trackbars",0,11,nothing) 46 | cv2.createTrackbar ('Effects',"Trackbars",0,16,nothing) 47 | 48 | 49 | # Create a VideoCapture object/used for video recording 50 | cap = cv2.VideoCapture(0) 51 | 52 | #Set video frame height 53 | 54 | height= 400 55 | width=544 56 | frame_width = 544 57 | #frame_height = int(cap.get(4)) 58 | frame_height= 400 59 | 60 | # Define the codec and create VideoWriter object. Three choices aare available avi,mpeg4 or h264 61 | 62 | #The output is stored in 'outpy.avi' file. 63 | #out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height)) 64 | 65 | #twice height and width 66 | 67 | #set video writer MPEG4 =XVID 68 | out = cv2.VideoWriter('/home/pi/Desktop/NDVItest20.mp4',cv2.VideoWriter_fourcc('X','V','I','D'), 10, (1088,800),1) 69 | #set video writer (MJPG=avi) option 70 | #out = cv2.VideoWriter('/home/pi/Desktop/NDVItestwithtrackbar.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (1088,800),1) 71 | #set video writer H264 option 72 | #out = cv2.VideoWriter('/home/pi/Desktop/output.h264',cv2.VideoWriter_fourcc('H','2','6','4'), 10, (1088,800),1) 73 | 74 | #def functions 75 | 76 | def label(image, text): 77 | """ 78 | Labels the given image with the given text 79 | """ 80 | return cv2.putText(image, text, (0, 50), font, 2, (255,255,255),4) 81 | 82 | 83 | def disp_multiple(im1=None, im2=None, im3=None, im4=None): 84 | 85 | # height, width = im1.shape 86 | 87 | combined = np.zeros((2 * height, 2 * width, 3), dtype=np.uint8) 88 | 89 | # combined[0:height, 0:width, :] = cv2.cvtColor(im1, cv2.COLOR_GRAY2RGB) 90 | # combined[height:, width:, :] = im1 91 | combined[0:height, 0:width, :] = im1 92 | combined[height:, 0:width, :] = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB) 93 | # combined[height:, 0:width, :] = im2 94 | # combined[height:, width:, :] = im3 95 | combined[0:height, width:, :] = cv2.cvtColor(im3, cv2.COLOR_GRAY2RGB) 96 | combined[height:, width:, :] = im4 97 | 98 | 99 | return 
combined 100 | 101 | 102 | 103 | 104 | #begin camera collection 105 | 106 | def run(): 107 | with picamera.PiCamera() as camera: 108 | 109 | # Set the camera parameters 110 | x = 400 111 | # camera.resolution = (int(640), x) 112 | camera.resolution = (544, x) 113 | # Various optional camera settings below: 114 | camera.iso=100 115 | camera.framerate = 30 116 | camera.awb_mode = 'off' 117 | camera.exposure_mode = "off" 118 | # camera.framerate = Fraction (1,6) 119 | #red/blue camera ratios from 0 to 8 120 | 121 | 122 | # camera.awb_gains = (Red_gain,Blue_gain) 123 | 124 | # Need to sleep to give the camera time to get set up properly 125 | time.sleep(1) 126 | 127 | with picamera.array.PiRGBArray(camera) as stream: 128 | # Loop constantly 129 | while True: 130 | # Grab data from the camera, in colour format 131 | # NOTE: This comes in BGR rather than RGB, which is important 132 | # for later! 133 | camera.capture(stream, format='bgr', use_video_port=True) 134 | image = stream.array 135 | 136 | image1=image 137 | 138 | # Get the individual colour components of the image 139 | b, g, r = cv2.split(image) 140 | 141 | #get info from Trackbars 142 | 143 | Exposure_Comp=cv2.getTrackbarPos ("Exposure Comp","Trackbars") 144 | Red_gain =cv2.getTrackbarPos ("Red Gain","Trackbars") 145 | Blue_gain =cv2.getTrackbarPos ("Blue Gain","Trackbars") 146 | Frame_rate =cv2.getTrackbarPos ("Frame Rate","Trackbars") 147 | Contrast =cv2.getTrackbarPos ('Contrast',"Trackbars") 148 | Brightness =cv2.getTrackbarPos ('Brightness',"Trackbars") 149 | ISO =cv2.getTrackbarPos ('ISO',"Trackbars") 150 | Exp=cv2.getTrackbarPos ('Exposure',"Trackbars") 151 | Saturation=cv2.getTrackbarPos ('Saturation',"Trackbars") 152 | Sharpness=cv2.getTrackbarPos ('Sharpness',"Trackbars") 153 | Effects=cv2.getTrackbarPos ('Effects',"Trackbars") 154 | 155 | #scale camera settings 156 | 157 | camera.exposure_compensation = Exposure_Comp-25 158 | camera.awb_gains = (Red_gain/10,Blue_gain/10) 159 | camera.framerate = 
Frame_rate 160 | camera.contrast = Contrast-100 161 | camera.brightness = Brightness 162 | camera.exposure_mode = exposure_number[Exp] 163 | camera.saturation = Saturation-100 164 | camera.sharpness = Sharpness-100 165 | camera.image_effect = effect_number[Effects] 166 | 167 | 168 | 169 | 170 | # Label images 171 | label(image1, 'RGB') 172 | label(b, 'B') 173 | label(r, 'R') 174 | 175 | 176 | 177 | 178 | # Combine ready for display 179 | combined = disp_multiple(blankimg,b,r,image1) 180 | 181 | 182 | 183 | # write video 184 | cv2.putText(combined,"Exposure Compensation:",(10,25),font,1,(256,256,256),2) 185 | cv2.putText(combined,str(camera.exposure_compensation),(450,25),font,1,(256,256,256),2) 186 | 187 | cv2.putText(combined,"Blue",(10,55),font,1,(256,0,0),2) 188 | cv2.putText(combined,"/",(80,55),font,1,(256,256,256),2) 189 | cv2.putText(combined,"Red Gain:",(110,55),font,1,(0,0,256),2) 190 | cv2.putText(combined,str(Red_gain/10),(470,55),font,1,(0,0,256),2) 191 | cv2.putText(combined,"/",(450,55),font,1,(256,256,256),2) 192 | cv2.putText(combined,str(Blue_gain/10),(400,55),font,1,(256,0,0),2) 193 | 194 | cv2.putText(combined,"Frame Rate:",(10,85),font,1,(256,256,256),2) 195 | cv2.putText(combined,str(camera.framerate),(450,85),font,1,(256,256,256),2) 196 | 197 | cv2.putText(combined,"Contrast:",(10,115),font,1,(256,256,256),2) 198 | cv2.putText(combined,str(camera.contrast),(450,115),font,1,(256,256,256),2) 199 | 200 | cv2.putText(combined,"Brightness:",(10,145),font,1,(256,256,256),2) 201 | cv2.putText(combined,str(camera.brightness),(450,145),font,1,(256,256,256),2) 202 | 203 | cv2.putText(combined,"Saturation:",(10,175),font,1,(256,256,256),2) 204 | cv2.putText(combined,str(camera.saturation),(450,175),font,1,(256,256,256),2) 205 | 206 | cv2.putText(combined,"Sharpness:",(10,205),font,1,(256,256,256),2) 207 | cv2.putText(combined,str(camera.sharpness),(450,205),font,1,(256,256,256),2) 208 | 209 | 
cv2.putText(combined,"Exposure:",(10,235),font,1,(256,256,256),2) 210 | cv2.putText(combined,str(camera.exposure_mode),(355,235),font,1,(256,256,256),2) 211 | 212 | cv2.putText(combined,"Image Effect:",(10,265),font,1,(256,256,256),2) 213 | cv2.putText(combined,str(camera.image_effect),(355,265),font,1,(256,256,256),2) 214 | 215 | cv2.putText(combined,"ISO:",(10,295),font,1,(256,256,256),2) 216 | cv2.putText(combined,str(camera.iso),(355,295),font,1,(256,256,256),2) 217 | 218 | cv2.putText(combined,"Exposure Speed:",(10,325),font,1,(256,256,256),2) 219 | cv2.putText(combined,str(round(camera.exposure_speed/1000000,4)),(355,325),font,1,(256,256,256),2) 220 | 221 | 222 | cv2.putText(combined,"Analog Gain:",(10,355),font,1,(256,256,256),2) 223 | cv2.putText(combined,str(round(float(camera.analog_gain),2)),(355,355),font,1,(256,256,256),2) 224 | 225 | cv2.putText(combined,"Digital Gain:",(10,385),font,1,(256,256,256),2) 226 | cv2.putText(combined,str(round(float(camera.digital_gain),2)),(355,385),font,1,(256,256,256),2) 227 | 228 | cv2.putText(combined, "B", (460, 450), font, 2, (255,0,0),4) 229 | cv2.putText(combined, "R", (1020, 50), font, 2, (0,0,255),4) 230 | 231 | 232 | 233 | 234 | # use for video recording 235 | 236 | 237 | # out.write(combined) 238 | 239 | 240 | # Display 241 | cv2.imshow('Public Lab', combined) 242 | 243 | stream.truncate(0) 244 | 245 | # press ESC to break 246 | c = cv2.waitKey(7) % 0x100 247 | if c == 27: 248 | break 249 | 250 | # cleanup or things will get messy 251 | cv2.destroyAllWindows() 252 | cap.release() 253 | out.release() 254 | 255 | if __name__ == '__main__': 256 | run() 257 | 258 | -------------------------------------------------------------------------------- /NDVIvideogainoptimization.py: -------------------------------------------------------------------------------- 1 | #NDVI Red/Gain optimization program 2 | #the program displays (and records) an RGB//B/NDVI(fastie)/NDVI(Jet) quad video. 
3 | #Tested with a Raspberry Pi NoIR camera with blue filter 4 | #trackbars select gain settings -program opens at zerogain so need to move .5/.5 to see first images 5 | #NDVI equations from https://github.com/robintw/RPiNDVI/blob/master/ndvi.py 6 | 7 | #store file on desktop, HDMI, AVI or MPEG4 are possible recordong options -set video writer and file name 8 | # to not record "#out.write(combined)" 9 | #note you are creating big data files 10 | 11 | #ESC to quit 12 | 13 | 14 | 15 | import time 16 | import numpy as np 17 | import cv2 18 | import picamera 19 | import picamera.array 20 | 21 | def nothing(x): 22 | pass 23 | 24 | 25 | font=cv2.FONT_HERSHEY_SIMPLEX 26 | 27 | cv2.namedWindow("Public Lab") 28 | cv2.createTrackbar ('Red Gain',"Public Lab",0,80,nothing) 29 | cv2.createTrackbar ('Blue Gain',"Public Lab",0,80,nothing) 30 | cv2.createTrackbar ('Frame Rate',"Public Lab",5,60,nothing) 31 | 32 | # Create a VideoCapture object 33 | cap = cv2.VideoCapture(0) 34 | 35 | width = 544 36 | #frame_height = int(cap.get(4)) 37 | height= 400 38 | 39 | 40 | #frame_width = int(cap.get(3)) 41 | 42 | frame_width = 544 43 | #frame_height = int(cap.get(4)) 44 | frame_height= 400 45 | 46 | # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file. 47 | #out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height)) 48 | 49 | #twice height and width 50 | 51 | #set video writer MPEG4 =XVID 52 | out = cv2.VideoWriter('/home/pi/Desktop/NDVItest20.mp4',cv2.VideoWriter_fourcc('X','V','I','D'), 10, (1088,800),1) 53 | #set video writer (MJPG=avi) option 54 | #out = cv2.VideoWriter('/home/pi/Desktop/NDVItestwithtrackbar.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (1088,800),1) 55 | #set video writer H264 option 56 | #out = cv2.VideoWriter('/home/pi/Desktop/output.h264',cv2.VideoWriter_fourcc('H','2','6','4'), 10, (1088,800),1) 57 | """ 58 | Combines four images for display. 
59 | 60 | """ 61 | def disp_multiple(im1=None, im2=None, im3=None, im4=None): 62 | 63 | # height, width = im1.shape 64 | 65 | combined = np.zeros((2 * height, 2 * width, 3), dtype=np.uint8) 66 | 67 | # combined[0:height, 0:width, :] = cv2.cvtColor(im1, cv2.COLOR_GRAY2RGB) 68 | # combined[height:, width:, :] = im1 69 | combined[0:height, 0:width, :] = im1 70 | # combined[height:, 0:width, :] = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB) 71 | combined[height:, 0:width, :] = im2 72 | # combined[height:, width:, :] = im3 73 | combined[0:height, width:, :] = cv2.cvtColor(im3, cv2.COLOR_GRAY2RGB) 74 | combined[height:, width:, :] = im4 75 | 76 | 77 | 78 | return combined 79 | 80 | 81 | def label(image, text): 82 | """ 83 | Labels the given image with the given text 84 | """ 85 | return cv2.putText(image, text, (0, 50), font, 2, (255,255,255),4) 86 | 87 | 88 | def contrast_stretch(im): 89 | """ 90 | Performs a simple contrast stretch of the given image, from 5-95%. 91 | """ 92 | in_min = np.percentile(im, 5) 93 | in_max = np.percentile(im, 95) 94 | 95 | out_min = 0.0 96 | out_max = 255.0 97 | 98 | out = im - in_min 99 | out *= ((out_min - out_max) / (in_min - in_max)) 100 | out += in_min 101 | 102 | return out 103 | 104 | 105 | #load display colorbars 106 | colorbar= cv2.imread ("/home/pi/Desktop/NDVIcolormap.jpg",1) 107 | 108 | colorbar=cv2.resize (colorbar,None, fx=.8,fy=.4,interpolation=cv2.INTER_CUBIC) 109 | 110 | 111 | print (colorbar.shape) 112 | colorbarjet=cv2.imread("/home/pi/Desktop/jetcolorbar.jpg",1) 113 | 114 | 115 | print (colorbarjet.shape) 116 | 117 | 118 | #fastie colormap 119 | def fastieColorMap(ndvi) : 120 | 121 | fastie = np.zeros((256, 1, 3), dtype=np.uint8) 122 | fastie[:, 0, 2] = [255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 
102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 66, 77, 87, 98, 108, 119, 129, 140, 131, 122, 113, 105, 96, 87, 78, 70, 61, 52, 43, 35, 26, 17, 8, 0, 7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 87, 95, 103, 111, 119, 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239, 247, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255] 123 | fastie[:, 0, 1] = [255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 66, 77, 87, 98, 108, 119, 129, 140, 147, 154, 161, 168, 175, 183, 190, 197, 204, 211, 219, 226, 233, 240, 247, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 249, 244, 239, 233, 228, 223, 217, 212, 207, 201, 196, 191, 185, 180, 175, 170, 164, 159, 154, 148, 143, 
138, 132, 127, 122, 116, 111, 106, 100, 95, 90, 85, 79, 74, 69, 63, 58, 53, 47, 42, 37, 31, 26, 21, 15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 124 | fastie[:, 0, 0] = [255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 80, 105, 130, 155, 180, 205, 230, 255, 239, 223, 207, 191, 175, 159, 143, 127, 111, 95, 79, 63, 47, 31, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239] 125 | 126 | 127 | color = cv2.LUT(ndvi, fastie) 128 | 129 | 130 | return color; 131 | 132 | #begin camera collection 133 | 134 | def run(): 135 | with picamera.PiCamera() as camera: 136 | 137 | # Set the camera parameters 138 | x = 400 139 | # camera.resolution = (int(1.33 * x), x) 140 | camera.resolution = (544, x) 141 | # Various optional camera settings below: 142 | camera.framerate = 30 143 | camera.awb_mode = 'off' 144 | #red/blue camera ratios from 0 to 8 145 | 146 | 147 | # camera.awb_gains = (Red_gain,Blue_gain) 148 | 149 | # Need to sleep to give the camera time to get set up properly 150 | time.sleep(1) 151 | 152 | with picamera.array.PiRGBArray(camera) as stream: 153 | # Loop constantly 154 | while True: 155 | # Grab data 
from the camera, in colour format 156 | # NOTE: This comes in BGR rather than RGB, which is important 157 | # for later! 158 | camera.capture(stream, format='bgr', use_video_port=True) 159 | image = stream.array 160 | 161 | image1=image 162 | Red_gain =cv2.getTrackbarPos ("Red Gain","Public Lab") 163 | Blue_gain =cv2.getTrackbarPos ("Blue Gain","Public Lab") 164 | Frame_rate =cv2.getTrackbarPos ("Frame Rate","Public Lab") 165 | 166 | # print (Frame_rate) 167 | 168 | camera.awb_gains = (Red_gain/10,Blue_gain/10) 169 | camera.framerate = Frame_rate 170 | 171 | 172 | # Get the individual colour components of the image 173 | b, g, r = cv2.split(image) 174 | 175 | 176 | #start video capture 177 | ret, image = cap.read() 178 | # Calculate the NDVI 179 | 180 | # Bottom of fraction 181 | bottom = (r.astype(float) + g.astype(float)) 182 | bottom[bottom == 0] = 0.01 # Make sure we don't divide by zero! 183 | 184 | ndvi = (r.astype(float) - g) / bottom 185 | ndvi = contrast_stretch(ndvi) 186 | ndvi = ndvi.astype(np.uint8) 187 | 188 | 189 | ndvijet = cv2.applyColorMap(ndvi, cv2.COLORMAP_JET) 190 | 191 | 192 | ndvi = cv2.cvtColor(ndvi, cv2.COLOR_GRAY2BGR); 193 | # NOTE : im_gray is 3-channel image with identical 194 | 195 | 196 | ndvifastie = fastieColorMap(ndvi) 197 | 198 | # #format red 199 | 200 | # zeros = np.zeros (r.shape[:2], dtype ="uint8") 201 | # r= cv2.merge(([zeros,zeros,r])) 202 | 203 | # Do the labelling 204 | label(image1, 'RGB') 205 | label(ndvifastie, 'NDVI/fastie') 206 | label(r, 'RED') 207 | label(ndvijet, 'NDVI/Jet') 208 | 209 | 210 | 211 | 212 | 213 | # Combine ready for display 214 | combined = disp_multiple(image1,ndvifastie,r, ndvijet) 215 | 216 | 217 | 218 | # colorbar fastie 219 | 220 | rows,cols,channels = colorbar.shape 221 | 222 | roi = colorbar[0:rows, 0:cols ] 223 | 224 | # Now create a mask of logo and create its inverse mask also 225 | img2gray = cv2.cvtColor(colorbar,cv2.COLOR_BGR2GRAY) 226 | ret, mask = cv2.threshold(img2gray, 10, 255, 
cv2.THRESH_BINARY) 227 | mask_inv = cv2.bitwise_not(mask) 228 | # Now black-out the area of logo in ROI 229 | img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 230 | 231 | 232 | # Take only region of logo from logo image. 233 | img2_fg = cv2.bitwise_and(colorbar,colorbar,mask = mask) 234 | 235 | 236 | # Put logo in ROI and modify the main image 237 | dst = cv2.add(img1_bg,img2_fg) 238 | 239 | 240 | combined[775:(775+rows), 25:(25+cols)] = dst 241 | 242 | 243 | # colorbar jet 244 | 245 | rows,cols,channels = colorbarjet.shape 246 | 247 | roi = colorbarjet[0:rows, 0:cols ] 248 | 249 | # Now create a mask of logo and create its inverse mask also 250 | img2gray = cv2.cvtColor(colorbarjet,cv2.COLOR_BGR2GRAY) 251 | ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) 252 | mask_inv = cv2.bitwise_not(mask) 253 | # Now black-out the area of logo in ROI 254 | img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 255 | 256 | 257 | # Take only region of logo from logo image. 258 | img2_fg = cv2.bitwise_and(colorbarjet,colorbarjet,mask = mask) 259 | 260 | 261 | # Put logo in ROI and modify the main image 262 | dst = cv2.add(img1_bg,img2_fg) 263 | 264 | 265 | combined[775:(775+rows), 720:(720+cols)] = dst 266 | 267 | 268 | 269 | 270 | # write video 271 | 272 | 273 | cv2.putText(combined,str(Red_gain/10),(750,50),font,2,(0,0,256),4) 274 | cv2.putText(combined,str(Blue_gain/10),(200,50),font,2,(256,0,0),4) 275 | out.write(combined) 276 | 277 | 278 | # Display 279 | cv2.imshow('Public Lab', combined) 280 | 281 | stream.truncate(0) 282 | 283 | # press ESC to break 284 | c = cv2.waitKey(7) % 0x100 285 | if c == 27: 286 | break 287 | 288 | # cleanup or things will get messy 289 | cv2.destroyAllWindows() 290 | cap.release() 291 | out.release() 292 | 293 | if __name__ == '__main__': 294 | run() 295 | 296 | 297 | 298 | 299 | 300 | -------------------------------------------------------------------------------- /Image Processing/NDVIvideogainoptimization.py: 
-------------------------------------------------------------------------------- 1 | #NDVI Red/Gain optimization program 2 | #the program displays (and records) an RGB//B/NDVI(fastie)/NDVI(Jet) quad video. 3 | #Tested with a Raspberry Pi NoIR camera with blue filter 4 | #trackbars select gain settings -program opens at zerogain so need to move .5/.5 to see first images 5 | #NDVI equations from https://github.com/robintw/RPiNDVI/blob/master/ndvi.py 6 | 7 | #store file on desktop, HDMI, AVI or MPEG4 are possible recordong options -set video writer and file name 8 | # to not record "#out.write(combined)" 9 | #note you are creating big data files 10 | #program requires loading colorbars (jetcolorbar.jpg and NDVIcolormap.jpg) posted at https://github.com/MargaretAN9/Peggy 11 | #ESC to quit 12 | 13 | 14 | 15 | import time 16 | import numpy as np 17 | import cv2 18 | import picamera 19 | import picamera.array 20 | 21 | def nothing(x): 22 | pass 23 | 24 | 25 | font=cv2.FONT_HERSHEY_SIMPLEX 26 | 27 | cv2.namedWindow("Public Lab") 28 | cv2.createTrackbar ('Red Gain',"Public Lab",0,80,nothing) 29 | cv2.createTrackbar ('Blue Gain',"Public Lab",0,80,nothing) 30 | cv2.createTrackbar ('Frame Rate',"Public Lab",5,60,nothing) 31 | 32 | # Create a VideoCapture object 33 | cap = cv2.VideoCapture(0) 34 | 35 | width = 544 36 | #frame_height = int(cap.get(4)) 37 | height= 400 38 | 39 | 40 | #frame_width = int(cap.get(3)) 41 | 42 | frame_width = 544 43 | #frame_height = int(cap.get(4)) 44 | frame_height= 400 45 | 46 | # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file. 
47 | #out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height)) 48 | 49 | #twice height and width 50 | 51 | #set video writer MPEG4 =XVID 52 | out = cv2.VideoWriter('/home/pi/Desktop/NDVItest20.mp4',cv2.VideoWriter_fourcc('X','V','I','D'), 10, (1088,800),1) 53 | #set video writer (MJPG=avi) option 54 | #out = cv2.VideoWriter('/home/pi/Desktop/NDVItestwithtrackbar.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (1088,800),1) 55 | #set video writer H264 option 56 | #out = cv2.VideoWriter('/home/pi/Desktop/output.h264',cv2.VideoWriter_fourcc('H','2','6','4'), 10, (1088,800),1) 57 | """ 58 | Combines four images for display. 59 | 60 | """ 61 | def disp_multiple(im1=None, im2=None, im3=None, im4=None): 62 | 63 | # height, width = im1.shape 64 | 65 | combined = np.zeros((2 * height, 2 * width, 3), dtype=np.uint8) 66 | 67 | # combined[0:height, 0:width, :] = cv2.cvtColor(im1, cv2.COLOR_GRAY2RGB) 68 | # combined[height:, width:, :] = im1 69 | combined[0:height, 0:width, :] = im1 70 | # combined[height:, 0:width, :] = cv2.cvtColor(im2, cv2.COLOR_GRAY2RGB) 71 | combined[height:, 0:width, :] = im2 72 | # combined[height:, width:, :] = im3 73 | combined[0:height, width:, :] = cv2.cvtColor(im3, cv2.COLOR_GRAY2RGB) 74 | combined[height:, width:, :] = im4 75 | 76 | 77 | 78 | return combined 79 | 80 | 81 | def label(image, text): 82 | """ 83 | Labels the given image with the given text 84 | """ 85 | return cv2.putText(image, text, (0, 50), font, 2, (255,255,255),4) 86 | 87 | 88 | def contrast_stretch(im): 89 | """ 90 | Performs a simple contrast stretch of the given image, from 5-95%. 
91 | """ 92 | in_min = np.percentile(im, 5) 93 | in_max = np.percentile(im, 95) 94 | 95 | out_min = 0.0 96 | out_max = 255.0 97 | 98 | out = im - in_min 99 | out *= ((out_min - out_max) / (in_min - in_max)) 100 | out += in_min 101 | 102 | return out 103 | 104 | 105 | #load display colorbars 106 | colorbar= cv2.imread ("/home/pi/Desktop/NDVIcolormap.jpg",1) 107 | 108 | colorbar=cv2.resize (colorbar,None, fx=.8,fy=.4,interpolation=cv2.INTER_CUBIC) 109 | 110 | 111 | print (colorbar.shape) 112 | colorbarjet=cv2.imread("/home/pi/Desktop/jetcolorbar.jpg",1) 113 | 114 | 115 | print (colorbarjet.shape) 116 | 117 | 118 | #fastie colormap 119 | def fastieColorMap(ndvi) : 120 | 121 | fastie = np.zeros((256, 1, 3), dtype=np.uint8) 122 | fastie[:, 0, 2] = [255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 66, 77, 87, 98, 108, 119, 129, 140, 131, 122, 113, 105, 96, 87, 78, 70, 61, 52, 43, 35, 26, 17, 8, 0, 7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 87, 95, 103, 111, 119, 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239, 247, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255] 123 | fastie[:, 0, 1] = 
[255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 66, 77, 87, 98, 108, 119, 129, 140, 147, 154, 161, 168, 175, 183, 190, 197, 204, 211, 219, 226, 233, 240, 247, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 249, 244, 239, 233, 228, 223, 217, 212, 207, 201, 196, 191, 185, 180, 175, 170, 164, 159, 154, 148, 143, 138, 132, 127, 122, 116, 111, 106, 100, 95, 90, 85, 79, 74, 69, 63, 58, 53, 47, 42, 37, 31, 26, 21, 15, 10, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 124 | fastie[:, 0, 0] = [255, 250, 246, 242, 238, 233, 229, 225, 221, 216, 212, 208, 204, 200, 195, 191, 187, 183, 178, 174, 170, 166, 161, 157, 153, 149, 145, 140, 136, 132, 128, 123, 119, 115, 111, 106, 102, 98, 94, 90, 85, 81, 77, 73, 68, 64, 60, 56, 52, 56, 60, 64, 68, 73, 77, 81, 85, 90, 94, 98, 102, 106, 111, 115, 119, 123, 128, 132, 136, 140, 145, 149, 153, 157, 161, 166, 170, 174, 178, 183, 187, 191, 195, 200, 204, 208, 212, 216, 221, 225, 229, 233, 238, 242, 246, 250, 255, 250, 245, 240, 235, 230, 225, 220, 215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165, 160, 155, 151, 146, 141, 136, 131, 126, 121, 116, 111, 106, 101, 96, 91, 86, 81, 76, 71, 66, 61, 56, 80, 105, 130, 155, 180, 205, 230, 255, 239, 223, 207, 191, 175, 159, 143, 127, 111, 95, 79, 63, 47, 31, 15, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 31, 47, 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239] 125 | 126 | 127 | color = cv2.LUT(ndvi, fastie) 128 | 129 | 130 | return color; 131 | 132 | #begin camera collection 133 | 134 | def run(): 135 | with picamera.PiCamera() as camera: 136 | 137 | # Set the camera parameters 138 | x = 400 139 | # camera.resolution = (int(1.33 * x), x) 140 | camera.resolution = (544, x) 141 | # Various optional camera settings below: 142 | camera.framerate = 30 143 | camera.awb_mode = 'off' 144 | #red/blue camera ratios from 0 to 8 145 | 146 | 147 | # camera.awb_gains = (Red_gain,Blue_gain) 148 | 149 | # Need to sleep to give the camera time to get set up properly 150 | time.sleep(1) 151 | 152 | with picamera.array.PiRGBArray(camera) as stream: 153 | # Loop constantly 154 | while True: 155 | # Grab data from the camera, in colour format 156 | # NOTE: This comes in BGR rather than RGB, which is important 157 | # for later! 158 | camera.capture(stream, format='bgr', use_video_port=True) 159 | image = stream.array 160 | 161 | image1=image 162 | Red_gain =cv2.getTrackbarPos ("Red Gain","Public Lab") 163 | Blue_gain =cv2.getTrackbarPos ("Blue Gain","Public Lab") 164 | Frame_rate =cv2.getTrackbarPos ("Frame Rate","Public Lab") 165 | 166 | # print (Frame_rate) 167 | 168 | camera.awb_gains = (Red_gain/10,Blue_gain/10) 169 | camera.framerate = Frame_rate 170 | 171 | 172 | # Get the individual colour components of the image 173 | b, g, r = cv2.split(image) 174 | 175 | 176 | #start video capture 177 | ret, image = cap.read() 178 | # Calculate the NDVI 179 | 180 | # Bottom of fraction 181 | bottom = (r.astype(float) + g.astype(float)) 182 | bottom[bottom == 0] = 0.01 # Make sure we don't divide by zero! 
183 | 184 | ndvi = (r.astype(float) - g) / bottom 185 | ndvi = contrast_stretch(ndvi) 186 | ndvi = ndvi.astype(np.uint8) 187 | 188 | 189 | ndvijet = cv2.applyColorMap(ndvi, cv2.COLORMAP_JET) 190 | 191 | 192 | ndvi = cv2.cvtColor(ndvi, cv2.COLOR_GRAY2BGR); 193 | # NOTE : im_gray is 3-channel image with identical 194 | 195 | 196 | ndvifastie = fastieColorMap(ndvi) 197 | 198 | # #format red 199 | 200 | # zeros = np.zeros (r.shape[:2], dtype ="uint8") 201 | # r= cv2.merge(([zeros,zeros,r])) 202 | 203 | # Do the labelling 204 | label(image1, 'RGB') 205 | label(ndvifastie, 'NDVI/fastie') 206 | label(r, 'RED') 207 | label(ndvijet, 'NDVI/Jet') 208 | 209 | 210 | 211 | 212 | 213 | # Combine ready for display 214 | combined = disp_multiple(image1,ndvifastie,r, ndvijet) 215 | 216 | 217 | 218 | # colorbar fastie 219 | 220 | rows,cols,channels = colorbar.shape 221 | 222 | roi = colorbar[0:rows, 0:cols ] 223 | 224 | # Now create a mask of logo and create its inverse mask also 225 | img2gray = cv2.cvtColor(colorbar,cv2.COLOR_BGR2GRAY) 226 | ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) 227 | mask_inv = cv2.bitwise_not(mask) 228 | # Now black-out the area of logo in ROI 229 | img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 230 | 231 | 232 | # Take only region of logo from logo image. 
233 | img2_fg = cv2.bitwise_and(colorbar,colorbar,mask = mask) 234 | 235 | 236 | # Put logo in ROI and modify the main image 237 | dst = cv2.add(img1_bg,img2_fg) 238 | 239 | 240 | combined[775:(775+rows), 25:(25+cols)] = dst 241 | 242 | 243 | # colorbar jet 244 | 245 | rows,cols,channels = colorbarjet.shape 246 | 247 | roi = colorbarjet[0:rows, 0:cols ] 248 | 249 | # Now create a mask of logo and create its inverse mask also 250 | img2gray = cv2.cvtColor(colorbarjet,cv2.COLOR_BGR2GRAY) 251 | ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) 252 | mask_inv = cv2.bitwise_not(mask) 253 | # Now black-out the area of logo in ROI 254 | img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) 255 | 256 | 257 | # Take only region of logo from logo image. 258 | img2_fg = cv2.bitwise_and(colorbarjet,colorbarjet,mask = mask) 259 | 260 | 261 | # Put logo in ROI and modify the main image 262 | dst = cv2.add(img1_bg,img2_fg) 263 | 264 | 265 | combined[775:(775+rows), 720:(720+cols)] = dst 266 | 267 | 268 | 269 | 270 | # write video 271 | 272 | 273 | cv2.putText(combined,str(Red_gain/10),(750,50),font,2,(0,0,256),4) 274 | cv2.putText(combined,str(Blue_gain/10),(200,50),font,2,(256,0,0),4) 275 | out.write(combined) 276 | 277 | 278 | # Display 279 | cv2.imshow('Public Lab', combined) 280 | 281 | stream.truncate(0) 282 | 283 | # press ESC to break 284 | c = cv2.waitKey(7) % 0x100 285 | if c == 27: 286 | break 287 | 288 | # cleanup or things will get messy 289 | cv2.destroyAllWindows() 290 | cap.release() 291 | out.release() 292 | 293 | if __name__ == '__main__': 294 | run() 295 | 296 | 297 | 298 | 299 | 300 | -------------------------------------------------------------------------------- /Image Processing/CVquaddisplay -colorandsobel .py: -------------------------------------------------------------------------------- 1 | #Quad video for real time image processing 2 | #program tested with Rasspberry PI 3B+.v2 NoIR camera 3 | #sets up 3 windows -trackbar windows and video 
# Quad video display for real-time image processing.
# Tested with a Raspberry Pi 3B+ and a v2 NoIR camera.
# Opens three windows: two trackbar panels (camera settings and blob
# detection) and a quad video display showing the camera image, a colour
# filter and Sobel edge detection.
# The trackbars also drive blob detection -- see Satya Mallick,
# https://www.learnopencv.com/tag/blob-detector/
# Record video by setting the file name and VideoWriter format and enabling
# the 'out.write(combined)' call under the 'write video' comment.
#
# Requires opencv2, picamera and scipy.
# Press ESC to quit.


import os
from scipy.signal import convolve2d as conv2
from scipy import ndimage
from skimage import color, data, restoration
from skimage.filters import sobel
import matplotlib.pyplot as plt

import time
import numpy as np
import cv2
import picamera
import picamera.array
from fractions import Fraction


def nothing(x):
    """No-op trackbar callback (OpenCV requires a callable)."""
    pass


# Camera control tables, indexed by trackbar position.
ISO_number = [100, 200, 320, 400, 500, 640, 800]
exposure_number = ['auto', 'off', 'night', 'nightpreview', 'backlight',
                   'spotlight', 'sports', 'snow', 'beach', 'verylong',
                   'fixedfps', 'antishake', 'fireworks']
effect_number = ['none', 'negative', 'solarize', 'colorswap', 'washedout',
                 'colorbalance', 'cartoon', 'sketch', 'denoise', 'emboss',
                 'oilpaint', 'hatch', 'gpen', 'pastel', 'watercolor',
                 'film', 'blur']

font = cv2.FONT_HERSHEY_SIMPLEX

# Windows: the main display plus two trackbar panels.
cv2.namedWindow("Public Lab")
blankimg = np.zeros((400, 544, 3), np.uint8)
cv2.namedWindow("Trackbars", cv2.WINDOW_NORMAL)
cv2.namedWindow("Trackbars1", cv2.WINDOW_NORMAL)

# Camera-settings and HSV-filter trackbars, as (name, initial, maximum).
# Creation order matches display order in the panel.
for _name, _initial, _max in (
        ('Exposure Comp', 25, 50),
        ('Red Gain', 10, 80),
        ('Blue Gain', 14, 80),
        ('Frame Rate', 45, 60),
        ('Contrast', 100, 200),
        ('Brightness', 50, 100),
        ('Saturation', 100, 200),
        ('Sharpness', 100, 200),
        ('Exposure', 0, 11),
        ('Effects', 0, 16),
        # HSV colour-filter range sliders.
        ('Hstart', 80, 255),
        ('Hend', 156, 255),
        ('Sstart', 80, 255),
        ('Send', 166, 255),
        ('Vstart', 50, 255),
        ('Vend', 144, 255),
):
    cv2.createTrackbar(_name, 'Trackbars', _initial, _max, nothing)

# Blob-detection trackbars live on their own panel.
for _name, _initial, _max in (
        ('MinArea', 200, 4000),
        ('Circularity', 1, 10),
        ('Convexity', 50, 100),
        ('minThreshold', 10, 200),
        ('maxThreshold', 200, 2000),
        ('InertiaRatio', 50, 100),
):
    cv2.createTrackbar(_name, 'Trackbars1', _initial, _max, nothing)

# Default colourspace limits (the main loop overwrites these from the
# trackbars on every frame).
Bstart, Gstart, Rstart = 0, 0, 0
Bend, Gend, Rend = 255, 255, 255

lower_range = np.array([Bstart, Gstart, Rstart], dtype=np.uint8)
upper_range = np.array([Bend, Gend, Rend], dtype=np.uint8)


#########################


# SimpleBlobDetector defaults. NOTE(review): the capture loop rebuilds its
# own parameters from the trackbars, so these module-level values only serve
# as documented defaults.
params = cv2.SimpleBlobDetector_Params()

# Thresholds
params.minThreshold = 10
params.maxThreshold = 200
# Shape filters for the module-level default parameters (the capture loop
# rebuilds its own parameter set from the trackbars each frame).
params.filterByArea = True
params.minArea = 1500

params.filterByCircularity = True
params.minCircularity = 0.1

params.filterByConvexity = True
params.minConvexity = 0.87

params.filterByInertia = True
params.minInertiaRatio = 0.01


# VideoCapture handle. NOTE(review): frames are taken from the Pi camera,
# not from this device; it is opened here and only released at shutdown.
cap = cv2.VideoCapture(0)

# Video frame geometry (single pane; the combined display is 2x each).
height = 400
width = 544
frame_width = 544
# frame_height = int(cap.get(4))
frame_height = 400

# Define the codec and create the VideoWriter. Three options: avi, mpeg4 or
# h264. The output is (1088, 800): twice the pane height and width because
# four panes are tiled.
# out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
# MPEG4 (XVID):
out = cv2.VideoWriter('/home/pi/Desktop/microscopequad1.mp4',
                      cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 10, (1088, 800), 1)
# MJPG (avi) option:
# out = cv2.VideoWriter('/home/pi/Desktop/NDVItestwithtrackbar.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (1088, 800), 1)
# H264 option:
# out = cv2.VideoWriter('/home/pi/Desktop/output.h264', cv2.VideoWriter_fourcc('H', '2', '6', '4'), 10, (1088, 800), 1)


def label(image, text):
    """Draw *text* in the top-left corner of *image* (modifies it in place)."""
    return cv2.putText(image, text, (0, 50), font, 2, (255, 255, 255), 4)


def disp_multiple(im1=None, im2=None, im3=None, im4=None):
    """Tile four images into one 2x2 canvas.

    im1..im3 are 3-channel (BGR) panes; im4 is single-channel and is
    converted to 3-channel grey so it can share the canvas.
    """
    combined = np.zeros((2 * height, 2 * width, 3), dtype=np.uint8)
    combined[0:height, 0:width, :] = im1                                  # top left
    combined[height:, 0:width, :] = im2                                   # bottom left
    combined[0:height, width:, :] = im3                                   # top right
    combined[height:, width:, :] = cv2.cvtColor(im4, cv2.COLOR_GRAY2RGB)  # bottom right
    return combined


def contrast_stretch(im):
    """Linearly stretch *im* so its 5th..95th percentiles map onto 0..255.

    Returns a float array; callers should clip/convert (e.g. astype(np.uint8)).
    """
    in_min = np.percentile(im, 5)
    in_max = np.percentile(im, 95)

    out_min = 0.0
    out_max = 255.0

    out = im - in_min
    out *= ((out_min - out_max) / (in_min - in_max))
    # BUG FIX: the original added in_min here, which shifted the stretched
    # range up by the input's 5th percentile. The offset must be the
    # *output* minimum for the mapping to land on [out_min, out_max].
    out += out_min

    return out


def _blob_params_from_trackbars():
    """Build SimpleBlobDetector parameters from the 'Trackbars1' panel."""
    p = cv2.SimpleBlobDetector_Params()

    # Grey-level thresholds.
    p.minThreshold = cv2.getTrackbarPos('minThreshold', 'Trackbars1')
    p.maxThreshold = cv2.getTrackbarPos('maxThreshold', 'Trackbars1')

    # Shape filters; sliders are integers, so scale into the unit ranges.
    p.filterByArea = True
    p.minArea = cv2.getTrackbarPos('MinArea', 'Trackbars1')
    p.filterByCircularity = True
    p.minCircularity = cv2.getTrackbarPos('Circularity', 'Trackbars1') / 10
    p.filterByConvexity = True
    p.minConvexity = cv2.getTrackbarPos('Convexity', 'Trackbars1') / 100
    p.filterByInertia = True
    p.minInertiaRatio = cv2.getTrackbarPos('InertiaRatio', 'Trackbars1') / 100

    return p


def _apply_camera_settings(camera):
    """Read the 'Trackbars' panel and push the settings to *camera*.

    Returns (red_gain, blue_gain) raw slider values for the on-screen
    annotation. (The original also read a nonexistent 'ISO' trackbar into an
    unused variable; that dead read has been removed.)
    """
    exposure_comp = cv2.getTrackbarPos("Exposure Comp", "Trackbars")
    red_gain = cv2.getTrackbarPos("Red Gain", "Trackbars")
    blue_gain = cv2.getTrackbarPos("Blue Gain", "Trackbars")
    frame_rate = cv2.getTrackbarPos("Frame Rate", "Trackbars")
    contrast = cv2.getTrackbarPos('Contrast', "Trackbars")
    brightness = cv2.getTrackbarPos('Brightness', "Trackbars")
    exp = cv2.getTrackbarPos('Exposure', "Trackbars")
    saturation = cv2.getTrackbarPos('Saturation', "Trackbars")
    sharpness = cv2.getTrackbarPos('Sharpness', "Trackbars")
    effects = cv2.getTrackbarPos('Effects', "Trackbars")

    # Re-centre / rescale the raw slider positions into the picamera ranges.
    camera.exposure_compensation = exposure_comp - 25
    camera.awb_gains = (red_gain / 10, blue_gain / 10)
    if frame_rate > 0:
        # Guard: a zero framerate from the slider is presumably rejected by
        # picamera -- TODO confirm against the picamera version in use.
        camera.framerate = frame_rate
    camera.contrast = contrast - 100
    camera.brightness = brightness
    camera.exposure_mode = exposure_number[exp]
    camera.saturation = saturation - 100
    camera.sharpness = sharpness - 100
    camera.image_effect = effect_number[effects]

    return red_gain, blue_gain


def _annotate(combined, camera, red_gain, blue_gain):
    """Overlay the current camera settings onto the combined frame.

    Colour components are 255 (uint8 maximum); the original used 256, which
    OpenCV saturates to 255, so the drawn output is unchanged.
    """
    white, blue, red = (255, 255, 255), (255, 0, 0), (0, 0, 255)

    cv2.putText(combined, "Exposure Compensation:", (10, 25), font, 1, white, 2)
    cv2.putText(combined, str(camera.exposure_compensation), (450, 25), font, 1, white, 2)

    # The AWB gains line mixes colours, so it is drawn piecewise.
    cv2.putText(combined, "Blue", (10, 55), font, 1, blue, 2)
    cv2.putText(combined, "/", (80, 55), font, 1, white, 2)
    cv2.putText(combined, "Red Gain:", (110, 55), font, 1, red, 2)
    cv2.putText(combined, str(red_gain / 10), (470, 55), font, 1, red, 2)
    cv2.putText(combined, "/", (450, 55), font, 1, white, 2)
    cv2.putText(combined, str(blue_gain / 10), (400, 55), font, 1, blue, 2)

    # Numeric settings: one row per entry, values right-aligned at x=450.
    for row, (name, value) in enumerate([
            ("Frame Rate:", camera.framerate),
            ("Contrast:", camera.contrast),
            ("Brightness:", camera.brightness),
            ("Saturation:", camera.saturation),
            ("Sharpness:", camera.sharpness)]):
        y = 85 + 30 * row
        cv2.putText(combined, name, (10, y), font, 1, white, 2)
        cv2.putText(combined, str(value), (450, y), font, 1, white, 2)

    # Textual / derived settings: values at x=355.
    for row, (name, value) in enumerate([
            ("Exposure:", camera.exposure_mode),
            ("Image Effect:", camera.image_effect),
            ("ISO:", camera.iso),
            ("Exposure Speed:", round(camera.exposure_speed / 1000000, 4)),
            ("Analog Gain:", round(float(camera.analog_gain), 2)),
            ("Digital Gain:", round(float(camera.digital_gain), 2))]):
        y = 235 + 30 * row
        cv2.putText(combined, name, (10, y), font, 1, white, 2)
        cv2.putText(combined, str(value), (355, y), font, 1, white, 2)

    # Pane titles for the bottom half of the quad display.
    cv2.putText(combined, "Color Filter", (10, 450), font, 2, white, 4)
    cv2.putText(combined, "Sobel", (580, 450), font, 2, white, 4)


def run():
    """Capture frames, build the quad display, and write/show it until ESC."""
    with picamera.PiCamera() as camera:
        # Camera parameters.
        x = 400
        camera.resolution = (544, x)
        camera.iso = 400
        camera.framerate = 30
        camera.awb_mode = 'off'
        camera.exposure_mode = "off"
        # camera.framerate = Fraction(1, 6)

        # Give the camera time to set itself up properly.
        time.sleep(1)

        with picamera.array.PiRGBArray(camera) as stream:
            while True:
                # NOTE: frames arrive in BGR order, which is what OpenCV
                # expects downstream.
                camera.capture(stream, format='bgr', use_video_port=True)
                image = stream.array

                # Aliases as in the original; drawing happens only after all
                # processing, so sharing the buffer is safe here.
                image1 = image
                image2 = image

                # Sobel edge detection: run per-axis on the grey image and
                # merge the gradients with hypot (float to avoid uint8
                # overflow), then convert back for display.
                grey = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
                gx = ndimage.sobel(grey, axis=0, mode='constant')
                gy = ndimage.sobel(grey, axis=1, mode='constant')
                edges = np.hypot(gx.astype(float), gy.astype(float)).astype(np.uint8)

                # Colour analysis: threshold in HSV space with the trackbar
                # ranges.
                image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2HSV)

                h_lo = cv2.getTrackbarPos('Hstart', 'Trackbars')
                h_hi = cv2.getTrackbarPos('Hend', 'Trackbars')
                s_lo = cv2.getTrackbarPos('Sstart', 'Trackbars')
                s_hi = cv2.getTrackbarPos('Send', 'Trackbars')
                v_lo = cv2.getTrackbarPos('Vstart', 'Trackbars')
                v_hi = cv2.getTrackbarPos('Vend', 'Trackbars')

                lower_range = np.array([h_lo, s_lo, v_lo], dtype=np.uint8)
                upper_range = np.array([h_hi, s_hi, v_hi], dtype=np.uint8)

                # BUG FIX: the mask must be computed on the HSV-converted
                # frame (image2); the original thresholded the BGR frame
                # with HSV bounds, so the H/S/V sliders did not select the
                # intended colours.
                mask = cv2.inRange(image2, lower_range, upper_range)
                result = cv2.bitwise_and(image, image, mask=mask)

                # Blob detection on the grey frame with trackbar parameters.
                # (The original built a second, unused detector afterwards;
                # that dead code has been removed.)
                blob = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                detector = cv2.SimpleBlobDetector_create(_blob_params_from_trackbars())
                keypoints = detector.detect(blob)

                # DRAW_RICH_KEYPOINTS makes each circle's size match the
                # blob's size. image_blob is kept for the alternate quad
                # layout commented out below.
                image_blob = cv2.drawKeypoints(
                    blob, keypoints, np.array([]), (0, 255, 255),
                    cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

                red_gain, blue_gain = _apply_camera_settings(camera)

                # Label and tile the four panes.
                label(image1, 'RGB')
                # combined = disp_multiple(blankimg, sharpened, result, image_blob)
                combined = disp_multiple(blankimg, result, image1, edges)

                _annotate(combined, camera, red_gain, blue_gain)

                # write video
                out.write(combined)

                # Display
                cv2.imshow('Public Lab', combined)

                stream.truncate(0)

                # Press ESC to break.
                if cv2.waitKey(7) % 0x100 == 27:
                    break

    # Cleanup or things will get messy.
    cv2.destroyAllWindows()
    cap.release()
    out.release()


if __name__ == '__main__':
    run()