├── README.md ├── HLS_code ├── OPEN_CLOSE │ ├── opening_core.h │ ├── opening_core.cpp │ └── opening_core_tb.cpp ├── BLOB_DETECTION │ ├── blob_analysis_core.h │ ├── blob_analysis_core.cpp │ └── blob_analysis_tb.cpp └── MOG │ ├── build_gaussian_core.h │ ├── build_gaussian_tb.cpp │ └── build_gaussian_core.cpp ├── PS_code ├── application │ ├── kalmanFilter.h │ ├── trackManager.h │ ├── kalman.cpp │ ├── track.cpp │ ├── track.h │ ├── trackManager.cpp │ └── kalmanFilter.cpp └── core_setup │ ├── pl_pipeline_api.c │ ├── tpg_api.c │ ├── dma_api.c │ ├── zed_iic.h │ ├── test_mog_main.c │ ├── zed_iic_axi.c │ └── vdma_api.c └── MATLAB_code ├── Foreground_det_comp.m ├── Bounding_box_comp.m └── back_sub_kalman.m /README.md: -------------------------------------------------------------------------------- 1 | # FPGA-Object-detection 2 | Code from final year project - Design of an FPGA based image processing system. 3 | 4 | Based on Zedboard - with Zynq 7020 device. 5 | 6 | Processing is split between Programable Logic - Object detection - and Processing System - Object tracking and decision making. 7 | 8 | PL object detection pipeline : MOG Foreground Detection --> Morphological operations --> BLOB analysis. 9 | 10 | Matlab was used to model the system to confirm algorithm choices and to test HLS implementations of algorithms against bench mark. 11 | 12 | Tests compare MOG outputs and bounding boxes of identified objects 13 | 14 | Code is structured as below:\ 15 | • MATLAB code\ 16 | o Modelling\ 17 | o Testing\ 18 | • HLS code\ 19 | o MOG core\ 20 | o Metamorphic operations core\ 21 | o BLOB analysis core\ 22 | • PS SDK Code\ 23 | o IP Core setup\ 24 | o Tracking and decision making\ 25 | -------------------------------------------------------------------------------- /HLS_code/OPEN_CLOSE/opening_core.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | typedef hls::Mat<240,320, HLS_8UC1> GRAY_IMAGE; 6 | typedef ap_axiu<8,1,1,1> GRAY_PIXEL; 7 | typedef hls::stream< GRAY_PIXEL > GRAY_AXI_STREAM; 8 | 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | typedef ap_ufixed<9, 9> coord; 18 | typedef ap_ufixed<8, 8> uint_8; 19 | typedef ap_ufixed<16, 16> uint_16; 20 | typedef ap_ufixed<1, 1> uint_1; 21 | 22 | /* start is inclusive, end is inclusive */ 23 | struct rle_run { 24 | struct{ 25 | coord s; 26 | coord e; 27 | uint_8 no; 28 | coord y; 29 | uint_1 _last_run; 30 | } data; 31 | bool last; 32 | }; 33 | typedef rle_run rle_run; 34 | 35 | typedef hls::stream< rle_run > RLE_AXI_STREAM; 36 | 37 | void open_and_close(GRAY_AXI_STREAM& INPUT_STREAM, RLE_AXI_STREAM& OUTPUT_STREAM); 38 | -------------------------------------------------------------------------------- /PS_code/application/kalmanFilter.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "defines.h" 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | 10 | class kalmanFilter 11 | { 12 | 13 | public: 14 | kalmanFilter(Point_t pt, track_t deltaTime = 0.2, track_t accelNoiseMag = 0.5); 15 | ~kalmanFilter(); 16 | 17 | Point_t GetPointPrediction(Point_t pt); 18 | Point_t Update(Point_t pt, bool dataCorrect); 19 | 20 | 21 | 22 | private: 23 | 24 | cv::Ptr m_unscentedKalman; 25 | 26 | std::deque m_initialPoints; 27 | static const size_t MIN_INIT_VALS = 4; 28 | 29 | Point_t m_lastPointResult; 30 | 31 | 32 | bool m_initialized; 33 | track_t m_deltaTime; 34 | track_t m_deltaTimeMin; 35 | track_t m_deltaTimeMax; 36 | track_t m_lastDist; 37 | track_t m_deltaStep; 38 | static const int m_deltaStepsCount = 20; 39 | track_t m_accelNoiseMag; 40 | void createUnscented(Point_t xy0, Point_t xyv0); 41 | }; -------------------------------------------------------------------------------- /PS_code/core_setup/pl_pipeline_api.c: -------------------------------------------------------------------------------- 1 | #include "xbuild_gaussian.h" 2 | 3 | XBuild_gaussian setup_mog () 4 | { 5 | XBuild_gaussian gaussian_ip; 6 | XBuild_gaussian_Config* gaussian_ptr; 7 | 8 | //init build gaussian core 9 | gaussian_ptr = XBuild_gaussian_LookupConfig(XPAR_BUILD_GAUSSIAN_0_DEVICE_ID); 10 | if (!gaussian_ptr) { 11 | return XST_FAILURE; 12 | } 13 | Status = XBuild_gaussian_CfgInitialize(&gaussian_ip, gaussian_ptr ); 14 | if (Status != XST_SUCCESS) { 15 | return XST_FAILURE; 16 | } 17 | //set input parameters 18 | XBuild_gaussian_Set_bg_thresh_V(&gaussian_ip, 0.96); 19 | XBuild_gaussian_Set_learning_rate_V(&gaussian_ip, 0.005); 20 | XBuild_gaussian_Set_min_var(&gaussian_ip, 4); 21 | XBuild_gaussian_EnableAutoRestart(&gaussian_ip); 22 | 23 | //Start the core 24 | XBuild_gaussian_Start(&gaussian_ip); 25 | Status = XBuild_gaussian_IsIdle(&gaussian_ip); 26 | printf("Gaussian Idle Status %u \n\r", (unsigned int) Status); 27 | Status = XBuild_gaussian_IsReady(&gaussian_ip); 28 | printf("Gaussian Idle Status %u \n\r", (unsigned int) Status); 29 | return gaussian_ip; 30 | } 31 | -------------------------------------------------------------------------------- /PS_code/core_setup/tpg_api.c: -------------------------------------------------------------------------------- 1 | #include "xv_tpg.h" 2 | 3 | XV_tpg setup_tpg() 4 | { 5 | //configure tpg 6 | XV_tpg ptpg; 7 | XV_tpg_Config *ptpg_config; 8 | 9 | ptpg_config = XV_tpg_LookupConfig(XPAR_V_TPG_0_DEVICE_ID); 10 | XV_tpg_CfgInitialize(&ptpg, ptpg_config, ptpg_config->BaseAddress); 11 | 12 | printf("Hello World\n\r"); 13 | 14 | printf("TPG Initialization\r\n"); 15 | 16 | u32 height,width,status; 17 | 18 | status = XV_tpg_IsReady(&ptpg); 19 | printf("Status %u \n\r", (unsigned int) status); 20 | status = XV_tpg_IsIdle(&ptpg); 21 | printf("Status %u \n\r", (unsigned int) status); 22 | XV_tpg_Set_height(&ptpg, (u32)1080); 23 | XV_tpg_Set_width(&ptpg, (u32)1920); 24 | height = XV_tpg_Get_height(&ptpg); 25 | width = XV_tpg_Get_width(&ptpg); 26 | XV_tpg_Set_colorFormat(&ptpg,XVIDC_CSF_YCRCB_422); 27 | XV_tpg_Set_maskId(&ptpg, 0x0); 28 | XV_tpg_Set_motionSpeed(&ptpg, 0x4); 29 | printf("info from tpg %u %u \n\r", (unsigned int)height, (unsigned int)width); 30 | XV_tpg_Set_bckgndId(&ptpg, XTPG_BKGND_SOLID_RED); 31 | status = XV_tpg_Get_bckgndId(&ptpg); 32 | printf("Status %x \n\r", (unsigned int) status); 33 | XV_tpg_EnableAutoRestart(&ptpg); 34 | XV_tpg_Start(&ptpg); 35 | status = XV_tpg_IsIdle(&ptpg); 36 | printf("Status %u \n\r", (unsigned int) status); 37 | return ptpg; 38 | } 39 | 40 | int start_movement(XV_tpg ptpg){ 41 | XV_tpg_Set_ovrlayId(&ptpg, XTPG_BKGND_H_RAMP); 42 | XV_tpg_Set_boxSize(&ptpg , 50); 43 | XV_tpg_Set_boxColorR(&ptpg , 149);//Y 44 | XV_tpg_Set_boxColorG(&ptpg , 43);//U 45 | XV_tpg_Set_boxColorB(&ptpg , 21);//V 46 | XV_tpg_Set_motionSpeed(&ptpg, 2); 47 | } -------------------------------------------------------------------------------- /HLS_code/BLOB_DETECTION/blob_analysis_core.h: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | typedef ap_uint<9> coord; 10 | typedef ap_uint<8> uint_8; 11 | typedef ap_uint<1> uint_1; 12 | 13 | struct point { 14 | coord x; 15 | coord y; 16 | }; 17 | typedef point point; 18 | 19 | struct rle_run { 20 | struct{ 21 | coord s; 22 | coord e; 23 | uint_8 no; 24 | coord y; 25 | uint_1 _last_run; 26 | } data; 27 | bool last; 28 | }; 29 | typedef rle_run rle_run; 30 | 31 | typedef hls::stream< rle_run > RLE_AXI_STREAM; 32 | 33 | /* start is inclusive, end is exclusive */ 34 | struct rle_line { 35 | uint_8 no_runs; 36 | rle_run runs[20]; 37 | }; 38 | typedef rle_line rle_line; 39 | 40 | /* top left, top right, bottom left, bottom right*/ 41 | /*struct bounding_box { 42 | point tl; 43 | point tr; 44 | point bl; 45 | point br; 46 | }; 47 | typedef bounding_box bounding_box; 48 | */ 49 | 50 | struct blob{ 51 | unsigned char id; 52 | point cp; 53 | short area; 54 | coord max_x; 55 | coord min_x; 56 | coord max_y; 57 | coord min_y; 58 | } __attribute__((packed, alligned(1))); //128 bits due to alignment issues 59 | 60 | 61 | typedef ap_uint<128> blob_port; 62 | 63 | //typedef blob blob; 64 | 65 | //function declaration 66 | void blob_analysis(RLE_AXI_STREAM &rle_stream, blob_port objects_port[100]); 67 | void identify_update_objects(rle_line ¤t, rle_line &previous, unsigned char * ob_id, blob * (&objects_ptr)[100]); 68 | void create_blob(blob * b, unsigned char * ob_id, rle_run current); 69 | -------------------------------------------------------------------------------- /PS_code/core_setup/dma_api.c: -------------------------------------------------------------------------------- 1 | #include "xaxidma.h" 2 | 3 | #define MEM_BASE_ADDR 0x01000000 4 | #define TX_BUFFER_BASE (MEM_BASE_ADDR + 0X00100000) 5 | //size of mog 320 * 480 = 0x960000 6 | #define RX_BUFFER_BASE (MEM_BASE_ADDR + 0X02000000) 7 | #define DMA_DEV_ID XPAR_AXIDMA_0_DEVICE_ID 8 | #define FRAME_SIZE (76800) 9 | 10 | XAxiDma setup_dma() 11 | { 12 | XAxiDma AxiDma; 13 | XAxiDma_Config *CfgPtr; 14 | 15 | //set up the DMA which stores the MOG structure 16 | int Status = XST_SUCCESS; 17 | 18 | CfgPtr = XAxiDma_LookupConfig(DMA_DEV_ID); 19 | if (!CfgPtr) { 20 | return XST_FAILURE; 21 | } 22 | 23 | Status = XAxiDma_CfgInitialize(&AxiDma, CfgPtr); 24 | if (Status != XST_SUCCESS) { 25 | return XST_FAILURE; 26 | } 27 | XAxiDma_IntrDisable(&AxiDma, XAXIDMA_IRQ_ALL_MASK, XAXIDMA_DEVICE_TO_DMA); 28 | XAxiDma_IntrDisable(&AxiDma, XAXIDMA_IRQ_ALL_MASK, XAXIDMA_DMA_TO_DEVICE); 29 | 30 | //int *m_dma_buffer_TX = (int*) TX_BUFFER_BASE; 31 | int *m_dma_buffer_RX = (int*) RX_BUFFER_BASE; 32 | 33 | Status = XAxiDma_Selftest(&AxiDma); //reset 34 | if (Status != XST_SUCCESS) { 35 | return XST_FAILURE; 36 | } 37 | 38 | //Zero out 128 bits of memory 39 | int zero[4] = {0,0,0,0}; 40 | 41 | Xil_DCacheFlushRange((u32)zero, 4 * sizeof(int)); 42 | Xil_DCacheFlushRange((u32)m_dma_buffer_RX, 4 * sizeof(int)); 43 | //setting MOG to zeros and read to confirm 44 | for (int i = 0; i < FRAME_SIZE; i++){ 45 | XAxiDma_SimpleTransfer(&AxiDma, (u32)zero, 4*sizeof(int), XAXIDMA_DMA_TO_DEVICE); 46 | XAxiDma_SimpleTransfer(&AxiDma, (u32)m_dma_buffer_RX , 4 * sizeof(int), XAXIDMA_DEVICE_TO_DMA); 47 | while(XAxiDma_Busy(&AxiDma,XAXIDMA_DEVICE_TO_DMA)); 48 | } 49 | //flush cache before reading 50 | Xil_DCacheInvalidateRange((u32)m_dma_buffer_RX, FRAME_SIZE * 4 * sizeof(int)); 51 | for (int i = 0; i < 4; i+=4){ 52 | printf("Recv[%d]=%d,%d,%d,%d\n", i, m_dma_buffer_RX[i],m_dma_buffer_RX[i+1],m_dma_buffer_RX[i+2],m_dma_buffer_RX[i+3]); 53 | } 54 | return 0; 55 | } 56 | -------------------------------------------------------------------------------- /HLS_code/MOG/build_gaussian_core.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | typedef hls::Mat<480, 640, HLS_8UC1> GRAY_IMAGE; 9 | typedef ap_axiu<16,2,5,6> YUV_pixel; 10 | typedef ap_axiu<8,2,5,6> GRAY_pixel; 11 | typedef hls::stream< YUV_pixel > AXI_STREAM; 12 | typedef hls::stream< GRAY_pixel> GRAY_AXI_STREAM; 13 | typedef ap_ufixed<10, 8, AP_RND> mean_vals; 14 | typedef ap_ufixed<10, 1, AP_RND> weight_vals; 15 | typedef ap_ufixed<14, 14>var_vals; 16 | typedef ap_fixed<32,16> calc_t; 17 | struct lum_gaussian{ 18 | mean_vals mean; 19 | var_vals var; 20 | weight_vals weight; 21 | unsigned char matchsum; 22 | }__attribute__((packed, aligned(2))); 23 | typedef struct lum_gaussian lum_gaussian; 24 | struct pixel_k_gaussian{ 25 | struct { 26 | lum_gaussian k_lum[3]; 27 | } data; 28 | unsigned char last; 29 | }__attribute__((packed, aligned(2))); 30 | typedef struct pixel_k_gaussian pixel_k_gaussian; 31 | //typedef pixel_k_gaussian k_luminosity_gaussian[1920*1080]; 32 | 33 | 34 | //slow learning rate 35 | const calc_t LEARNING_RATE (0.05); 36 | const calc_t BG_THRESHOLD (0.97); 37 | 38 | //function declaration 39 | void update_gaussian(lum_gaussian &l, unsigned char y, bool matched, weight_vals learn_rate, unsigned char min_var, int frame_count); 40 | int find_match(lum_gaussian l, unsigned char y); 41 | void sort_gaussians(pixel_k_gaussian &pg); 42 | void normalise_weights(pixel_k_gaussian &pg); 43 | lum_gaussian create_new_gaussian(unsigned char y, var_vals var, weight_vals weight); 44 | //void build_gaussian(hls::stream &stream_in , GRAY_AXI_STREAM &stream_out , pixel_k_gaussian MOG[640*480], hls::stream &vid_out, pixel_k_gaussian MOG_out[640*480], weight_vals bg_thresh, weight_vals learning_rate, unsigned char min_var); 45 | void build_gaussian(hls::stream &stream_in , hls::stream &stream_out , pixel_k_gaussian MOG[640*480], hls::stream &vid_out, pixel_k_gaussian MOG_out[640*480], weight_vals bg_thresh, weight_vals learning_rate, unsigned char min_var); 46 | -------------------------------------------------------------------------------- /PS_code/application/trackManager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "track.h" 3 | 4 | // ---------------------------------------------------------------------- 5 | 6 | /// 7 | /// \brief The TrackerSettings struct 8 | /// 9 | struct TrackerSettings 10 | { 11 | /// 12 | /// \brief m_dt 13 | /// Time step for Kalman 14 | /// 15 | track_t m_dt = 1.0f; 16 | 17 | /// 18 | /// \brief m_accelNoiseMag 19 | /// Noise magnitude for Kalman 20 | /// 21 | track_t m_accelNoiseMag = 0.1f; 22 | 23 | /// 24 | /// \brief m_distThres 25 | /// Distance threshold for Assignment problem for tracking::DistCenters or for tracking::DistRects (for tracking::DistJaccard it need from 0 to 1) 26 | /// 27 | track_t m_distThres = 50; 28 | 29 | /// 30 | /// \brief m_maximumAllowedSkippedFrames 31 | /// If the object don't assignment more than this frames then it will be removed 32 | /// 33 | size_t m_maximumAllowedSkippedFrames = 25; 34 | 35 | /// 36 | /// \brief m_maxTraceLength 37 | /// The maximum trajectory length 38 | /// 39 | size_t m_maxTraceLength = 50; 40 | 41 | /// 42 | /// \brief m_useAbandonedDetection 43 | /// Detection abandoned objects 44 | /// 45 | bool m_useAbandonedDetection = false; 46 | 47 | /// 48 | /// \brief m_minStaticTime 49 | /// After this time (in seconds) the object is considered abandoned 50 | /// 51 | int m_minStaticTime = 5; 52 | /// 53 | /// \brief m_maxStaticTime 54 | /// After this time (in seconds) the abandoned object will be removed 55 | /// 56 | int m_maxStaticTime = 25; 57 | }; 58 | 59 | class trackManager 60 | { 61 | public: 62 | trackManager(const TrackerSettings& settings); 63 | ~trackManager(void); 64 | 65 | tracks_t tracks; 66 | std::vector shaft; 67 | float danger_radius; 68 | void Update(const regions_t& regions, cv::UMat grayFrame, float fps); 69 | 70 | private: 71 | 72 | TrackerSettings m_settings; 73 | size_t N; 74 | size_t M; 75 | size_t m_nextTrackID; 76 | 77 | cv::UMat m_prevFrame; 78 | std::vector distMatrix; 79 | std::vector assignment; 80 | 81 | void UpdateHungrian(const regions_t& regions, cv::UMat grayFrame, float fps); 82 | }; 83 | 84 | -------------------------------------------------------------------------------- /HLS_code/OPEN_CLOSE/opening_core.cpp: -------------------------------------------------------------------------------- 1 | #include "opening_core.h" 2 | #include 3 | 4 | void open_and_close(GRAY_AXI_STREAM& INPUT_STREAM, RLE_AXI_STREAM& OUTPUT_STREAM)//, int rows, int cols) 5 | { 6 | #pragma HLS INTERFACE axis port=INPUT_STREAM 7 | #pragma HLS INTERFACE axis port=OUTPUT_STREAM 8 | #pragma HLS INTERFACE s_axilite port=return bundle=CRTL_BUS 9 | GRAY_AXI_STREAM OUTPUT_IMG_STREAM; 10 | #pragma HLS STREAM variable=OUTPUT_IMG_STREAM depth=1024 dim=1 11 | 12 | GRAY_IMAGE img_in(240, 320); 13 | GRAY_IMAGE img_out(240, 320); 14 | #pragma HLS STREAM variable=img_in depth=1024 dim=1 15 | #pragma HLS STREAM variable=img_out depth=1024 dim=1 16 | // #pragma HLS dataflow 17 | hls::AXIvideo2Mat(INPUT_STREAM, img_in); 18 | hls::Window<3, 3, unsigned char> kernel_3; 19 | hls::Window<8, 8, unsigned char> kernel_8; 20 | hls::Window<40, 40, unsigned char> kernel_40; 21 | //close 22 | hls::Dilate<0,1>(img_in, img_out , kernel_8); 23 | hls::Erode<0,1>(img_out, img_out , kernel_8); 24 | //open 25 | hls::Erode<0,1>(img_out, img_out , kernel_3); 26 | hls::Dilate<0,1>(img_out, img_out , kernel_3); 27 | //close 28 | for (int i = 0; i < 5; i++){ 29 | hls::Dilate<0,1>(img_out, img_out , kernel_8); 30 | } 31 | for (int i = 0; i < 5; i++){ 32 | hls::Erode<0,1>(img_out, img_out , kernel_8); 33 | } 34 | hls::Mat2AXIvideo(img_out, OUTPUT_IMG_STREAM); 35 | /* encode image in rle */ 36 | for (int r = 0; r < 240; r++){ 37 | ap_uint<8> prev_pixel = 0; 38 | coord start = 0; 39 | coord end = 0; 40 | rle_run run; 41 | run.data.s = img_out.cols; 42 | for (int c = 0; c < img_out.cols; c++){ 43 | while(OUTPUT_IMG_STREAM.empty()){} 44 | GRAY_PIXEL p; 45 | OUTPUT_IMG_STREAM.read(p); 46 | //printf("%d", (int)p.data > 0 ? 1 : 0); 47 | if (p.data != prev_pixel){ 48 | if (p.data > 0){ 49 | start = c; 50 | if (run.data.s < img_out.cols){ 51 | run.data._last_run = 0; 52 | OUTPUT_STREAM.write(run); 53 | } 54 | } else { 55 | end = c - 1; 56 | run.data.s = start; 57 | run.data.e = end; 58 | run.data.no = 0; 59 | run.data.y = r; 60 | //r.tlast cannot be set yet 61 | } 62 | } 63 | if (c == img_out.cols - 1){ 64 | //EDGE CASE WHERE RUN CONTINUES TO END 65 | if (run.data.s != start && p.data > 0){ 66 | run.data.s = start; 67 | run.data.e = img_out.cols - 1; 68 | run.data.no = 0; 69 | run.data.y = r; 70 | } 71 | if(run.data.s != img_out.cols){ 72 | run.data._last_run = 1; 73 | OUTPUT_STREAM.write(run); 74 | } 75 | } 76 | prev_pixel = (uint_8) p.data; 77 | } 78 | //printf("\n"); 79 | } 80 | } 81 | 82 | -------------------------------------------------------------------------------- /HLS_code/OPEN_CLOSE/opening_core_tb.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "time.h" 6 | #include 7 | #include "opening_core.h" 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | 14 | #ifdef _WIN32 15 | #include 16 | #define SYSERROR() GetLastError() 17 | #else 18 | #include 19 | #define SYSERROR() errno 20 | #endif 21 | 22 | //#define INPUT_IMG "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_3\\2376_mask.png" 23 | //#define INPUT_IMG "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_2\\246_mask.png" 24 | //#define INPUT_IMG "C:\\Users\\Tiarnan\\Pictures\\unedited_mask_1190.png" 25 | 26 | 27 | using namespace std; 28 | 29 | int main(int argc, char* argv[]) { 30 | 31 | GRAY_AXI_STREAM src_axi; 32 | RLE_AXI_STREAM dst_axi; 33 | char path_buffer[100]; 34 | char rle_path[20]; 35 | cv::Mat src; 36 | cv::Mat ref; 37 | for (int i = 500; i < 1600; i+=5){ 38 | snprintf(path_buffer, sizeof(path_buffer), "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_5\\%d_mask.png", i); 39 | std::cout << "Frame path : " << path_buffer << endl; 40 | src = cvLoadImage(path_buffer, 0); 41 | cv::Mat dst(src.rows, src.cols, CV_8UC1, cv::Scalar(0)); 42 | 43 | int sr = src.rows; 44 | int sc = src.cols; 45 | 46 | snprintf(rle_path, sizeof(rle_path), "rle_%d.txt", i); 47 | ofstream rlefile; 48 | rlefile.open(rle_path); 49 | 50 | // cv::imshow("unedited", src); 51 | 52 | //for (int r = 0; r < src.rows; r++){ 53 | // for (int c = 0; c < src.cols; c++){ 54 | // printf("%d", (int)src.at(r,c) > 0 ? 1 : 0); 55 | // } 56 | // printf("\n"); 57 | //} 58 | 59 | printf("\n"); 60 | printf("\n"); 61 | printf("\n"); 62 | printf("\n"); 63 | // printf("%d", (int)p.data > 0 ? 1 : 0); 64 | 65 | cvMat2AXIvideo(src, src_axi); 66 | 67 | printf("Calling open close now"); 68 | 69 | open_and_close(src_axi, dst_axi); 70 | 71 | rle_run run; 72 | while(!dst_axi.empty()){ 73 | dst_axi.read(run); 74 | int start = (int)run.data.s; 75 | int end = (int)run.data.e; 76 | int last = (int)run.data._last_run; 77 | int y = (int) run.data.y; 78 | rlefile << start; 79 | rlefile << ","; 80 | rlefile << end; 81 | rlefile << ","; 82 | rlefile << last; 83 | rlefile << ","; 84 | rlefile << y << endl; 85 | } 86 | rlefile.close(); 87 | } 88 | /* 89 | printf("Calling show now"); 90 | cv::imshow("edited", dst); 91 | 92 | cv::Mat diffImage(240, 426, CV_8UC1, cv::Scalar(0)); 93 | cv::absdiff(ref, dst, diffImage); 94 | 95 | imshow("Difference in hardware and reference is", diffImage); 96 | 97 | printf("Waiting on key press now"); 98 | 99 | cv::waitKey(0); 100 | // Closes all the frames 101 | cv::destroyAllWindows(); 102 | */ 103 | 104 | return 0; 105 | } 106 | -------------------------------------------------------------------------------- /PS_code/application/kalman.cpp: -------------------------------------------------------------------------------- 1 | #include "logger.h" 2 | #include "defines.h" 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace std; 8 | using namespace cv::tracking; 9 | 10 | class AcceleratedModel : public cv::tracking::UkfSystemModel 11 | { 12 | public: 13 | AcceleratedModel(track_t deltaTime, bool rectModel) 14 | : 15 | cv::tracking::UkfSystemModel(), 16 | m_deltaTime(deltaTime), 17 | m_rectModel(rectModel) 18 | { 19 | 20 | } 21 | 22 | void stateConversionFunction(const cv::Mat& x_k, const cv::Mat& u_k, const cv::Mat& v_k, cv::Mat& x_kplus1) 23 | { 24 | track_t x0 = x_k.at(0, 0); 25 | track_t y0 = x_k.at(1, 0); 26 | track_t vx0 = x_k.at(2, 0); 27 | track_t vy0 = x_k.at(3, 0); 28 | track_t ax0 = x_k.at(4, 0); 29 | track_t ay0 = x_k.at(5, 0); 30 | DBOUT("Current velocity x: " << vx0); 31 | DBOUT("Current velocity y: " << vy0); 32 | DBOUT("Current position x: " << x0); 33 | DBOUT("Current position y: " << y0); 34 | //cout << "Current acceleration x: " << ax0 << endl; 35 | //cout << "Current acceleration y: " << ay0 << endl; 36 | 37 | //use equations of motion to predict new position / displacement 38 | x_kplus1.at(0, 0) = x0 + (vx0 * m_deltaTime + ax0 * sqr(m_deltaTime) / 2); 39 | x_kplus1.at(1, 0) = y0 + (vy0 * m_deltaTime + ay0 * sqr(m_deltaTime) / 2); 40 | DBOUT("new position x: " << x_kplus1.at(0, 0)); 41 | DBOUT("new position y: " << x_kplus1.at(1, 0)); 42 | //new velocity 43 | x_kplus1.at(2, 0) = vx0 + ax0 * m_deltaTime; 44 | x_kplus1.at(3, 0) = vy0 + ay0 * m_deltaTime; 45 | //assume constant acceleration 46 | x_kplus1.at(4, 0) = ax0; 47 | x_kplus1.at(5, 0) = ay0; 48 | 49 | if (m_rectModel) 50 | { 51 | x_kplus1.at(6, 0) = x_k.at(6, 0); 52 | x_kplus1.at(7, 0) = x_k.at(7, 0); 53 | } 54 | 55 | if (v_k.size() == u_k.size()) 56 | { 57 | x_kplus1 += v_k + u_k; 58 | } 59 | else 60 | { 61 | x_kplus1 += v_k; 62 | } 63 | } 64 | 65 | //correct 66 | void measurementFunction(const cv::Mat& x_k, const cv::Mat& n_k, cv::Mat& z_k) 67 | { 68 | track_t x0 = x_k.at(0, 0); 69 | track_t y0 = x_k.at(1, 0); 70 | track_t vx0 = x_k.at(2, 0); 71 | track_t vy0 = x_k.at(3, 0); 72 | track_t ax0 = x_k.at(4, 0); 73 | track_t ay0 = x_k.at(5, 0); 74 | 75 | DBOUT("Current position x correcting: " << x0); 76 | DBOUT("Current position y correcting: " << y0); 77 | 78 | z_k.at(0, 0) = x0 + vx0 * m_deltaTime + ax0 * sqr(m_deltaTime) / 2 + n_k.at(0, 0); 79 | z_k.at(1, 0) = y0 + vy0 * m_deltaTime + ay0 * sqr(m_deltaTime) / 2 + n_k.at(1, 0); 80 | 81 | if (m_rectModel) 82 | { 83 | z_k.at(2, 0) = x_k.at(6, 0); 84 | z_k.at(3, 0) = x_k.at(7, 0); 85 | } 86 | } 87 | 88 | private: 89 | track_t m_deltaTime; 90 | bool m_rectModel; 91 | }; 92 | -------------------------------------------------------------------------------- /PS_code/core_setup/zed_iic.h: -------------------------------------------------------------------------------- 1 | //---------------------------------------------------------------- 2 | // _____ 3 | // * * 4 | // *____ *____ 5 | // * *===* *==* 6 | // *___*===*___** AVNET 7 | // *======* 8 | // *====* 9 | //--------------------------------------------------------------- 10 | // 11 | // This design is the property of Avnet. Publication of this 12 | // design is not authorized without written consent from Avnet. 13 | // 14 | // Please direct any questions to: technical.support@avnet.com 15 | // 16 | // Disclaimer: 17 | // Avnet, Inc. makes no warranty for the use of this code or design. 18 | // This code is provided "As Is". Avnet, Inc assumes no responsibility for 19 | // any errors, which may appear in this code, nor does it make a commitment 20 | // to update the information contained herein. Avnet, Inc specifically 21 | // disclaims any implied warranties of fitness for a particular purpose. 22 | // Copyright(c) 2013 Avnet, Inc. 23 | // All rights reserved. 24 | // 25 | //---------------------------------------------------------------- 26 | // 27 | // Create Date: Jul 01, 2013 28 | // Design Name: ZED-IIC 29 | // Module Name: zed_iic.h 30 | // Project Name: ZED-IIC 31 | // Target Devices: Zynq 32 | // Avnet Boards: ZedBoard 33 | // 34 | // Tool versions: ISE 14.6 35 | // 36 | // Description: IIC Hardware Abstraction Layer 37 | // 38 | // Dependencies: 39 | // 40 | // Revision: Jul 01, 2013: 1.00 Initial version 41 | // 42 | //---------------------------------------------------------------- 43 | 44 | #ifndef __ZED_IIC_H__ 45 | #define __ZED_IIC_H__ 46 | 47 | #include 48 | 49 | #include "xbasic_types.h" 50 | 51 | #define ZED_IIC_CONTEXT_BUFFER_SIZE 32 52 | 53 | struct struct_zed_iic_t 54 | { 55 | // software library version 56 | Xuint32 uVersion; 57 | 58 | // instantiation-specific names 59 | char szName[32]; 60 | 61 | // pointer to instantiation-specific data 62 | void *pContext; 63 | 64 | // context data (must be large enough to contain fmc_iic_axi_t or other implementations) 65 | unsigned char ContextBuffer[ZED_IIC_CONTEXT_BUFFER_SIZE]; 66 | 67 | // function pointers to implementation-specific code 68 | int (*fpIicRead )(struct struct_zed_iic_t *, Xuint8 ChipAddress, 69 | Xuint8 RegAddress, 70 | Xuint8 *pBuffer, 71 | Xuint8 ByteCount ); 72 | int (*fpIicWrite)(struct struct_zed_iic_t *, Xuint8 ChipAddress, 73 | Xuint8 RegAddress, 74 | Xuint8 *pBuffer, 75 | Xuint8 ByteCount ); 76 | }; 77 | typedef struct struct_zed_iic_t zed_iic_t; 78 | 79 | // Initialization routine for AXI_IIC implementation 80 | int zed_iic_axi_init( zed_iic_t *pIIC, char szName[], Xuint32 CoreAddress ); 81 | 82 | 83 | #endif // __ZED_IIC_H__ 84 | -------------------------------------------------------------------------------- /MATLAB_code/Foreground_det_comp.m: -------------------------------------------------------------------------------- 1 | function background_golden_reference_gen() 2 | 3 | videos = {'C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_1.avi', 4 | 'C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_2.avi', 5 | 'C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_3.avi', 6 | 'C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_4.avi', 7 | 'C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_5.avi'} 8 | start = [500,1,715,1,1]; 9 | for i = 1:5 10 | obj = setUpSystemObjects(char(videos(i))); 11 | 12 | count = 0; 13 | bad = 0; 14 | good = 0; 15 | base_path = 'C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_'; 16 | %fid = fopen('GoldenReference.txt','wt'); 17 | %p_c = 0; 18 | while ~isDone(obj.reader) 19 | count = count + 1; 20 | frame = obj.reader(); 21 | if (count < start(i)) 22 | continue; 23 | end 24 | mask = detectMovement(frame); 25 | path = sprintf('%s%d\\%d_%s', base_path, i, count, 'mask.png'); 26 | hls_mask = imread(path); 27 | height = size(frame,1); 28 | width = size(frame, 2); 29 | obj.maskPlayer.step(mask); 30 | obj.videoPlayer.step(frame); 31 | fg_frame = 0; 32 | good_frame = 0; 33 | bad_frame = 0; 34 | for row = 1:height 35 | for col = 1:width 36 | if (mask(row,col) > 0) 37 | fg_frame = fg_frame + 1; 38 | if hls_mask(row,col) > 0 39 | good_frame = good_frame + 1; 40 | else 41 | bad_frame = bad_frame + 1; 42 | end 43 | else 44 | if hls_mask(row,col) == 0 45 | good_frame = good_frame + 1; 46 | else 47 | bad_frame = bad_frame + 1; 48 | end 49 | end 50 | end 51 | end 52 | if ((fg_frame > height*width*0.03) & (fg_frame < height*width*0.5)) 53 | good = good + good_frame; 54 | bad = bad + bad_frame; 55 | end 56 | end 57 | good 58 | bad 59 | ratio = 100 - (bad / (good+bad) * 100) 60 | end 61 | 62 | function obj = setUpSystemObjects(videopath) 63 | obj.reader = vision.VideoFileReader(videopath); 64 | obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]); 65 | obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]); 66 | obj.detector = vision.ForegroundDetector('NumGaussians',3,'NumTrainingFrames', 50, 'MinimumBackgroundRatio', 0.6, 'InitialVariance', 1, 'LearningRate', 0.005); 67 | end 68 | 69 | function mask = detectMovement(frame) 70 | mask = obj.detector(frame); 71 | end 72 | end -------------------------------------------------------------------------------- /PS_code/application/track.cpp: -------------------------------------------------------------------------------- 1 | #include "track.h" 2 | 3 | /// 4 | /// \brief track 5 | /// \param pt 6 | /// \param region 7 | /// \param deltaTime 8 | /// \param accelNoiseMag 9 | /// \param trackID 10 | /// \param filterObjectSize 11 | /// \param externalTrackerForLost 12 | /// 13 | track::track( 14 | cv::Point point, 15 | track_t deltaTime, 16 | track_t accelNoiseMag, 17 | size_t trackID 18 | ) 19 | : 20 | m_trackID(trackID), 21 | m_skippedFrames(0), 22 | m_predictionPoint(point), 23 | m_outOfTheFrame(false) 24 | { 25 | //unscented kalman filter 26 | m_kalman = new kalmanFilter(m_predictionPoint, deltaTime, accelNoiseMag); 27 | m_trace.push_back(m_predictionPoint, m_predictionPoint); 28 | } 29 | 30 | /// 31 | /// \brief CalcDist 32 | /// \param pt 33 | /// \return 34 | /// 35 | track_t track::CalcDist(const Point_t& pt) const 36 | { 37 | Point_t diff = m_predictionPoint - pt; 38 | return sqrtf(sqr(diff.x) + sqr(diff.y)); 39 | } 40 | 41 | 42 | /// 43 | /// \brief Update 44 | /// \param pt 45 | /// \param region 46 | /// \param dataCorrect 47 | /// \param max_trace_length 48 | /// \param prevFrame 49 | /// \param currFrame 50 | /// 51 | void track::Update( 52 | const Point_t& pnt, 53 | bool dataCorrect, 54 | size_t max_trace_length, 55 | cv::UMat prevFrame, 56 | cv::UMat currFrame, 57 | int trajLen 58 | ) 59 | { 60 | cv::Point pt(pnt); 61 | DBOUT("Current Point : " << pt.x << " , " << pt.y); 62 | PointUpdate(pt, dataCorrect, currFrame.size()); 63 | DBOUT("New Point : " << m_predictionPoint.x << " , " << m_predictionPoint.y); 64 | 65 | if (dataCorrect) 66 | { 67 | m_lastPoint = pt; 68 | m_trace.push_back(m_predictionPoint, pt); 69 | 70 | } 71 | else 72 | { 73 | m_trace.push_back(m_predictionPoint); 74 | } 75 | 76 | if (m_trace.size() > max_trace_length) 77 | { 78 | m_trace.pop_front(m_trace.size() - max_trace_length); 79 | } 80 | } 81 | 82 | /// 83 | /// \brief CreateExternalTracker 84 | /// 85 | void track::CreateExternalTracker() 86 | { 87 | if (!m_tracker || m_tracker.empty()) 88 | { 89 | cv::TrackerKCF::Params params; 90 | params.compressed_size = 1; 91 | params.desc_pca = cv::TrackerKCF::GRAY; 92 | params.desc_npca = cv::TrackerKCF::GRAY; 93 | params.resize = true; 94 | params.detect_thresh = 0.5f; 95 | #if (((CV_VERSION_MAJOR == 3) && (CV_VERSION_MINOR >= 3)) || (CV_VERSION_MAJOR > 3)) 96 | m_tracker = cv::TrackerKCF::create(params); 97 | #else 98 | m_tracker = cv::TrackerKCF::createTracker(params); 99 | #endif 100 | } 101 | } 102 | 103 | /// 104 | /// \brief PointUpdate 105 | /// \param pt 106 | /// \param dataCorrect 107 | /// 108 | void track::PointUpdate( 109 | const Point_t& pt, 110 | bool dataCorrect, 111 | const cv::Size& frameSize 112 | ) 113 | { 114 | m_kalman->GetPointPrediction(pt); 115 | 116 | if (m_averagePoint.x + m_averagePoint.y > 0) 117 | { 118 | if (dataCorrect) 119 | { 120 | m_predictionPoint = m_kalman->Update((pt + m_averagePoint) / 2, dataCorrect); 121 | } 122 | else 123 | { 124 | m_predictionPoint = m_kalman->Update((m_predictionPoint + m_averagePoint) / 2, true); 125 | } 126 | } 127 | else 128 | { 129 | m_predictionPoint = m_kalman->Update(pt, dataCorrect); 130 | } 131 | 132 | auto Clamp = [](track_t& v, int hi) -> bool 133 | { 134 | if (v < 0) 135 | { 136 | v = 0; 137 | return true; 138 | } 139 | else if (hi && v > hi - 1) 140 | { 141 | v = static_cast(hi - 1); 142 | return true; 143 | } 144 | return false; 145 | }; 146 | m_outOfTheFrame = false; 147 | m_outOfTheFrame |= Clamp(m_predictionPoint.x, frameSize.width); 148 | m_outOfTheFrame |= Clamp(m_predictionPoint.y, frameSize.height); 149 | } -------------------------------------------------------------------------------- /PS_code/application/track.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "KalmanFilter.h" 9 | 10 | // -------------------------------------------------------------------------- 11 | /// 12 | /// \brief The TrajectoryPoint struct 13 | /// 14 | struct TrajectoryPoint 15 | { 16 | /// 17 | /// \brief TrajectoryPoint 18 | /// 19 | TrajectoryPoint() 20 | : m_hasRaw(false) 21 | { 22 | } 23 | 24 | /// 25 | /// \brief TrajectoryPoint 26 | /// \param prediction 27 | /// 28 | TrajectoryPoint(const Point_t& prediction) 29 | : 30 | m_hasRaw(false), 31 | m_prediction(prediction) 32 | { 33 | } 34 | 35 | /// 36 | /// \brief TrajectoryPoint 37 | /// \param prediction 38 | /// \param raw 39 | /// 40 | TrajectoryPoint(const Point_t& prediction, const Point_t& raw) 41 | : 42 | m_hasRaw(true), 43 | m_prediction(prediction), 44 | m_raw(raw) 45 | { 46 | } 47 | 48 | bool m_hasRaw; 49 | Point_t m_prediction; 50 | Point_t m_raw; 51 | }; 52 | 53 | // -------------------------------------------------------------------------- 54 | /// 55 | /// Trace class holds previous positions of the object 56 | /// 57 | class Trace 58 | { 59 | public: 60 | /// 61 | /// \brief operator [] 62 | /// \param i 63 | /// \return 64 | /// 65 | const Point_t& operator[](size_t i) const 66 | { 67 | return m_trace[i].m_prediction; 68 | } 69 | 70 | /// 71 | /// \brief operator [] 72 | /// \param i 73 | /// \return 74 | /// 75 | Point_t& operator[](size_t i) 76 | { 77 | return m_trace[i].m_prediction; 78 | } 79 | 80 | /// 81 | /// \brief at 82 | /// \param i 83 | /// \return 84 | /// 85 | const TrajectoryPoint& at(size_t i) const 86 | { 87 | return m_trace[i]; 88 | } 89 | 90 | /// 91 | /// \brief size 92 | /// \return 93 | /// 94 | size_t size() const 95 | { 96 | return m_trace.size(); 97 | } 98 | 99 | /// 100 | /// \brief push_back 101 | /// \param prediction 102 | /// 103 | void push_back(const Point_t& prediction) 104 | { 105 | m_trace.push_back(TrajectoryPoint(prediction)); 106 | } 107 | void push_back(const Point_t& prediction, const Point_t& raw) 108 | { 109 | m_trace.push_back(TrajectoryPoint(prediction, raw)); 110 | } 111 | 112 | /// 113 | /// \brief pop_front 114 | /// \param count 115 | /// 116 | void pop_front(size_t count) 117 | { 118 | if (count < size()) 119 | { 120 | m_trace.erase(m_trace.begin(), m_trace.begin() + count); 121 | } 122 | else 123 | { 124 | m_trace.clear(); 125 | } 126 | } 127 | 128 | /// 129 | /// \brief GetRawCount 130 | /// \param lastPeriod 131 | /// \return 132 | /// 133 | size_t GetRawCount(size_t lastPeriod) const 134 | { 135 | size_t res = 0; 136 | 137 | size_t i = 0; 138 | if (lastPeriod < m_trace.size()) 139 | { 140 | i = m_trace.size() - lastPeriod; 141 | } 142 | for (; i < m_trace.size(); ++i) 143 | { 144 | if (m_trace[i].m_hasRaw) 145 | { 146 | ++res; 147 | } 148 | } 149 | 150 | return res; 151 | } 152 | 153 | private: 154 | std::deque m_trace; 155 | }; 156 | 157 | // -------------------------------------------------------------------------- 158 | /// 159 | /// \brief The track class 160 | /// 161 | class track 162 | { 163 | public: 164 | track(cv::Point point, 165 | track_t deltaTime, 166 | track_t accelNoiseMag, 167 | size_t trackID); 168 | //probably add built in tracker 169 | 170 | /// 171 | /// \brief CalcDist 172 | /// Euclidean distance in pixels between objects centres on two N and N+1 frames 173 | /// \param pt 174 | /// \return 175 | /// 176 | track_t CalcDist(const Point_t& pt) const; 177 | 178 | 179 | 180 | void Update(const Point_t& pnt, bool dataCorrect, size_t max_trace_length, cv::UMat prevFrame, cv::UMat currFrame, int trajLen); 181 | 182 | Trace m_trace; 183 | size_t m_trackID; 184 | size_t m_skippedFrames; 185 | Point_t m_averagePoint; ///< Average point after LocalTracking 186 | Point_t m_lastPoint; 187 | 188 | 189 | private: 190 | Point_t m_predictionPoint; 191 | kalmanFilter* m_kalman; 192 | bool m_outOfTheFrame; 193 | 194 | cv::Ptr m_tracker; 195 | void CreateExternalTracker(); 196 | 197 | void PointUpdate(const Point_t& pt, bool dataCorrect, const cv::Size& frameSize); 198 | 199 | bool m_isStatic = false; 200 | int m_staticFrames = 0; 201 | cv::UMat m_staticFrame; 202 | }; 203 | 204 | typedef std::vector> tracks_t; -------------------------------------------------------------------------------- /MATLAB_code/Bounding_box_comp.m: -------------------------------------------------------------------------------- 1 | function back_sub_kalman() 2 | 3 | obj = setUpSystemObjects(); 4 | count = 0; 5 | step = 0; 6 | %read HLS detected bboxes 7 | fid = fopen('C:\\Users\\Tiarnan\\Documents\\Final Year Project\\test_vid_1_id_var1\\bboxes.txt','r'); 8 | A = fscanf(fid,'%d'); 9 | A_idx = 1; 10 | %skipped different numbers of frames for longer videos 11 | frames_skipped = 5; %test video 2 12 | %frames_skipped = 7; %test video 3 13 | overlapRatio = 0; 14 | matlab_bbox_count = 0; 15 | hls_bbox_count = 0; 16 | match_count = 0; 17 | while ~isDone(obj.reader) 18 | pause_pls = 0; 19 | frame = obj.reader(); 20 | count = count + 1 21 | mask = detectMovement(frame); 22 | if (count < 885) %test video 3 23 | continue; 24 | end 25 | if (count > 1395) 26 | break; 27 | end 28 | % from 715 we only want every 7th frame. 29 | if step > 0 30 | step = step - 1; 31 | continue; 32 | end 33 | %Get MATLAB bbox 34 | [~, centroids, bboxes] = obj.blobAnalyser(mask); 35 | matlab_bbox_count = matlab_bbox_count + size(centroids, 1); 36 | %get HLS bbox for this frame 37 | if A(A_idx) ~= count + 15 38 | %something wrong 39 | count 40 | A_idx 41 | break; 42 | else 43 | A_idx = A_idx + 1; 44 | if (A_idx > size(A)) 45 | break; 46 | end 47 | HLS_bboxes = []; 48 | while A(A_idx) ~= count + frames_skipped + 15 49 | hls_bbox_count = hls_bbox_count + 1; 50 | %basically for each HLS BBOX in this frame 51 | %MATLAB bbox format is [upper left x, upper left y, w, h] 52 | %HLS bbox format is [upper left x, upper left y, bottom right x, bottom right y] 53 | %test vids 2&4 are 320 wide, vids 1,3,5 are 426 wide 54 | HLS_bbox = [4.5*(A(A_idx)), 4.5*(A(A_idx+1)),(4.5*(A(A_idx+2))-4.5*(A(A_idx))),(4.5*(A(A_idx+3))-4.5*(A(A_idx+1)))]; 55 | % HLS_bbox = [6*(A(A_idx)), 4.5*(A(A_idx+1)),(6*(A(A_idx+2))-6*(A(A_idx))),(4.5*(A(A_idx+3))-4.5*(A(A_idx+1)))]; 56 | HLS_bboxes = [HLS_bboxes;HLS_bbox]; 57 | labels = cellstr('HLS'); 58 | HLS_bbox 59 | frame = insertObjectAnnotation(frame, 'rectangle', HLS_bbox, labels,'FontSize', 20, 'LineWidth',10); 60 | A_idx = A_idx + 4; 61 | if A_idx > size(A) 62 | break; 63 | end 64 | end 65 | if (A_idx > size(A)) 66 | break; 67 | end 68 | for i = 1:size(centroids, 1) 69 | matlab_max_ratio = 0; 70 | for j = 1:size(HLS_bboxes,1) 71 | %each bbox only matches to one bbox, the max 72 | MATLAB_bbox = bboxes(i, :); 73 | HLS_bbox = HLS_bboxes(j, :); 74 | temp_overlapRatio = bboxOverlapRatio(HLS_bbox, MATLAB_bbox); 75 | if temp_overlapRatio > matlab_max_ratio 76 | matlab_max_ratio = temp_overlapRatio; 77 | end 78 | end 79 | if matlab_max_ratio > 0 80 | if matlab_max_ratio > 0.5 && matlab_max_ratio < 0.6 81 | pause_pls = 1; 82 | end 83 | match_count = match_count + 1; 84 | matlab_max_ratio 85 | end 86 | overlapRatio = overlapRatio + matlab_max_ratio; 87 | end 88 | end 89 | labels = cellstr('MATLAB'); 90 | frame = insertObjectAnnotation(frame, 'rectangle', bboxes, labels, 'Color', 'red', 'FontSize', 20, 'LineWidth',10); 91 | obj.videoPlayer.step(frame); 92 | pause(0.2); 93 | if pause_pls == 1 94 | pause(10) 95 | end 96 | step = frames_skipped - 1; 97 | end 98 | max(hls_bbox_count,matlab_bbox_count) 99 | overlapRatio 100 | overlapRatio / max(hls_bbox_count,matlab_bbox_count) 101 | overlapRatio / match_count 102 | 103 | function obj = setUpSystemObjects() 104 | obj.reader = vision.VideoFileReader('C:\Users\Tiarnan\Pictures\Matlab_test\test_vid_3.avi'); 105 | obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]); 106 | obj.detector = vision.ForegroundDetector('NumGaussians', 3,'NumTrainingFrames', 50, 'MinimumBackgroundRatio', 0.6, 'InitialVariance', 1); 107 | obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, 'AreaOutputPort', true, 'CentroidOutputPort', true, ... 108 | 'MinimumBlobArea',700); 109 | end 110 | 111 | function mask = detectMovement(frame) 112 | mask = obj.detector(frame); 113 | mask = imopen(mask, strel('rectangle', [3,3])); 114 | mask = imclose(mask, strel('rectangle', [8, 8])); 115 | mask = bwareaopen(mask, 50); 116 | mask = imfill(mask, 'holes'); 117 | end 118 | 119 | end 120 | -------------------------------------------------------------------------------- /HLS_code/BLOB_DETECTION/blob_analysis_core.cpp: -------------------------------------------------------------------------------- 1 | #include "blob_analysis_core.h" 2 | #include 3 | 4 | /*Types defined in blob_analysis_core.h*/ 5 | 6 | using namespace hls; 7 | 8 | /* 9 | * Top function - blob analysis 10 | * Receive input mask as stream two lines at a time encoded using rle 11 | * and compute the neighbouring areas of the lines to 12 | * identify objects. 13 | * stream_in - input mask from MOG Foreground detection 14 | * objects - array of bounding boxes and centre points in the image 15 | */ 16 | void blob_analysis(RLE_AXI_STREAM &rle_stream, blob_port objects_port[100]){ 17 | #pragma HLS INTERFACE s_axilite port=return bundle=CRTL_BUS 18 | #pragma HLS INTERFACE bram port=objects_port 19 | #pragma HLS INTERFACE axis port=rle_stream 20 | blob * objects_ptr[100]; 21 | blob objects[100]; 22 | #pragma HLS ARRAY_MAP variable=objects_ptr instance=array1 horizontal 23 | #pragma HLS ARRAY_MAP variable=objects instance=array1 horizontal 24 | 25 | for (int i=0; i<100; i++){ 26 | #pragma HLS PIPELINE 27 | objects[i].id = objects_port[i] & 0xFF; 28 | objects[i].cp.x = (objects_port[i] >> 8) & 0x1FF; 29 | objects[i].cp.y = (objects_port[i] >> 17) & 0x1FF; 30 | objects[i].area = (objects_port[i] >> 26) & 0xFFFF; 31 | objects[i].max_x = (objects_port[i] >> 42) & 0x1FF; 32 | objects[i].min_x = (objects_port[i] >> 51) & 0x1FF; 33 | objects[i].max_y = (objects_port[i] >> 60) & 0x1FF; 34 | objects[i].min_y = (objects_port[i] >> 69) & 0x1FF; 35 | objects_ptr[i] = &(objects[i]); 36 | } 37 | //create blobs so they are in scope 38 | coord cur_line = 0; 39 | rle_line previous; 40 | rle_line current; 41 | unsigned char * next_ob_id_p; 42 | unsigned char next_ob_id = 1; 43 | next_ob_id_p = &next_ob_id; 44 | while(!rle_stream.empty()){ 45 | //for (int l = 0; l < 240; l++){ 46 | //printf("l : %d", l); 47 | while(rle_stream.empty()){}; 48 | rle_run run; 49 | uint_8 run_count = 0; 50 | run.data._last_run = 0; 51 | while(!run.data._last_run){ 52 | //for (int r = 0; r < 120; r++){ 53 | run = rle_stream.read(); 54 | current.runs[run_count] = run; 55 | run_count = run_count + 1; 56 | } 57 | current.no_runs = run_count; 58 | if (cur_line > 0){ 59 | identify_update_objects(current, previous, next_ob_id_p, objects_ptr); 60 | } else { 61 | //first line is current - mark runs 62 | for (int i = 0; i < current.no_runs; i++){ 63 | //for (int i = 0; i < 120; i++){ 64 | current.runs[i].data.no = next_ob_id; 65 | create_blob(objects_ptr[next_ob_id - 1], next_ob_id_p, current.runs[i]); 66 | next_ob_id = next_ob_id + 1; 67 | } 68 | } 69 | previous = current; 70 | cur_line = cur_line + 1; 71 | } 72 | for (int i = 0; i < 100; i++){ 73 | #pragma HLS PIPELINE 74 | unsigned char *start = (unsigned char *)&objects[i]; 75 | unsigned char *start_port = (unsigned char *)&objects_port[i]; 76 | for (int j = 0; j < sizeof(blob); j ++){ 77 | unsigned char byte = (unsigned char)*(start + j); 78 | *(start_port + j) = byte; 79 | } 80 | } 81 | } 82 | 83 | /* 84 | * Iterate over the two lines passed in and identify what objects 85 | * the runs belong to. Update the object properties as we go. 86 | * current - line we are identifying objects in 87 | * previous - check if runs in current match runs in previous 88 | * to get object id. 89 | */ 90 | void identify_update_objects(rle_line ¤t, rle_line &previous, unsigned char * ob_id, blob * (&objects_ptr)[100]){ 91 | for (int i = 0; i < current.no_runs; i++){ 92 | //for (int i = 0; i < 120; i++){ 93 | unsigned char match = 0; 94 | for (int j = 0; j < previous.no_runs; j++){ 95 | //for (int j = 0; j < 120; j++){ 96 | if ((current.runs[i].data.s <= previous.runs[j].data.e && 97 | current.runs[i].data.s >= previous.runs[j].data.s) || 98 | (current.runs[i].data.e <= previous.runs[j].data.e && 99 | current.runs[i].data.e >= previous.runs[j].data.s) || 100 | (current.runs[i].data.s < previous.runs[j].data.s && 101 | current.runs[i].data.e > previous.runs[j].data.e)) { 102 | match = match + 1; 103 | if (match == 1) { 104 | //update object parameters 105 | current.runs[i].data.no = previous.runs[j].data.no; 106 | //area 107 | char idx = (char)current.runs[i].data.no - 1; 108 | if (idx >= 0 && idx < 100){ 109 | blob* b = objects_ptr[idx]; 110 | b->area = b->area + (current.runs[i].data.e - current.runs[i].data.s); 111 | if (current.runs[i].data.e > b->max_x){ 112 | b->max_x = current.runs[i].data.e; 113 | } 114 | if (current.runs[i].data.s < b->min_x){ 115 | b->min_x = current.runs[i].data.s; 116 | } 117 | if (current.runs[i].data.y > b->max_y){ 118 | b->max_y = current.runs[i].data.y; 119 | } 120 | coord w = b->max_x - b->min_x; 121 | coord h = b->max_y - b->min_y; 122 | point centre; 123 | centre.x = b->min_x + (w/2); 124 | centre.y = b->min_y + (h/2); 125 | b->cp = centre; 126 | } 127 | //bounding box created at end 128 | } else if (match > 1) { 129 | //check if matched with a new object 130 | //merge objects 131 | //idx of object already matched with 132 | current.runs[i].data.no = previous.runs[j].data.no; 133 | char idx_merge = (char)current.runs[i].data.no - 1; 134 | char idx = (char)previous.runs[j].data.no -1; 135 | if (idx == idx_merge){ 136 | break; 137 | } 138 | //copy features to blob 1 139 | blob* b = objects_ptr[idx]; 140 | blob* b_merge = objects_ptr[idx_merge]; 141 | b->area = b->area + b_merge->area; 142 | b->max_x = b->max_x >= b_merge->max_x ? b->max_x : b_merge->max_x; 143 | b->max_y = b->max_y >= b_merge->max_y ? b->max_y : b_merge->max_y; 144 | b->min_x = b->min_x <= b_merge->min_x ? b->min_x : b_merge->min_x; 145 | b->min_y = b->min_y <= b_merge->min_y ? b->min_y : b_merge->min_y; 146 | coord w = b->max_x - b->min_x; 147 | coord h = b->max_y - b->min_y; 148 | point centre; 149 | centre.x = b->min_x + (w/2); 150 | centre.y = b->min_y + (h/2); 151 | b->cp = centre; 152 | objects_ptr[idx_merge] = objects_ptr[idx]; 153 | } 154 | } 155 | } 156 | if (match == 0) { 157 | //create a new object 158 | if(*ob_id < 100){ 159 | current.runs[i].data.no = *ob_id; 160 | create_blob(objects_ptr[*(ob_id) - 1], ob_id, current.runs[i]); 161 | *ob_id = *(ob_id) + 1; 162 | } 163 | } 164 | } 165 | } 166 | 167 | void create_blob(blob * b, unsigned char * ob_id, rle_run current){ 168 | b->id = *ob_id; 169 | b->min_y = current.data.y; 170 | b->max_y = current.data.y; 171 | b->min_x = current.data.s; 172 | b->max_x = current.data.e; 173 | coord w = current.data.e - current.data.s + 1; 174 | point centre; 175 | centre.x = current.data.s + (w/2); 176 | centre.y = current.data.y; 177 | b->cp = centre; 178 | b->area = w; 179 | } 180 | -------------------------------------------------------------------------------- /PS_code/application/trackManager.cpp: -------------------------------------------------------------------------------- 1 | //#include 2 | #include 3 | 4 | #include "logger.h" 5 | #include "trackManager.h" 6 | 7 | using namespace dlib; 8 | 9 | 10 | trackManager::trackManager(const TrackerSettings& settings) 11 | : 12 | m_settings(settings), 13 | m_nextTrackID(0) 14 | { 15 | shaft.push_back(Point_t(80,330)); 16 | shaft.push_back(Point_t(50, 290)); 17 | danger_radius = 80.00; 18 | }+ 19 | 20 | 21 | trackManager::~trackManager() 22 | { 23 | } 24 | 25 | matrix create_matrix_for_min_cost(std::vector &distMatrix, int no_tracks, int no_regions) { 26 | //distMatrix is a matrix of track_t distances from each detection to each tracked obj. 27 | //using max cost algorithm so need to invert values - /1 28 | //algorithm needs ints so multiply up 29 | int max = (no_tracks > no_regions) ? no_tracks : no_regions; 30 | matrix costMatrix(max, max); 31 | matrix inverseMatrix(no_tracks, no_regions); 32 | float inverse; 33 | float min_inverse = 1.00f; 34 | for (int i = 0; i < costMatrix.nc(); i++) { 35 | for (int j = 0; j < costMatrix.nr(); j++) { 36 | costMatrix(i, j) = -1; 37 | } 38 | } 39 | for (int i = 0; i < no_tracks; i++) { 40 | for (int j = 0; j < no_regions; j++) { 41 | if (distMatrix[(i*no_regions) + j] > 0) { 42 | inverse = 1.00 / (distMatrix[(i*no_regions) + j]); 43 | DBOUT("dist matrix : " << distMatrix[(i*no_regions) + j]); 44 | DBOUT("inverse : " << inverse); 45 | inverseMatrix(i, j) = inverse; 46 | } 47 | else { 48 | inverse = 1.00 / 1e-6; 49 | } 50 | if (inverse < min_inverse) { 51 | min_inverse = inverse; 52 | } 53 | } 54 | } 55 | for (int i = 0; i < no_tracks; i++) { 56 | for (int j = 0; j < no_regions; j++) { 57 | DBOUT("cost matrix : " << costMatrix(i, j)); 58 | float cost_f = inverseMatrix(i,j) / min_inverse; 59 | costMatrix(i, j) = (int)cost_f; 60 | DBOUT("cost matrix : " << costMatrix(i,j)); 61 | } 62 | } 63 | return costMatrix; 64 | 65 | } 66 | 67 | void trackManager::Update( 68 | const regions_t& regions, 69 | cv::UMat grayFrame, 70 | float fps 71 | ) 72 | { 73 | UpdateHungrian(regions, grayFrame, fps); 74 | 75 | grayFrame.copyTo(m_prevFrame); 76 | } 77 | 78 | // --------------------------------------------------------------------------- 79 | // 80 | // --------------------------------------------------------------------------- 81 | void trackManager::UpdateHungrian( 82 | const regions_t& regions, 83 | cv::UMat grayFrame, 84 | float fps 85 | ) 86 | { 87 | N = tracks.size(); 88 | M = regions.size(); 89 | 90 | std::vector mu(1); 91 | float max_velocity; 92 | 93 | dlib::matrix costMatrix(N, M); 94 | distMatrix.clear(); 95 | 96 | 97 | if (!tracks.empty()) 98 | { 99 | const track_t maxPossibledistMatrix = grayFrame.cols * grayFrame.rows; 100 | track_t maxdistMatrix = 0; 101 | for (size_t i = 0; i < tracks.size(); i++) { 102 | //get velocity from track kalman filter 103 | for (size_t j = 0; j < regions.size(); j++) { 104 | mu[0] = moments(regions[j], false); 105 | Point_t pnt = Point_t(mu[0].m10 / mu[0].m00, mu[0].m01 / mu[0].m00); 106 | auto dist = tracks[i]->CalcDist(pnt); 107 | distMatrix.push_back(dist); 108 | if (dist > maxdistMatrix) { 109 | maxdistMatrix = dist; 110 | } 111 | } 112 | } 113 | 114 | // ----------------------------------- 115 | // Solving assignment problem (tracks and predictions of Kalman filter) 116 | // ----------------------------------- 117 | 118 | costMatrix = create_matrix_for_min_cost(distMatrix, N, M); 119 | for (long r = 0; r < costMatrix.nr(); ++r) 120 | { 121 | // loop over all the columns 122 | for (long c = 0; c < costMatrix.nc(); ++c) 123 | { 124 | DBOUT("VALUE : " << costMatrix(r, c)); 125 | } 126 | } 127 | assignment = max_cost_assignment(costMatrix); 128 | 129 | //identify assignments to undetected regions 130 | for (size_t i = 0; i < static_cast(assignment.size()); i++) { 131 | if (assignment[i] > regions.size() - 1) 132 | assignment[i] = -1; 133 | } 134 | 135 | // ----------------------------------- 136 | // clean assignment from pairs with large distance 137 | // ----------------------------------- 138 | for (size_t i = 0; i < static_cast(tracks.size()); i++) 139 | { 140 | if (assignment[i] != -1) 141 | { 142 | if (distMatrix[i + assignment[i] * N] > m_settings.m_distThres) 143 | { 144 | assignment[i] = -1; 145 | tracks[i]->m_skippedFrames++; 146 | } 147 | } 148 | else 149 | { 150 | // If track have no assigned detect, then increment skipped frames counter. 151 | tracks[i]->m_skippedFrames++; 152 | } 153 | } 154 | 155 | // ----------------------------------- 156 | // If track didn't get detected for long time, remove it. 157 | // ----------------------------------- 158 | for (int i = 0; i < static_cast(tracks.size()); i++) 159 | { 160 | if (tracks[i]->m_skippedFrames > m_settings.m_maximumAllowedSkippedFrames) 161 | { 162 | tracks.erase(tracks.begin() + i); 163 | assignment.erase(assignment.begin() + i); 164 | i--; 165 | } 166 | } 167 | } 168 | 169 | // ----------------------------------- 170 | // Search for unassigned detects and start new tracks for them. 171 | // ----------------------------------- 172 | for (size_t i = 0; i < regions.size(); ++i) 173 | { 174 | if (distance(assignment.begin(), find(assignment.begin(), assignment.end(), i)) > tracks.size() -1 175 | || assignment.size() == 0) 176 | { 177 | mu[0] = moments(regions[i], false); 178 | Point_t pnt = Point_t(mu[0].m10 / mu[0].m00, mu[0].m01 / mu[0].m00); 179 | tracks.push_back(std::make_unique(pnt, 180 | m_settings.m_dt, 181 | m_settings.m_accelNoiseMag, 182 | m_nextTrackID++)); 183 | } 184 | } 185 | 186 | // Update Kalman Filters state 187 | //DBOUT("UPDATING HUNGARIAN " << assignment.size()); 188 | 189 | 190 | for (size_t i = 0; i < assignment.size(); i++) 191 | { 192 | //DBOUT("UPDATING KALMAN 4"); 193 | // If track updated less than one time, than filter state is not correct. 194 | if (assignment[i] != -1) // If we have assigned detect, then update using its coordinates, 195 | { 196 | //DBOUT("UPDATING KALMAN "); 197 | mu[0] = moments(regions[assignment[i]], false); 198 | Point_t pt = Point_t(mu[0].m10 / mu[0].m00, mu[0].m01 / mu[0].m00); 199 | tracks[i]->m_skippedFrames = 0; 200 | tracks[i]->Update( 201 | pt, true, 202 | m_settings.m_maxTraceLength, 203 | m_prevFrame, grayFrame, 204 | m_settings.m_useAbandonedDetection ? cvRound(m_settings.m_minStaticTime * fps) : 0); 205 | } 206 | else // if not continue using predictions 207 | { 208 | //DBOUT("UPDATING KALMAN 1"); 209 | Point_t pt; 210 | tracks[i]->Update(pt, false, m_settings.m_maxTraceLength, m_prevFrame, grayFrame, 0); 211 | } 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /PS_code/application/kalmanFilter.cpp: -------------------------------------------------------------------------------- 1 | #include "kalmanFilter.h" 2 | #include "logger.h" 3 | 4 | using namespace cv; 5 | 6 | kalmanFilter::kalmanFilter(Point_t pt, 7 | track_t deltaTime, // time increment (lower values makes target more "massive") 8 | track_t accelNoiseMag 9 | ) 10 | : 11 | m_initialized(false), 12 | m_deltaTime(deltaTime), 13 | m_deltaTimeMin(deltaTime), 14 | m_deltaTimeMax(2 * deltaTime), 15 | m_lastDist(0), 16 | m_accelNoiseMag(accelNoiseMag) 17 | { 18 | m_deltaStep = (m_deltaTimeMax - m_deltaTimeMin) / m_deltaStepsCount; 19 | 20 | m_initialPoints.push_back(pt); 21 | m_lastPointResult = pt; 22 | 23 | } 24 | 25 | 26 | kalmanFilter::~kalmanFilter() 27 | { 28 | } 29 | 30 | 31 | class AcceleratedModel : public cv::tracking::UkfSystemModel 32 | { 33 | public: 34 | AcceleratedModel(track_t deltaTime) 35 | : 36 | cv::tracking::UkfSystemModel(), 37 | m_deltaTime(deltaTime) 38 | { 39 | 40 | } 41 | 42 | void stateConversionFunction(const cv::Mat& x_k, const cv::Mat& u_k, const cv::Mat& v_k, cv::Mat& x_kplus1) 43 | { 44 | track_t x0 = x_k.at(0, 0); 45 | track_t y0 = x_k.at(1, 0); 46 | track_t vx0 = x_k.at(2, 0); 47 | track_t vy0 = x_k.at(3, 0); 48 | track_t ax0 = x_k.at(4, 0); 49 | track_t ay0 = x_k.at(5, 0); 50 | DBOUT("Current velocity x: " << vx0); 51 | DBOUT("Current velocity y: " << vy0); 52 | DBOUT("Current position x: " << x0); 53 | DBOUT("Current position y: " << y0); 54 | //cout << "Current acceleration x: " << ax0 << endl; 55 | //cout << "Current acceleration y: " << ay0 << endl; 56 | 57 | //use equations of motion to predict new position / displacement 58 | 59 | x_kplus1.at(0, 0) = x0 + (vx0 * m_deltaTime + ax0 * sqr(m_deltaTime) / 2); 60 | x_kplus1.at(1, 0) = y0 + (vy0 * m_deltaTime + ay0 * sqr(m_deltaTime) / 2); 61 | DBOUT("new position x: " << x_kplus1.at(0, 0)); 62 | DBOUT("new position y: " << x_kplus1.at(1, 0)); 63 | //new velocity 64 | x_kplus1.at(2, 0) = vx0 + ax0 * m_deltaTime; 65 | x_kplus1.at(3, 0) = vy0 + ay0 * m_deltaTime; 66 | //assume constant acceleration 67 | x_kplus1.at(4, 0) = ax0; 68 | x_kplus1.at(5, 0) = ay0; 69 | 70 | 71 | if (v_k.size() == u_k.size()) 72 | { 73 | x_kplus1 += v_k + u_k; 74 | } 75 | else 76 | { 77 | x_kplus1 += v_k; 78 | } 79 | } 80 | 81 | void measurementFunction(const cv::Mat& x_k, const cv::Mat& n_k, cv::Mat& z_k) 82 | { 83 | track_t x0 = x_k.at(0, 0); 84 | track_t y0 = x_k.at(1, 0); 85 | track_t vx0 = x_k.at(2, 0); 86 | track_t vy0 = x_k.at(3, 0); 87 | track_t ax0 = x_k.at(4, 0); 88 | track_t ay0 = x_k.at(5, 0); 89 | 90 | //DBOUT("Current position x correcting: " << x0); 91 | //DBOUT("Current position y correcting: " << y0); 92 | 93 | z_k.at(0, 0) = x0 + vx0 * m_deltaTime + ax0 * sqr(m_deltaTime) / 2 + n_k.at(0, 0); 94 | z_k.at(1, 0) = y0 + vy0 * m_deltaTime + ay0 * sqr(m_deltaTime) / 2 + n_k.at(1, 0); 95 | 96 | //DBOUT("new position x correcting: " << z_k.at(0, 0)); 97 | //DBOUT("new position y correcting: " << z_k.at(1, 0)); 98 | } 99 | 100 | private: 101 | track_t m_deltaTime; 102 | }; 103 | 104 | 105 | void kalmanFilter::createUnscented(Point_t xy0, Point_t xyv0) { 106 | int MP = 2; 107 | int DP = 6; 108 | int CP = 0; 109 | 110 | cv::Mat processNoiseCov = cv::Mat::zeros(DP, DP, Mat_t(1)); 111 | processNoiseCov.at(0, 0) = 1e-2f; 112 | processNoiseCov.at(1, 1) = 1e-2f; 113 | processNoiseCov.at(2, 2) = 1e-1f; 114 | processNoiseCov.at(3, 3) = 1e-1f; 115 | processNoiseCov.at(4, 4) = 1e-1f; 116 | processNoiseCov.at(5, 5) = 1e-1f; 117 | 118 | cv::Mat measurementNoiseCov = cv::Mat::zeros(MP, MP, Mat_t(1)); 119 | measurementNoiseCov.at(0, 0) = 1e-6f; 120 | measurementNoiseCov.at(1, 1) = 1e-6f; 121 | 122 | cv::Mat initState(DP, 1, Mat_t(1)); 123 | initState.at(0, 0) = xy0.x; 124 | initState.at(1, 0) = xy0.y; 125 | initState.at(2, 0) = xyv0.x; 126 | initState.at(3, 0) = xyv0.y; 127 | initState.at(4, 0) = 0; 128 | initState.at(5, 0) = 0; 129 | 130 | cv::Mat P = 1e-6 * cv::Mat::eye(DP, DP, Mat_t(1)); 131 | 132 | m_deltaTime = 0.1f; 133 | Ptr model(new AcceleratedModel(m_deltaTime)); 134 | cv::tracking::UnscentedKalmanFilterParams params(DP, MP, CP, 0, 0, model); 135 | params.dataType = Mat_t(1); 136 | params.stateInit = initState.clone(); 137 | params.errorCovInit = P.clone(); 138 | params.measurementNoiseCov = measurementNoiseCov.clone(); 139 | params.processNoiseCov = processNoiseCov.clone(); 140 | 141 | params.alpha = 1.0; 142 | params.beta = 2.0; 143 | params.k = -2.0; 144 | 145 | m_unscentedKalman = createUnscentedKalmanFilter(params); 146 | m_initialized = true; 147 | } 148 | 149 | Point_t kalmanFilter::GetPointPrediction(Point_t pt) 150 | { 151 | if (m_initialized) 152 | { 153 | //m_lastPointResult = pt; 154 | cv::Mat prediction; 155 | 156 | prediction = m_unscentedKalman->predict(); 157 | 158 | m_lastPointResult = Point_t(prediction.at(0), prediction.at(1)); 159 | } 160 | return m_lastPointResult; 161 | } 162 | 163 | //--------------------------------------------------------------------------- 164 | Point_t kalmanFilter::Update(Point_t pt, bool dataCorrect) 165 | { 166 | if (!m_initialized) 167 | { 168 | if (m_initialPoints.size() < MIN_INIT_VALS) 169 | { 170 | if (dataCorrect) 171 | { 172 | m_initialPoints.push_back(pt); 173 | } 174 | } 175 | if (m_initialPoints.size() == MIN_INIT_VALS) 176 | { 177 | track_t kx = 0; 178 | track_t bx = 0; 179 | track_t ky = 0; 180 | track_t by = 0; 181 | get_lin_regress_params(m_initialPoints, 0, MIN_INIT_VALS, kx, bx, ky, by); 182 | Point_t xy0(kx * (MIN_INIT_VALS - 1) + bx, ky * (MIN_INIT_VALS - 1) + by); 183 | Point_t xyv0(kx, ky); 184 | 185 | createUnscented(xy0, xyv0); 186 | m_lastDist = 0; 187 | } 188 | } 189 | 190 | if (m_initialized) 191 | { 192 | cv::Mat measurement(2, 1, Mat_t(1)); 193 | if (!dataCorrect) 194 | { 195 | DBOUT("Updating predictions"); 196 | measurement.at(0) = m_lastPointResult.x; //update using prediction 197 | measurement.at(1) = m_lastPointResult.y; 198 | } 199 | else 200 | { 201 | DBOUT("Updating measurements"); 202 | measurement.at(0) = pt.x; //update using measurements 203 | measurement.at(1) = pt.y; 204 | } 205 | // Correction 206 | cv::Mat estimated; 207 | estimated = m_unscentedKalman->correct(measurement); 208 | 209 | m_lastPointResult.x = estimated.at(0); //update using measurements 210 | m_lastPointResult.y = estimated.at(1); 211 | } 212 | else 213 | { 214 | if (dataCorrect) 215 | { 216 | m_lastPointResult = pt; 217 | } 218 | } 219 | return m_lastPointResult; 220 | } 221 | 222 | -------------------------------------------------------------------------------- /HLS_code/MOG/build_gaussian_tb.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "time.h" 6 | #include 7 | #include "build_gaussian_core.h" 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | 14 | #ifdef _WIN32 15 | #include 16 | #define SYSERROR() GetLastError() 17 | #else 18 | #include 19 | #define SYSERROR() errno 20 | #endif 21 | 22 | //#define INPUT_VID "C:\\Users\\Tiarnan\\Pictures\\test_vid_mog_bg.avi" 23 | #define INPUT_VID "C:\\Users\\Tiarnan\\Pictures\\Matlab_test\\test_vid_1.avi" 24 | //#define INPUT_VID "C:\\Users\\Tiarnan\\Downloads\\test_vid_4.avi" 25 | 26 | using namespace std; 27 | 28 | int main(int argc, char* argv[]) { 29 | 30 | hls::stream src_axi; 31 | hls::stream vid_axi; 32 | //GRAY_AXI_STREAM dst_axi; 33 | hls::stream dst_axi; 34 | hls::stream MOG_in; 35 | hls::stream MOG_out; 36 | #ifdef __SYNTHESIS__ 37 | pixel_k_gaussian *MOG = (pixel_k_gaussian*)calloc(sizeof(pixel_k_gaussian) * 640 * 480); 38 | bool *out_val = (char*)calloc(sizeof(char) * 640 * 480); 39 | #else 40 | static pixel_k_gaussian MOG[640*480] = { 0 }; 41 | //static short prev_y[640*480] = { 0 }; 42 | //static short prev_bgr[640*480][3] = { 0 }; 43 | static unsigned char out_val[480][640]; 44 | #endif 45 | cv::VideoCapture cap(INPUT_VID); 46 | 47 | // Check if camera opened successfully 48 | if(!cap.isOpened()){ 49 | printf( "Error opening video stream or file" ); 50 | return -1; 51 | } 52 | 53 | /* 54 | bool ret = cap.set(CV_CAP_PROP_FRAME_WIDTH,640); 55 | if (!ret){ 56 | printf("failed to set video width"); 57 | printf("Video width is %d", cap.get(CV_CAP_PROP_FRAME_WIDTH)); 58 | } 59 | 60 | ret = cap.set(CV_CAP_PROP_FRAME_HEIGHT,480); 61 | if (!ret){ 62 | printf("failed to set video height"); 63 | printf("Video height is %d", cap.get(CV_CAP_PROP_FRAME_HEIGHT)); 64 | }*/ 65 | printf("input fourcc : %d", cap.get(CV_CAP_PROP_FOURCC)); 66 | 67 | cv::VideoWriter output("C:\\Users\\Tiarnan\\Downloads\\test_vid_4_out.avi", CV_FOURCC('C','V','I','D'), 68 | 30, cv::Size(480,640)); 69 | cv::VideoWriter output_mask("C:\\Users\\Tiarnan\\Downloads\\test_vid_4_out_mask.avi", cap.get(CV_CAP_PROP_FOURCC), 70 | cap.get(CV_CAP_PROP_FPS), 71 | cv::Size((int)cap.get(CV_CAP_PROP_FRAME_HEIGHT), (int)cap.get(CV_CAP_PROP_FRAME_WIDTH))); 72 | if(!output.isOpened()){ 73 | printf( "Error opening video file" ); 74 | return -1; 75 | } 76 | if(!output_mask.isOpened()){ 77 | printf( "Error opening video mask file" ); 78 | return -1; 79 | } 80 | 81 | //false is U true is V 82 | bool u = false; 83 | int frame_count = 0; 84 | std::string frame_text; 85 | std::string base_text = "frame : "; 86 | 87 | std::ofstream of("ref.txt"); 88 | 89 | while(frame_count < 80){ 90 | 91 | cv::Mat frame; 92 | cv::Mat frame_yuv; 93 | // Capture frame-by-frame 94 | cap >> frame; 95 | 96 | // If the frame is empty, break immediately 97 | if (frame.empty()) 98 | break; 99 | frame_count++; 100 | 101 | if (frame_count < 70) { 102 | continue; 103 | } 104 | 105 | cv::cvtColor(frame, frame_yuv, cv::COLOR_BGR2YUV); 106 | 107 | for (int idxRows = 0; idxRows < frame.rows; idxRows++ ){ 108 | for (int idxCols = 0; idxCols < frame.cols; idxCols++ ){ 109 | YUV_pixel valIn; 110 | short data; 111 | short Y = frame_yuv.at(idxRows, idxCols)[0]; 112 | short U = frame_yuv.at(idxRows, idxCols)[1]; 113 | short V = frame_yuv.at(idxRows, idxCols)[2]; 114 | if (idxRows == 0 && idxCols == 0){ 115 | //printf("breaking here"); 116 | } 117 | // Y is going to be lsb and U/V is msb 118 | if (u) { 119 | data = (U << 8) | Y; 120 | } else{ 121 | data = (V << 8) | Y; 122 | } 123 | u = !u; 124 | valIn.data = data; 125 | valIn.keep = 1; valIn.strb = 1; valIn.id = 0; valIn.dest = 0; 126 | //these may be changed below 127 | valIn.user = 0; valIn.last = 0; 128 | if (idxCols == 0 && idxRows == 0) { 129 | valIn.user = 1; 130 | } else if (idxCols == frame.cols - 1) { 131 | valIn.last = 1; 132 | } 133 | src_axi << valIn; 134 | } 135 | } 136 | 137 | build_gaussian(src_axi, dst_axi, MOG, vid_axi, MOG, BG_THRESHOLD, LEARNING_RATE, 1); 138 | 139 | //take data out of output stream; 140 | for (int idxRows = 0; idxRows < frame.rows; idxRows++ ){ 141 | for (int idxCols = 0; idxCols < frame.cols; idxCols++ ){ 142 | bool valOut; 143 | while(dst_axi.empty()){}; 144 | valOut = dst_axi.read(); 145 | if (valOut == true){ 146 | out_val[idxRows][idxCols] = 255; 147 | } else { 148 | out_val[idxRows][idxCols] = 0; 149 | } 150 | } 151 | } 152 | 153 | cv::Mat mask(480, 640, CV_8UC1, cv::Scalar(0)); 154 | for (int idxRows = 0; idxRows < frame_yuv.rows; idxRows++ ){ 155 | for (int idxCols = 0; idxCols < frame_yuv.cols; idxCols++ ){ 156 | mask.at(idxRows, idxCols) = out_val[idxRows][idxCols]; 157 | } 158 | } 159 | 160 | // Display the resulting framec 161 | char numstr[21]; // enough to hold all numbers up to 64-bits 162 | sprintf(numstr, "%d", frame_count); 163 | frame_text = base_text + numstr; 164 | cv::putText(frame,frame_text,cv::Point(10,30),cv::FONT_HERSHEY_SIMPLEX,1,cv::Scalar(128)); 165 | //cv::putText(mask,frame_text,cv::Point(10,30),cv::FONT_HERSHEY_SIMPLEX,1,cv::Scalar(128)); 166 | output.write(frame); 167 | output_mask.write(mask); 168 | cv::imshow( "Frame", frame ); 169 | cv::imshow( "Mask", mask ); 170 | 171 | char c=(char)cv::waitKey(5); 172 | if(c==27) 173 | break; 174 | 175 | if (frame_count == 314){ 176 | frame_text = ""; 177 | } 178 | 179 | /*if (frame_count == 350) { 180 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_350_frame_v1.png", frame); 181 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_350_mask_v1.png", mask); 182 | } 183 | if (frame_count == 380) { 184 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_380_frame_v1.png", frame); 185 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_380_mask_v1.png", mask); 186 | } 187 | if (frame_count == 400) { 188 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_400_frame_v1.png", frame); 189 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_400_mask_v1.png", mask); 190 | } 191 | if (frame_count == 420) { 192 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_420_frame_v1.png", frame); 193 | cv::imwrite("C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\short_420_mask_v1.png", mask); 194 | }*/ 195 | } 196 | //myfile.close(); 197 | // When everything done, release the video capture object 198 | cap.release(); 199 | output.release(); 200 | output_mask.release(); 201 | // Closes all the frames 202 | cv::destroyAllWindows(); 203 | 204 | return 0; 205 | } 206 | -------------------------------------------------------------------------------- /PS_code/core_setup/test_mog_main.c: -------------------------------------------------------------------------------- 1 | /****************************************************************************** 2 | * 3 | * Copyright (C) 2009 - 2014 Xilinx, Inc. All rights reserved. 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining a copy 6 | * of this software and associated documentation files (the "Software"), to deal 7 | * in the Software without restriction, including without limitation the rights 8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | * copies of the Software, and to permit persons to whom the Software is 10 | * furnished to do so, subject to the following conditions: 11 | * 12 | * The above copyright notice and this permission notice shall be included in 13 | * all copies or substantial portions of the Software. 14 | * 15 | * Use of the Software is limited solely to applications: 16 | * (a) running on a Xilinx device, or 17 | * (b) that interact with a Xilinx device through a bus or interconnect. 18 | * 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 | * XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 23 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF 24 | * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | * SOFTWARE. 26 | * 27 | * Except as contained in this notice, the name of the Xilinx shall not be used 28 | * in advertising or otherwise to promote the sale, use or other dealings in 29 | * this Software without prior written authorization from Xilinx. 30 | * 31 | ******************************************************************************/ 32 | 33 | /* 34 | * helloworld.c: simple test application 35 | * 36 | * This application configures UART 16550 to baud rate 9600. 37 | * PS7 UART (Zynq) is not initialized by this application, since 38 | * bootrom/bsp configures it to baud rate 115200 39 | * 40 | * ------------------------------------------------ 41 | * | UART TYPE BAUD RATE | 42 | * ------------------------------------------------ 43 | * uartns550 9600 44 | * uartlite Configurable only in HW design 45 | * ps7_uart 115200 (configured by bootrom/bsp) 46 | */ 47 | 48 | #include 49 | #include "platform.h" 50 | #include "xil_printf.h" 51 | #include "xparameters.h" 52 | #include "sleep.h" 53 | #include "xv_tpg.h" 54 | #include "zed_iic.h" 55 | //#include "zed_iic_axi.c" 56 | #include "xvidc.h" 57 | #include "xaxivdma.h" 58 | #include "xbuild_gaussian.h" 59 | #include "xaxidma.h" 60 | 61 | unsigned int srcBuffer = (XPAR_PS7_DDR_0_S_AXI_BASEADDR + 0x100000); 62 | 63 | #define ADV7511_ADDR 0x72 64 | #define CARRIER_HDMI_OUT_CONFIG_LEN (40) 65 | #define DMA_DEV_ID XPAR_AXIDMA_0_DEVICE_ID 66 | #define FRAME_SIZE (307200) 67 | //ADV7511 Configuration 68 | 69 | /* 70 | * The video input format of the ADV7511 is set to YCbCr, 16-bit, 4:2:2, 71 | * ID 1 (separate syncs), Style 1. The video output format is set to 72 | * YCbCr, 16-bit, 4:2:2, HDMI mode. 73 | */ 74 | #define ZC702_HDMI_CONFIG_LEN 16 75 | Xuint8 zc702_hdmi_config[ZC702_HDMI_CONFIG_LEN][3] = 76 | { 77 | {ADV7511_ADDR>>1,0x41, 0x10}, // Power Down Control 78 | // R0x41[ 6] = PowerDown = 0 (power-up) 79 | {ADV7511_ADDR>>1,0xD6, 0xC0}, // HPD Control 80 | // R0xD6[7:6] = HPD Control = 11 (always high) 81 | {ADV7511_ADDR>>1,0x15, 0x01}, // Input YCbCr 4:2:2 with separate syncs 82 | {ADV7511_ADDR>>1,0x16, 0xB9}, // Output format 4:2:2, Input Color Depth = 8 83 | // R0x16[ 7] = Output Video Format = 1 (4:2:2) 84 | // R0x16[5:4] = Input Video Color Depth = 11 (8 bits/color) 85 | // R0x16[3:2] = Input Video Style = 10 (style 1) 86 | // R0x16[ 1] = DDR Input Edge = 0 (falling edge) 87 | // R0x16[ 0] = Output Color Space = 1 (YCbCr) 88 | {ADV7511_ADDR>>1,0x48, 0x08}, // Video Input Justification 89 | // R0x48[8:7] = Video Input Justification = 01 (right justified) 90 | {ADV7511_ADDR>>1,0x55, 0x20}, // Set RGB in AVinfo Frame 91 | // R0x55[6:5] = Output Format = 01 (YCbCr) 92 | {ADV7511_ADDR>>1,0x56, 0x19}, // Aspect Ratio 93 | // R0x56[5:4] = Picture Aspect Ratio = 10 (16:9) 94 | // R0x56[5:4] = Picture Aspect Ratio = 01 (4:3) 95 | // R0x56[3:0] = Active Format Aspect Ratio = 1000 (Same as Aspect Ratio) 96 | {ADV7511_ADDR>>1,0x98, 0x03}, // ADI Recommended Write 97 | {ADV7511_ADDR>>1,0x9A, 0xE0}, // ADI Recommended Write 98 | {ADV7511_ADDR>>1,0x9C, 0x30}, // PLL Filter R1 Value 99 | {ADV7511_ADDR>>1,0x9D, 0x61}, // Set clock divide 100 | {ADV7511_ADDR>>1,0xA2, 0xA4}, // ADI Recommended Write 101 | {ADV7511_ADDR>>1,0xA3, 0xA4}, // ADI Recommended Write 102 | {ADV7511_ADDR>>1,0xAF, 0x06}, // HDMI/DVI Modes 103 | // R0xAF[ 7] = HDCP Enable = 0 (HDCP disabled) 104 | // R0xAF[ 4] = Frame Encryption = 0 (Current frame NOT HDCP encrypted) 105 | // R0xAF[3:2] = 01 (fixed) 106 | // R0xAF[ 1] = HDMI/DVI Mode Select = 2 (HDMI Mode) 107 | {ADV7511_ADDR>>1,0xE0, 0xD0}, // Must be set to 0xD0 for proper operation 108 | {ADV7511_ADDR>>1,0xF9, 0x00} // Fixed I2C Address (This should be set to a non-conflicting I2C address) 109 | }; 110 | 111 | int main () 112 | { 113 | init_platform(); 114 | int ret; 115 | XAxiVdma vdmaPtr; 116 | zed_iic_t hdmi_out_iic; 117 | XBuild_gaussian gaussian_ip; 118 | XV_tpg ptpg; 119 | XAxiDma AxiDma; 120 | print("Hello World\n\r"); 121 | 122 | //set up the DMA which stores the MOG structure 123 | 124 | int Status = XST_SUCCESS; 125 | 126 | //configure tpg 127 | ptpg = setup_tpg(); 128 | 129 | //configure DMA 130 | AxiDma = setup_dma(); 131 | 132 | //init build gaussian core 133 | gaussian_ip = setup_mog(); 134 | 135 | /* Calling the API to configure and start VDMA without frame counter interrupt*/ 136 | ret = run_triple_frame_buffer(&vdmaPtr, XPAR_AXI_VDMA_0_DEVICE_ID, 320, 240, srcBuffer, 100, 0); 137 | if (ret != XST_SUCCESS) { 138 | xil_printf("Transfer of frames failed with error = %d\r\n",ret); 139 | return XST_FAILURE; 140 | } else { 141 | xil_printf("Transfer of frames started \r\n"); 142 | } 143 | 144 | //HDMI output initialisation 145 | ret = zed_iic_axi_init(&hdmi_out_iic,"ZED HDMI I2C Controller", XPAR_AXI_IIC_0_BASEADDR); 146 | if ( !ret ) 147 | { 148 | print( "ERROR : Failed to initialize IIC driver\n\r" ); 149 | return -1; 150 | } 151 | 152 | //HDMI output initialisation 153 | Xuint8 num_bytes; 154 | int i; 155 | 156 | for ( i = 0; i < ZC702_HDMI_CONFIG_LEN; i++ ) 157 | { 158 | //xil_printf( "[ZedBoard HDMI] IIC Write - Device = 0x%02X, Address = 0x%02X, Data = 0x%02X\n\r", carrier_hdmi_out_config[i][0]<<1, carrier_hdmi_out_config[i][1], carrier_hdmi_out_config[i][2] ); 159 | num_bytes = hdmi_out_iic.fpIicWrite( &hdmi_out_iic, zc702_hdmi_config[i][0], zc702_hdmi_config[i][1], &(zc702_hdmi_config[i][2]), 1 ); 160 | printf("Written %d to reg address %d\r\n", num_bytes, zc702_hdmi_config[i][1]); 161 | } 162 | 163 | //check monitor connected and wait to start movement 164 | int sleep_coutn = 0; 165 | u8 monitor_connected = 0; 166 | while(sleep_coutn < 10) 167 | { 168 | print("checking monitor\r\n"); 169 | monitor_connected = check_hdmi_hpd_status(&hdmi_out_iic); 170 | if(monitor_connected) 171 | { 172 | print("HDMI Monitor connected\r\n"); 173 | } 174 | else 175 | { 176 | print("No HDMI Monitor connected / Monitor Disconnected\r\n"); 177 | } 178 | sleep(10); 179 | //} 180 | sleep_coutn +=2; 181 | } 182 | 183 | //3E register used to identify video format 184 | //Xuint8 buffer; 185 | //num_bytes = hdmi_out_iic.fpIicRead( &hdmi_out_iic, ADV7511_ADDR>>1, 0x3E, &buffer ,1); 186 | //print("Detected : %d\r\n", (int)buffer); 187 | start_movement(ptpg); 188 | 189 | cleanup_platform(); 190 | return 0; 191 | } 192 | -------------------------------------------------------------------------------- /HLS_code/BLOB_DETECTION/blob_analysis_tb.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "time.h" 6 | #include 7 | #include "blob_analysis_core.h" 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | //#define INPUT_RLE "C:\\Users\\Tiarnan\\Documents\\Final Year Project\\rle_300_vid2.txt" 14 | //#define INPUT_RLE "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_4\\rle_382.txt" 15 | //#define OG_FRAME "C:\\Users\\Tiarnan\\Documents\\Final Year Project\\frame_450.png" 16 | //#define OG_FRAME "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_3\\2333.png" 17 | //#define OUT_FRAME "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\2_300_identified.png" 18 | //#define OUT_FRAME "C:\\Users\\Tiarnan\\Documents\\Final Year Project\\frame_2333_identified.png" 19 | using namespace std; 20 | 21 | int main(int argc, char* argv[]) { 22 | char rle_path[100]; 23 | char og_frame_path[100]; 24 | char out_frame_path[100]; 25 | //loop over test videos 26 | for (int i = 900; i < 2116; i+=5){ 27 | //vector blob_vector; 28 | blob objects[100]; 29 | blob b0 = {}; 30 | objects[0] = b0; 31 | blob b1 = {}; 32 | objects[1] = b1; 33 | blob b2 = {}; 34 | objects[2] = b2; 35 | blob b3 = {}; 36 | objects[3] = b3; 37 | blob b4 = {}; 38 | objects[4] = b4; 39 | blob b5 = {}; 40 | objects[5] = b5; 41 | blob b6 = {}; 42 | objects[6] = b6; 43 | blob b7 = {}; 44 | objects[7] = b7; 45 | blob b8 = {}; 46 | objects[8] = b8; 47 | blob b9 = {}; 48 | objects[9] = b9; 49 | blob b10 = {}; 50 | objects[10] = b10; 51 | blob b11 = {}; 52 | objects[11] = b11; 53 | blob b12 = {}; 54 | objects[12] = b12; 55 | blob b13 = {}; 56 | objects[13] = b13; 57 | blob b14 = {}; 58 | objects[14] = b14; 59 | blob b15 = {}; 60 | objects[15] = b15; 61 | blob b16 = {}; 62 | objects[16] = b16; 63 | blob b17 = {}; 64 | objects[17] = b17; 65 | blob b18 = {}; 66 | objects[18] = b18; 67 | blob b19 = {}; 68 | objects[19] = b19; 69 | blob b20 = {}; 70 | objects[20] = b20; 71 | blob b21 = {}; 72 | objects[21] = b21; 73 | blob b22 = {}; 74 | objects[22] = b22; 75 | blob b23 = {}; 76 | objects[23] = b23; 77 | blob b24 = {}; 78 | objects[24] = b24; 79 | blob b25 = {}; 80 | objects[25] = b25; 81 | blob b26 = {}; 82 | objects[26] = b26; 83 | blob b27 = {}; 84 | objects[27] = b27; 85 | blob b28 = {}; 86 | objects[28] = b28; 87 | blob b29 = {}; 88 | objects[29] = b29; 89 | blob b30 = {}; 90 | objects[30] = b30; 91 | blob b31 = {}; 92 | objects[31] = b31; 93 | blob b32 = {}; 94 | objects[32] = b32; 95 | blob b33 = {}; 96 | objects[33] = b33; 97 | blob b34 = {}; 98 | objects[34] = b34; 99 | blob b35 = {}; 100 | objects[35] = b35; 101 | blob b36 = {}; 102 | objects[36] = b36; 103 | blob b37 = {}; 104 | objects[37] = b37; 105 | blob b38 = {}; 106 | objects[38] = b38; 107 | blob b39 = {}; 108 | objects[39] = b39; 109 | blob b40 = {}; 110 | objects[40] = b40; 111 | blob b41 = {}; 112 | objects[41] = b41; 113 | blob b42 = {}; 114 | objects[42] = b42; 115 | blob b43 = {}; 116 | objects[43] = b43; 117 | blob b44 = {}; 118 | objects[44] = b44; 119 | blob b45 = {}; 120 | objects[45] = b45; 121 | blob b46 = {}; 122 | objects[46] = b46; 123 | blob b47 = {}; 124 | objects[47] = b47; 125 | blob b48 = {}; 126 | objects[48] = b48; 127 | blob b49 = {}; 128 | objects[49] = b49; 129 | blob b50 = {}; 130 | objects[50] = b50; 131 | blob b51 = {}; 132 | objects[51] = b51; 133 | blob b52 = {}; 134 | objects[52] = b52; 135 | blob b53 = {}; 136 | objects[53] = b53; 137 | blob b54 = {}; 138 | objects[54] = b54; 139 | blob b55 = {}; 140 | objects[55] = b55; 141 | blob b56 = {}; 142 | objects[56] = b56; 143 | blob b57 = {}; 144 | objects[57] = b57; 145 | blob b58 = {}; 146 | objects[58] = b58; 147 | blob b59 = {}; 148 | objects[59] = b59; 149 | blob b60 = {}; 150 | objects[60] = b60; 151 | blob b61 = {}; 152 | objects[61] = b61; 153 | blob b62 = {}; 154 | objects[62] = b62; 155 | blob b63 = {}; 156 | objects[63] = b63; 157 | blob b64 = {}; 158 | objects[64] = b64; 159 | blob b65 = {}; 160 | objects[65] = b65; 161 | blob b66 = {}; 162 | objects[66] = b66; 163 | blob b67 = {}; 164 | objects[67] = b67; 165 | blob b68 = {}; 166 | objects[68] = b68; 167 | blob b69 = {}; 168 | objects[69] = b69; 169 | blob b70 = {}; 170 | objects[70] = b70; 171 | blob b71 = {}; 172 | objects[71] = b71; 173 | blob b72 = {}; 174 | objects[72] = b72; 175 | blob b73 = {}; 176 | objects[73] = b73; 177 | blob b74 = {}; 178 | objects[74] = b74; 179 | blob b75 = {}; 180 | objects[75] = b75; 181 | blob b76 = {}; 182 | objects[76] = b76; 183 | blob b77 = {}; 184 | objects[77] = b77; 185 | blob b78 = {}; 186 | objects[78] = b78; 187 | blob b79 = {}; 188 | objects[79] = b79; 189 | blob b80 = {}; 190 | objects[80] = b80; 191 | blob b81 = {}; 192 | objects[81] = b81; 193 | blob b82 = {}; 194 | objects[82] = b82; 195 | blob b83 = {}; 196 | objects[83] = b83; 197 | blob b84 = {}; 198 | objects[84] = b84; 199 | blob b85 = {}; 200 | objects[85] = b85; 201 | blob b86 = {}; 202 | objects[86] = b86; 203 | blob b87 = {}; 204 | objects[87] = b87; 205 | blob b88 = {}; 206 | objects[88] = b88; 207 | blob b89 = {}; 208 | objects[89] = b89; 209 | blob b90 = {}; 210 | objects[90] = b90; 211 | blob b91 = {}; 212 | objects[91] = b91; 213 | blob b92 = {}; 214 | objects[92] = b92; 215 | blob b93 = {}; 216 | objects[93] = b93; 217 | blob b94 = {}; 218 | objects[94] = b94; 219 | blob b95 = {}; 220 | objects[95] = b95; 221 | blob b96 = {}; 222 | objects[96] = b96; 223 | blob b97 = {}; 224 | objects[97] = b97; 225 | blob b98 = {}; 226 | objects[98] = b98; 227 | blob b99 = {}; 228 | objects[99] = b99; 229 | blob_port objects_port[100]; 230 | cout << "blob size : " << sizeof(objects[0]) << endl; 231 | //cout << "blob port size : " << sizeof(objects_port[0]) << endl; 232 | for (int j = 0; j < 100; j++){ 233 | memcpy(&objects_port[j], &objects[j], sizeof(blob)); 234 | } 235 | RLE_AXI_STREAM rle_stream; 236 | rle_run run; 237 | snprintf(rle_path, sizeof(rle_path), "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_1_var1\\rle_%d.txt", i); 238 | snprintf(og_frame_path, sizeof(og_frame_path), "C:\\Users\\Tiarnan\\Pictures\\bg_core_output\\test_vid_1_var1\\%d.png", i); 239 | snprintf(out_frame_path, sizeof(out_frame_path), "C:\\Users\\Tiarnan\\Documents\\Final Year Project\\test_vid_1_id_var1\\%d.png", i); 240 | std::ifstream rlefile(rle_path); 241 | string line; 242 | if (rlefile.is_open()){ 243 | while ( getline (rlefile,line) ){ 244 | std::stringstream ss(line); 245 | int j; 246 | int count = 0; 247 | while (ss >> j) 248 | { 249 | if (ss.peek() == ','){ 250 | ss.ignore(); 251 | } 252 | if (count == 0){ 253 | run.data.s = j; 254 | } else if (count == 1){ 255 | run.data.e = j; 256 | } else if (count == 2){ 257 | run.data._last_run = j; 258 | } else if (count == 3){ 259 | run.data.y = j; 260 | } 261 | count = count +1; 262 | } 263 | run.data.no = 0; 264 | rle_stream << run; 265 | } 266 | rlefile.close(); 267 | } 268 | else cout << "Could not open file"; 269 | 270 | blob_analysis(rle_stream, objects_port); 271 | 272 | for (int j=0; j<100; j++){ 273 | //masks 274 | unsigned short *start = (unsigned short *)&objects_port[j]; 275 | unsigned char id = (unsigned char)*(start); 276 | unsigned short cp_x = *(start + 1); 277 | unsigned short cp_y = *(start + 2); 278 | unsigned short area = *(start + 3); 279 | unsigned short max_x = *(start + 4); 280 | unsigned short min_x = *(start + 5); 281 | unsigned short max_y = *(start + 6); 282 | unsigned short min_y = *(start + 7); 283 | //coord coord_mask = 0x1FF; 284 | //unsigned char uchar_mask = 0xFF; 285 | //short short_mask = 0xFFFF; 286 | //unsigned char id = objects_port[i] & uchar_mask; 287 | objects[j].id = id; 288 | //cout << "id : " << id << endl; 289 | //coord cp_x = (objects_port[i] >> 16) & coord_mask; 290 | //cout << "cp_x : " << cp_x << endl; 291 | //coord cp_y = (objects_port[i] >> 32) & coord_mask; 292 | //cout << "cp_y : " << cp_y << endl; 293 | objects[j].cp.x = cp_x; 294 | objects[j].cp.y = cp_y; 295 | //short area = (objects_port[i] >> 48) & short_mask; 296 | //cout << "area : " << area << endl; 297 | objects[j].area = area; 298 | //coord max_x = (objects_port[i] >> 64) & coord_mask; 299 | //cout << "max x : " << max_x << endl; 300 | //coord min_x = (objects_port[i] >> 80) & coord_mask; 301 | //cout << "minx : " << min_x << endl; 302 | //coord max_y = (objects_port[i] >> 96) & coord_mask; 303 | //cout << "max y : " << max_y << endl; 304 | //coord min_y = (objects_port[i] >> 112) & coord_mask; 305 | //cout << "min_y : " << min_y << endl; 306 | objects[j].max_x = max_x; 307 | objects[j].min_x = min_x; 308 | objects[j].max_y = max_y; 309 | objects[j].min_y = min_y; 310 | } 311 | 312 | cv::Mat frame = cvLoadImage(og_frame_path, 1); 313 | 314 | printf("\n%d ", i); 315 | for (int j = 0 ; j < 100; j++){ 316 | if (objects[j].id > 0 && objects[j].area > 350 && objects[j].area < 5000){ 317 | cv::Point tl; 318 | cv::Point br; 319 | tl.x = (int)objects[j].min_x; 320 | tl.y = (int)objects[j].min_y; 321 | br.x = (int)objects[j].max_x; 322 | br.y = (int)objects[j].max_y; 323 | //if w > 2.5*h 324 | if (((int)objects[j].max_x - (int)objects[j].min_x) > 2.5*((int)objects[j].max_y - (int)objects[j].min_y)){ 325 | continue; 326 | } 327 | printf("%d ", (int)objects[j].min_x ); 328 | printf("%d ",(int)objects[j].min_y ); 329 | printf("%d ",(int)objects[j].max_x ); 330 | printf("%d ",(int)objects[j].max_y ); 331 | cv::rectangle(frame, tl, br, cv::Scalar(0, 255, 0), 2); 332 | } 333 | } 334 | 335 | //cv::imshow("edited", frame); 336 | cv::imwrite(out_frame_path, frame); 337 | } 338 | cv::waitKey(0); 339 | return 0; 340 | } 341 | -------------------------------------------------------------------------------- /HLS_code/MOG/build_gaussian_core.cpp: -------------------------------------------------------------------------------- 1 | #include "build_gaussian_core.h" 2 | #include 3 | 4 | /*Types defined in build_gaussian_core.h*/ 5 | 6 | using namespace hls; 7 | 8 | 9 | /* 10 | * Top function - Two stages: building MOG and Background Subtraction 11 | * MOG:Receives YUV pixel in AXI Stream and extracts 12 | * luminosity value. If this is the first frame create a new gaussian 13 | * for the pixel with weight one and mean = pixel val. Otherwise find 14 | * the gaussians that match the pixel and update these. create a new gaussian 15 | * if suitable one does not exist and remove the lowest weighted one. 16 | * Background Subtraction: Order models wrt weight/sd - Backgrounds should occur 17 | * frequently and without much change. Chose fist B models as bg where sum of 18 | * weight is less than T. If pixel matches at least one bg gaussian it is bg. 19 | * stream_in - frame of YUV pixels in frame 20 | * stream_out - mask for input to the morphological core 21 | * use bool for testing and GRAY_AXI_STREAM for real implementation. 22 | * MOG - A max of five gaussians and control variables 23 | * vid_out - YUV stream of black pr white pixels 24 | * bg_thresh - change the bg threshold through AXI Lite 25 | * learning_rate - change the learning_rate through AXI Lite 26 | * min_var - change the min variance a gaussian can have through AXI Lite 27 | * 28 | */ 29 | //void build_gaussian(hls::stream &stream_in , GRAY_AXI_STREAM &stream_out , pixel_k_gaussian MOG[640*480], hls::stream &vid_out, pixel_k_gaussian MOG_out[640*480], weight_vals bg_thresh, weight_vals learning_rate, unsigned char min_var){ 30 | void build_gaussian(hls::stream &stream_in , hls::stream &stream_out , pixel_k_gaussian MOG[640*480], hls::stream &vid_out, pixel_k_gaussian MOG_out[640*480], weight_vals bg_thresh, weight_vals learning_rate, unsigned char min_var){ 31 | #pragma HLS DATA_PACK variable=MOG struct_level 32 | #pragma HLS DATA_PACK variable=MOG_out struct_level 33 | //#pragma HLS DATA_PACK variable=MOG->data 34 | //#pragma HLS DATA_PACK variable=MOG_out->data 35 | //#pragma HLS INTERFACE axis port=MOG_out //used for testing 36 | //#pragma HLS INTERFACE axis port=MOG //used for testing 37 | #pragma HLS INTERFACE s_axilite port=bg_thresh bundle=CRTL_BUS 38 | #pragma HLS INTERFACE s_axilite port=learning_rate bundle=CRTL_BUS 39 | #pragma HLS INTERFACE s_axilite port=min_var bundle=CRTL_BUS 40 | #pragma HLS INTERFACE m_axi depth=800 port=MOG offset=slave bundle=MOG_MASTER 41 | #pragma HLS INTERFACE m_axi depth=800 port=MOG_out offset=slave bundle=MOG_MASTER 42 | #pragma HLS INTERFACE axis port=stream_in 43 | #pragma HLS INTERFACE axis port=stream_out 44 | #pragma HLS INTERFACE axis port=vid_out 45 | #pragma HLS INTERFACE s_axilite port=return bundle=CRTL_BUS 46 | static int frame_count = 0; 47 | calc_t BG_THRESH = bg_thresh; 48 | frame_count = frame_count + 1; 49 | //#pragma HLS DATAFLOW 50 | const int pixels = 320 * 240; 51 | for (int idx = 0; idx < pixels; idx++){ 52 | #pragma HLS PIPELINE II=10 53 | bool p_is_bg = false; 54 | unsigned char match = 3; 55 | YUV_pixel pix = stream_in.read(); 56 | unsigned char y = (pix.data & 0xFF); 57 | YUV_pixel out_p; 58 | out_p.dest = pix.dest; 59 | out_p.last = pix.last; 60 | out_p.user = pix.user; 61 | out_p.id = pix.id; 62 | out_p.strb = pix.strb; 63 | out_p.keep = pix.keep; 64 | out_p.data = 0x0000; 65 | pixel_k_gaussian pg = MOG[idx]; //= *((pixel_k_gaussian*)MOG + idx); 66 | //std::cout << "Size of pkg : " << sizeof(pixel_k_gaussian) << std::endl; 67 | //unsigned char * pg_ptr = (unsigned char *)&pg; 68 | //for (int j = 0; j < sizeof(pixel_k_gaussian); j ++){ 69 | // unsigned char byte = (unsigned char)*(MOG + sizeof(pixel_k_gaussian)*idx + j); 70 | // *(pg_ptr + j) = byte; 71 | //} 72 | //memcpy(&pg, MOG + sizeof(pixel_k_gaussian)*idx, sizeof(pixel_k_gaussian)); 73 | //order gaussians by fitness 74 | sort_gaussians(pg); 75 | // check pixel for match with a gaussian - changhe to going up and break 76 | for (int g = 0; g <= 2; g++ ){ 77 | //#pragma HLS UNROLL 78 | int m = find_match(pg.data.k_lum[g], y); 79 | //int m = find_match(pg.k_lum[g], y); 80 | match = (m == 1) ? hls::min(match, (unsigned char)g) : match; 81 | } 82 | //update matched gaussian with best fitness 83 | if (match < 3){ 84 | for (int g = 0; g <= 2; g++ ){ 85 | //#pragma HLS UNROLL 86 | if (match == g){ 87 | //update_gaussian(pg.k_lum[g],y, true, learning_rate, min_var, frame_count); 88 | update_gaussian(pg.data.k_lum[g],y, true, learning_rate, min_var, frame_count); 89 | } else { 90 | update_gaussian(pg.data.k_lum[g],y, false, learning_rate, min_var, frame_count); 91 | //update_gaussian(pg.k_lum[g],y, false, learning_rate, min_var, frame_count); 92 | } 93 | } 94 | } else{ 95 | //create a gaussian to model y and remove the lowest weighted gaussian, idx = 2 96 | //calc new weight 97 | calc_t msumtot = calc_t(pg.data.k_lum[0].matchsum) + calc_t(pg.data.k_lum[1].matchsum); 98 | //calc_t msumtot = calc_t(pg.k_lum[0].matchsum) + calc_t(pg.k_lum[1].matchsum); 99 | weight_vals w; 100 | if (msumtot == 0) { 101 | w = weight_vals (1); 102 | } else { 103 | w = weight_vals(calc_t(1)/msumtot); 104 | } 105 | lum_gaussian l = create_new_gaussian(y, var_vals (30), w); 106 | //pg.k_lum[2] = l; 107 | pg.data.k_lum[2] = l; 108 | } 109 | normalise_weights(pg); 110 | if (frame_count > 50 ){ 111 | // start background subtraction 112 | sort_gaussians(pg); 113 | calc_t sum = 0.0; 114 | //int g = 0; 115 | for (ap_uint<4> g = 0; g < 3; g ++ ){ 116 | //while (sum < BG_THRESH){ 117 | sum = sum + pg.data.k_lum[g].weight; 118 | //sum = sum + pg.k_lum[g].weight; 119 | //if (find_match(pg.k_lum[g], y )){ 120 | if (find_match(pg.data.k_lum[g], y )){ 121 | p_is_bg = true; 122 | out_p.data = 0x8000; 123 | break; 124 | } 125 | if (sum > BG_THRESH){ 126 | break; 127 | } 128 | } 129 | stream_out.write(!p_is_bg); 130 | //gp.data = !p_is_bg; 131 | } else { 132 | stream_out.write(false); 133 | } 134 | vid_out.write(out_p); 135 | //stream_out.write(gp); 136 | //pg.last = pg.last + 1; 137 | //if(idx == 1){ 138 | // std::cout << "last : " << (int)pg.last <= l.mean) { 162 | if ((y - l.mean) < calc_t(sqrt(l.var)*calc_t(2.5))){ 163 | if (l.matchsum > 0) { 164 | return 1; 165 | } else { 166 | return 0; 167 | } 168 | } else { 169 | return 0; 170 | } 171 | } 172 | else { 173 | if ((l.mean - y) < calc_t(sqrt(l.var)*calc_t(2.5))){ 174 | if (l.matchsum > 0) { 175 | return 1; 176 | } else { 177 | return 0; 178 | } 179 | } else { 180 | return 0; 181 | } 182 | } 183 | } 184 | 185 | /* 186 | * This function a gaussian, mean, var and weight based on 187 | * previous values. 188 | * l - input gaussian of pixel. This function is 189 | * called in a loop so all gaussians are checked. 190 | * y - luminosity value of current pixel 191 | * matched - is this the gaussian that the current pixel matches 192 | */ 193 | void update_gaussian(lum_gaussian &l, unsigned char y, bool matched, weight_vals learn_rate, unsigned char min_var, int frame_count){ 194 | calc_t learning_rate = frame_count < 50 ? calc_t (0.5/frame_count) : (calc_t)learn_rate; 195 | weight_vals weight_out; 196 | weight_vals weight_in = l.weight; 197 | if (matched){ 198 | mean_vals mean_out; 199 | mean_vals mean_in = l.mean; 200 | var_vals var_out; 201 | var_vals var_in = l.var; 202 | unsigned char matchsum_in = l.matchsum; 203 | unsigned char matchsum_out = (matchsum_in == 255) ? matchsum_in : matchsum_in + 1; 204 | calc_t mean_var_lr = 0.5; 205 | //make sure outdated gaussians get updated quicklly but heavily weighted arent overly affected by outliers 206 | if (weight_in != 0){ 207 | mean_var_lr = calc_t (learning_rate/weight_in) > mean_var_lr ? mean_var_lr : calc_t (learning_rate/weight_in); 208 | } else { 209 | mean_var_lr = 0; 210 | } 211 | //update mean 212 | mean_out = mean_in + mean_var_lr * (y - mean_in); 213 | //mean_out = ((mean_in * num_frames) + y ) / num_frames + 1; 214 | //update sd 215 | var_out = var_in + mean_var_lr * (((y-mean_in)*(y-mean_in)) - var_in); 216 | if (var_out < min_var){ 217 | var_out = min_var; 218 | } 219 | //calc_t Sn_prev = (sd_in * sd_in) * num_frames; 220 | //calc_t Sn_curr = Sn_prev + (y - mean_in)*(y - mean_out); 221 | //sd_out = hls::sqrt((calc_t)(Sn_curr/num_frames+1)); 222 | //update weight 223 | weight_out = weight_in - learning_rate * weight_in + learning_rate; 224 | //weight_out = (gaus_val)(1-LEARNING_RATE)*weight_in + LEARNING_RATE; 225 | //update l 226 | l.mean = mean_out; 227 | l.var = var_out; 228 | l.matchsum = matchsum_out; 229 | } else { 230 | weight_out = weight_in - learning_rate * weight_in; 231 | } 232 | l.weight = weight_out; 233 | } 234 | 235 | /* 236 | * This function sorts the gaussians in order wrt w/sd 237 | * highest ratio is in position 0 238 | * Background models should have a high weight and low variance 239 | * pg - contains all gaussians for a given pixel 240 | */ 241 | void sort_gaussians(pixel_k_gaussian &pg){ 242 | //bool swap = true; 243 | //while ( swap ){ 244 | //swap = false; 245 | for (int g = 0; g < 2; g ++ ){ 246 | lum_gaussian l_curr = pg.data.k_lum[g]; 247 | lum_gaussian l_next = pg.data.k_lum[g+1]; 248 | //lum_gaussian l_curr = pg.k_lum[g]; 249 | //lum_gaussian l_next = pg.k_lum[g+1]; 250 | calc_t ratio_next; 251 | calc_t ratio_curr; 252 | if (l_curr.matchsum > 0){ 253 | if (l_curr.var != 0){ 254 | ratio_curr = calc_t(calc_t(l_curr.weight)/calc_t(hls::sqrt(l_curr.var))); 255 | } else { 256 | ratio_curr = calc_t(l_curr.weight); 257 | } 258 | } else { 259 | ratio_curr = calc_t(0); 260 | } 261 | if (l_next.matchsum > 0){ 262 | if (l_next.var != 0){ 263 | ratio_next = calc_t(calc_t(l_next.weight)/calc_t(hls::sqrt(l_next.var))); 264 | } else { 265 | ratio_next = calc_t(l_next.weight); 266 | } 267 | } else { 268 | ratio_next = calc_t(0); 269 | } 270 | if (ratio_next > ratio_curr){ 271 | //swap them 272 | //swap = true; 273 | lum_gaussian temp = l_next; 274 | pg.data.k_lum[g+1] = pg.data.k_lum[g]; 275 | //pg.k_lum[g+1] = pg.k_lum[g]; 276 | pg.data.k_lum[g] = temp; 277 | //pg.k_lum[g] = temp; 278 | } 279 | } 280 | for (int g = 0; g < 2; g ++ ){ 281 | lum_gaussian l_curr = pg.data.k_lum[g]; 282 | //lum_gaussian l_curr = pg.k_lum[g]; 283 | lum_gaussian l_next = pg.data.k_lum[g+1]; 284 | //lum_gaussian l_next = pg.k_lum[g+1]; 285 | calc_t ratio_next; 286 | calc_t ratio_curr; 287 | if (l_curr.matchsum > 0){ 288 | if (l_curr.var != 0){ 289 | ratio_curr = calc_t(calc_t(l_curr.weight)/calc_t(hls::sqrt(l_curr.var))); 290 | } else { 291 | ratio_curr = calc_t(l_curr.weight); 292 | } 293 | } else { 294 | ratio_curr = calc_t(0); 295 | } 296 | if (l_next.matchsum > 0){ 297 | if (l_next.var != 0){ 298 | ratio_next = calc_t(calc_t(l_next.weight)/calc_t(hls::sqrt(l_next.var))); 299 | } else { 300 | ratio_next = calc_t(l_next.weight); 301 | } 302 | } else { 303 | ratio_next = calc_t(0); 304 | } 305 | if (ratio_next > ratio_curr){ 306 | //swap them 307 | //swap = true; 308 | lum_gaussian temp = l_next; 309 | pg.data.k_lum[g+1] = pg.data.k_lum[g]; 310 | //pg.k_lum[g+1] = pg.k_lum[g]; 311 | pg.data.k_lum[g] = temp; 312 | //pg.k_lum[g] = temp; 313 | } 314 | } 315 | } 316 | 317 | 318 | /* 319 | * This function normalises the weights to a sum of one 320 | * pg - contains all gaussians for a given pixel 321 | */ 322 | void normalise_weights(pixel_k_gaussian &pg){ 323 | //calc_t sum = pg.k_lum[0].weight + pg.k_lum[1].weight + pg.k_lum[2].weight; 324 | calc_t sum = pg.data.k_lum[0].weight + pg.data.k_lum[1].weight + pg.data.k_lum[2].weight; 325 | for (int g = 0; g < 3; g ++ ){ 326 | pg.data.k_lum[g].weight = (weight_vals)(pg.data.k_lum[g].weight / sum); 327 | //pg.k_lum[g].weight = (weight_vals)(pg.k_lum[g].weight / sum); 328 | } 329 | } 330 | 331 | 332 | /* 333 | * This function creates a new gaussian based on params 334 | * y - pixel luiminance needs cast to mean_sd_vals 335 | * var - variance inital value 336 | * weight - weight inital value 337 | * return val - lum_gaussian struct 338 | */ 339 | lum_gaussian create_new_gaussian(unsigned char y, var_vals var, weight_vals weight){ 340 | lum_gaussian l; 341 | l.mean = mean_vals (y); 342 | l.var = var; 343 | l.weight = weight; 344 | l.matchsum = 1; 345 | return l; 346 | } 347 | -------------------------------------------------------------------------------- /PS_code/core_setup/zed_iic_axi.c: -------------------------------------------------------------------------------- 1 | //---------------------------------------------------------------- 2 | // _____ 3 | // * * 4 | // *____ *____ 5 | // * *===* *==* 6 | // *___*===*___** AVNET 7 | // *======* 8 | // *====* 9 | //--------------------------------------------------------------- 10 | // 11 | // This design is the property of Avnet. Publication of this 12 | // design is not authorized without written consent from Avnet. 13 | // 14 | // Please direct any questions to: technical.support@avnet.com 15 | // 16 | // Disclaimer: 17 | // Avnet, Inc. makes no warranty for the use of this code or design. 18 | // This code is provided "As Is". Avnet, Inc assumes no responsibility for 19 | // any errors, which may appear in this code, nor does it make a commitment 20 | // to update the information contained herein. Avnet, Inc specifically 21 | // disclaims any implied warranties of fitness for a particular purpose. 22 | // Copyright(c) 2013 Avnet, Inc. 23 | // All rights reserved. 24 | // 25 | //---------------------------------------------------------------- 26 | // 27 | // Create Date: Jul 01, 2013 28 | // Design Name: ZED-IIC 29 | // Module Name: zed_iic_axi.c 30 | // Project Name: ZED-IIC 31 | // Target Devices: Zynq 32 | // Avnet Boards: ZedBoard 33 | // 34 | // Tool versions: ISE 14.6 35 | // 36 | // Description: IIC Hardware Abstraction Layer 37 | // => AXI_IIC implementation 38 | // 39 | // Dependencies: 40 | // 41 | // Revision: Jul 01, 2013: 1.00 Initial version 42 | // 43 | //---------------------------------------------------------------- 44 | 45 | #include 46 | #include 47 | #include 48 | 49 | // Located in: microblaze_0/include/ 50 | #include "xparameters.h" 51 | #include "xstatus.h" 52 | 53 | #include "zed_iic.h" 54 | 55 | #if defined(XPAR_XIIC_NUM_INSTANCES) 56 | 57 | #include "xiic.h" 58 | #include "xil_io.h" 59 | 60 | /* 61 | * The page size determines how much data should be written at a time. 62 | * The write function should be called with this as a maximum byte count. 63 | */ 64 | #define PAGE_SIZE 2 65 | 66 | #define ADV7511_HPD_CTRL_MASK 0x40 // bit 6 = state of HPD 67 | #define ADV7511_HDP_REG_ADDR 0x42 68 | 69 | //////////////////////////////////////////////////////////////////////// 70 | // Context Data 71 | //////////////////////////////////////////////////////////////////////// 72 | 73 | struct struct_zed_iic_axi_t 74 | { 75 | Xuint32 CoreAddress; 76 | }; 77 | typedef struct struct_zed_iic_axi_t zed_iic_axi_t; 78 | 79 | //////////////////////////////////////////////////////////////////////// 80 | // I2C Functions 81 | //////////////////////////////////////////////////////////////////////// 82 | 83 | // Forward declarations 84 | int zed_iic_axi_GpoRead ( zed_iic_t *pIIC, Xuint32 *pGpioData ); 85 | int zed_iic_axi_GpoWrite( zed_iic_t *pIIC, Xuint32 GpioData ); 86 | int zed_iic_axi_IicWrite( zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint8 RegAddress, 87 | Xuint8 *pBuffer, Xuint8 ByteCount); 88 | int zed_iic_axi_IicRead ( zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint8 RegAddress, 89 | Xuint8 *pBuffer, Xuint8 ByteCount); 90 | int zed_iic_axi_IicEWrite( zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint16 RegAddress, 91 | Xuint8 *pBuffer, Xuint8 ByteCount); 92 | int zed_iic_axi_IicERead ( zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint16 RegAddress, 93 | Xuint8 *pBuffer, Xuint8 ByteCount); 94 | 95 | /****************************************************************************** 96 | * This function initializes the IIC controller. 97 | * 98 | * @param CoreAddress contains the address of the IIC core. 99 | * 100 | * @return If successfull, returns 1. Otherwise, returns 0. 101 | * 102 | * @note None. 103 | * 104 | ******************************************************************************/ 105 | int zed_iic_axi_init( zed_iic_t *pIIC, char szName[], Xuint32 CoreAddress ) 106 | { 107 | XStatus Status; 108 | Xuint8 StatusReg; 109 | Xuint32 timeout = 10000; 110 | 111 | //zed_iic_axi_t *pContext = (zed_iic_axi_t *)malloc( sizeof(zed_iic_axi_t) ); 112 | //if ( pContext == NULL ) 113 | //{ 114 | // print("Failed to allocate context data for FMC-IIC-AXI implementation\n\r" ); 115 | // return 0; 116 | //} 117 | zed_iic_axi_t *pContext = (zed_iic_axi_t *) (pIIC->ContextBuffer); 118 | if ( sizeof(zed_iic_axi_t) > ZED_IIC_CONTEXT_BUFFER_SIZE ) 119 | { 120 | xil_printf("ZED_IIC_CONTEXT_BUFFER_SIZE is not large enough for fic_iic_xps_t structure (increase to %d)\n\r", sizeof(zed_iic_axi_t) ); 121 | return 0; 122 | } 123 | 124 | pContext->CoreAddress = CoreAddress; 125 | 126 | /* 127 | * Initialize the IIC Core. 128 | */ 129 | Status = XIic_DynInit(pContext->CoreAddress); 130 | if(Status != XST_SUCCESS) 131 | { 132 | print("Failed to initialize I2C chain\n\r" ); 133 | return 0; 134 | } 135 | 136 | /* 137 | * Check to see if the core was initialized successfully 138 | */ 139 | do 140 | { 141 | StatusReg = Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET); 142 | //print("[%s] Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET) => 0x%02X\n\r", pContext->szName, StatusReg ); 143 | StatusReg = StatusReg & (XIIC_SR_RX_FIFO_EMPTY_MASK | 144 | XIIC_SR_TX_FIFO_EMPTY_MASK | 145 | XIIC_SR_BUS_BUSY_MASK); 146 | } while ( (timeout-- > 0) && 147 | (StatusReg != (XIIC_SR_RX_FIFO_EMPTY_MASK | XIIC_SR_TX_FIFO_EMPTY_MASK)) ); 148 | 149 | /* 150 | * Initialize the IIC structure 151 | */ 152 | pIIC->uVersion = 1; 153 | strcpy( pIIC->szName, szName ); 154 | pIIC->pContext = (void *)pContext; 155 | pIIC->fpIicRead = &zed_iic_axi_IicRead; 156 | pIIC->fpIicWrite = &zed_iic_axi_IicWrite; 157 | 158 | return 1; 159 | } 160 | 161 | /***** This function checks whether a monitor is connected ***** 162 | * 163 | * 164 | * 165 | * 166 | * 167 | */ 168 | int check_hdmi_hpd_status(zed_iic_t *IicPs) 169 | { 170 | int num_bytes; 171 | u8 data = 0x00; 172 | 173 | // 174 | num_bytes = zed_iic_axi_IicRead(IicPs, 0x39, ADV7511_HDP_REG_ADDR, &data, 1); 175 | if (num_bytes < 1) { 176 | return XST_FAILURE; 177 | } 178 | 179 | if((data & ADV7511_HPD_CTRL_MASK) == ADV7511_HPD_CTRL_MASK) { 180 | // Monitor Connected 181 | return 1; 182 | } 183 | else 184 | { 185 | // Monitor not connected 186 | return 0; 187 | } 188 | } 189 | 190 | /****************************************************************************** 191 | * This function writes a buffer of bytes to the IIC chip. 192 | * 193 | * @param ChipAddress contains the address of the chip. 194 | * @param RegAddress contains the address of the register to write to. 195 | * @param pBuffer contains the address of the data to write. 196 | * @param ByteCount contains the number of bytes in the buffer to be written. 197 | * Note that this should not exceed the page size as noted by the 198 | * constant PAGE_SIZE. 199 | * 200 | * @return The number of bytes written, a value less than that which was 201 | * specified as an input indicates an error. 202 | * 203 | * @note None. 204 | * 205 | ******************************************************************************/ 206 | int zed_iic_axi_IicWrite(zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint8 RegAddress, 207 | Xuint8 *pBuffer, Xuint8 ByteCount) 208 | { 209 | Xuint8 SentByteCount; 210 | Xuint8 WriteBuffer[PAGE_SIZE + 1]; 211 | Xuint8 Index; 212 | Xuint8 StatusReg; 213 | zed_iic_axi_t *pContext = (zed_iic_axi_t *)(pIIC->pContext); 214 | 215 | #if 1 216 | // Make sure all the Fifo's are cleared and Bus is Not busy. 217 | do 218 | { 219 | StatusReg = Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET); 220 | //print("[%s] Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET) => 0x%02X\n\r", pContext->szName, StatusReg ); 221 | StatusReg = StatusReg & (XIIC_SR_RX_FIFO_EMPTY_MASK | 222 | XIIC_SR_TX_FIFO_EMPTY_MASK | 223 | XIIC_SR_BUS_BUSY_MASK); 224 | } while (StatusReg != (XIIC_SR_RX_FIFO_EMPTY_MASK | 225 | XIIC_SR_TX_FIFO_EMPTY_MASK)); 226 | #endif 227 | 228 | /* 229 | * A temporary write buffer must be used which contains both the address 230 | * and the data to be written, put the address in first 231 | */ 232 | WriteBuffer[0] = RegAddress; 233 | 234 | /* 235 | * Put the data in the write buffer following the address. 236 | */ 237 | for (Index = 0; Index < ByteCount; Index++) 238 | { 239 | WriteBuffer[Index + 1] = pBuffer[Index]; 240 | } 241 | 242 | /* 243 | * Write data at the specified address. 244 | */ 245 | SentByteCount = XIic_DynSend(pContext->CoreAddress, ChipAddress, WriteBuffer, 246 | ByteCount + 1, XIIC_STOP); 247 | if (SentByteCount < 1) { SentByteCount = 1; } 248 | 249 | // Return the number of bytes written. 250 | return SentByteCount - 1; 251 | } 252 | 253 | 254 | /****************************************************************************** 255 | * This function reads a number of bytes from an IIC chip into a 256 | * specified buffer. 257 | * 258 | * @param ChipAddress contains the address of the IIC core. 259 | * @param RegAddress contains the address of the register to write to. 260 | * @param pBuffer contains the address of the data buffer to be filled. 261 | * @param ByteCount contains the number of bytes in the buffer to be read. 262 | * This value is constrained by the page size of the device such 263 | * that up to 64K may be read in one call. 264 | * 265 | * @return The number of bytes read. A value less than the specified input 266 | * value indicates an error. 267 | * 268 | * @note None. 269 | * 270 | ******************************************************************************/ 271 | int zed_iic_axi_IicRead(zed_iic_t *pIIC, Xuint8 ChipAddress, Xuint8 RegAddress, 272 | Xuint8 *pBuffer, Xuint8 ByteCount) 273 | { 274 | Xuint8 ReceivedByteCount = 0; 275 | Xuint8 SentByteCount = 0; 276 | Xuint8 ControlReg; 277 | Xuint8 StatusReg; 278 | int cnt = 0; 279 | zed_iic_axi_t *pContext = (zed_iic_axi_t *)(pIIC->pContext); 280 | 281 | #if 1 282 | // Make sure all the Fifo's are cleared and Bus is Not busy. 283 | do 284 | { 285 | StatusReg = Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET); 286 | //print("[%s] Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET) => 0x%02X\n\r", pContext->szName, StatusReg ); 287 | StatusReg = StatusReg & (XIIC_SR_RX_FIFO_EMPTY_MASK | 288 | XIIC_SR_TX_FIFO_EMPTY_MASK | 289 | XIIC_SR_BUS_BUSY_MASK); 290 | if ((StatusReg & XIIC_SR_RX_FIFO_EMPTY_MASK) != XIIC_SR_RX_FIFO_EMPTY_MASK) 291 | { 292 | /* 293 | * The RX buffer is not empty and it is assumed there is a stale 294 | * message in there. Attempt to clear out the RX buffer, otherwise 295 | * this loop spins forever. 296 | */ 297 | XIic_ReadReg(pContext->CoreAddress, XIIC_DRR_REG_OFFSET); 298 | } 299 | 300 | /* 301 | * Check to see if the bus is busy. Since we are master, if the bus is 302 | * still busy that means that arbitration has been lost. 303 | * 304 | * According to Product Guide PG090, October 16, 2012: 305 | * 306 | * Control Register (0x100), Bit 2 MSMS: 307 | * 308 | * "Master/Slave Mode Select. When this bit is changed from 0 to 1, 309 | * the AXI IIC bus interface generates a START condition in master 310 | * mode. When this bit is cleared, a STOP condition is generated and 311 | * the AXI IIC bus interface switches to slave mode. When this bit is 312 | * cleared by the hardware, because arbitration for the bus has been 313 | * lost, a STOP condition is not generated. (See also Interrupt(0): 314 | * Arbitration Lost in Chapter 3.)" 315 | * 316 | * According to this, it should be okay to clear the master/slave mode 317 | * select to clear a false start condition with a stop and regain 318 | * arbitration over the bus. 319 | */ 320 | if ((StatusReg & XIIC_SR_BUS_BUSY_MASK) == XIIC_SR_BUS_BUSY_MASK) 321 | { 322 | ControlReg = Xil_In8(pContext->CoreAddress + XIIC_CR_REG_OFFSET); 323 | ControlReg = ControlReg & 0xFB; // Clear the MSMS bit. 324 | Xil_Out8(pContext->CoreAddress + XIIC_CR_REG_OFFSET, ControlReg); 325 | } 326 | } while (StatusReg != (XIIC_SR_RX_FIFO_EMPTY_MASK | 327 | XIIC_SR_TX_FIFO_EMPTY_MASK)); 328 | #endif 329 | 330 | // Position the Read pointer to specific location. 331 | do 332 | { 333 | StatusReg = Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET); 334 | //print("[%s] Xil_In8(pContext->CoreAddress + XIIC_SR_REG_OFFSET) => 0x%02X\n\r", pContext->szName, StatusReg ); 335 | if(!(StatusReg & XIIC_SR_BUS_BUSY_MASK)) 336 | { 337 | SentByteCount = XIic_DynSend(pContext->CoreAddress, ChipAddress, 338 | (Xuint8 *)&RegAddress, 1, 339 | XIIC_REPEATED_START); 340 | } 341 | cnt++; 342 | }while(SentByteCount != 1 && (cnt < 100)); 343 | 344 | // Error writing chip address so return SentByteCount 345 | if (SentByteCount < 1) { return SentByteCount; } 346 | 347 | // Receive the data. 348 | ReceivedByteCount = XIic_DynRecv(pContext->CoreAddress, ChipAddress, pBuffer, 349 | ByteCount); 350 | 351 | // Return the number of bytes received. 352 | return ReceivedByteCount; 353 | } 354 | 355 | 356 | #endif // defined(XPAR_XIIC_NUM_INSTANCES) 357 | -------------------------------------------------------------------------------- /PS_code/core_setup/vdma_api.c: -------------------------------------------------------------------------------- 1 | /****************************************************************************** 2 | * 3 | * Copyright (C) 2014 - 2016 Xilinx, Inc. All rights reserved. 4 | * 5 | * Permission is hereby granted, free of charge, to any person obtaining a copy 6 | * of this software and associated documentation files (the "Software"), to deal 7 | * in the Software without restriction, including without limitation the rights 8 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | * copies of the Software, and to permit persons to whom the Software is 10 | * furnished to do so, subject to the following conditions: 11 | * 12 | * The above copyright notice and this permission notice shall be included in 13 | * all copies or substantial portions of the Software. 14 | * 15 | * Use of the Software is limited solely to applications: 16 | * (a) running on a Xilinx device, or 17 | * (b) that interact with a Xilinx device through a bus or interconnect. 18 | * 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 | * XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 23 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF 24 | * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | * SOFTWARE. 26 | * 27 | * Except as contained in this notice, the name of the Xilinx shall not be used 28 | * in advertising or otherwise to promote the sale, use or other dealings in 29 | * this Software without prior written authorization from Xilinx. 30 | * 31 | ******************************************************************************/ 32 | /*****************************************************************************/ 33 | /** 34 | * 35 | * @file vdma_api.c 36 | * 37 | * This file has high level API to configure and start the VDMA.The file assumes that: 38 | * The design has VDMA with both MM2S and S2MM path enable. 39 | * The API's has tested with hardware that has tow VDMA and MM2S to S2MM are back 40 | * to back connected for each VDMA. 41 | * 42 | * MODIFICATION HISTORY: 43 | * 44 | * Ver Who Date Changes 45 | * ----- ---- -------- ------------------------------------------------------- 46 | * 4.0 adk 11/26/15 First release 47 | ****************************************************************************/ 48 | 49 | #define DEBUG_MODE 0 50 | 51 | /******************** Include files **********************************/ 52 | #include "xaxivdma.h" 53 | #include "xparameters.h" 54 | #include "xil_exception.h" 55 | 56 | /******************** Data structure Declarations *****************************/ 57 | 58 | typedef struct vdma_handle 59 | { 60 | /* The device ID of the VDMA */ 61 | unsigned int device_id; 62 | /* The state variable to keep track if the initialization is done*/ 63 | unsigned int init_done; 64 | /** The XAxiVdma driver instance data. */ 65 | XAxiVdma* InstancePtr; 66 | /* The XAxiVdma_DmaSetup structure contains all the necessary information to 67 | * start a frame write or read. */ 68 | XAxiVdma_DmaSetup ReadCfg; 69 | XAxiVdma_DmaSetup WriteCfg; 70 | /* Horizontal size of frame */ 71 | unsigned int hsize; 72 | /* Vertical size of frame */ 73 | unsigned int vsize; 74 | /* Buffer address from where read and write will be done by VDMA */ 75 | unsigned int buffer_address; 76 | /* Flag to tell VDMA to interrupt on frame completion*/ 77 | unsigned int enable_frm_cnt_intr; 78 | /* The counter to tell VDMA on how many frames the interrupt should happen*/ 79 | unsigned int number_of_frame_count; 80 | }vdma_handle; 81 | 82 | /******************** Constant Definitions **********************************/ 83 | 84 | /* 85 | * Device related constants. These need to defined as per the HW system. 86 | */ 87 | vdma_handle vdma_context[XPAR_XAXIVDMA_NUM_INSTANCES]; 88 | static unsigned int context_init=0; 89 | 90 | /******************* Function Prototypes ************************************/ 91 | 92 | static int ReadSetup(vdma_handle *vdma_context); 93 | static int WriteSetup(vdma_handle *vdma_context); 94 | static int StartTransfer(XAxiVdma *InstancePtr); 95 | 96 | /*****************************************************************************/ 97 | /** 98 | * 99 | * run_triple_frame_buffer API 100 | * 101 | * This API is the interface between application and other API. When application will call 102 | * this API with right argument, This API will call rest of the API to configure the read 103 | * and write path of VDMA,based on ID. After that it will start both the read and write path 104 | * of VDMA 105 | * 106 | * @param InstancePtr is the handle to XAxiVdma data structure. 107 | * @param DeviceId is the device ID of current VDMA 108 | * @param hsize is the horizontal size of the frame. It will be in Pixels. 109 | * The actual size of frame will be calculated by multiplying this 110 | * with tdata width. 111 | * @param vsize is the Vertical size of the frame. 112 | * @param buf_base_addr is the buffer address where frames will be written 113 | * and read by VDMA. 114 | * @param number_frame_count specifies after how many frames the interrupt 115 | * should come. 116 | * @param enable_frm_cnt_intr is for enabling frame count interrupt 117 | * when set to 1. 118 | * @return 119 | * - XST_SUCCESS if example finishes successfully 120 | * - XST_FAILURE if example fails. 121 | * 122 | ******************************************************************************/ 123 | int run_triple_frame_buffer(XAxiVdma* InstancePtr, int DeviceId, int hsize, 124 | int vsize, int buf_base_addr, int number_frame_count, 125 | int enable_frm_cnt_intr) 126 | { 127 | int Status,i; 128 | XAxiVdma_Config *Config; 129 | XAxiVdma_FrameCounter FrameCfgPtr; 130 | 131 | /* This is one time initialization of state machine context. 132 | * In first call it will be done for all VDMA instances in the system. 133 | */ 134 | if(context_init==0) { 135 | for(i=0; i < XPAR_XAXIVDMA_NUM_INSTANCES; i++) { 136 | vdma_context[i].InstancePtr = NULL; 137 | vdma_context[i].device_id = -1; 138 | vdma_context[i].hsize = 0; 139 | vdma_context[i].vsize = 0; 140 | vdma_context[i].init_done = 0; 141 | vdma_context[i].buffer_address = 0; 142 | vdma_context[i].enable_frm_cnt_intr = 0; 143 | vdma_context[i].number_of_frame_count = 0; 144 | 145 | } 146 | context_init = 1; 147 | } 148 | 149 | /* The below initialization will happen for each VDMA. The API argument 150 | * will be stored in internal data structure 151 | */ 152 | 153 | /* The information of the XAxiVdma_Config comes from hardware build. 154 | * The user IP should pass this information to the AXI DMA core. 155 | */ 156 | Config = XAxiVdma_LookupConfig(DeviceId); 157 | if (!Config) { 158 | xil_printf("No video DMA found for ID %x\r\n",DeviceId ); 159 | return XST_FAILURE; 160 | } 161 | 162 | printf("Config->BaseAddress : %d \n\r", Config->BaseAddress); 163 | if(vdma_context[DeviceId].init_done ==0) { 164 | vdma_context[DeviceId].InstancePtr = InstancePtr; 165 | 166 | /* Initialize DMA engine */ 167 | Status = XAxiVdma_CfgInitialize(vdma_context[DeviceId].InstancePtr, 168 | Config, Config->BaseAddress); 169 | if (Status != XST_SUCCESS) { 170 | xil_printf("Configuration Initialization failed %d\r\n", 171 | Status); 172 | return XST_FAILURE; 173 | } 174 | 175 | vdma_context[DeviceId].init_done = 1; 176 | } 177 | 178 | vdma_context[DeviceId].device_id = DeviceId; 179 | vdma_context[DeviceId].vsize = vsize; 180 | 181 | vdma_context[DeviceId].buffer_address = buf_base_addr; 182 | vdma_context[DeviceId].enable_frm_cnt_intr = enable_frm_cnt_intr; 183 | vdma_context[DeviceId].number_of_frame_count = number_frame_count; 184 | vdma_context[DeviceId].hsize = hsize * (Config->Mm2SStreamWidth>>3); 185 | 186 | /* Setup the write channel */ 187 | Status = WriteSetup(&vdma_context[DeviceId]); 188 | if (Status != XST_SUCCESS) { 189 | xil_printf("Write channel setup failed %d\r\n", Status); 190 | if(Status == XST_VDMA_MISMATCH_ERROR) 191 | xil_printf("DMA Mismatch Error\r\n"); 192 | return XST_FAILURE; 193 | } 194 | 195 | /* Setup the read channel */ 196 | Status = ReadSetup(&vdma_context[DeviceId]); 197 | if (Status != XST_SUCCESS) { 198 | xil_printf("Read channel setup failed %d\r\n", Status); 199 | if(Status == XST_VDMA_MISMATCH_ERROR) 200 | xil_printf("DMA Mismatch Error\r\n"); 201 | return XST_FAILURE; 202 | } 203 | 204 | /* The frame counter interrupt is enabled, setting VDMA for same */ 205 | if(vdma_context[DeviceId].enable_frm_cnt_intr) { 206 | FrameCfgPtr.ReadDelayTimerCount = 1; 207 | FrameCfgPtr.ReadFrameCount = number_frame_count; 208 | FrameCfgPtr.WriteDelayTimerCount = 1; 209 | FrameCfgPtr.WriteFrameCount = number_frame_count; 210 | 211 | XAxiVdma_SetFrameCounter(vdma_context[DeviceId].InstancePtr,&FrameCfgPtr); 212 | /* Enable DMA read and write channel interrupts. The configuration for interrupt 213 | * controller will be done by application */ 214 | XAxiVdma_IntrEnable(vdma_context[DeviceId].InstancePtr, 215 | XAXIVDMA_IXR_ERROR_MASK | 216 | XAXIVDMA_IXR_FRMCNT_MASK,XAXIVDMA_WRITE); 217 | XAxiVdma_IntrEnable(vdma_context[DeviceId].InstancePtr, 218 | XAXIVDMA_IXR_ERROR_MASK | 219 | XAXIVDMA_IXR_FRMCNT_MASK,XAXIVDMA_READ); 220 | } else { 221 | /* Enable DMA read and write channel interrupts. The configuration for interrupt 222 | * controller will be done by application */ 223 | XAxiVdma_IntrEnable(vdma_context[DeviceId].InstancePtr, 224 | XAXIVDMA_IXR_ERROR_MASK,XAXIVDMA_WRITE); 225 | XAxiVdma_IntrEnable(vdma_context[DeviceId].InstancePtr, 226 | XAXIVDMA_IXR_ERROR_MASK ,XAXIVDMA_READ); 227 | } 228 | 229 | /* Start the DMA engine to transfer */ 230 | Status = StartTransfer(vdma_context[DeviceId].InstancePtr); 231 | if (Status != XST_SUCCESS) { 232 | if(Status == XST_VDMA_MISMATCH_ERROR) 233 | xil_printf("DMA Mismatch Error\r\n"); 234 | return XST_FAILURE; 235 | } 236 | #if DEBUG_MODE 237 | xil_printf("Code is in Debug mode, Make sure that buffer addresses are at valid memory \r\n"); 238 | xil_printf("In triple mode, there has to be six consecutive buffers for Debug mode \r\n"); 239 | { 240 | u32 pixels,j,Addr = vdma_context[DeviceId].buffer_address; 241 | u8 *dst,*src; 242 | u32 total_pixel = vdma_context[DeviceId].stride * vdma_context[DeviceId].vsize; 243 | src = (unsigned char *)Addr; 244 | dst = (unsigned char *)Addr + (total_pixel * vdma_context->InstancePtr->MaxNumFrames); 245 | 246 | for(j=0;jInstancePtr->MaxNumFrames;j++) { 247 | for(pixels=0;pixelsReadCfg.VertSizeInput = vdma_context->vsize; 283 | vdma_context->ReadCfg.HoriSizeInput = vdma_context->hsize; 284 | 285 | vdma_context->ReadCfg.Stride = vdma_context->hsize; 286 | vdma_context->ReadCfg.FrameDelay = 0; /* This example does not test frame delay */ 287 | 288 | vdma_context->ReadCfg.EnableCircularBuf = 1; 289 | vdma_context->ReadCfg.EnableSync = 1; /* Gen-Lock */ 290 | 291 | vdma_context->ReadCfg.PointNum = 0; 292 | vdma_context->ReadCfg.EnableFrameCounter = 0; /* Endless transfers */ 293 | 294 | vdma_context->ReadCfg.FixedFrameStoreAddr = 0; /* We are not doing parking */ 295 | /* Configure the VDMA is per fixed configuration, This configuration is being used by majority 296 | * of customer. Expert users can play around with this if they have different configurations */ 297 | 298 | Status = XAxiVdma_DmaConfig(vdma_context->InstancePtr, XAXIVDMA_READ, &vdma_context->ReadCfg); 299 | if (Status != XST_SUCCESS) { 300 | xil_printf("Read channel config failed %d\r\n", Status); 301 | return XST_FAILURE; 302 | } 303 | 304 | /* Initialize buffer addresses 305 | * 306 | * These addresses are physical addresses 307 | */ 308 | Addr = vdma_context->buffer_address; 309 | 310 | for(Index = 0; Index < vdma_context->InstancePtr->MaxNumFrames; Index++) { 311 | vdma_context->ReadCfg.FrameStoreStartAddr[Index] = Addr; 312 | 313 | /* Initializing the buffer in case of Debug mode */ 314 | 315 | #if DEBUG_MODE 316 | { 317 | u32 i; 318 | u8 *src; 319 | u32 total_pixel = vdma_context->stride * vdma_context->vsize; 320 | src = (unsigned char *)Addr; 321 | xil_printf("Read Buffer %d address: 0x%x \r\n",Index,Addr); 322 | for(i=0;ihsize * vdma_context->vsize; 329 | } 330 | 331 | /* Set the buffer addresses for transfer in the DMA engine 332 | * The buffer addresses are physical addresses 333 | */ 334 | Status = XAxiVdma_DmaSetBufferAddr(vdma_context->InstancePtr, XAXIVDMA_READ, 335 | vdma_context->ReadCfg.FrameStoreStartAddr); 336 | if (Status != XST_SUCCESS) { 337 | xil_printf( 338 | "Read channel set buffer address failed %d\r\n", Status); 339 | 340 | return XST_FAILURE; 341 | } 342 | 343 | return XST_SUCCESS; 344 | } 345 | 346 | /*****************************************************************************/ 347 | /** 348 | * 349 | * This function sets up the write channel 350 | * 351 | * @param dma_context is the context pointer to the VDMA engine.. 352 | * 353 | * @return XST_SUCCESS if the setup is successful, XST_FAILURE otherwise. 354 | * 355 | * @note None. 356 | * 357 | ******************************************************************************/ 358 | static int WriteSetup(vdma_handle *vdma_context) 359 | { 360 | int Index; 361 | u32 Addr; 362 | int Status; 363 | 364 | vdma_context->WriteCfg.VertSizeInput = vdma_context->vsize; 365 | vdma_context->WriteCfg.HoriSizeInput = vdma_context->hsize; 366 | 367 | vdma_context->WriteCfg.Stride = vdma_context->hsize; 368 | vdma_context->WriteCfg.FrameDelay = 0; /* This example does not test frame delay */ 369 | 370 | vdma_context->WriteCfg.EnableCircularBuf = 1; 371 | vdma_context->WriteCfg.EnableSync = 1; /* Gen-Lock */ 372 | 373 | vdma_context->WriteCfg.PointNum = 0; 374 | vdma_context->WriteCfg.EnableFrameCounter = 0; /* Endless transfers */ 375 | 376 | vdma_context->WriteCfg.FixedFrameStoreAddr = 0; /* We are not doing parking */ 377 | /* Configure the VDMA is per fixed configuration, This configuration 378 | * is being used by majority of customers. Expert users can play around 379 | * with this if they have different configurations 380 | */ 381 | 382 | Status = XAxiVdma_DmaConfig(vdma_context->InstancePtr, XAXIVDMA_WRITE, &vdma_context->WriteCfg); 383 | if (Status != XST_SUCCESS) { 384 | xil_printf( 385 | "Write channel config failed %d\r\n", Status); 386 | 387 | return Status; 388 | } 389 | 390 | /* Initialize buffer addresses 391 | * 392 | * Use physical addresses 393 | */ 394 | Addr = vdma_context->buffer_address; 395 | /* If Debug mode is enabled write frame is shifted 3 Frames 396 | * store ahead to compare read and write frames 397 | */ 398 | #if DEBUG_MODE 399 | Addr = Addr + vdma_context->InstancePtr->MaxNumFrames * \ 400 | (vdma_context->stride * vdma_context->vsize); 401 | #endif 402 | 403 | for(Index = 0; Index < vdma_context->InstancePtr->MaxNumFrames; Index++) { 404 | vdma_context->WriteCfg.FrameStoreStartAddr[Index] = Addr; 405 | #if DEBUG_MODE 406 | xil_printf("Write Buffer %d address: 0x%x \r\n",Index,Addr); 407 | #endif 408 | 409 | Addr += (vdma_context->hsize * vdma_context->vsize); 410 | } 411 | 412 | /* Set the buffer addresses for transfer in the DMA engine */ 413 | Status = XAxiVdma_DmaSetBufferAddr(vdma_context->InstancePtr, 414 | XAXIVDMA_WRITE, 415 | vdma_context->WriteCfg.FrameStoreStartAddr); 416 | if (Status != XST_SUCCESS) { 417 | xil_printf("Write channel set buffer address failed %d\r\n", 418 | Status); 419 | return XST_FAILURE; 420 | } 421 | 422 | /* Clear data buffer 423 | */ 424 | #if DEBUG_MODE 425 | memset((void *)vdma_context->buffer_address, 0, 426 | vdma_context->ReadCfg.Stride * vdma_context->ReadCfg.VertSizeInput * vdma_context->InstancePtr->MaxNumFrames); 427 | #endif 428 | return XST_SUCCESS; 429 | } 430 | 431 | /*****************************************************************************/ 432 | /** 433 | * 434 | * This function starts the DMA transfers. Since the DMA engine is operating 435 | * in circular buffer mode, video frames will be transferred continuously. 436 | * 437 | * @param InstancePtr points to the DMA engine instance 438 | * 439 | * @return 440 | * - XST_SUCCESS if both read and write start successfully 441 | * - XST_FAILURE if one or both directions cannot be started 442 | * 443 | * @note None. 444 | * 445 | ******************************************************************************/ 446 | static int StartTransfer(XAxiVdma *InstancePtr) 447 | { 448 | int Status; 449 | /* Start the write channel of VDMA */ 450 | Status = XAxiVdma_DmaStart(InstancePtr, XAXIVDMA_WRITE); 451 | if (Status != XST_SUCCESS) { 452 | xil_printf("Start Write transfer failed %d\r\n", Status); 453 | 454 | return XST_FAILURE; 455 | } 456 | /* Start the Read channel of VDMA */ 457 | Status = XAxiVdma_DmaStart(InstancePtr, XAXIVDMA_READ); 458 | if (Status != XST_SUCCESS) { 459 | xil_printf("Start read transfer failed %d\r\n", Status); 460 | 461 | return XST_FAILURE; 462 | } 463 | 464 | return XST_SUCCESS; 465 | } 466 | -------------------------------------------------------------------------------- /MATLAB_code/back_sub_kalman.m: -------------------------------------------------------------------------------- 1 | function back_sub_kalman() 2 | 3 | obj = setUpSystemObjects(); 4 | 5 | tracks = struct(... 6 | 'id', {}, ... 7 | 'bbox', {}, ... 8 | 'contour', {}, ... 9 | 'kalmanFilter', {}, ... 10 | 'age', {}, ... 11 | 'totalVisibleCount', {}, ... 12 | 'invisibleCount', {}, ... 13 | 'track', {} ... 14 | ); 15 | 16 | nextId = 1; 17 | 18 | radius = 200; 19 | open(obj.k); 20 | 21 | count = 0; 22 | overlap = -1; 23 | 24 | while ~isDone(obj.reader) 25 | frame = obj.reader(); 26 | count = count + 1; 27 | if (count < 500) 28 | continue; 29 | end 30 | mask = detectMovement(frame); 31 | [~, centroids, bboxes] = obj.blobAnalyser(mask); 32 | mask_p=bwperim(mask); 33 | predictNewLocationsOfTracks(); 34 | [assignments, unassignedTracks, unassignedDetections] = detectionToTrackAssignment(); 35 | updateAssignedTracks(); 36 | updateUnassignedTracks(); 37 | deleteLostTracks(); 38 | if ~isempty(centroids) 39 | createNewTracks(); 40 | end 41 | motion = predictMotion(); 42 | checkForOverlap(motion); 43 | displayTrackingResults(mask_p); 44 | end 45 | 46 | close(obj.k) 47 | 48 | function obj = setUpSystemObjects() 49 | obj.reader = vision.VideoFileReader('C:\Users\Tiarnan\Pictures\Matlab_test\test_vid_5.mp4'); 50 | obj.maskPlayer = vision.VideoPlayer('Position', [740, 400, 700, 400]); 51 | obj.videoPlayer = vision.VideoPlayer('Position', [20, 400, 700, 400]); 52 | obj.detector = vision.ForegroundDetector('NumGaussians', 7,'NumTrainingFrames', 50, 'MinimumBackgroundRatio', 0.6, 'InitialVariance', 1); 53 | obj.blobAnalyser = vision.BlobAnalysis('BoundingBoxOutputPort', true, 'AreaOutputPort', true, 'CentroidOutputPort', true, ... 54 | 'MinimumBlobArea',7000); 55 | obj.k = VideoWriter('C:\Users\Tiarnan\Pictures\Matlab_test\output_test_vid_3.mp4'); 56 | end 57 | 58 | function mask = detectMovement(frame) 59 | mask = obj.detector(frame); 60 | mask = imopen(mask, strel('rectangle', [3,3])); 61 | mask = imclose(mask, strel('rectangle', [8, 8])); 62 | mask = bwareaopen(mask, 50); 63 | mask = imfill(mask, 'holes'); 64 | end 65 | 66 | function predictNewLocationsOfTracks() 67 | for i = 1:length(tracks) 68 | bbox = tracks(i).bbox; 69 | 70 | % Predict the current location of the track. 71 | predictedCentroid = predict(tracks(i).kalmanFilter); 72 | 73 | %push the predicted centroid onto the tracks track 74 | tracks(i).track(end + 1) = predictedCentroid(1); 75 | tracks(i).track(end + 1) = predictedCentroid(2); 76 | 77 | % Shift the bounding box so that its center is at 78 | % the predicted location. 79 | predictedCentroid = int32(predictedCentroid) - bbox(3:4) / 2; 80 | tracks(i).bbox = [predictedCentroid, bbox(3:4)]; 81 | end 82 | end 83 | 84 | function [assignments, unassignedTracks, unassignedDetections] = detectionToTrackAssignment() 85 | nTracks = length(tracks); 86 | nDetections = size(centroids, 1); 87 | 88 | % Compute the cost of assigning each detection to each track. 89 | cost = zeros(nTracks, nDetections); 90 | for i = 1:nTracks 91 | cost(i, :) = distance(tracks(i).kalmanFilter, centroids); 92 | end 93 | 94 | % Solve the assignment problem. 95 | costOfNonAssignment = 20; 96 | [assignments, unassignedTracks, unassignedDetections] = assignDetectionsToTracks(cost, costOfNonAssignment); 97 | end 98 | 99 | function updateAssignedTracks() 100 | numAssignedTracks = size(assignments, 1); 101 | for i = 1:numAssignedTracks 102 | trackIdx = assignments(i, 1); 103 | if (tracks(trackIdx).id < 0 & nextId == 12) 104 | continue; 105 | end 106 | detectionIdx = assignments(i, 2); 107 | centroid = centroids(detectionIdx, :); 108 | bbox = bboxes(detectionIdx, :); 109 | 110 | % Correct the estimate of the object's location 111 | % using the new detection. 112 | correct(tracks(trackIdx).kalmanFilter, centroid); 113 | 114 | %push the real centroid onto the tracks track 115 | tracks(trackIdx).track(end-1) = centroid(1); 116 | tracks(trackIdx).track(end) = centroid(2); 117 | 118 | % Replace predicted bounding box with detected 119 | % bounding box. 120 | tracks(trackIdx).bbox = bbox; 121 | % tracks(trackIdx).contour = contour; 122 | 123 | % Update track's frame count. 124 | tracks(trackIdx).age = tracks(trackIdx).age + 1; 125 | 126 | % Update visibility. 127 | tracks(trackIdx).totalVisibleCount = ... 128 | tracks(trackIdx).totalVisibleCount + 1; 129 | tracks(trackIdx).invisibleCount = 0; 130 | if (tracks(trackIdx).totalVisibleCount > 15) && (tracks(trackIdx).id < 0) 131 | tracks(trackIdx).id = nextId; 132 | % Increment the next id. 133 | nextId = nextId + 1; 134 | %get contour here from getContours 135 | end 136 | end 137 | end 138 | 139 | function updateUnassignedTracks() 140 | for i = 1:length(unassignedTracks) 141 | ind = unassignedTracks(i); 142 | tracks(ind).age = tracks(ind).age + 1; 143 | tracks(ind).invisibleCount = ... 144 | tracks(ind).invisibleCount + 1; 145 | end 146 | end 147 | 148 | function deleteLostTracks() 149 | if isempty(tracks) 150 | return; 151 | end 152 | 153 | invisibleForTooLong = 50; 154 | ageThreshold = 20; 155 | 156 | % Compute the fraction of the track's age for which it was visible. 157 | ages = [tracks(:).age]; 158 | totalVisibleCounts = [tracks(:).totalVisibleCount]; 159 | visibility = totalVisibleCounts ./ ages; 160 | 161 | % Find the indices of 'lost' tracks. 162 | lostInds = (ages < ageThreshold & visibility < 0.5) | [tracks(:).invisibleCount] >= invisibleForTooLong; 163 | 164 | % Delete lost tracks. 165 | tracks = tracks(~lostInds); 166 | end 167 | 168 | function createNewTracks() 169 | centroids = centroids(unassignedDetections, :); 170 | bboxes = bboxes(unassignedDetections, :); 171 | % contours = contours(unassignedDetections, :); 172 | 173 | for i = 1:size(centroids, 1) 174 | 175 | centroid = centroids(i,:); 176 | bbox = bboxes(i, :); 177 | % contour = contours(i,:); 178 | 179 | % Create a Kalman filter object. 180 | % kalmanFilter = configureKalmanFilter('ConstantVelocity', ... 181 | % centroid, [200, 50], [100, 25], 100); 182 | kalmanFilter = configureKalmanFilter('ConstantAcceleration',... 183 | centroid, [1 1 1]*1e5, [25, 10, 10], 25); 184 | % Create a new track id assigned after found in 20 frames. 185 | newTrack = struct(... 186 | 'id', -1 , ... 187 | 'bbox', bbox, ... 188 | 'contour', 1, ... 189 | 'kalmanFilter', kalmanFilter, ... 190 | 'age', 1, ... 191 | 'totalVisibleCount', 1, ... 192 | 'invisibleCount', 0, ... 193 | 'track', {centroid}); 194 | % Add it to the array of temp_tracks. 195 | tracks(end + 1) = newTrack; 196 | end 197 | 198 | end 199 | 200 | function motion = predictMotion() 201 | motion = zeros(1, length(tracks)); 202 | for i = 1:length(tracks) 203 | if tracks(i).id < 0 204 | continue 205 | end 206 | horiz_comp = zeros(1,10); 207 | vert_comp = zeros(1,10); 208 | %get oldest to newest tracks 209 | counter=1; 210 | for j = 20:-2:1 211 | if (j+1 < length(tracks(i).track)) 212 | xprev = tracks(i).track(end-j-1:end-j-1); 213 | yprev = tracks(i).track(end-j:end-j); 214 | x = tracks(i).track(end-j+1:end-j+1); 215 | y = tracks(i).track(end-j+2:end-j+2); 216 | horiz_comp(counter) = (41-j)*(x-xprev); 217 | vert_comp(counter) = (41-j)*(y-yprev); 218 | counter = counter + 1; 219 | end 220 | end 221 | total_horiz=sum(horiz_comp(1:1:end))/400; 222 | total_vert=sum(vert_comp(1:1:end))/400; 223 | X = ['track ',num2str(tracks(i).id),' horiz : ', num2str(total_horiz)]; 224 | disp(X) 225 | Y = ['track ',num2str(tracks(i).id),' vert : ', num2str(total_vert)]; 226 | disp(Y) 227 | motion(i,1) = total_vert; 228 | motion(i,2) = total_horiz; 229 | end 230 | end 231 | 232 | 233 | function checkForOverlap(motion) 234 | %check future 235 | colour = 'green'; 236 | for i = 1:length(tracks) 237 | if tracks(i).id < 0 238 | continue 239 | end 240 | total_vert = motion(i,1); 241 | total_horiz = motion(i , 2); 242 | slope = total_vert/total_horiz; 243 | c = (tracks(i).track(end:end)) - slope*(tracks(i).track(end-1:end-1)); 244 | mag = sqrt((total_horiz*total_horiz) + (total_vert*total_vert)) 245 | future = mag * 10 246 | [xout, yout] = linecirc(slope,c , 250, 930, radius); 247 | endx = (tracks(i).track(end-1:end-1)) + (future * total_horiz) 248 | endy = (tracks(i).track(end:end)) + (future * total_vert) 249 | dist_end = sqrt(((250-endx)*(250-endx))+((900-endy)*(900-endy))); 250 | dist_cp = sqrt((250-(tracks(i).track(end-1:end-1)))*(250-(tracks(i).track(end-1:end-1)))+((900-(tracks(i).track(end:end)))*(900-(tracks(i).track(end:end))))); 251 | if (~isnan(xout) & dist_end < dist_cp) 252 | disp('predicted motion in circle') 253 | colour = 'red'; 254 | radius=max(200,40*mag); 255 | else 256 | radius=max(200, radius*0.8); 257 | end 258 | frame = insertShape(frame, 'line', [(tracks(i).track(end-1:end-1)), (tracks(i).track(end:end)), endx, endy], 'Color', colour, 'LineWidth',10); 259 | end 260 | 261 | %check present 262 | overlap = -1; 263 | for i = 1:length(tracks) 264 | if tracks(i).id < 0 265 | continue 266 | end 267 | tlx = tracks(i).bbox(1:1); 268 | tly = tracks(i).bbox(2:2); 269 | w = tracks(i).bbox(3:3); 270 | h = tracks(i).bbox(4:4); 271 | tlx = double(tlx); 272 | tly = double(tly); 273 | w = double(w); 274 | h = double(h); 275 | %left_line, bottom line right line, top line 276 | left = [tlx, tly, tlx, tly+h]; 277 | left = double(left); 278 | bottom = [tlx, tly+h, tlx+w, tly+h]; 279 | bottom = double(left); 280 | right = [tlx+w, tly, tlx+w, tly+h]; 281 | right = double(left); 282 | top = [tlx, tly, tlx+w, tly]; 283 | top = double(left); 284 | C = [250, 970]; 285 | %check corner distance 286 | tl_dx = C(1) - tlx; 287 | tl_dy = C(2) -tly; 288 | dist = sqrt((tl_dx*tl_dx)+(tl_dy*tl_dy)); 289 | if (dist < radius) 290 | overlap=1; 291 | return; 292 | end 293 | tr_dx = C(1) - (tlx + w); 294 | tr_dy = C(2) - tly; 295 | dist = sqrt((tr_dx*tr_dx)+(tr_dy*tr_dy)); 296 | if (dist < radius) 297 | overlap=1; 298 | return; 299 | end 300 | bl_dx = C(1) - tlx; 301 | bl_dy = C(2) - (tly+h); 302 | dist = sqrt((bl_dx*bl_dx)+(bl_dy*bl_dy)); 303 | if (dist < radius) 304 | overlap=1; 305 | return; 306 | end 307 | br_dx = C(1) - (tlx+w); 308 | br_dy = C(2) - (tly+h); 309 | dist = sqrt((br_dx*br_dx)+(br_dy*br_dy)); 310 | if (dist < radius) 311 | overlap=1; 312 | return; 313 | end 314 | U_l = ((C(1) - left(1))*(left(3)-left(1))+(C(2)-left(2))*(left(4)-left(2)))/(h*h + w*w); 315 | U_b = ((C(1) - bottom(1))*(bottom(3)-bottom(1))+(C(2)-bottom(2))*(bottom(4)-bottom(2)))/(h*h + w*w); 316 | U_r = ((C(1) - right(1))*(right(3)-right(1))+(C(2)-right(2))*(right(4)-right(2)))/(h*h + w*w); 317 | U_t = ((C(1) - top(1))*(top(3)-top(1))+(C(2)-top(2))*(top(4)-top(2)))/(h*h + w*w); 318 | if ((U_l>0 && U_l<1)) 319 | x_closest = left(1) + U_l*(left(3)-left(1)); 320 | y_closest = left(2) + U_l*(left(4)-left(2)); 321 | dx = C(1) - x_closest; 322 | dy = C(2) - y_closest; 323 | dist = sqrt((dx*dx)+(dy*dy)); 324 | if (dist < radius) 325 | overlap=1; 326 | return; 327 | end 328 | end 329 | if ((U_b>0 && U_b<1)) 330 | x_closest = bottom(1) + U_b*(bottom(3)-bottom(1)); 331 | y_closest = bottom(2) + U_b*(bottom(4)-bottom(2)); 332 | dx = C(1) - x_closest; 333 | dy = C(2) - y_closest; 334 | dist = sqrt((dx*dx)+(dy*dy)); 335 | if (dist < radius) 336 | overlap=1; 337 | return; 338 | end 339 | end 340 | if ((U_r>0 && U_r<1)) 341 | x_closest = right(1) + U_r*(right(3)-right(1)); 342 | y_closest = right(2) + U_r*(right(4)-right(2)); 343 | dx = C(1) - x_closest; 344 | dy = C(2) - y_closest; 345 | dist = sqrt((dx*dx)+(dy*dy)); 346 | if (dist < radius) 347 | overlap=1; 348 | return; 349 | end 350 | end 351 | if ((U_t>0 && U_t<1)) 352 | x_closest = top(1) + U_t*(top(3)-top(1)); 353 | y_closest = top(2) + U_t*(top(4)-top(2)); 354 | dx = C(1) - x_closest; 355 | dy = C(2) - y_closest; 356 | dist = sqrt((dx*dx)+(dy*dy)); 357 | if (dist < radius) 358 | overlap=1; 359 | return; 360 | end 361 | end 362 | end 363 | 364 | end 365 | 366 | 367 | function displayTrackingResults(mask_p) 368 | % Convert the frame and the mask to uint8 RGB. 369 | frame = im2uint8(frame); 370 | mask = uint8(repmat(mask, [1, 1, 3])) .* 255; 371 | 372 | minVisibleCount = 15; 373 | if ~isempty(tracks) 374 | 375 | % Noisy detections tend to result in short-lived tracks. 376 | % Only display tracks that have been visible for more than 377 | % a minimum number of frames. 378 | reliableTrackInds = ... 379 | [tracks(:).totalVisibleCount] > minVisibleCount; 380 | reliableTracks = tracks(reliableTrackInds); 381 | 382 | % Display the objects. If an object has not been detected 383 | % in this frame, display its predicted bounding box. 384 | if ~isempty(reliableTracks) 385 | % Get bounding boxes. 386 | bboxes = cat(1, reliableTracks.bbox); 387 | % contours = cat(1, reliableTracks.contour); 388 | % Get ids. 389 | ids = int32([reliableTracks(:).id]); 390 | 391 | % Create labels for objects indicating the ones for 392 | % which we display the predicted rather than the actual 393 | % location. 394 | labels = cellstr(int2str(ids')); 395 | predictedTrackInds = ... 396 | [reliableTracks(:).invisibleCount] > 0; 397 | isPredicted = cell(size(labels)); 398 | isPredicted(predictedTrackInds) = {' predicted'}; 399 | labels = strcat(labels, isPredicted); 400 | 401 | % Draw the objects on the frame. 402 | objectcolor = 'yellow' 403 | if overlap >= 0 404 | objectcolor = 'red' 405 | end 406 | frame = insertObjectAnnotation(frame, 'rectangle', ... 407 | bboxes, labels, 'FontSize', 20, 'LineWidth', 10, 'Color', objectcolor); 408 | % frame = shapeInserter(frame,contours); 409 | 410 | % Draw the objects on the mask. 411 | mask = insertObjectAnnotation(mask, 'rectangle', ... 412 | bboxes, labels,'FontSize', 20, 'LineWidth', 10); 413 | end 414 | end 415 | frame = insertShape(frame, 'circle', [250, 970, radius], 'Color', 'red', 'LineWidth',10); 416 | frame = insertShape(frame, 'line', [270, 950, 300, 1055], 'Color', 'red', 'LineWidth',10); 417 | 418 | % Display the mask and the frame. 419 | obj.maskPlayer.step(mask); 420 | obj.videoPlayer.step(frame); 421 | writeVideo(obj.k, frame); 422 | end 423 | 424 | end 425 | --------------------------------------------------------------------------------