├── .clang-format ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── libcamera-apps-style-checker.yml │ ├── libcamera-apps-test.yml │ └── libcamera-test.yml ├── .gitignore ├── CMakeLists.txt ├── README.md ├── apps ├── CMakeLists.txt ├── libcamera_detect.cpp ├── libcamera_hello.cpp ├── libcamera_jpeg.cpp ├── libcamera_raw.cpp ├── libcamera_still.cpp └── libcamera_vid.cpp ├── assets ├── annotate_cv.json ├── drc.json ├── face_detect_cv.json ├── hdr.json ├── motion_detect.json ├── negate.json ├── object_classify_tf.json ├── object_detect_tf.json ├── pose_estimation_tf.json ├── segmentation_labels.txt ├── segmentation_tf.json └── sobel_cv.json ├── cinepi ├── CMakeLists.txt ├── cinepi_controller.cpp ├── cinepi_controller.hpp ├── cinepi_frameinfo.hpp ├── cinepi_raw.cpp ├── cinepi_recorder.hpp ├── cinepi_state.cpp ├── cinepi_state.hpp ├── dng_encoder.cpp ├── dng_encoder.hpp ├── lj92.h ├── raw_options.hpp ├── utils.hpp └── yuv2rgb.hpp ├── core ├── CMakeLists.txt ├── completed_request.hpp ├── frame_info.hpp ├── libcamera_app.cpp ├── libcamera_app.hpp ├── libcamera_encoder.hpp ├── logging.hpp ├── metadata.hpp ├── options.cpp ├── options.hpp ├── post_processor.cpp ├── post_processor.hpp ├── still_options.hpp ├── stream_info.hpp ├── version.cmake ├── version.cpp.in ├── version.hpp └── video_options.hpp ├── encoder ├── CMakeLists.txt ├── encoder.cpp ├── encoder.hpp ├── h264_encoder.cpp ├── h264_encoder.hpp ├── libav_encoder.cpp ├── libav_encoder.hpp ├── mjpeg_encoder.cpp ├── mjpeg_encoder.hpp ├── null_encoder.cpp └── null_encoder.hpp ├── image ├── CMakeLists.txt ├── bmp.cpp ├── dng.cpp ├── image.hpp ├── jpeg.cpp ├── png.cpp └── yuv.cpp ├── license.txt ├── output ├── CMakeLists.txt ├── circular_output.cpp ├── circular_output.hpp ├── file_output.cpp ├── file_output.hpp ├── net_output.cpp ├── net_output.hpp ├── output.cpp └── output.hpp ├── package.cmake ├── post_processing_stages ├── CMakeLists.txt ├── annotate_cv_stage.cpp ├── face_detect_cv_stage.cpp ├── 
hdr_stage.cpp ├── histogram.cpp ├── histogram.hpp ├── motion_detect_stage.cpp ├── negate_stage.cpp ├── object_classify_tf_stage.cpp ├── object_detect.hpp ├── object_detect_draw_cv_stage.cpp ├── object_detect_tf_stage.cpp ├── plot_pose_cv_stage.cpp ├── pose_estimation_tf_stage.cpp ├── post_processing_stage.cpp ├── post_processing_stage.hpp ├── pwl.cpp ├── pwl.hpp ├── segmentation.hpp ├── segmentation_tf_stage.cpp ├── sobel_cv_stage.cpp ├── tf_stage.cpp └── tf_stage.hpp ├── preview ├── CMakeLists.txt ├── drm_preview.cpp ├── egl_preview.cpp ├── null_preview.cpp ├── preview.cpp ├── preview.hpp └── qt_preview.cpp └── utils ├── CMakeLists.txt ├── camera-bug-report ├── checkstyle.py ├── test.py ├── timestamp.py └── version.py /.clang-format: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0-only 2 | # 3 | # clang-format configuration file. Intended for clang-format >= 7. 4 | # 5 | # For more information, see: 6 | # 7 | # Documentation/process/clang-format.rst 8 | # https://clang.llvm.org/docs/ClangFormat.html 9 | # https://clang.llvm.org/docs/ClangFormatStyleOptions.html 10 | # 11 | --- 12 | Language: Cpp 13 | AccessModifierOffset: -4 14 | AlignAfterOpenBracket: Align 15 | AlignConsecutiveAssignments: false 16 | AlignConsecutiveDeclarations: false 17 | AlignEscapedNewlines: Right 18 | AlignOperands: true 19 | AlignTrailingComments: false 20 | AllowAllParametersOfDeclarationOnNextLine: false 21 | AllowShortBlocksOnASingleLine: false 22 | AllowShortCaseLabelsOnASingleLine: false 23 | AllowShortLambdasOnASingleLine: Inline 24 | AllowShortFunctionsOnASingleLine: InlineOnly 25 | AllowShortIfStatementsOnASingleLine: false 26 | AllowShortLoopsOnASingleLine: false 27 | AlwaysBreakAfterDefinitionReturnType: None 28 | AlwaysBreakAfterReturnType: None 29 | AlwaysBreakBeforeMultilineStrings: false 30 | AlwaysBreakTemplateDeclarations: Yes 31 | BinPackArguments: true 32 | BinPackParameters: true 33 | 
BreakBeforeBraces: Allman 34 | BraceWrapping: 35 | SplitEmptyFunction: true 36 | SplitEmptyRecord: true 37 | SplitEmptyNamespace: true 38 | BreakBeforeBinaryOperators: None 39 | BreakBeforeInheritanceComma: false 40 | BreakInheritanceList: BeforeColon 41 | BreakBeforeTernaryOperators: true 42 | BreakConstructorInitializers: BeforeColon 43 | BreakAfterJavaFieldAnnotations: false 44 | BreakStringLiterals: false 45 | CommentPragmas: '^ IWYU pragma:' 46 | CompactNamespaces: false 47 | ConstructorInitializerAllOnOneLineOrOnePerLine: false 48 | Cpp11BracedListStyle: false 49 | DerivePointerAlignment: false 50 | DisableFormat: false 51 | ExperimentalAutoDetectBinPacking: false 52 | FixNamespaceComments: true 53 | ForEachMacros: 54 | - 'udev_list_entry_foreach' 55 | IncludeBlocks: Preserve 56 | IncludeCategories: 57 | - Regex: '.*' 58 | Priority: 1 59 | IncludeIsMainRegex: '(_test)?$' 60 | IndentCaseLabels: false 61 | IndentPPDirectives: None 62 | IndentWrappedFunctionNames: false 63 | JavaScriptQuotes: Leave 64 | JavaScriptWrapImports: true 65 | KeepEmptyLinesAtTheStartOfBlocks: false 66 | MacroBlockBegin: '' 67 | MacroBlockEnd: '' 68 | MaxEmptyLinesToKeep: 1 69 | NamespaceIndentation: None 70 | ObjCBinPackProtocolList: Auto 71 | ObjCBlockIndentWidth: 8 72 | ObjCSpaceAfterProperty: true 73 | ObjCSpaceBeforeProtocolList: true 74 | 75 | # Taken from git's rules 76 | PenaltyBreakAssignment: 10 77 | PenaltyBreakBeforeFirstCallParameter: 30 78 | PenaltyBreakComment: 10 79 | PenaltyBreakFirstLessLess: 0 80 | PenaltyBreakString: 10 81 | PenaltyBreakTemplateDeclaration: 10 82 | PenaltyExcessCharacter: 100 83 | PenaltyReturnTypeOnItsOwnLine: 60 84 | 85 | PointerAlignment: Right 86 | ReflowComments: false 87 | SortIncludes: true 88 | SortUsingDeclarations: true 89 | SpaceAfterCStyleCast: false 90 | SpaceAfterTemplateKeyword: true 91 | SpaceAfterLogicalNot: false 92 | #SpaceBeforeCaseColon: false 93 | SpaceBeforeAssignmentOperators: true 94 | SpaceBeforeCpp11BracedList: true 95 | 
SpaceBeforeCtorInitializerColon: true 96 | SpaceBeforeInheritanceColon: true 97 | SpaceBeforeParens: ControlStatements 98 | SpaceBeforeRangeBasedForLoopColon: true 99 | SpaceInEmptyParentheses: false 100 | SpacesBeforeTrailingComments: 1 101 | SpacesInAngles: false 102 | SpacesInContainerLiterals: false 103 | SpacesInCStyleCastParentheses: false 104 | SpacesInParentheses: false 105 | SpacesInSquareBrackets: false 106 | IndentWidth: 4 107 | TabWidth: 4 108 | UseTab: Always 109 | ConstructorInitializerIndentWidth: 4 110 | ContinuationIndentWidth: 4 111 | ColumnLimit: 120 112 | ... 113 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Bug report** 14 | Please use the ``camera-bug-report`` tool to create a bug report, and upload it here. 15 | 16 | The bug report tool uses the following syntax: 17 | 18 | ``` 19 | camera-bug-report -t -o -c "" 20 | ``` 21 | 22 | For example, 23 | 24 | ``` 25 | camera-bug-report -t 5 -o bug.txt -c "libcamera-still -t 1000 -o test.jpg" 26 | ``` 27 | will attempt to run libcamera-still and timeout after 5 seconds if the application has stalled. The script will generate a ``bug.txt`` file that captures all the output logs and system state to help us debug the issue. 28 | 29 | You can also run without a timeout: 30 | 31 | ``` 32 | camera-bug-report -o bug.txt -c "libcamera-vid -t 0 -o test.264" 33 | ``` 34 | This will run ``libcamera-vid`` indefinitely until either you press ``Ctrl+C`` or the application terminates, after which the necessary output logs and system state will be captured. 
35 | 36 | If you cannot run your application through the ``camera-bug-report`` tool, run it without the ``-c`` command line argument **after running the camera application.** In these cases, please also provide the command line used to run the application, as well as any output generated during the run. 37 | 38 | **Additional context** 39 | Add any other context about the problem here. 40 | -------------------------------------------------------------------------------- /.github/workflows/libcamera-apps-style-checker.yml: -------------------------------------------------------------------------------- 1 | name: libcamera-apps style checker 2 | on: 3 | pull_request: 4 | branches: [ main ] 5 | 6 | jobs: 7 | style-check: 8 | 9 | runs-on: [ self-hosted ] 10 | 11 | steps: 12 | - uses: actions/checkout@v3 13 | with: 14 | fetch-depth: 0 15 | clean: true 16 | 17 | - name: Check style 18 | run: ${{github.workspace}}/utils/checkstyle.py $(git log --format=%P -1 | awk '{print $1 ".." $2}') 19 | -------------------------------------------------------------------------------- /.github/workflows/libcamera-apps-test.yml: -------------------------------------------------------------------------------- 1 | name: libcamera-app smoke tests 2 | 3 | on: 4 | pull_request: 5 | branches: [ main ] 6 | 7 | env: 8 | NUM_JOBS: 4 9 | GCC_COMPILER: "-DCMAKE_C_COMPILER=/usr/bin/gcc -DCMAKE_CXX_COMPILER=/usr/bin/g++" 10 | CLANG_COMPILER: "-DCMAKE_C_COMPILER=/usr/bin/clang -DCMAKE_CXX_COMPILER=/usr/bin/clang++" 11 | CMAKE_OTHER_FLAGS: "-DENABLE_TFLITE=1" 12 | LIBCAMERA_LKG_DIR: "${{github.workspace}}/../libcamera_lkg" 13 | 14 | jobs: 15 | build-test: 16 | 17 | runs-on: [ self-hosted ] 18 | 19 | strategy: 20 | matrix: 21 | compiler: [ gcc, clang ] 22 | build_type: [ Release, Debug ] 23 | 24 | steps: 25 | - uses: actions/checkout@v3 26 | with: 27 | fetch-depth: 1 28 | clean: true 29 | 30 | - name: Configure CMake 31 | env: 32 | COMPILER: "${{ matrix.compiler == 'gcc' && env.GCC_COMPILER || 
env.CLANG_COMPILER }}" 33 | run: cmake -B ${{github.workspace}}/build -DPKG_CONFIG_USE_CMAKE_PREFIX_PATH=TRUE -DCMAKE_PREFIX_PATH=${{env.LIBCAMERA_LKG_DIR}} -DCMAKE_BUILD_TYPE=${{matrix.build_type}} ${{env.COMPILER}} ${{env.CMAKE_OTHER_FLAGS}} 34 | timeout-minutes: 5 35 | 36 | - name: Build 37 | run: make -C ${{github.workspace}}/build -j ${{env.NUM_JOBS}} 38 | timeout-minutes: 10 39 | 40 | - name: Tar files 41 | run: tar -cvf build-artifacts-${{matrix.compiler}}-${{matrix.build_type}}.tar -C ${{github.workspace}}/build . 42 | 43 | - name: Upload build files 44 | uses: actions/upload-artifact@v3 45 | with: 46 | name: build-artifacts-${{matrix.compiler}}-${{matrix.build_type}} 47 | path: build-artifacts-${{matrix.compiler}}-${{matrix.build_type}}.tar 48 | retention-days: 21 49 | 50 | build-test-lite: 51 | 52 | runs-on: [ self-hosted ] 53 | 54 | steps: 55 | - uses: actions/checkout@v3 56 | with: 57 | fetch-depth: 1 58 | clean: true 59 | 60 | - name: Configure CMake 61 | run: cmake -B ${{github.workspace}}/build -DPKG_CONFIG_USE_CMAKE_PREFIX_PATH=TRUE -DCMAKE_PREFIX_PATH=${{env.LIBCAMERA_LKG_DIR}} -DCMAKE_BUILD_TYPE=Release ${{env.GCC_COMPILER}} ${{env.CMAKE_OTHER_FLAGS}} -DENABLE_DRM=0 -DENABLE_X11=0 -DENABLE_QT=0 -DENABLE_OPENCV=0 -DENABLE_TFLITE=0 62 | timeout-minutes: 5 63 | 64 | - name: Build 65 | run: make -C ${{github.workspace}}/build -j ${{env.NUM_JOBS}} 66 | timeout-minutes: 10 67 | 68 | - name: Tar files 69 | run: tar -cvf build-artifacts-gcc-lite.tar -C ${{github.workspace}}/build . 
70 | 71 | - name: Upload build files 72 | uses: actions/upload-artifact@v3 73 | with: 74 | name: build-artifacts-gcc-lite 75 | path: build-artifacts-gcc-lite.tar 76 | retention-days: 21 77 | 78 | run-test: 79 | 80 | runs-on: ${{matrix.camera}} 81 | needs: build-test 82 | 83 | strategy: 84 | matrix: 85 | camera: [ imx219, imx477, imx708 ] 86 | 87 | steps: 88 | - uses: actions/checkout@v3 89 | with: 90 | fetch-depth: 1 91 | clean: true 92 | 93 | - name: Create test output dir 94 | run: mkdir -p ${{github.workspace}}/test_output 95 | 96 | - name: Download build 97 | uses: actions/download-artifact@v3 98 | with: 99 | name: build-artifacts-gcc-Release 100 | path: ${{github.workspace}} 101 | 102 | - name: Untar files 103 | run: tar -xvf build-artifacts-gcc-Release.tar --one-top-level=build 104 | 105 | - name: Print version string 106 | run: ${{github.workspace}}/build/libcamera-hello --version 107 | 108 | - name: Print linkage info 109 | run: ldd ${{github.workspace}}/build/libcamera-hello | grep libcamera 110 | 111 | - name: Test 112 | run: ${{github.workspace}}/utils/camera-bug-report -o ${{github.workspace}}/test_output/bug.txt -t 300 -c "${{github.workspace}}/utils/test.py --exe-dir ${{github.workspace}}/build --output-dir ${{github.workspace}}/test_output --json-dir ${{github.workspace}}/assets" 113 | timeout-minutes: 8 # Must be larger than the camera-bug-report timeout. 
114 | 115 | - name: Upload test output 116 | if: ${{ failure() }} 117 | uses: actions/upload-artifact@v3 118 | with: 119 | name: test-artifacts-${{matrix.camera}} 120 | path: ${{github.workspace}}/test_output/ 121 | retention-days: 21 122 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | *~ 3 | build*/ 4 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | project(libcamera-apps VERSION 1.1.0) 4 | 5 | if (NOT EXISTS ${CMAKE_BINARY_DIR}/CMakeCache.txt) 6 | if (NOT CMAKE_BUILD_TYPE) 7 | set(CMAKE_BUILD_TYPE "Release" CACHE STRING "" FORCE) 8 | message(STATUS "No previous build - default to Release build") 9 | endif() 10 | endif() 11 | 12 | set (CMAKE_EXPORT_COMPILE_COMMANDS ON) 13 | set (CMAKE_CXX_STANDARD 17) 14 | add_compile_options(-Wall -Wextra -pedantic -Wno-unused-parameter -faligned-new) 15 | add_definitions(-D_FILE_OFFSET_BITS=64) 16 | 17 | if (CMAKE_COMPILER_IS_GNUCXX) 18 | add_compile_options(-Wno-psabi) 19 | endif() 20 | 21 | option(BUILD_SHARED_LIBS "Build using shared libraries" ON) 22 | 23 | IF (NOT ENABLE_COMPILE_FLAGS_FOR_TARGET) 24 | # On a Pi this will give us armhf or arm64. 25 | execute_process(COMMAND dpkg-architecture -qDEB_HOST_ARCH 26 | OUTPUT_VARIABLE ENABLE_COMPILE_FLAGS_FOR_TARGET OUTPUT_STRIP_TRAILING_WHITESPACE) 27 | endif() 28 | message(STATUS "Platform: ${ENABLE_COMPILE_FLAGS_FOR_TARGET}") 29 | if ("${ENABLE_COMPILE_FLAGS_FOR_TARGET}" STREQUAL "arm64") 30 | # 64-bit binaries can be fully optimised. 31 | add_definitions(-ftree-vectorize) 32 | elseif ("${ENABLE_COMPILE_FLAGS_FOR_TARGET}" STREQUAL "armv8-neon") 33 | # Only build with 32-bit Pi 3/4 specific optimisations if requested on the command line. 
34 | add_definitions(-mfpu=neon-fp-armv8 -ftree-vectorize) 35 | endif() 36 | 37 | # Source package generation setup. 38 | set(CPACK_GENERATOR "TXZ") 39 | set(CPACK_PACKAGE_FILE_NAME "libcamera-apps-build") 40 | set(CPACK_SOURCE_GENERATOR "TXZ") 41 | set(CPACK_INSTALL_SCRIPTS ${CMAKE_SOURCE_DIR}/package.cmake) 42 | set(CPACK_SOURCE_PACKAGE_FILE_NAME "libcamera-apps-src") 43 | set(CPACK_SOURCE_IGNORE_FILES "/\.git*;/build;") 44 | include(CPack) 45 | 46 | find_package(PkgConfig REQUIRED) 47 | 48 | pkg_check_modules(LIBCAMERA REQUIRED libcamera) 49 | message(STATUS "libcamera library found:") 50 | message(STATUS " version: ${LIBCAMERA_VERSION}") 51 | message(STATUS " libraries: ${LIBCAMERA_LINK_LIBRARIES}") 52 | message(STATUS " include path: ${LIBCAMERA_INCLUDE_DIRS}") 53 | include_directories(${CMAKE_SOURCE_DIR} ${LIBCAMERA_INCLUDE_DIRS}) 54 | 55 | add_subdirectory(cinepi) 56 | add_subdirectory(core) 57 | add_subdirectory(encoder) 58 | # add_subdirectory(image) 59 | # add_subdirectory(output) 60 | add_subdirectory(preview) 61 | # add_subdirectory(post_processing_stages) 62 | # add_subdirectory(apps) 63 | # add_subdirectory(utils) 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![cp_raw_banner](https://github.com/cinepi/cinepi-raw/assets/25234407/71591abc-f9b2-467e-806f-30557bcd1491) 2 | 3 | *fork of rpicam-apps that builds upon the rpicam-raw app, offering cinema dng recording capabillities and integration with REDIS offering an abstract "API" like layer for custom integrations / controls.* 4 | 5 | Requirements 6 | ----- 7 | Please install the below requirements before continuing with the rest of the build process: 8 | 9 | [Redis](https://github.com/redis/redis) 10 | 11 | [Hiredis](https://github.com/redis/hiredis) 12 | 13 | [Redis++](https://github.com/sewenew/redis-plus-plus) 14 | 15 | Build 16 | ----- 17 | For usage and build 
instructions, see the official Raspberry Pi documenation pages [here.](https://www.raspberrypi.com/documentation/computers/camera_software.html#building-libcamera-and-libcamera-apps) 18 | 19 | License 20 | ------- 21 | 22 | The source code is made available under the simplified [BSD 2-Clause license](https://spdx.org/licenses/BSD-2-Clause.html). 23 | 24 | -------------------------------------------------------------------------------- /apps/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | project(libcamera-still) 4 | add_executable(libcamera-still libcamera_still.cpp) 5 | target_link_libraries(libcamera-still libcamera_app outputs images) 6 | 7 | project(libcamera-vid) 8 | add_executable(libcamera-vid libcamera_vid.cpp) 9 | target_link_libraries(libcamera-vid libcamera_app encoders outputs) 10 | 11 | project(libcamera-hello) 12 | add_executable(libcamera-hello libcamera_hello.cpp) 13 | target_link_libraries(libcamera-hello libcamera_app) 14 | 15 | project(libcamera-raw) 16 | add_executable(libcamera-raw libcamera_raw.cpp) 17 | target_link_libraries(libcamera-raw libcamera_app encoders outputs) 18 | 19 | project(libcamera-jpeg) 20 | add_executable(libcamera-jpeg libcamera_jpeg.cpp) 21 | target_link_libraries(libcamera-jpeg libcamera_app images) 22 | 23 | set(EXECUTABLES libcamera-still libcamera-vid libcamera-hello libcamera-raw libcamera-jpeg) 24 | 25 | if (ENABLE_TFLITE) 26 | project(libcamera-detect) 27 | add_executable(libcamera-detect libcamera_detect.cpp) 28 | target_link_libraries(libcamera-detect libcamera_app images) 29 | set(EXECUTABLES ${EXECUTABLES} libcamera-detect) 30 | endif() 31 | 32 | set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}) 33 | install(TARGETS ${EXECUTABLES} RUNTIME DESTINATION bin) 34 | -------------------------------------------------------------------------------- /apps/libcamera_detect.cpp: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_detect.cpp - take pictures when objects are detected 6 | */ 7 | 8 | // Example: libcamera-detect --post-process-file object_detect_tf.json --lores-width 400 --lores-height 300 -t 0 --object cat -o cat%03d.jpg 9 | 10 | #include 11 | 12 | #include "core/libcamera_app.hpp" 13 | #include "core/still_options.hpp" 14 | 15 | #include "image/image.hpp" 16 | 17 | #include "post_processing_stages/object_detect.hpp" 18 | 19 | struct DetectOptions : public StillOptions 20 | { 21 | DetectOptions() : StillOptions() 22 | { 23 | using namespace boost::program_options; 24 | options_.add_options() 25 | ("object", value(&object), "Name of object to detect") 26 | ("gap", value(&gap)->default_value(30), "Smallest gap between captures in frames") 27 | ("timeformat", value(&timeformat)->default_value("%m%d%H%M%S"), "Date/Time format string - see C++ strftime()") 28 | ; 29 | } 30 | 31 | std::string object; 32 | unsigned int gap; 33 | std::string timeformat; 34 | 35 | virtual void Print() const override 36 | { 37 | StillOptions::Print(); 38 | std::cerr << " object: " << object << std::endl; 39 | std::cerr << " gap: " << gap << std::endl; 40 | std::cerr << " timeformat: " << timeformat << std::endl; 41 | } 42 | }; 43 | 44 | class LibcameraDetectApp : public LibcameraApp 45 | { 46 | public: 47 | LibcameraDetectApp() : LibcameraApp(std::make_unique()) {} 48 | DetectOptions *GetOptions() const { return static_cast(options_.get()); } 49 | }; 50 | 51 | // The main even loop for the application. 
52 | 53 | static void event_loop(LibcameraDetectApp &app) 54 | { 55 | DetectOptions *options = app.GetOptions(); 56 | app.OpenCamera(); 57 | app.ConfigureViewfinder(); 58 | app.StartCamera(); 59 | auto start_time = std::chrono::high_resolution_clock::now(); 60 | unsigned int last_capture_frame = 0; 61 | 62 | for (unsigned int count = 0;; count++) 63 | { 64 | LibcameraApp::Msg msg = app.Wait(); 65 | if (msg.type == LibcameraApp::MsgType::Timeout) 66 | { 67 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 68 | app.StopCamera(); 69 | app.StartCamera(); 70 | continue; 71 | } 72 | if (msg.type == LibcameraApp::MsgType::Quit) 73 | return; 74 | 75 | // In viewfinder mode, simply run until the timeout, but do a capture if the object 76 | // we're looking for is detected. 77 | CompletedRequestPtr &completed_request = std::get(msg.payload); 78 | if (app.ViewfinderStream()) 79 | { 80 | auto now = std::chrono::high_resolution_clock::now(); 81 | if (options->timeout && now - start_time > std::chrono::milliseconds(options->timeout)) 82 | return; 83 | 84 | std::vector detections; 85 | bool detected = completed_request->sequence - last_capture_frame >= options->gap && 86 | completed_request->post_process_metadata.Get("object_detect.results", detections) == 0 && 87 | std::find_if(detections.begin(), detections.end(), [options](const Detection &d) { 88 | return d.name.find(options->object) != std::string::npos; 89 | }) != detections.end(); 90 | 91 | app.ShowPreview(completed_request, app.ViewfinderStream()); 92 | 93 | if (detected) 94 | { 95 | app.StopCamera(); 96 | app.Teardown(); 97 | app.ConfigureStill(); 98 | app.StartCamera(); 99 | LOG(1, options->object << " detected"); 100 | } 101 | } 102 | // In still capture mode, save a jpeg and go back to preview. 
103 | else if (app.StillStream()) 104 | { 105 | app.StopCamera(); 106 | last_capture_frame = completed_request->sequence; 107 | 108 | StreamInfo info; 109 | libcamera::Stream *stream = app.StillStream(&info); 110 | const std::vector> mem = app.Mmap(completed_request->buffers[stream]); 111 | 112 | // Generate a filename for the output and save it. 113 | char filename[128]; 114 | if (options->datetime) 115 | { 116 | std::time_t raw_time; 117 | std::time(&raw_time); 118 | char time_string[32]; 119 | std::tm *time_info = std::localtime(&raw_time); 120 | std::strftime(time_string, sizeof(time_string), options->timeformat.c_str() , time_info); 121 | snprintf(filename, sizeof(filename), "%s%s.%s", options->output.c_str(), time_string, options->encoding.c_str()); 122 | } 123 | else if (options->timestamp) 124 | snprintf(filename, sizeof(filename), "%s%u.%s", options->output.c_str(), (unsigned)time(NULL), options->encoding.c_str()); 125 | else 126 | snprintf(filename, sizeof(filename), options->output.c_str(), options->framestart); 127 | filename[sizeof(filename) - 1] = 0; 128 | options->framestart++; 129 | LOG(1, "Save image " << filename); 130 | jpeg_save(mem, info, completed_request->metadata, std::string(filename), app.CameraModel(), options); 131 | 132 | // Restart camera in preview mode. 
133 | app.Teardown(); 134 | app.ConfigureViewfinder(); 135 | app.StartCamera(); 136 | } 137 | } 138 | } 139 | 140 | int main(int argc, char *argv[]) 141 | { 142 | try 143 | { 144 | LibcameraDetectApp app; 145 | DetectOptions *options = app.GetOptions(); 146 | if (options->Parse(argc, argv)) 147 | { 148 | if (options->verbose >= 2) 149 | options->Print(); 150 | if (options->output.empty()) 151 | throw std::runtime_error("output file name required"); 152 | 153 | event_loop(app); 154 | } 155 | } 156 | catch (std::exception const &e) 157 | { 158 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 159 | return -1; 160 | } 161 | return 0; 162 | } 163 | -------------------------------------------------------------------------------- /apps/libcamera_hello.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_hello.cpp - libcamera "hello world" app. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_app.hpp" 11 | #include "core/options.hpp" 12 | 13 | using namespace std::placeholders; 14 | 15 | // The main event loop for the application. 
16 | 17 | static void event_loop(LibcameraApp &app) 18 | { 19 | Options const *options = app.GetOptions(); 20 | 21 | app.OpenCamera(); 22 | app.ConfigureViewfinder(); 23 | app.StartCamera(); 24 | 25 | auto start_time = std::chrono::high_resolution_clock::now(); 26 | 27 | for (unsigned int count = 0; ; count++) 28 | { 29 | LibcameraApp::Msg msg = app.Wait(); 30 | if (msg.type == LibcameraApp::MsgType::Timeout) 31 | { 32 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 33 | app.StopCamera(); 34 | app.StartCamera(); 35 | continue; 36 | } 37 | if (msg.type == LibcameraApp::MsgType::Quit) 38 | return; 39 | else if (msg.type != LibcameraApp::MsgType::RequestComplete) 40 | throw std::runtime_error("unrecognised message!"); 41 | 42 | LOG(2, "Viewfinder frame " << count); 43 | auto now = std::chrono::high_resolution_clock::now(); 44 | if (options->timeout && now - start_time > std::chrono::milliseconds(options->timeout)) 45 | return; 46 | 47 | CompletedRequestPtr &completed_request = std::get(msg.payload); 48 | app.ShowPreview(completed_request, app.ViewfinderStream()); 49 | } 50 | } 51 | 52 | int main(int argc, char *argv[]) 53 | { 54 | try 55 | { 56 | LibcameraApp app; 57 | Options *options = app.GetOptions(); 58 | if (options->Parse(argc, argv)) 59 | { 60 | if (options->verbose >= 2) 61 | options->Print(); 62 | 63 | event_loop(app); 64 | } 65 | } 66 | catch (std::exception const &e) 67 | { 68 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 69 | return -1; 70 | } 71 | return 0; 72 | } 73 | -------------------------------------------------------------------------------- /apps/libcamera_jpeg.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_jpeg.cpp - minimal libcamera jpeg capture app. 
6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_app.hpp" 11 | #include "core/still_options.hpp" 12 | 13 | #include "image/image.hpp" 14 | 15 | using namespace std::placeholders; 16 | using libcamera::Stream; 17 | 18 | class LibcameraJpegApp : public LibcameraApp 19 | { 20 | public: 21 | LibcameraJpegApp() 22 | : LibcameraApp(std::make_unique()) 23 | { 24 | } 25 | 26 | StillOptions *GetOptions() const 27 | { 28 | return static_cast(options_.get()); 29 | } 30 | }; 31 | 32 | // The main even loop for the application. 33 | 34 | static void event_loop(LibcameraJpegApp &app) 35 | { 36 | StillOptions const *options = app.GetOptions(); 37 | app.OpenCamera(); 38 | app.ConfigureViewfinder(); 39 | app.StartCamera(); 40 | auto start_time = std::chrono::high_resolution_clock::now(); 41 | 42 | for (;;) 43 | { 44 | LibcameraApp::Msg msg = app.Wait(); 45 | if (msg.type == LibcameraApp::MsgType::Timeout) 46 | { 47 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 48 | app.StopCamera(); 49 | app.StartCamera(); 50 | continue; 51 | } 52 | if (msg.type == LibcameraApp::MsgType::Quit) 53 | return; 54 | else if (msg.type != LibcameraApp::MsgType::RequestComplete) 55 | throw std::runtime_error("unrecognised message!"); 56 | 57 | // In viewfinder mode, simply run until the timeout. When that happens, switch to 58 | // capture mode. 59 | if (app.ViewfinderStream()) 60 | { 61 | auto now = std::chrono::high_resolution_clock::now(); 62 | if (options->timeout && now - start_time > std::chrono::milliseconds(options->timeout)) 63 | { 64 | app.StopCamera(); 65 | app.Teardown(); 66 | app.ConfigureStill(); 67 | app.StartCamera(); 68 | } 69 | else 70 | { 71 | CompletedRequestPtr &completed_request = std::get(msg.payload); 72 | app.ShowPreview(completed_request, app.ViewfinderStream()); 73 | } 74 | } 75 | // In still capture mode, save a jpeg and quit. 
76 | else if (app.StillStream()) 77 | { 78 | app.StopCamera(); 79 | LOG(1, "Still capture image received"); 80 | 81 | Stream *stream = app.StillStream(); 82 | StreamInfo info = app.GetStreamInfo(stream); 83 | CompletedRequestPtr &payload = std::get(msg.payload); 84 | const std::vector> mem = app.Mmap(payload->buffers[stream]); 85 | jpeg_save(mem, info, payload->metadata, options->output, app.CameraModel(), options); 86 | return; 87 | } 88 | } 89 | } 90 | 91 | int main(int argc, char *argv[]) 92 | { 93 | try 94 | { 95 | LibcameraJpegApp app; 96 | StillOptions *options = app.GetOptions(); 97 | if (options->Parse(argc, argv)) 98 | { 99 | if (options->verbose >= 2) 100 | options->Print(); 101 | if (options->output.empty()) 102 | throw std::runtime_error("output file name required"); 103 | 104 | event_loop(app); 105 | } 106 | } 107 | catch (std::exception const &e) 108 | { 109 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 110 | return -1; 111 | } 112 | return 0; 113 | } 114 | -------------------------------------------------------------------------------- /apps/libcamera_raw.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_raw.cpp - libcamera raw video record app. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_encoder.hpp" 11 | #include "encoder/null_encoder.hpp" 12 | #include "output/output.hpp" 13 | 14 | using namespace std::placeholders; 15 | 16 | class LibcameraRaw : public LibcameraEncoder 17 | { 18 | public: 19 | LibcameraRaw() : LibcameraEncoder() {} 20 | 21 | protected: 22 | // Force the use of "null" encoder. 23 | void createEncoder() { encoder_ = std::unique_ptr(new NullEncoder(GetOptions())); } 24 | }; 25 | 26 | // The main even loop for the application. 
27 | 28 | static void event_loop(LibcameraRaw &app) 29 | { 30 | VideoOptions const *options = app.GetOptions(); 31 | std::unique_ptr output = std::unique_ptr(Output::Create(options)); 32 | app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4)); 33 | app.SetMetadataReadyCallback(std::bind(&Output::MetadataReady, output.get(), _1)); 34 | 35 | app.OpenCamera(); 36 | app.ConfigureVideo(LibcameraRaw::FLAG_VIDEO_RAW); 37 | app.StartEncoder(); 38 | app.StartCamera(); 39 | auto start_time = std::chrono::high_resolution_clock::now(); 40 | 41 | for (unsigned int count = 0; ; count++) 42 | { 43 | LibcameraRaw::Msg msg = app.Wait(); 44 | 45 | if (msg.type == LibcameraApp::MsgType::Timeout) 46 | { 47 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 48 | app.StopCamera(); 49 | app.StartCamera(); 50 | continue; 51 | } 52 | if (msg.type != LibcameraRaw::MsgType::RequestComplete) 53 | throw std::runtime_error("unrecognised message!"); 54 | if (count == 0) 55 | { 56 | libcamera::StreamConfiguration const &cfg = app.RawStream()->configuration(); 57 | LOG(1, "Raw stream: " << cfg.size.width << "x" << cfg.size.height << " stride " << cfg.stride << " format " 58 | << cfg.pixelFormat.toString()); 59 | } 60 | 61 | LOG(2, "Viewfinder frame " << count); 62 | auto now = std::chrono::high_resolution_clock::now(); 63 | if (options->timeout && now - start_time > std::chrono::milliseconds(options->timeout)) 64 | { 65 | app.StopCamera(); 66 | app.StopEncoder(); 67 | return; 68 | } 69 | 70 | app.EncodeBuffer(std::get(msg.payload), app.RawStream()); 71 | } 72 | } 73 | 74 | int main(int argc, char *argv[]) 75 | { 76 | try 77 | { 78 | LibcameraRaw app; 79 | VideoOptions *options = app.GetOptions(); 80 | if (options->Parse(argc, argv)) 81 | { 82 | options->denoise = "cdn_off"; 83 | options->nopreview = true; 84 | if (options->verbose >= 2) 85 | options->Print(); 86 | 87 | event_loop(app); 88 | } 89 | } 90 | catch (std::exception const &e) 
91 | { 92 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 93 | return -1; 94 | } 95 | return 0; 96 | } 97 | -------------------------------------------------------------------------------- /apps/libcamera_vid.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_vid.cpp - libcamera video record app. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "core/libcamera_encoder.hpp" 15 | #include "output/output.hpp" 16 | 17 | using namespace std::placeholders; 18 | 19 | // Some keypress/signal handling. 20 | 21 | static int signal_received; 22 | static void default_signal_handler(int signal_number) 23 | { 24 | signal_received = signal_number; 25 | LOG(1, "Received signal " << signal_number); 26 | } 27 | 28 | static int get_key_or_signal(VideoOptions const *options, pollfd p[1]) 29 | { 30 | int key = 0; 31 | if (signal_received == SIGINT) 32 | return 'x'; 33 | if (options->keypress) 34 | { 35 | poll(p, 1, 0); 36 | if (p[0].revents & POLLIN) 37 | { 38 | char *user_string = nullptr; 39 | size_t len; 40 | [[maybe_unused]] size_t r = getline(&user_string, &len, stdin); 41 | key = user_string[0]; 42 | } 43 | } 44 | if (options->signal) 45 | { 46 | if (signal_received == SIGUSR1) 47 | key = '\n'; 48 | else if (signal_received == SIGUSR2) 49 | key = 'x'; 50 | signal_received = 0; 51 | } 52 | return key; 53 | } 54 | 55 | static int get_colourspace_flags(std::string const &codec) 56 | { 57 | if (codec == "mjpeg" || codec == "yuv420") 58 | return LibcameraEncoder::FLAG_VIDEO_JPEG_COLOURSPACE; 59 | else 60 | return LibcameraEncoder::FLAG_VIDEO_NONE; 61 | } 62 | 63 | // The main even loop for the application. 
64 | 65 | static void event_loop(LibcameraEncoder &app) 66 | { 67 | VideoOptions const *options = app.GetOptions(); 68 | std::unique_ptr output = std::unique_ptr(Output::Create(options)); 69 | app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4)); 70 | app.SetMetadataReadyCallback(std::bind(&Output::MetadataReady, output.get(), _1)); 71 | 72 | app.OpenCamera(); 73 | app.ConfigureVideo(get_colourspace_flags(options->codec)); 74 | app.StartEncoder(); 75 | app.StartCamera(); 76 | auto start_time = std::chrono::high_resolution_clock::now(); 77 | 78 | // Monitoring for keypresses and signals. 79 | signal(SIGUSR1, default_signal_handler); 80 | signal(SIGUSR2, default_signal_handler); 81 | signal(SIGINT, default_signal_handler); 82 | pollfd p[1] = { { STDIN_FILENO, POLLIN, 0 } }; 83 | 84 | for (unsigned int count = 0; ; count++) 85 | { 86 | LibcameraEncoder::Msg msg = app.Wait(); 87 | if (msg.type == LibcameraApp::MsgType::Timeout) 88 | { 89 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 90 | app.StopCamera(); 91 | app.StartCamera(); 92 | continue; 93 | } 94 | if (msg.type == LibcameraEncoder::MsgType::Quit) 95 | return; 96 | else if (msg.type != LibcameraEncoder::MsgType::RequestComplete) 97 | throw std::runtime_error("unrecognised message!"); 98 | int key = get_key_or_signal(options, p); 99 | if (key == '\n') 100 | output->Signal(); 101 | 102 | LOG(2, "Viewfinder frame " << count); 103 | auto now = std::chrono::high_resolution_clock::now(); 104 | bool timeout = !options->frames && options->timeout && 105 | (now - start_time > std::chrono::milliseconds(options->timeout)); 106 | bool frameout = options->frames && count >= options->frames; 107 | if (timeout || frameout || key == 'x' || key == 'X') 108 | { 109 | if (timeout) 110 | LOG(1, "Halting: reached timeout of " << options->timeout << " milliseconds."); 111 | app.StopCamera(); // stop complains if encoder very slow to close 112 | app.StopEncoder(); 113 
| return; 114 | } 115 | 116 | CompletedRequestPtr &completed_request = std::get(msg.payload); 117 | app.EncodeBuffer(completed_request, app.VideoStream()); 118 | app.ShowPreview(completed_request, app.VideoStream()); 119 | } 120 | } 121 | 122 | int main(int argc, char *argv[]) 123 | { 124 | try 125 | { 126 | LibcameraEncoder app; 127 | VideoOptions *options = app.GetOptions(); 128 | if (options->Parse(argc, argv)) 129 | { 130 | if (options->verbose >= 2) 131 | options->Print(); 132 | 133 | event_loop(app); 134 | } 135 | } 136 | catch (std::exception const &e) 137 | { 138 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 139 | return -1; 140 | } 141 | return 0; 142 | } 143 | -------------------------------------------------------------------------------- /assets/annotate_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotate_cv" : 3 | { 4 | "text" : "Frame %frame exp %exp ag %ag dg %dg", 5 | "fg" : 255, 6 | "bg" : 0, 7 | "scale" : 1.0, 8 | "thickness" : 2, 9 | "alpha" : 0.3 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /assets/drc.json: -------------------------------------------------------------------------------- 1 | { 2 | "hdr" : 3 | { 4 | "num_frames" : 1, 5 | "lp_filter_strength" : 0.2, 6 | "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ], 7 | "global_tonemap_points" : 8 | [ 9 | { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 1.5, "max_down": 0.7 }, 10 | { "q": 0.5, "width": 0.05, "target": 0.5, "max_up": 1.5, "max_down": 0.7 }, 11 | { "q": 0.8, "width": 0.05, "target": 0.8, "max_up": 1.5, "max_down": 0.7 } 12 | ], 13 | "global_tonemap_strength" : 1.0, 14 | "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ], 15 | "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ], 16 | "local_tonemap_strength" : 1.0, 17 | "local_colour_scale" : 0.9 18 | } 19 | } 20 | 
-------------------------------------------------------------------------------- /assets/face_detect_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "face_detect_cv": 3 | { 4 | "cascade_name" : "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml", 5 | "scaling_factor" : 1.1, 6 | "min_neighbors" : 2, 7 | "min_size" : 32, 8 | "max_size" : 256, 9 | "refresh_rate" : 1, 10 | "draw_features" : 1 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /assets/hdr.json: -------------------------------------------------------------------------------- 1 | { 2 | "hdr" : 3 | { 4 | "num_frames" : 8, 5 | "lp_filter_strength" : 0.2, 6 | "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ], 7 | "global_tonemap_points" : 8 | [ 9 | { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 5.0, "max_down": 0.5 }, 10 | { "q": 0.5, "width": 0.05, "target": 0.45, "max_up": 5.0, "max_down": 0.5 }, 11 | { "q": 0.8, "width": 0.05, "target": 0.7, "max_up": 5.0, "max_down": 0.5 } 12 | ], 13 | "global_tonemap_strength" : 1.0, 14 | "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ], 15 | "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ], 16 | "local_tonemap_strength" : 1.0, 17 | "local_colour_scale" : 0.8 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /assets/motion_detect.json: -------------------------------------------------------------------------------- 1 | { 2 | "motion_detect" : 3 | { 4 | "roi_x" : 0.1, 5 | "roi_y" : 0.1, 6 | "roi_width" : 0.8, 7 | "roi_height" : 0.8, 8 | "difference_m" : 0.1, 9 | "difference_c" : 10, 10 | "region_threshold" : 0.005, 11 | "frame_period" : 5, 12 | "hskip" : 2, 13 | "vskip" : 2, 14 | "verbose" : 0 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /assets/negate.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "negate": 3 | { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /assets/object_classify_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "object_classify_tf": 3 | { 4 | "number_of_results" : 2, 5 | "number_of_threads" : 2, 6 | "refresh_rate" : 30, 7 | "threshold_high" : 0.1, 8 | "threshold_low" : 0.05, 9 | "model_file" : "/home/pi/models/mobilenet_v1_1.0_224_quant.tflite", 10 | "labels_file" : "/home/pi/models/labels.txt", 11 | "display_labels" : 1, 12 | "verbose" : 1 13 | }, 14 | "annotate_cv" : 15 | { 16 | "text" : "Frame %frame exp %exp ag %ag dg %dg", 17 | "fg" : 255, 18 | "bg" : 0, 19 | "scale" : 1.0, 20 | "thickness" : 2, 21 | "alpha" : 0.3 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /assets/object_detect_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "object_detect_tf": 3 | { 4 | "number_of_threads" : 2, 5 | "refresh_rate" : 10, 6 | "confidence_threshold" : 0.5, 7 | "overlap_threshold" : 0.5, 8 | "model_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/detect.tflite", 9 | "labels_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/labelmap.txt", 10 | "verbose" : 1 11 | }, 12 | "object_detect_draw_cv": 13 | { 14 | "line_thickness" : 2 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /assets/pose_estimation_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "pose_estimation_tf": 3 | { 4 | "refresh_rate" : 5, 5 | "model_file" : "/home/pi/models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite" 6 | }, 7 | "plot_pose_cv": 8 | { 9 | "confidence_threshold" : -0.5 10 | } 11 | } 12 | 
-------------------------------------------------------------------------------- /assets/segmentation_labels.txt: -------------------------------------------------------------------------------- 1 | background 2 | aeroplane 3 | bicycle 4 | bird 5 | boat 6 | bottle 7 | bus 8 | car 9 | cat 10 | chair 11 | cow 12 | diningtable 13 | dog 14 | horse 15 | motorbike 16 | person 17 | pottedplant 18 | sheep 19 | sofa 20 | train 21 | tv 22 | -------------------------------------------------------------------------------- /assets/segmentation_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "segmentation_tf": 3 | { 4 | "number_of_threads" : 2, 5 | "refresh_rate" : 10, 6 | "model_file" : "/home/pi/models/lite-model_deeplabv3_1_metadata_2.tflite", 7 | "labels_file" : "/home/pi/models/segmentation_labels.txt", 8 | "verbose" : 1 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /assets/sobel_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "sobel_cv": 3 | { 4 | "ksize":5 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /cinepi/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | find_library(TIFF_LIBRARY tiff REQUIRED) 4 | 5 | project(cinepi-raw) 6 | add_executable(cinepi-raw cinepi_raw.cpp dng_encoder.cpp cinepi_controller.cpp cinepi_state.cpp) 7 | target_link_libraries(cinepi-raw libcamera_app encoders outputs tiff) 8 | 9 | # <---------- set c++ standard -------------> 10 | # NOTE: you must build redis-plus-plus and your application code with the same standard. 
11 | set(CMAKE_CXX_STANDARD 17) 12 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 13 | 14 | # <------------ add hiredis dependency ---------------> 15 | find_path(HIREDIS_HEADER hiredis) 16 | target_include_directories(cinepi-raw PUBLIC ${HIREDIS_HEADER}) 17 | 18 | find_library(HIREDIS_LIB hiredis) 19 | target_link_libraries(cinepi-raw ${HIREDIS_LIB}) 20 | 21 | # <------------ add redis-plus-plus dependency --------------> 22 | # NOTE: this should be *sw* NOT *redis++* 23 | find_path(REDIS_PLUS_PLUS_HEADER sw) 24 | target_include_directories(cinepi-raw PUBLIC ${REDIS_PLUS_PLUS_HEADER}) 25 | 26 | find_library(REDIS_PLUS_PLUS_LIB redis++) 27 | target_link_libraries(cinepi-raw ${REDIS_PLUS_PLUS_LIB}) 28 | 29 | set(EXECUTABLES cinepi-raw) 30 | 31 | set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}) 32 | install(TARGETS ${EXECUTABLES} RUNTIME DESTINATION bin) 33 | -------------------------------------------------------------------------------- /cinepi/cinepi_controller.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "dng_encoder.hpp" 12 | #include "preview/preview.hpp" 13 | #include "core/logging.hpp" 14 | 15 | #include 16 | #include 17 | #include 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | #include "utils.hpp" 24 | 25 | #include "cinepi_frameinfo.hpp" 26 | #include "core/stream_info.hpp" 27 | 28 | #include "cinepi_recorder.hpp" 29 | #include "cinepi_state.hpp" 30 | #include "raw_options.hpp" 31 | #include 32 | 33 | #define CHANNEL_CONTROLS "cp_controls" 34 | #define CHANNEL_STATS "cp_stats" 35 | #define CHANNEL_HISTOGRAM "cp_histogram" 36 | 37 | #define REDIS_DEFAULT "redis://127.0.0.1:6379/0" 38 | 39 | using namespace sw::redis; 40 | 41 | class CinePIController : public CinePIState 42 | { 43 | public: 44 | CinePIController(CinePIRecorder *app) : CinePIState(), app_(app), options_(app->GetOptions()), 
45 | folderOpen(false), abortThread_(false), cameraInit_(true), cameraRunning(false), triggerStill_(0) {}; 46 | ~CinePIController() { 47 | abortThread_ = true; 48 | main_thread_.join(); 49 | }; 50 | 51 | void start(){ 52 | redis_ = new Redis(options_->redis.value_or(REDIS_DEFAULT)); 53 | LOG(2, redis_->ping()); 54 | main_thread_ = std::thread(std::bind(&CinePIController::mainThread, this)); 55 | } 56 | 57 | void sync(); 58 | 59 | void process(CompletedRequestPtr &completed_request); 60 | void process_stream_info(libcamera::StreamConfiguration const &cfg){ 61 | redis_->publish(CHANNEL_STATS, cfg.toString()); 62 | redis_->set(CONTROL_KEY_WIDTH, std::to_string(cfg.size.width)); 63 | redis_->set(CONTROL_KEY_HEIGHT, std::to_string(cfg.size.height)); 64 | } 65 | 66 | bool folderOpen; 67 | bool cameraRunning; 68 | 69 | bool configChanged(){ 70 | bool c = cameraInit_; 71 | cameraInit_ = false; 72 | return c; 73 | } 74 | 75 | int triggerRec(){ 76 | if(!disk_mounted(const_cast(options_))){ 77 | return 0; 78 | } 79 | int state = trigger_; 80 | if(state < 0){ 81 | clip_number_++; 82 | } 83 | trigger_ = 0; 84 | return state; 85 | } 86 | 87 | int triggerStill(){ 88 | int ts_ = triggerStill_; 89 | triggerStill_ = 0; 90 | return ts_; 91 | } 92 | 93 | protected: 94 | 95 | private: 96 | void mainThread(); 97 | void pubThread(); 98 | 99 | int trigger_; 100 | int triggerStill_; 101 | 102 | bool cameraInit_; 103 | 104 | CinePIRecorder *app_; 105 | RawOptions *options_; 106 | 107 | Redis *redis_; 108 | 109 | bool abortThread_; 110 | std::thread main_thread_; 111 | }; -------------------------------------------------------------------------------- /cinepi/cinepi_frameinfo.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include "core/frame_info.hpp" 9 | 10 | #define HISTOGRAM_SIZE 3*NUM_HISTOGRAM_BINS 11 | 12 | struct CinePIFrameInfo : public FrameInfo 13 | { 14 | 
CinePIFrameInfo(libcamera::ControlList &ctrls) 15 | : FrameInfo(ctrls) 16 | { 17 | auto colorT = ctrls.get(libcamera::controls::ColourTemperature); 18 | if (colorT) 19 | colorTemp = *colorT; 20 | 21 | 22 | auto sts = ctrls.get(libcamera::controls::SensorTimestamp); 23 | if(sts){ 24 | ts = (*sts); 25 | } 26 | 27 | #ifdef LIBCAMERA_CINEPI_CONTROLS 28 | trafficLight = 0; 29 | threshold_l = 2.0; 30 | threshold_h = 25.0; 31 | 32 | auto histo = ctrls.get(libcamera::controls::RawHistogram); 33 | if(histo){ 34 | memcpy(histogram,&(*histo)[0],sizeof(histogram)); 35 | } 36 | 37 | auto histo_stats = ctrls.get(libcamera::controls::RawHistogramExt); 38 | if(histo_stats){ 39 | histogram_stats[0] = (*histo_stats)[0]; 40 | histogram_stats[1] = (*histo_stats)[1]; 41 | histogram_stats[2] = (*histo_stats)[2]; 42 | histogram_stats[3] = (*histo_stats)[3]; 43 | histogram_stats[4] = (*histo_stats)[4]; 44 | histogram_stats[5] = (*histo_stats)[5]; 45 | histogram_stats[6] = (*histo_stats)[6]; 46 | histogram_stats[7] = (*histo_stats)[7]; 47 | histogram_stats[8] = (*histo_stats)[8]; 48 | 49 | rL = ((float)histogram_stats[3] / (float)histogram_stats[0])*100.0; 50 | gL = ((float)histogram_stats[4] / (float)histogram_stats[1])*100.0; 51 | bL = ((float)histogram_stats[5] / (float)histogram_stats[2])*100.0; 52 | 53 | rH = ((float)histogram_stats[6] / (float)histogram_stats[0])*100.0; 54 | gH = ((float)histogram_stats[7] / (float)histogram_stats[1])*100.0; 55 | bH = ((float)histogram_stats[8] / (float)histogram_stats[2])*100.0; 56 | 57 | trafficLight = 0; 58 | trafficLight ^= (-(rL > threshold_l) ^ trafficLight) & (0x01); 59 | trafficLight ^= (-(gL > threshold_l) ^ trafficLight) & (0x02); 60 | trafficLight ^= (-(bL > threshold_l) ^ trafficLight) & (0x04); 61 | 62 | trafficLight ^= (-(rH > threshold_h) ^ trafficLight) & (0x10); 63 | trafficLight ^= (-(gH > threshold_h) ^ trafficLight) & (0x20); 64 | trafficLight ^= (-(bH > threshold_h) ^ trafficLight) & (0x40); 65 | } 66 | #endif 67 | } 68 | 69 | 
#ifdef LIBCAMERA_CINEPI_CONTROLS 70 | std::string histoString() const{ 71 | std::ostringstream os; 72 | os << rL << "%, " << gL << "%, " << bL << "% : " << rH << "%, " << gH << "%, " << bH << "%"; 73 | // os << (unsigned int)trafficLight; 74 | return os.str(); 75 | }; 76 | 77 | uint8_t trafficLight; 78 | float threshold_h, threshold_l; 79 | float rL, gL, bL, rH, gH, bH; 80 | int32_t histogram[HISTOGRAM_SIZE]; 81 | int32_t histogram_stats[9]; 82 | #endif 83 | 84 | unsigned int colorTemp; 85 | int64_t ts; 86 | }; -------------------------------------------------------------------------------- /cinepi/cinepi_raw.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2022, Csaba Nagy. 4 | * 5 | * cinepi_raw.cpp - cinepi raw dng recording app. 6 | */ 7 | 8 | #include 9 | 10 | #include "cinepi_controller.hpp" 11 | #include "dng_encoder.hpp" 12 | #include "output/output.hpp" 13 | 14 | using namespace std::placeholders; 15 | 16 | // The main even loop for the application. 17 | 18 | static void event_loop(CinePIRecorder &app, CinePIController &controller) 19 | { 20 | controller.start(); 21 | controller.sync(); 22 | 23 | RawOptions const *options = app.GetOptions(); 24 | std::unique_ptr output = std::unique_ptr(Output::Create(options)); 25 | app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4)); 26 | app.SetMetadataReadyCallback(std::bind(&Output::MetadataReady, output.get(), _1)); 27 | 28 | app.OpenCamera(); 29 | app.StartEncoder(); 30 | app.GetOptions()->sensor = app.GetCamera()->properties().get(libcamera::properties::Model).value_or(app.CameraId()); 31 | 32 | for (unsigned int count = 0; ; count++) 33 | { 34 | // if we change framerate or sensor mode, restart the camera. 
35 | if(controller.configChanged()){ 36 | if(controller.cameraRunning){ 37 | app.StopCamera(); 38 | app.Teardown(); 39 | } 40 | app.ConfigureVideo(CinePIRecorder::FLAG_VIDEO_RAW); 41 | app.StartCamera(); 42 | controller.cameraRunning = true; 43 | 44 | libcamera::StreamConfiguration const &cfg = app.RawStream()->configuration(); 45 | LOG(1, "Raw stream: " << cfg.size.width << "x" << cfg.size.height << " stride " << cfg.stride << " format " 46 | << cfg.pixelFormat.toString()); 47 | 48 | controller.process_stream_info(cfg); 49 | } 50 | 51 | CinePIRecorder::Msg msg = app.Wait(); 52 | 53 | if (msg.type == LibcameraApp::MsgType::Quit) 54 | return; 55 | 56 | if (msg.type == LibcameraApp::MsgType::Timeout) 57 | { 58 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 59 | app.StopCamera(); 60 | app.StartCamera(); 61 | continue; 62 | } 63 | if (msg.type != CinePIRecorder::MsgType::RequestComplete) 64 | throw std::runtime_error("unrecognised message!"); 65 | 66 | CompletedRequestPtr &completed_request = std::get(msg.payload); 67 | 68 | // parse the frame info metadata for the current frame, publish to redis stats channel 69 | controller.process(completed_request); 70 | 71 | // check for record trigger signal, open a new folder if rec_start or reset frame count if _rec_stop 72 | int trigger = controller.triggerRec(); 73 | if(trigger > 0){ 74 | controller.folderOpen = create_clip_folder(app.GetOptions(), controller.getClipNumber()); 75 | } else if (trigger < 0){ 76 | controller.folderOpen = false; 77 | app.GetEncoder()->resetFrameCount(); 78 | } 79 | 80 | // send frame to dng encoder and save to disk 81 | if(controller.isRecording() && controller.folderOpen){ 82 | app.EncodeBuffer(completed_request, app.RawStream(), app.LoresStream()); 83 | } 84 | 85 | // check for still trigger signal, open the stills folder and save a still frame 86 | int still_trigger = controller.triggerStill(); 87 | if(still_trigger){ 88 | create_stills_folder(app.GetOptions(), 
controller.getStillNumber()); 89 | app.GetEncoder()->still_capture = true; 90 | app.EncodeBuffer(completed_request, app.RawStream(), app.LoresStream()); 91 | } 92 | 93 | // show frame on display 94 | app.ShowPreview(completed_request, app.VideoStream()); 95 | } 96 | } 97 | 98 | int main(int argc, char *argv[]) 99 | { 100 | try 101 | { 102 | CinePIRecorder app; 103 | CinePIController controller(&app); 104 | 105 | RawOptions *options = app.GetOptions(); 106 | if (options->Parse(argc, argv)) 107 | { 108 | options->mediaDest = "/media/RAW"; 109 | 110 | if (options->verbose >= 2) 111 | options->Print(); 112 | 113 | event_loop(app, controller); 114 | } 115 | } 116 | catch (std::exception const &e) 117 | { 118 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 119 | return -1; 120 | } 121 | return 0; 122 | } 123 | -------------------------------------------------------------------------------- /cinepi/cinepi_recorder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_encoder.cpp - libcamera video encoding class. 
6 | */ 7 | 8 | #include "core/libcamera_app.hpp" 9 | #include "core/stream_info.hpp" 10 | #include "raw_options.hpp" 11 | 12 | #include "dng_encoder.hpp" 13 | #include "encoder/encoder.hpp" 14 | 15 | typedef std::function EncodeOutputReadyCallback; 16 | typedef std::function MetadataReadyCallback; 17 | 18 | class CinePIRecorder : public LibcameraApp 19 | { 20 | public: 21 | using Stream = libcamera::Stream; 22 | using FrameBuffer = libcamera::FrameBuffer; 23 | 24 | CinePIRecorder() : LibcameraApp(std::make_unique()) {} 25 | 26 | void StartEncoder() 27 | { 28 | createEncoder(); 29 | encoder_->SetInputDoneCallback(std::bind(&CinePIRecorder::encodeBufferDone, this, std::placeholders::_1)); 30 | encoder_->SetOutputReadyCallback(encode_output_ready_callback_); 31 | } 32 | // This is callback when the encoder gives you the encoded output data. 33 | void SetEncodeOutputReadyCallback(EncodeOutputReadyCallback callback) { encode_output_ready_callback_ = callback; } 34 | void SetMetadataReadyCallback(MetadataReadyCallback callback) { metadata_ready_callback_ = callback; } 35 | void EncodeBuffer(CompletedRequestPtr &completed_request, Stream *stream, Stream *lostream) 36 | { 37 | assert(encoder_); 38 | 39 | StreamInfo info = GetStreamInfo(stream); 40 | StreamInfo loinfo = GetStreamInfo(lostream); 41 | 42 | FrameBuffer *buffer = completed_request->buffers[stream]; 43 | FrameBuffer *loBuffer = completed_request->buffers[lostream]; 44 | 45 | libcamera::Span span = Mmap(buffer)[0]; 46 | libcamera::Span lospan = Mmap(loBuffer)[0]; 47 | 48 | void *mem = span.data(); 49 | void *lomem = lospan.data(); 50 | 51 | if (!buffer || !mem) 52 | throw std::runtime_error("no buffer to encode"); 53 | 54 | if (!loBuffer || !lomem) 55 | throw std::runtime_error("no buffer to encode, thumbnail"); 56 | 57 | auto ts = completed_request->metadata.get(controls::SensorTimestamp); 58 | int64_t timestamp_ns = ts ? 
*ts : buffer->metadata().timestamp; 59 | { 60 | std::lock_guard lock(encode_buffer_queue_mutex_); 61 | encode_buffer_queue_.push(completed_request); // creates a new reference 62 | } 63 | encoder_->EncodeBuffer2(buffer->planes()[0].fd.get(), span.size(), mem, info, lospan.size(), lomem, loinfo, timestamp_ns / 1000, completed_request->metadata); 64 | } 65 | RawOptions *GetOptions() const { return static_cast(options_.get()); } 66 | DngEncoder *GetEncoder() { return encoder_.get(); } 67 | void StopEncoder() { encoder_.reset(); } 68 | 69 | protected: 70 | virtual void createEncoder() 71 | { 72 | encoder_ = std::unique_ptr(new DngEncoder(GetOptions())); 73 | } 74 | std::unique_ptr encoder_; 75 | 76 | private: 77 | 78 | void encodeBufferDone(void *mem) 79 | { 80 | // If non-NULL, mem would indicate which buffer has been completed, but 81 | // currently we're just assuming everything is done in order. (We could 82 | // handle this by replacing the queue with a vector of 83 | // pairs.) 84 | assert(mem == nullptr); 85 | { 86 | std::lock_guard lock(encode_buffer_queue_mutex_); 87 | if (encode_buffer_queue_.empty()) 88 | throw std::runtime_error("no buffer available to return"); 89 | CompletedRequestPtr &completed_request = encode_buffer_queue_.front(); 90 | if (metadata_ready_callback_ && !GetOptions()->metadata.empty()) 91 | metadata_ready_callback_(completed_request->metadata); 92 | encode_buffer_queue_.pop(); // drop shared_ptr reference 93 | } 94 | } 95 | 96 | std::queue encode_buffer_queue_; 97 | std::mutex encode_buffer_queue_mutex_; 98 | EncodeOutputReadyCallback encode_output_ready_callback_; 99 | MetadataReadyCallback metadata_ready_callback_; 100 | }; 101 | -------------------------------------------------------------------------------- /cinepi/cinepi_state.cpp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cinepi/cinepi-raw/1f21e0992139d66e5e3670721c42cffd46b1d188/cinepi/cinepi_state.cpp 
-------------------------------------------------------------------------------- /cinepi/cinepi_state.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "core/logging.hpp" 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #define CONTROL_TRIGGER_RECORD "rec" 22 | #define CONTROL_TRIGGER_STILL "stll" 23 | #define CONTROL_TRIGGER_TIMELASPSE "tlps" 24 | 25 | #define CONTROL_KEY_RECORD "is_recording" 26 | #define CONTROL_KEY_ISO "iso" 27 | #define CONTROL_KEY_WB "awb" 28 | #define CONTROL_KEY_COLORGAINS "cg_rb" 29 | #define CONTROL_KEY_SHUTTER_ANGLE "shutter_a" 30 | #define CONTROL_KEY_SHUTTER_SPEED "shutter_s" 31 | 32 | #define CONTROL_KEY_FRAMERATE "fps" 33 | #define CONTROL_KEY_WIDTH "width" 34 | #define CONTROL_KEY_HEIGHT "height" 35 | #define CONTROL_KEY_MODE "mode" 36 | #define CONTROL_KEY_COMPRESSION "compress" 37 | 38 | #define CONTROL_KEY_CAMERAINIT "cam_init" 39 | 40 | class CinePIState 41 | { 42 | public: 43 | CinePIState() : is_recording_(false), clip_number_(0), still_number_(0) {}; 44 | ~CinePIState() {}; 45 | 46 | bool isRecording(){ 47 | return is_recording_; 48 | } 49 | 50 | unsigned int getClipNumber(){ 51 | return clip_number_; 52 | } 53 | 54 | unsigned int getStillNumber(){ 55 | return still_number_; 56 | } 57 | 58 | protected: 59 | float framerate_; 60 | bool is_recording_; 61 | unsigned int iso_; 62 | unsigned int awb_; 63 | float shutter_speed_; 64 | float shutter_angle_; 65 | unsigned int color_temp_; 66 | float cg_rb_[2]; 67 | 68 | uint16_t width_; 69 | uint16_t height_; 70 | int mode_; 71 | int compression_; 72 | 73 | unsigned int clip_number_; 74 | unsigned int still_number_; 75 | 76 | }; -------------------------------------------------------------------------------- /cinepi/dng_encoder.hpp: 
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "encoder/encoder.hpp" 11 | #include "raw_options.hpp" 12 | #include "cinepi_frameinfo.hpp" 13 | 14 | class DngEncoder : public Encoder 15 | { 16 | public: 17 | DngEncoder(RawOptions const *options); 18 | ~DngEncoder(); 19 | // Encode the given buffer. 20 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 21 | // void Encode(CompletedRequestPtr &completed_request); 22 | void EncodeBuffer2(int fd, size_t size, void *mem, StreamInfo const &info, size_t losize, void *lomem, StreamInfo const &loinfo, int64_t timestamp_us, CompletedRequest::ControlList const &metadata); 23 | void resetFrameCount(){ 24 | index_ = 0; 25 | } 26 | int bufferSize(){ 27 | return cache_buffer_.size(); 28 | } 29 | uint64_t getFrameCount(){ 30 | return frames_; 31 | } 32 | 33 | CompletedRequest::ControlList const *metadata_; 34 | 35 | bool compressed; 36 | bool still_capture; 37 | 38 | private: 39 | // How many threads to use. Whichever thread is idle will pick up the next frame. 40 | static const int NUM_ENC_THREADS = 6; 41 | 42 | // These threads do the actual encoding. 43 | void encodeThread(int num); 44 | 45 | void cacheThread(int num); 46 | // Handle the output buffers in another thread so as not to block the encoders. The 47 | // application can take its time, after which we return this buffer to the encoder for 48 | // re-use. 
49 | void outputThread(); 50 | 51 | bool encodeCheck_; 52 | bool abortEncode_; 53 | bool abortOutput_; 54 | bool resetCount_; 55 | uint64_t index_; 56 | uint64_t frames_; 57 | uint64_t frameStop_; 58 | 59 | RawOptions const *options_; 60 | 61 | void dng_save(uint8_t const *mem, StreamInfo const &info, uint8_t const *lomem, StreamInfo const &loinfo, size_t losize, 62 | libcamera::ControlList const &metadata, std::string const &filename, std::string const &cam_name, 63 | RawOptions const *options, uint64_t fn); 64 | 65 | struct EncodeItem 66 | { 67 | void *mem; 68 | size_t size; 69 | StreamInfo info; 70 | void *lomem; 71 | size_t losize; 72 | StreamInfo loinfo; 73 | CompletedRequest::ControlList met; 74 | int64_t timestamp_us; 75 | uint64_t index; 76 | }; 77 | std::queue encode_queue_; 78 | std::mutex encode_mutex_; 79 | std::condition_variable encode_cond_var_; 80 | std::thread encode_thread_[NUM_ENC_THREADS]; 81 | 82 | struct CachedItem 83 | { 84 | void *mem; 85 | size_t size; 86 | StreamInfo info; 87 | void *lomem; 88 | size_t losize; 89 | StreamInfo loinfo; 90 | CompletedRequest::ControlList met; 91 | int64_t timestamp_us; 92 | uint64_t index; 93 | }; 94 | boost::circular_buffer cache_buffer_; 95 | std::queue cache_queue_; 96 | std::mutex cache_mutex_; 97 | std::condition_variable cache_cond_var_; 98 | std::thread cache_thread_[NUM_ENC_THREADS]; 99 | }; -------------------------------------------------------------------------------- /cinepi/raw_options.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * still_video.hpp - video capture program options 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include "core/video_options.hpp" 15 | 16 | struct RawOptions : public VideoOptions 17 | { 18 | RawOptions() : VideoOptions() 19 | { 20 | using namespace boost::program_options; 21 | options_.add_options(); 22 | } 23 | 24 | std::optional redis; 25 | 26 | uint32_t clip_number; 27 | std::string mediaDest; 28 | std::string folder; 29 | 30 | float wb; 31 | std::string sensor; 32 | std::string model; 33 | std::string make; 34 | std::string serial; 35 | 36 | float clipping; 37 | 38 | }; 39 | -------------------------------------------------------------------------------- /cinepi/utils.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "raw_options.hpp" 3 | #include 4 | namespace fs = std::filesystem; 5 | 6 | enum CompressionType { NONE = 1, LOSSLESS = 7 }; 7 | 8 | static bool disk_mounted(RawOptions const *options){ 9 | return fs::exists(fs::path(options->mediaDest)); 10 | } 11 | 12 | static void generate_filename(RawOptions *options, unsigned int clip_number = 0) 13 | { 14 | char filename[128]; 15 | std::time_t raw_time; 16 | std::time(&raw_time); 17 | char time_string[32]; 18 | std::tm *time_info = std::localtime(&raw_time); 19 | std::strftime(time_string, sizeof(time_string), "%y-%m-%d_%H%M", time_info); 20 | snprintf(filename, sizeof(filename), "%s_%s_C%05d", "CINEPI", time_string, clip_number); 21 | options->folder = std::string(filename); 22 | } 23 | 24 | static bool create_clip_folder(RawOptions *options, unsigned int clip_number = 0) 25 | { 26 | if(!disk_mounted(options)) 27 | return false; 28 | generate_filename(options, clip_number); 29 | return fs::create_directories(options->mediaDest + std::string("/") + options->folder); 30 | } 31 | 32 | 33 | static bool create_stills_folder(RawOptions *options, unsigned int still_number = 0) 34 | { 35 | if(!disk_mounted(options)) 
36 | return false; 37 | std::string stillsPath = options->mediaDest + std::string("/stills"); 38 | bool exists = fs::exists(fs::path(stillsPath)); 39 | generate_filename(options, still_number); 40 | if(!exists){ 41 | return fs::create_directories(options->mediaDest + std::string("/stills")); 42 | } 43 | return exists; 44 | } -------------------------------------------------------------------------------- /core/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | include(GNUInstallDirs) 4 | 5 | find_package(Boost REQUIRED COMPONENTS program_options) 6 | 7 | add_custom_target(VersionCpp ${CMAKE_COMMAND} -DSOURCE_DIR=${CMAKE_SOURCE_DIR} -P ${CMAKE_CURRENT_LIST_DIR}/version.cmake) 8 | set_source_files_properties(version.cpp PROPERTIES GENERATED 1) 9 | 10 | add_library(libcamera_app libcamera_app.cpp post_processor.cpp version.cpp options.cpp) 11 | add_dependencies(libcamera_app VersionCpp) 12 | 13 | set_target_properties(libcamera_app PROPERTIES PREFIX "" IMPORT_PREFIX "" VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) 14 | target_link_libraries(libcamera_app pthread preview ${LIBCAMERA_LINK_LIBRARIES} ${Boost_LIBRARIES} post_processing_stages) 15 | 16 | install(TARGETS libcamera_app LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) 17 | -------------------------------------------------------------------------------- /core/completed_request.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * completed_request.hpp - structure holding request results. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | #include "core/metadata.hpp" 16 | 17 | struct CompletedRequest 18 | { 19 | using BufferMap = libcamera::Request::BufferMap; 20 | using ControlList = libcamera::ControlList; 21 | using Request = libcamera::Request; 22 | 23 | CompletedRequest(unsigned int seq, Request *r) 24 | : sequence(seq), buffers(r->buffers()), metadata(r->metadata()), request(r) 25 | { 26 | r->reuse(); 27 | } 28 | unsigned int sequence; 29 | BufferMap buffers; 30 | ControlList metadata; 31 | Request *request; 32 | float framerate; 33 | Metadata post_process_metadata; 34 | }; 35 | 36 | using CompletedRequestPtr = std::shared_ptr; 37 | -------------------------------------------------------------------------------- /core/frame_info.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * frame_info.hpp - Frame info class for libcamera apps 6 | */ 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | struct FrameInfo 16 | { 17 | FrameInfo(libcamera::ControlList &ctrls) 18 | : exposure_time(0.0), digital_gain(0.0), colour_gains({ { 0.0f, 0.0f } }), focus(0.0), aelock(false), 19 | lens_position(-1.0), af_state(0) 20 | { 21 | auto exp = ctrls.get(libcamera::controls::ExposureTime); 22 | if (exp) 23 | exposure_time = *exp; 24 | 25 | auto ag = ctrls.get(libcamera::controls::AnalogueGain); 26 | if (ag) 27 | analogue_gain = *ag; 28 | 29 | auto dg = ctrls.get(libcamera::controls::DigitalGain); 30 | if (dg) 31 | digital_gain = *dg; 32 | 33 | auto cg = ctrls.get(libcamera::controls::ColourGains); 34 | if (cg) 35 | { 36 | colour_gains[0] = (*cg)[0], colour_gains[1] = (*cg)[1]; 37 | } 38 | 39 | auto fom = ctrls.get(libcamera::controls::FocusFoM); 40 | if (fom) 41 | focus = *fom; 42 | 43 | auto ae = 
ctrls.get(libcamera::controls::AeLocked); 44 | if (ae) 45 | aelock = *ae; 46 | 47 | auto lp = ctrls.get(libcamera::controls::LensPosition); 48 | if (lp) 49 | lens_position = *lp; 50 | 51 | auto afs = ctrls.get(libcamera::controls::AfState); 52 | if (afs) 53 | af_state = *afs; 54 | } 55 | 56 | std::string ToString(std::string &info_string) const 57 | { 58 | std::string parsed(info_string); 59 | 60 | for (auto const &t : tokens) 61 | { 62 | std::size_t pos = parsed.find(t); 63 | if (pos != std::string::npos) 64 | { 65 | std::stringstream value; 66 | value << std::fixed << std::setprecision(2); 67 | 68 | if (t == "%frame") 69 | value << sequence; 70 | else if (t == "%fps") 71 | value << fps; 72 | else if (t == "%exp") 73 | value << exposure_time; 74 | else if (t == "%ag") 75 | value << analogue_gain; 76 | else if (t == "%dg") 77 | value << digital_gain; 78 | else if (t == "%rg") 79 | value << colour_gains[0]; 80 | else if (t == "%bg") 81 | value << colour_gains[1]; 82 | else if (t == "%focus") 83 | value << focus; 84 | else if (t == "%aelock") 85 | value << aelock; 86 | else if (t == "%lp") 87 | value << lens_position; 88 | else if (t == "%afstate") 89 | { 90 | switch (af_state) 91 | { 92 | case libcamera::controls::AfStateIdle: 93 | value << "idle"; 94 | break; 95 | case libcamera::controls::AfStateScanning: 96 | value << "scanning"; 97 | break; 98 | case libcamera::controls::AfStateFocused: 99 | value << "focused"; 100 | break; 101 | default: 102 | value << "failed"; 103 | } 104 | } 105 | 106 | parsed.replace(pos, t.length(), value.str()); 107 | } 108 | } 109 | 110 | return parsed; 111 | } 112 | 113 | unsigned int sequence; 114 | float exposure_time; 115 | float analogue_gain; 116 | float digital_gain; 117 | std::array colour_gains; 118 | float focus; 119 | float fps; 120 | bool aelock; 121 | float lens_position; 122 | int af_state; 123 | 124 | private: 125 | // Info text tokens. 
126 | inline static const std::string tokens[] = { "%frame", "%fps", "%exp", "%ag", "%dg", 127 | "%rg", "%bg", "%focus", "%aelock", 128 | "%lp", "%afstate" }; 129 | }; 130 | -------------------------------------------------------------------------------- /core/libcamera_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * libcamera_encoder.cpp - libcamera video encoding class. 6 | */ 7 | 8 | #include "core/libcamera_app.hpp" 9 | #include "core/stream_info.hpp" 10 | #include "core/video_options.hpp" 11 | 12 | #include "encoder/encoder.hpp" 13 | 14 | typedef std::function EncodeOutputReadyCallback; 15 | typedef std::function MetadataReadyCallback; 16 | 17 | class LibcameraEncoder : public LibcameraApp 18 | { 19 | public: 20 | using Stream = libcamera::Stream; 21 | using FrameBuffer = libcamera::FrameBuffer; 22 | 23 | LibcameraEncoder() : LibcameraApp(std::make_unique()) {} 24 | 25 | void StartEncoder() 26 | { 27 | createEncoder(); 28 | encoder_->SetInputDoneCallback(std::bind(&LibcameraEncoder::encodeBufferDone, this, std::placeholders::_1)); 29 | encoder_->SetOutputReadyCallback(encode_output_ready_callback_); 30 | } 31 | // This is callback when the encoder gives you the encoded output data. 
32 | void SetEncodeOutputReadyCallback(EncodeOutputReadyCallback callback) { encode_output_ready_callback_ = callback; } 33 | void SetMetadataReadyCallback(MetadataReadyCallback callback) { metadata_ready_callback_ = callback; } 34 | void EncodeBuffer(CompletedRequestPtr &completed_request, Stream *stream) 35 | { 36 | assert(encoder_); 37 | StreamInfo info = GetStreamInfo(stream); 38 | FrameBuffer *buffer = completed_request->buffers[stream]; 39 | libcamera::Span span = Mmap(buffer)[0]; 40 | void *mem = span.data(); 41 | if (!buffer || !mem) 42 | throw std::runtime_error("no buffer to encode"); 43 | auto ts = completed_request->metadata.get(controls::SensorTimestamp); 44 | int64_t timestamp_ns = ts ? *ts : buffer->metadata().timestamp; 45 | { 46 | std::lock_guard lock(encode_buffer_queue_mutex_); 47 | encode_buffer_queue_.push(completed_request); // creates a new reference 48 | } 49 | encoder_->EncodeBuffer(buffer->planes()[0].fd.get(), span.size(), mem, info, timestamp_ns / 1000); 50 | } 51 | VideoOptions *GetOptions() const { return static_cast(options_.get()); } 52 | void StopEncoder() { encoder_.reset(); } 53 | 54 | protected: 55 | virtual void createEncoder() 56 | { 57 | StreamInfo info; 58 | VideoStream(&info); 59 | if (!info.width || !info.height || !info.stride) 60 | throw std::runtime_error("video steam is not configured"); 61 | encoder_ = std::unique_ptr(Encoder::Create(GetOptions(), info)); 62 | } 63 | std::unique_ptr encoder_; 64 | 65 | private: 66 | void encodeBufferDone(void *mem) 67 | { 68 | // If non-NULL, mem would indicate which buffer has been completed, but 69 | // currently we're just assuming everything is done in order. (We could 70 | // handle this by replacing the queue with a vector of 71 | // pairs.) 
72 | assert(mem == nullptr); 73 | { 74 | std::lock_guard lock(encode_buffer_queue_mutex_); 75 | if (encode_buffer_queue_.empty()) 76 | throw std::runtime_error("no buffer available to return"); 77 | CompletedRequestPtr &completed_request = encode_buffer_queue_.front(); 78 | if (metadata_ready_callback_ && !GetOptions()->metadata.empty()) 79 | metadata_ready_callback_(completed_request->metadata); 80 | encode_buffer_queue_.pop(); // drop shared_ptr reference 81 | } 82 | } 83 | 84 | std::queue encode_buffer_queue_; 85 | std::mutex encode_buffer_queue_mutex_; 86 | EncodeOutputReadyCallback encode_output_ready_callback_; 87 | MetadataReadyCallback metadata_ready_callback_; 88 | }; 89 | -------------------------------------------------------------------------------- /core/logging.hpp: -------------------------------------------------------------------------------- 1 | #include "core/libcamera_app.hpp" 2 | 3 | #define LOG(level, text) \ 4 | do \ 5 | { \ 6 | if (LibcameraApp::GetVerbosity() >= level) \ 7 | std::cerr << text << std::endl; \ 8 | } while (0) 9 | #define LOG_ERROR(text) std::cerr << text << std::endl 10 | -------------------------------------------------------------------------------- /core/metadata.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited 4 | * 5 | * metadata.hpp - general metadata class 6 | */ 7 | #pragma once 8 | 9 | // A simple class for carrying arbitrary metadata, for example about an image. 
10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | class Metadata 17 | { 18 | public: 19 | Metadata() = default; 20 | 21 | Metadata(Metadata const &other) 22 | { 23 | std::scoped_lock other_lock(other.mutex_); 24 | data_ = other.data_; 25 | } 26 | 27 | Metadata(Metadata &&other) 28 | { 29 | std::scoped_lock other_lock(other.mutex_); 30 | data_ = std::move(other.data_); 31 | other.data_.clear(); 32 | } 33 | 34 | template 35 | void Set(std::string const &tag, T &&value) 36 | { 37 | std::scoped_lock lock(mutex_); 38 | data_.insert_or_assign(tag, std::forward(value)); 39 | } 40 | 41 | template 42 | int Get(std::string const &tag, T &value) const 43 | { 44 | std::scoped_lock lock(mutex_); 45 | auto it = data_.find(tag); 46 | if (it == data_.end()) 47 | return -1; 48 | value = std::any_cast(it->second); 49 | return 0; 50 | } 51 | 52 | void Clear() 53 | { 54 | std::scoped_lock lock(mutex_); 55 | data_.clear(); 56 | } 57 | 58 | Metadata &operator=(Metadata const &other) 59 | { 60 | std::scoped_lock lock(mutex_, other.mutex_); 61 | data_ = other.data_; 62 | return *this; 63 | } 64 | 65 | Metadata &operator=(Metadata &&other) 66 | { 67 | std::scoped_lock lock(mutex_, other.mutex_); 68 | data_ = std::move(other.data_); 69 | other.data_.clear(); 70 | return *this; 71 | } 72 | 73 | void Merge(Metadata &other) 74 | { 75 | std::scoped_lock lock(mutex_, other.mutex_); 76 | data_.merge(other.data_); 77 | } 78 | 79 | template 80 | T *GetLocked(std::string const &tag) 81 | { 82 | // This allows in-place access to the Metadata contents, 83 | // for which you should be holding the lock. 84 | auto it = data_.find(tag); 85 | if (it == data_.end()) 86 | return nullptr; 87 | return std::any_cast(&it->second); 88 | } 89 | 90 | template 91 | void SetLocked(std::string const &tag, T &&value) 92 | { 93 | // Use this only if you're holding the lock yourself. 
94 | data_.insert_or_assign(tag, std::forward(value)); 95 | } 96 | 97 | // Note: use of (lowercase) lock and unlock means you can create scoped 98 | // locks with the standard lock classes. 99 | // e.g. std::lock_guard lock(metadata) 100 | void lock() { mutex_.lock(); } 101 | void unlock() { mutex_.unlock(); } 102 | 103 | private: 104 | mutable std::mutex mutex_; 105 | std::map data_; 106 | }; 107 | -------------------------------------------------------------------------------- /core/post_processor.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * post_processor.cpp - Post processor implementation. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_app.hpp" 11 | #include "core/post_processor.hpp" 12 | 13 | #include "post_processing_stages/post_processing_stage.hpp" 14 | 15 | #include 16 | #include 17 | 18 | PostProcessor::PostProcessor(LibcameraApp *app) : app_(app) 19 | { 20 | } 21 | 22 | PostProcessor::~PostProcessor() 23 | { 24 | } 25 | 26 | void PostProcessor::Read(std::string const &filename) 27 | { 28 | boost::property_tree::ptree root; 29 | boost::property_tree::read_json(filename, root); 30 | for (auto const &key_and_value : root) 31 | { 32 | PostProcessingStage *stage = createPostProcessingStage(key_and_value.first.c_str()); 33 | if (stage) 34 | { 35 | LOG(1, "Reading post processing stage \"" << key_and_value.first << "\""); 36 | stage->Read(key_and_value.second); 37 | stages_.push_back(StagePtr(stage)); 38 | } 39 | else 40 | LOG(1, "No post processing stage found for \"" << key_and_value.first << "\""); 41 | } 42 | } 43 | 44 | PostProcessingStage *PostProcessor::createPostProcessingStage(char const *name) 45 | { 46 | auto it = GetPostProcessingStages().find(std::string(name)); 47 | return it != GetPostProcessingStages().end() ? 
(*it->second)(app_) : nullptr; 48 | } 49 | 50 | void PostProcessor::SetCallback(PostProcessorCallback callback) 51 | { 52 | callback_ = callback; 53 | } 54 | 55 | void PostProcessor::AdjustConfig(std::string const &use_case, StreamConfiguration *config) 56 | { 57 | for (auto &stage : stages_) 58 | { 59 | stage->AdjustConfig(use_case, config); 60 | } 61 | } 62 | 63 | void PostProcessor::Configure() 64 | { 65 | for (auto &stage : stages_) 66 | { 67 | stage->Configure(); 68 | } 69 | } 70 | 71 | void PostProcessor::Start() 72 | { 73 | quit_ = false; 74 | output_thread_ = std::thread(&PostProcessor::outputThread, this); 75 | 76 | for (auto &stage : stages_) 77 | { 78 | stage->Start(); 79 | } 80 | } 81 | 82 | void PostProcessor::Process(CompletedRequestPtr &request) 83 | { 84 | if (stages_.empty()) 85 | { 86 | callback_(request); 87 | return; 88 | } 89 | 90 | std::unique_lock l(mutex_); 91 | requests_.push(std::move(request)); // caller has given us ownership of this reference 92 | 93 | std::promise promise; 94 | auto process_fn = [this](CompletedRequestPtr &request, std::promise promise) { 95 | bool drop_request = false; 96 | for (auto &stage : stages_) 97 | { 98 | if (stage->Process(request)) 99 | { 100 | drop_request = true; 101 | break; 102 | } 103 | } 104 | promise.set_value(drop_request); 105 | cv_.notify_one(); 106 | }; 107 | 108 | // Queue the futures to ensure we have correct ordering in the output thread. The promise/future return value 109 | // tells us when all the streams for this request have been processed and output_ready_callback_ can be called. 
110 | futures_.push(promise.get_future()); 111 | std::thread { process_fn, std::ref(requests_.back()), std::move(promise) }.detach(); 112 | } 113 | 114 | void PostProcessor::outputThread() 115 | { 116 | while (true) 117 | { 118 | CompletedRequestPtr request; 119 | 120 | bool drop_request = false; 121 | { 122 | std::unique_lock l(mutex_); 123 | 124 | cv_.wait(l, [this] { 125 | return (quit_ && futures_.empty()) || 126 | (!futures_.empty() && futures_.front().wait_for(0s) == std::future_status::ready); 127 | }); 128 | 129 | // Only quit when the futures_ queue is empty. 130 | if (quit_ && futures_.empty()) 131 | break; 132 | 133 | drop_request = futures_.front().get(); 134 | futures_.pop(); 135 | request = std::move(requests_.front()); // reuse as it's being dropped from the queue 136 | requests_.pop(); 137 | } 138 | 139 | if (!drop_request) 140 | callback_(request); // callback can take over ownership from us 141 | } 142 | } 143 | 144 | void PostProcessor::Stop() 145 | { 146 | for (auto &stage : stages_) 147 | { 148 | stage->Stop(); 149 | } 150 | 151 | { 152 | std::unique_lock l(mutex_); 153 | quit_ = true; 154 | cv_.notify_one(); 155 | } 156 | 157 | output_thread_.join(); 158 | } 159 | 160 | void PostProcessor::Teardown() 161 | { 162 | for (auto &stage : stages_) 163 | { 164 | stage->Teardown(); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /core/post_processor.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * post_processor.hpp - Post processor definition. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include "core/completed_request.hpp" 17 | #include "core/logging.hpp" 18 | 19 | namespace libcamera 20 | { 21 | struct StreamConfiguration; 22 | } 23 | 24 | class LibcameraApp; 25 | 26 | using namespace std::chrono_literals; 27 | class PostProcessingStage; 28 | using PostProcessorCallback = std::function; 29 | using StreamConfiguration = libcamera::StreamConfiguration; 30 | typedef std::unique_ptr StagePtr; 31 | 32 | class PostProcessor 33 | { 34 | public: 35 | PostProcessor(LibcameraApp *app); 36 | 37 | ~PostProcessor(); 38 | 39 | void Read(std::string const &filename); 40 | 41 | void SetCallback(PostProcessorCallback callback); 42 | 43 | void AdjustConfig(std::string const &use_case, StreamConfiguration *config); 44 | 45 | void Configure(); 46 | 47 | void Start(); 48 | 49 | void Process(CompletedRequestPtr &request); 50 | 51 | void Stop(); 52 | 53 | void Teardown(); 54 | 55 | private: 56 | PostProcessingStage *createPostProcessingStage(char const *name); 57 | 58 | LibcameraApp *app_; 59 | std::vector stages_; 60 | void outputThread(); 61 | 62 | std::queue requests_; 63 | std::queue> futures_; 64 | std::thread output_thread_; 65 | bool quit_; 66 | PostProcessorCallback callback_; 67 | std::mutex mutex_; 68 | std::condition_variable cv_; 69 | }; 70 | -------------------------------------------------------------------------------- /core/still_options.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * still_options.hpp - still capture program options 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "options.hpp" 13 | 14 | struct StillOptions : public Options 15 | { 16 | StillOptions() : Options() 17 | { 18 | using namespace boost::program_options; 19 | // clang-format off 20 | options_.add_options() 21 | ("quality,q", value(&quality)->default_value(93), 22 | "Set the JPEG quality parameter") 23 | ("exif,x", value>(&exif), 24 | "Add these extra EXIF tags to the output file") 25 | ("timelapse", value(&timelapse)->default_value(0), 26 | "Time interval (in ms) between timelapse captures") 27 | ("framestart", value(&framestart)->default_value(0), 28 | "Initial frame counter value for timelapse captures") 29 | ("datetime", value(&datetime)->default_value(false)->implicit_value(true), 30 | "Use date format for output file names") 31 | ("timestamp", value(×tamp)->default_value(false)->implicit_value(true), 32 | "Use system timestamps for output file names") 33 | ("restart", value(&restart)->default_value(0), 34 | "Set JPEG restart interval") 35 | ("keypress,k", value(&keypress)->default_value(false)->implicit_value(true), 36 | "Perform capture when ENTER pressed") 37 | ("signal,s", value(&signal)->default_value(false)->implicit_value(true), 38 | "Perform capture when signal received") 39 | ("thumb", value(&thumb)->default_value("320:240:70"), 40 | "Set thumbnail parameters as width:height:quality, or none") 41 | ("encoding,e", value(&encoding)->default_value("jpg"), 42 | "Set the desired output encoding, either jpg, png, rgb, bmp or yuv420") 43 | ("raw,r", value(&raw)->default_value(false)->implicit_value(true), 44 | "Also save raw file in DNG format") 45 | ("latest", value(&latest), 46 | "Create a symbolic link with this name to most recent saved file") 47 | ("immediate", value(&immediate)->default_value(false)->implicit_value(true), 48 | "Perform first capture immediately, with no preview phase") 49 | ("autofocus-on-capture", 
value(&af_on_capture)->default_value(false)->implicit_value(true), 50 | "Switch to AfModeAuto and trigger a scan just before capturing a still") 51 | ; 52 | // clang-format on 53 | } 54 | 55 | int quality; 56 | std::vector exif; 57 | uint64_t timelapse; 58 | uint32_t framestart; 59 | bool datetime; 60 | bool timestamp; 61 | unsigned int restart; 62 | bool keypress; 63 | bool signal; 64 | std::string thumb; 65 | unsigned int thumb_width, thumb_height, thumb_quality; 66 | std::string encoding; 67 | bool raw; 68 | std::string latest; 69 | bool immediate; 70 | 71 | virtual bool Parse(int argc, char *argv[]) override 72 | { 73 | if (Options::Parse(argc, argv) == false) 74 | return false; 75 | if ((keypress || signal) && timelapse) 76 | throw std::runtime_error("keypress/signal and timelapse options are mutually exclusive"); 77 | if (strcasecmp(thumb.c_str(), "none") == 0) 78 | thumb_quality = 0; 79 | else if (sscanf(thumb.c_str(), "%u:%u:%u", &thumb_width, &thumb_height, &thumb_quality) != 3) 80 | throw std::runtime_error("bad thumbnail parameters " + thumb); 81 | if (strcasecmp(encoding.c_str(), "jpg") == 0) 82 | encoding = "jpg"; 83 | else if (strcasecmp(encoding.c_str(), "yuv420") == 0) 84 | encoding = "yuv420"; 85 | else if (strcasecmp(encoding.c_str(), "rgb") == 0) 86 | encoding = "rgb"; 87 | else if (strcasecmp(encoding.c_str(), "png") == 0) 88 | encoding = "png"; 89 | else if (strcasecmp(encoding.c_str(), "bmp") == 0) 90 | encoding = "bmp"; 91 | else 92 | throw std::runtime_error("invalid encoding format " + encoding); 93 | return true; 94 | } 95 | virtual void Print() const override 96 | { 97 | Options::Print(); 98 | std::cerr << " encoding: " << encoding << std::endl; 99 | std::cerr << " quality: " << quality << std::endl; 100 | std::cerr << " raw: " << raw << std::endl; 101 | std::cerr << " restart: " << restart << std::endl; 102 | std::cerr << " timelapse: " << timelapse << std::endl; 103 | std::cerr << " framestart: " << framestart << std::endl; 104 | 
std::cerr << " datetime: " << datetime << std::endl; 105 | std::cerr << " timestamp: " << timestamp << std::endl; 106 | std::cerr << " keypress: " << keypress << std::endl; 107 | std::cerr << " signal: " << signal << std::endl; 108 | std::cerr << " thumbnail width: " << thumb_width << std::endl; 109 | std::cerr << " thumbnail height: " << thumb_height << std::endl; 110 | std::cerr << " thumbnail quality: " << thumb_quality << std::endl; 111 | std::cerr << " latest: " << latest << std::endl; 112 | std::cerr << " immediate " << immediate << std::endl; 113 | std::cerr << " AF on capture: " << af_on_capture << std::endl; 114 | for (auto &s : exif) 115 | std::cerr << " EXIF: " << s << std::endl; 116 | } 117 | }; 118 | -------------------------------------------------------------------------------- /core/stream_info.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * stream_info.hpp - structure holding details about a libcamera Stream. 
6 | */ 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | struct StreamInfo 15 | { 16 | StreamInfo() : width(0), height(0), stride(0) {} 17 | unsigned int width; 18 | unsigned int height; 19 | unsigned int stride; 20 | libcamera::PixelFormat pixel_format; 21 | std::optional colour_space; 22 | }; 23 | -------------------------------------------------------------------------------- /core/version.cmake: -------------------------------------------------------------------------------- 1 | # Script to generate a version string and embed it in the version.cpp source file 2 | 3 | if (EXISTS ${SOURCE_DIR}/version.gen) 4 | message("Reading version string from version.gen") 5 | file(READ ${SOURCE_DIR}/version.gen SHA) 6 | endif() 7 | 8 | execute_process(COMMAND ${SOURCE_DIR}/utils/version.py ${SHA} 9 | WORKING_DIRECTORY ${SOURCE_DIR} 10 | OUTPUT_VARIABLE VER) 11 | 12 | configure_file(${CMAKE_CURRENT_LIST_DIR}/version.cpp.in version.cpp @ONLY) 13 | message("Generating version string: " ${VER}) 14 | -------------------------------------------------------------------------------- /core/version.cpp.in: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * AUTO-GENERATED, DO NOT MODIFY! 6 | */ 7 | #include "core/version.hpp" 8 | 9 | static const std::string versionString {"@VER@"}; 10 | 11 | const std::string& LibcameraAppsVersion() 12 | { 13 | return versionString; 14 | } 15 | -------------------------------------------------------------------------------- /core/version.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 
4 | * 5 | */ 6 | #pragma once 7 | 8 | #include 9 | 10 | const std::string& LibcameraAppsVersion(); 11 | -------------------------------------------------------------------------------- /encoder/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | include(GNUInstallDirs) 4 | 5 | if (NOT DEFINED ENABLE_LIBAV) 6 | set(ENABLE_LIBAV 1) 7 | endif() 8 | 9 | set(LIBAV_PRESENT 0) 10 | set(SRC encoder.cpp null_encoder.cpp h264_encoder.cpp mjpeg_encoder.cpp) 11 | set(TARGET_LIBS images) 12 | 13 | if (ENABLE_LIBAV) 14 | message(STATUS "Checking for libavcodec") 15 | pkg_check_modules(LIBAV QUIET IMPORTED_TARGET 16 | libavcodec 17 | libavdevice 18 | libavformat 19 | libswresample 20 | ) 21 | if (LIBAV_FOUND) 22 | include_directories(${LIBAV_INCLUDE_DIRS}) 23 | set(SRC ${SRC} libav_encoder.cpp) 24 | set(TARGET_LIBS ${TARGET_LIBS} ${LIBAV_LIBRARIES}) 25 | set(LIBAV_PRESENT 1) 26 | message(STATUS "libavcodec found:") 27 | message(STATUS " libraries: ${LIBAV_LIBRARIES}") 28 | endif() 29 | else() 30 | message(STATUS "Omitting libavcodec") 31 | endif() 32 | 33 | add_library(encoders ${SRC}) 34 | set_target_properties(encoders PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) 35 | target_link_libraries(encoders ${TARGET_LIBS}) 36 | target_compile_definitions(encoders PUBLIC LIBAV_PRESENT=${LIBAV_PRESENT}) 37 | 38 | install(TARGETS encoders LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) 39 | 40 | -------------------------------------------------------------------------------- /encoder/encoder.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * encoder.cpp - Video encoder class. 
6 | */ 7 | 8 | #include 9 | 10 | #include "encoder.hpp" 11 | #include "h264_encoder.hpp" 12 | #include "mjpeg_encoder.hpp" 13 | #include "null_encoder.hpp" 14 | 15 | #if LIBAV_PRESENT 16 | #include "libav_encoder.hpp" 17 | #endif 18 | 19 | Encoder *Encoder::Create(VideoOptions const *options, const StreamInfo &info) 20 | { 21 | if (strcasecmp(options->codec.c_str(), "yuv420") == 0) 22 | return new NullEncoder(options); 23 | else if (strcasecmp(options->codec.c_str(), "h264") == 0) 24 | return new H264Encoder(options, info); 25 | #if LIBAV_PRESENT 26 | else if (strcasecmp(options->codec.c_str(), "libav") == 0) 27 | return new LibAvEncoder(options, info); 28 | #endif 29 | else if (strcasecmp(options->codec.c_str(), "mjpeg") == 0) 30 | return new MjpegEncoder(options); 31 | throw std::runtime_error("Unrecognised codec " + options->codec); 32 | } 33 | -------------------------------------------------------------------------------- /encoder/encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * encoder.hpp - Video encoder class. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "core/stream_info.hpp" 13 | #include "core/video_options.hpp" 14 | 15 | typedef std::function InputDoneCallback; 16 | typedef std::function OutputReadyCallback; 17 | 18 | class Encoder 19 | { 20 | public: 21 | static Encoder *Create(VideoOptions const *options, StreamInfo const &info); 22 | 23 | Encoder(VideoOptions const *options) : options_(options) {} 24 | virtual ~Encoder() {} 25 | // This is where the application sets the callback it gets whenever the encoder 26 | // has finished with an input buffer, so the application can re-use it. 
27 | void SetInputDoneCallback(InputDoneCallback callback) { input_done_callback_ = callback; } 28 | // This callback is how the application is told that an encoded buffer is 29 | // available. The application may not hang on to the memory once it returns 30 | // (but the callback is already running in its own thread). 31 | void SetOutputReadyCallback(OutputReadyCallback callback) { output_ready_callback_ = callback; } 32 | // Encode the given buffer. The buffer is specified both by an fd and size 33 | // describing a DMABUF, and by a mmapped userland pointer. 34 | virtual void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) = 0; 35 | 36 | protected: 37 | InputDoneCallback input_done_callback_; 38 | OutputReadyCallback output_ready_callback_; 39 | VideoOptions const *options_; 40 | }; 41 | -------------------------------------------------------------------------------- /encoder/h264_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * h264_encoder.hpp - h264 video encoder. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "encoder.hpp" 16 | 17 | class H264Encoder : public Encoder 18 | { 19 | public: 20 | H264Encoder(VideoOptions const *options, StreamInfo const &info); 21 | ~H264Encoder(); 22 | // Encode the given DMABUF. 23 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 24 | 25 | private: 26 | // We want at least as many output buffers as there are in the camera queue 27 | // (we always want to be able to queue them when they arrive). Make loads 28 | // of capture buffers, as this is our buffering mechanism in case of delays 29 | // dealing with the output bitstream. 
30 | static constexpr int NUM_OUTPUT_BUFFERS = 6; 31 | static constexpr int NUM_CAPTURE_BUFFERS = 12; 32 | 33 | // This thread just sits waiting for the encoder to finish stuff. It will either: 34 | // * receive "output" buffers (codec inputs), which we must return to the caller 35 | // * receive encoded buffers, which we pass to the application. 36 | void pollThread(); 37 | 38 | // Handle the output buffers in another thread so as not to block the encoder. The 39 | // application can take its time, after which we return this buffer to the encoder for 40 | // re-use. 41 | void outputThread(); 42 | 43 | bool abortPoll_; 44 | bool abortOutput_; 45 | int fd_; 46 | struct BufferDescription 47 | { 48 | void *mem; 49 | size_t size; 50 | }; 51 | BufferDescription buffers_[NUM_CAPTURE_BUFFERS]; 52 | int num_capture_buffers_; 53 | std::thread poll_thread_; 54 | std::mutex input_buffers_available_mutex_; 55 | std::queue input_buffers_available_; 56 | struct OutputItem 57 | { 58 | void *mem; 59 | size_t bytes_used; 60 | size_t length; 61 | unsigned int index; 62 | bool keyframe; 63 | int64_t timestamp_us; 64 | }; 65 | std::queue output_queue_; 66 | std::mutex output_mutex_; 67 | std::condition_variable output_cond_var_; 68 | std::thread output_thread_; 69 | }; 70 | -------------------------------------------------------------------------------- /encoder/libav_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2022, Raspberry Pi Ltd 4 | * 5 | * libav_encoder.hpp - libav video encoder. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | extern "C" 17 | { 18 | #include "libavcodec/avcodec.h" 19 | #include "libavdevice/avdevice.h" 20 | #include "libavformat/avformat.h" 21 | #include "libavutil/audio_fifo.h" 22 | #include "libavutil/hwcontext.h" 23 | #include "libavutil/hwcontext_drm.h" 24 | #include "libavutil/timestamp.h" 25 | #include "libavutil/version.h" 26 | #include "libswresample/swresample.h" 27 | } 28 | 29 | #include "encoder.hpp" 30 | 31 | class LibAvEncoder : public Encoder 32 | { 33 | public: 34 | LibAvEncoder(VideoOptions const *options, StreamInfo const &info); 35 | ~LibAvEncoder(); 36 | // Encode the given DMABUF. 37 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 38 | 39 | private: 40 | void initVideoCodec(VideoOptions const *options, StreamInfo const &info); 41 | void initAudioInCodec(VideoOptions const *options, StreamInfo const &info); 42 | void initAudioOutCodec(VideoOptions const *options, StreamInfo const &info); 43 | 44 | void initOutput(); 45 | void deinitOutput(); 46 | void encode(AVPacket *pkt, unsigned int stream_id); 47 | 48 | void videoThread(); 49 | void audioThread(); 50 | 51 | std::atomic output_ready_; 52 | bool abort_video_; 53 | bool abort_audio_; 54 | uint64_t video_start_ts_; 55 | uint64_t audio_samples_; 56 | 57 | std::queue frame_queue_; 58 | std::mutex video_mutex_; 59 | std::mutex output_mutex_; 60 | std::condition_variable video_cv_; 61 | std::thread video_thread_; 62 | std::thread audio_thread_; 63 | 64 | // The ordering in the enum below must not change! 
65 | enum Context { Video = 0, AudioOut = 1, AudioIn = 2 }; 66 | AVCodecContext *codec_ctx_[3]; 67 | AVStream *stream_[3]; 68 | AVFormatContext *in_fmt_ctx_; 69 | AVFormatContext *out_fmt_ctx_; 70 | }; 71 | -------------------------------------------------------------------------------- /encoder/mjpeg_encoder.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * mjpeg_encoder.cpp - mjpeg video encoder. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include "mjpeg_encoder.hpp" 14 | 15 | #if JPEG_LIB_VERSION_MAJOR > 9 || (JPEG_LIB_VERSION_MAJOR == 9 && JPEG_LIB_VERSION_MINOR >= 4) 16 | typedef size_t jpeg_mem_len_t; 17 | #else 18 | typedef unsigned long jpeg_mem_len_t; 19 | #endif 20 | 21 | MjpegEncoder::MjpegEncoder(VideoOptions const *options) 22 | : Encoder(options), abortEncode_(false), abortOutput_(false), index_(0) 23 | { 24 | output_thread_ = std::thread(&MjpegEncoder::outputThread, this); 25 | for (int i = 0; i < NUM_ENC_THREADS; i++) 26 | encode_thread_[i] = std::thread(std::bind(&MjpegEncoder::encodeThread, this, i)); 27 | LOG(2, "Opened MjpegEncoder"); 28 | } 29 | 30 | MjpegEncoder::~MjpegEncoder() 31 | { 32 | abortEncode_ = true; 33 | for (int i = 0; i < NUM_ENC_THREADS; i++) 34 | encode_thread_[i].join(); 35 | abortOutput_ = true; 36 | output_thread_.join(); 37 | LOG(2, "MjpegEncoder closed"); 38 | } 39 | 40 | void MjpegEncoder::EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) 41 | { 42 | std::lock_guard lock(encode_mutex_); 43 | EncodeItem item = { mem, info, timestamp_us, index_++ }; 44 | encode_queue_.push(item); 45 | encode_cond_var_.notify_all(); 46 | } 47 | 48 | void MjpegEncoder::encodeJPEG(struct jpeg_compress_struct &cinfo, EncodeItem &item, uint8_t *&encoded_buffer, 49 | size_t &buffer_len) 50 | { 51 | // Copied from 
YUV420_to_JPEG_fast in jpeg.cpp. 52 | cinfo.image_width = item.info.width; 53 | cinfo.image_height = item.info.height; 54 | cinfo.input_components = 3; 55 | cinfo.in_color_space = JCS_YCbCr; 56 | cinfo.restart_interval = 0; 57 | 58 | jpeg_set_defaults(&cinfo); 59 | cinfo.raw_data_in = TRUE; 60 | jpeg_set_quality(&cinfo, options_->quality, TRUE); 61 | encoded_buffer = nullptr; 62 | buffer_len = 0; 63 | jpeg_mem_len_t jpeg_mem_len; 64 | jpeg_mem_dest(&cinfo, &encoded_buffer, &jpeg_mem_len); 65 | jpeg_start_compress(&cinfo, TRUE); 66 | 67 | int stride2 = item.info.stride / 2; 68 | uint8_t *Y = (uint8_t *)item.mem; 69 | uint8_t *U = (uint8_t *)Y + item.info.stride * item.info.height; 70 | uint8_t *V = (uint8_t *)U + stride2 * (item.info.height / 2); 71 | uint8_t *Y_max = U - item.info.stride; 72 | uint8_t *U_max = V - stride2; 73 | uint8_t *V_max = U_max + stride2 * (item.info.height / 2); 74 | 75 | JSAMPROW y_rows[16]; 76 | JSAMPROW u_rows[8]; 77 | JSAMPROW v_rows[8]; 78 | 79 | for (uint8_t *Y_row = Y, *U_row = U, *V_row = V; cinfo.next_scanline < item.info.height;) 80 | { 81 | for (int i = 0; i < 16; i++, Y_row += item.info.stride) 82 | y_rows[i] = std::min(Y_row, Y_max); 83 | for (int i = 0; i < 8; i++, U_row += stride2, V_row += stride2) 84 | u_rows[i] = std::min(U_row, U_max), v_rows[i] = std::min(V_row, V_max); 85 | 86 | JSAMPARRAY rows[] = { y_rows, u_rows, v_rows }; 87 | jpeg_write_raw_data(&cinfo, rows, 16); 88 | } 89 | 90 | jpeg_finish_compress(&cinfo); 91 | buffer_len = jpeg_mem_len; 92 | } 93 | 94 | void MjpegEncoder::encodeThread(int num) 95 | { 96 | struct jpeg_compress_struct cinfo; 97 | struct jpeg_error_mgr jerr; 98 | cinfo.err = jpeg_std_error(&jerr); 99 | jpeg_create_compress(&cinfo); 100 | std::chrono::duration encode_time(0); 101 | uint32_t frames = 0; 102 | 103 | EncodeItem encode_item; 104 | while (true) 105 | { 106 | { 107 | std::unique_lock lock(encode_mutex_); 108 | while (true) 109 | { 110 | using namespace std::chrono_literals; 111 | if 
(abortEncode_ && encode_queue_.empty()) 112 | { 113 | if (frames) 114 | LOG(2, "Encode " << frames << " frames, average time " << encode_time.count() * 1000 / frames 115 | << "ms"); 116 | jpeg_destroy_compress(&cinfo); 117 | return; 118 | } 119 | if (!encode_queue_.empty()) 120 | { 121 | encode_item = encode_queue_.front(); 122 | encode_queue_.pop(); 123 | break; 124 | } 125 | else 126 | encode_cond_var_.wait_for(lock, 200ms); 127 | } 128 | } 129 | 130 | // Encode the buffer. 131 | uint8_t *encoded_buffer = nullptr; 132 | size_t buffer_len = 0; 133 | auto start_time = std::chrono::high_resolution_clock::now(); 134 | encodeJPEG(cinfo, encode_item, encoded_buffer, buffer_len); 135 | encode_time += (std::chrono::high_resolution_clock::now() - start_time); 136 | frames++; 137 | // Don't return buffers until the output thread as that's where they're 138 | // in order again. 139 | 140 | // We push this encoded buffer to another thread so that our 141 | // application can take its time with the data without blocking the 142 | // encode process. 143 | OutputItem output_item = { encoded_buffer, buffer_len, encode_item.timestamp_us, encode_item.index }; 144 | std::lock_guard lock(output_mutex_); 145 | output_queue_[num].push(output_item); 146 | output_cond_var_.notify_one(); 147 | } 148 | } 149 | 150 | void MjpegEncoder::outputThread() 151 | { 152 | OutputItem item; 153 | uint64_t index = 0; 154 | while (true) 155 | { 156 | { 157 | std::unique_lock lock(output_mutex_); 158 | while (true) 159 | { 160 | using namespace std::chrono_literals; 161 | // We look for the thread that's completed the frame we want next. 162 | // If we don't find it, we wait. 163 | // 164 | // Must also check for an abort signal, and if set, all queues must 165 | // be empty. This is done first to ensure all frame callbacks have 166 | // had a chance to run. 167 | bool abort = abortOutput_ ? 
true : false; 168 | for (auto &q : output_queue_) 169 | { 170 | if (abort && !q.empty()) 171 | abort = false; 172 | 173 | if (!q.empty() && q.front().index == index) 174 | { 175 | item = q.front(); 176 | q.pop(); 177 | goto got_item; 178 | } 179 | } 180 | if (abort) 181 | return; 182 | 183 | output_cond_var_.wait_for(lock, 200ms); 184 | } 185 | } 186 | got_item: 187 | input_done_callback_(nullptr); 188 | 189 | output_ready_callback_(item.mem, item.bytes_used, item.timestamp_us, true); 190 | free(item.mem); 191 | index++; 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /encoder/mjpeg_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * mjpeg_encoder.hpp - mjpeg video encoder. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "encoder.hpp" 16 | 17 | struct jpeg_compress_struct; 18 | 19 | class MjpegEncoder : public Encoder 20 | { 21 | public: 22 | MjpegEncoder(VideoOptions const *options); 23 | ~MjpegEncoder(); 24 | // Encode the given buffer. 25 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 26 | 27 | private: 28 | // How many threads to use. Whichever thread is idle will pick up the next frame. 29 | static const int NUM_ENC_THREADS = 4; 30 | 31 | // These threads do the actual encoding. 32 | void encodeThread(int num); 33 | 34 | // Handle the output buffers in another thread so as not to block the encoders. The 35 | // application can take its time, after which we return this buffer to the encoder for 36 | // re-use. 
37 | void outputThread(); 38 | 39 | bool abortEncode_; 40 | bool abortOutput_; 41 | uint64_t index_; 42 | 43 | struct EncodeItem 44 | { 45 | void *mem; 46 | StreamInfo info; 47 | int64_t timestamp_us; 48 | uint64_t index; 49 | }; 50 | std::queue encode_queue_; 51 | std::mutex encode_mutex_; 52 | std::condition_variable encode_cond_var_; 53 | std::thread encode_thread_[NUM_ENC_THREADS]; 54 | void encodeJPEG(struct jpeg_compress_struct &cinfo, EncodeItem &item, uint8_t *&encoded_buffer, size_t &buffer_len); 55 | 56 | struct OutputItem 57 | { 58 | void *mem; 59 | size_t bytes_used; 60 | int64_t timestamp_us; 61 | uint64_t index; 62 | }; 63 | std::queue output_queue_[NUM_ENC_THREADS]; 64 | std::mutex output_mutex_; 65 | std::condition_variable output_cond_var_; 66 | std::thread output_thread_; 67 | }; 68 | -------------------------------------------------------------------------------- /encoder/null_encoder.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * null_encoder.cpp - dummy "do nothing" video encoder. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "null_encoder.hpp" 13 | 14 | NullEncoder::NullEncoder(VideoOptions const *options) : Encoder(options), abort_(false) 15 | { 16 | LOG(2, "Opened NullEncoder"); 17 | output_thread_ = std::thread(&NullEncoder::outputThread, this); 18 | } 19 | 20 | NullEncoder::~NullEncoder() 21 | { 22 | abort_ = true; 23 | output_thread_.join(); 24 | LOG(2, "NullEncoder closed"); 25 | } 26 | 27 | // Push the buffer onto the output queue to be "encoded" and returned. 
28 | void NullEncoder::EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) 29 | { 30 | std::lock_guard lock(output_mutex_); 31 | OutputItem item = { mem, size, timestamp_us }; 32 | output_queue_.push(item); 33 | output_cond_var_.notify_one(); 34 | } 35 | 36 | // Realistically we would probably want more of a queue as the caller's number 37 | // of buffers limits the amount of queueing possible here... 38 | void NullEncoder::outputThread() 39 | { 40 | OutputItem item; 41 | while (true) 42 | { 43 | { 44 | std::unique_lock lock(output_mutex_); 45 | while (true) 46 | { 47 | using namespace std::chrono_literals; 48 | if (!output_queue_.empty()) 49 | { 50 | item = output_queue_.front(); 51 | output_queue_.pop(); 52 | break; 53 | } 54 | else 55 | output_cond_var_.wait_for(lock, 200ms); 56 | if (abort_) 57 | return; 58 | } 59 | } 60 | // Ensure the input done callback happens before the output ready callback. 61 | // This is needed as the metadata queue gets pushed in the former, and popped 62 | // in the latter. 63 | input_done_callback_(nullptr); 64 | output_ready_callback_(item.mem, item.length, item.timestamp_us, true); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /encoder/null_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * null_encoder.hpp - dummy "do nothing" video encoder. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "core/video_options.hpp" 16 | #include "encoder.hpp" 17 | 18 | class NullEncoder : public Encoder 19 | { 20 | public: 21 | NullEncoder(VideoOptions const *options); 22 | ~NullEncoder(); 23 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 24 | 25 | private: 26 | void outputThread(); 27 | 28 | bool abort_; 29 | VideoOptions options_; 30 | struct OutputItem 31 | { 32 | void *mem; 33 | size_t length; 34 | int64_t timestamp_us; 35 | }; 36 | std::queue output_queue_; 37 | std::mutex output_mutex_; 38 | std::condition_variable output_cond_var_; 39 | std::thread output_thread_; 40 | }; 41 | -------------------------------------------------------------------------------- /image/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | include(GNUInstallDirs) 4 | 5 | find_library(EXIF_LIBRARY exif REQUIRED) 6 | find_library(JPEG_LIBRARY jpeg REQUIRED) 7 | find_library(TIFF_LIBRARY tiff REQUIRED) 8 | find_library(PNG_LIBRARY png REQUIRED) 9 | 10 | add_library(images bmp.cpp yuv.cpp jpeg.cpp png.cpp dng.cpp) 11 | set_target_properties(images PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) 12 | target_link_libraries(images jpeg exif png tiff) 13 | 14 | install(TARGETS images LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) 15 | -------------------------------------------------------------------------------- /image/bmp.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * bmp.cpp - Encode image as bmp and write to file. 
6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include "core/still_options.hpp" 14 | #include "core/stream_info.hpp" 15 | 16 | struct ImageHeader 17 | { 18 | uint32_t size = sizeof(ImageHeader); 19 | uint32_t width; 20 | int32_t height; 21 | uint16_t planes = 1; 22 | uint16_t bitcount = 24; 23 | uint32_t compression = 0; 24 | uint32_t imagesize = 0; 25 | uint32_t xpels = 100000; 26 | uint32_t ypels = 100000; 27 | uint32_t clrused = 0; 28 | uint32_t clrimportant = 0; 29 | }; 30 | static_assert(sizeof(ImageHeader) == 40, "ImageHeader size wrong"); 31 | 32 | struct FileHeader 33 | { 34 | uint16_t dummy; // 2 dummy bytes so that our uint32_ts line up 35 | uint8_t type1 = 'B'; 36 | uint8_t type2 = 'M'; 37 | uint32_t filesize; 38 | uint16_t reserved1 = 0; 39 | uint16_t reserved2 = 0; 40 | uint32_t offset = sizeof(FileHeader) - 2 + sizeof(ImageHeader); 41 | }; 42 | static_assert(sizeof(FileHeader) == 16, "FileHeader size wrong"); 43 | 44 | void bmp_save(std::vector> const &mem, StreamInfo const &info, 45 | std::string const &filename, StillOptions const *options) 46 | { 47 | if (info.pixel_format != libcamera::formats::RGB888) 48 | throw std::runtime_error("pixel format for bmp should be RGB"); 49 | 50 | FILE *fp = filename == "-" ? 
stdout : fopen(filename.c_str(), "wb"); 51 | 52 | if (fp == NULL) 53 | throw std::runtime_error("failed to open file " + filename); 54 | 55 | try 56 | { 57 | unsigned int line = info.width * 3; 58 | unsigned int pitch = (line + 3) & ~3; // lines are multiples of 4 bytes 59 | unsigned int pad = pitch - line; 60 | uint8_t padding[3] = {}; 61 | uint8_t *ptr = (uint8_t *)mem[0].data(); 62 | 63 | FileHeader file_header; 64 | ImageHeader image_header; 65 | file_header.filesize = file_header.offset + info.height * pitch; 66 | image_header.width = info.width; 67 | image_header.height = -info.height; // make image come out the right way up 68 | 69 | // Don't write the file header's 2 dummy bytes 70 | if (fwrite((uint8_t *)&file_header + 2, sizeof(file_header) - 2, 1, fp) != 1 || 71 | fwrite(&image_header, sizeof(image_header), 1, fp) != 1) 72 | throw std::runtime_error("failed to write BMP file"); 73 | 74 | for (unsigned int i = 0; i < info.height; i++, ptr += info.stride) 75 | { 76 | if (fwrite(ptr, line, 1, fp) != 1 || (pad != 0 && fwrite(padding, pad, 1, fp) != 1)) 77 | throw std::runtime_error("failed to write BMP file, row " + std::to_string(i)); 78 | } 79 | 80 | LOG(2, "Wrote " << file_header.filesize << " bytes to BMP file"); 81 | 82 | if (fp != stdout) 83 | fclose(fp); 84 | } 85 | catch (std::exception const &e) 86 | { 87 | if (fp && fp != stdout) 88 | fclose(fp); 89 | throw; 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /image/image.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * image.hpp - still image encoder declarations 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include 15 | 16 | #include "core/stream_info.hpp" 17 | 18 | struct StillOptions; 19 | 20 | // In jpeg.cpp: 21 | void jpeg_save(std::vector> const &mem, StreamInfo const &info, 22 | libcamera::ControlList const &metadata, std::string const &filename, std::string const &cam_model, 23 | StillOptions const *options); 24 | 25 | // In yuv.cpp: 26 | void yuv_save(std::vector> const &mem, StreamInfo const &info, 27 | std::string const &filename, StillOptions const *options); 28 | 29 | // In dng.cpp: 30 | void dng_save(std::vector> const &mem, StreamInfo const &info, 31 | libcamera::ControlList const &metadata, std::string const &filename, std::string const &cam_model, 32 | StillOptions const *options); 33 | 34 | // In png.cpp: 35 | void png_save(std::vector> const &mem, StreamInfo const &info, 36 | std::string const &filename, StillOptions const *options); 37 | 38 | // In bmp.cpp: 39 | void bmp_save(std::vector> const &mem, StreamInfo const &info, 40 | std::string const &filename, StillOptions const *options); 41 | -------------------------------------------------------------------------------- /image/png.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * png.cpp - Encode image as png and write to file. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | 15 | #include "core/still_options.hpp" 16 | #include "core/stream_info.hpp" 17 | 18 | void png_save(std::vector> const &mem, StreamInfo const &info, 19 | std::string const &filename, StillOptions const *options) 20 | { 21 | if (info.pixel_format != libcamera::formats::BGR888) 22 | throw std::runtime_error("pixel format for png should be BGR"); 23 | 24 | FILE *fp = filename == "-" ? 
stdout : fopen(filename.c_str(), "wb"); 25 | png_structp png_ptr = NULL; 26 | png_infop info_ptr = NULL; 27 | 28 | if (fp == NULL) 29 | throw std::runtime_error("failed to open file " + filename); 30 | 31 | try 32 | { 33 | // Open everything up. 34 | png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); 35 | if (png_ptr == NULL) 36 | throw std::runtime_error("failed to create png write struct"); 37 | 38 | info_ptr = png_create_info_struct(png_ptr); 39 | if (info_ptr == NULL) 40 | throw std::runtime_error("failed to create png info struct"); 41 | 42 | if (setjmp(png_jmpbuf(png_ptr))) 43 | throw std::runtime_error("failed to set png error handling"); 44 | 45 | // Set image attributes. 46 | png_set_IHDR(png_ptr, info_ptr, info.width, info.height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, 47 | PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); 48 | // These settings get us most of the compression, but are much faster. 49 | png_set_filter(png_ptr, 0, PNG_FILTER_AVG); 50 | png_set_compression_level(png_ptr, 1); 51 | 52 | // Set up the image data. 53 | png_byte **row_ptrs = (png_byte **)png_malloc(png_ptr, info.height * sizeof(png_byte *)); 54 | png_byte *row = (uint8_t *)mem[0].data(); 55 | for (unsigned int i = 0; i < info.height; i++, row += info.stride) 56 | row_ptrs[i] = row; 57 | 58 | png_init_io(png_ptr, fp); 59 | png_set_rows(png_ptr, info_ptr, row_ptrs); 60 | png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL); 61 | 62 | long int size = ftell(fp); 63 | LOG(2, "Wrote PNG file of " << size << " bytes"); 64 | 65 | // Free and close everything and we're done. 
66 | png_free(png_ptr, row_ptrs); 67 | png_destroy_write_struct(&png_ptr, &info_ptr); 68 | if (fp != stdout) 69 | fclose(fp); 70 | } 71 | catch (std::exception const &e) 72 | { 73 | if (png_ptr) 74 | png_destroy_write_struct(&png_ptr, &info_ptr); 75 | if (fp && fp != stdout) 76 | fclose(fp); 77 | throw; 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /image/yuv.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * yuv.cpp - dummy stills encoder to save uncompressed data 6 | */ 7 | 8 | #include 9 | 10 | #include "core/still_options.hpp" 11 | #include "core/stream_info.hpp" 12 | 13 | static void yuv420_save(std::vector> const &mem, StreamInfo const &info, 14 | std::string const &filename, StillOptions const *options) 15 | { 16 | if (options->encoding == "yuv420") 17 | { 18 | unsigned w = info.width, h = info.height, stride = info.stride; 19 | if ((w & 1) || (h & 1)) 20 | throw std::runtime_error("both width and height must be even"); 21 | if (mem.size() != 1) 22 | throw std::runtime_error("incorrect number of planes in YUV420 data"); 23 | FILE *fp = filename == "-" ? 
stdout : fopen(filename.c_str(), "w"); 24 | if (!fp) 25 | throw std::runtime_error("failed to open file " + filename); 26 | try 27 | { 28 | uint8_t *Y = (uint8_t *)mem[0].data(); 29 | for (unsigned int j = 0; j < h; j++) 30 | { 31 | if (fwrite(Y + j * stride, w, 1, fp) != 1) 32 | throw std::runtime_error("failed to write file " + filename); 33 | } 34 | uint8_t *U = Y + stride * h; 35 | h /= 2, w /= 2, stride /= 2; 36 | for (unsigned int j = 0; j < h; j++) 37 | { 38 | if (fwrite(U + j * stride, w, 1, fp) != 1) 39 | throw std::runtime_error("failed to write file " + filename); 40 | } 41 | uint8_t *V = U + stride * h; 42 | for (unsigned int j = 0; j < h; j++) 43 | { 44 | if (fwrite(V + j * stride, w, 1, fp) != 1) 45 | throw std::runtime_error("failed to write file " + filename); 46 | } 47 | } 48 | catch (std::exception const &e) 49 | { 50 | if (fp != stdout) 51 | fclose(fp); 52 | throw; 53 | } 54 | } 55 | else 56 | throw std::runtime_error("output format " + options->encoding + " not supported"); 57 | } 58 | 59 | static void yuyv_save(std::vector> const &mem, StreamInfo const &info, 60 | std::string const &filename, StillOptions const *options) 61 | { 62 | if (options->encoding == "yuv420") 63 | { 64 | if ((info.width & 1) || (info.height & 1)) 65 | throw std::runtime_error("both width and height must be even"); 66 | 67 | FILE *fp = filename == "-" ? stdout : fopen(filename.c_str(), "w"); 68 | if (!fp) 69 | throw std::runtime_error("failed to open file " + filename); 70 | try 71 | { 72 | // We could doubtless do this much quicker. Though starting with 73 | // YUV420 planar buffer would have been nice. 
74 | std::vector row(info.width); 75 | uint8_t *ptr = (uint8_t *)mem[0].data(); 76 | for (unsigned int j = 0; j < info.height; j++, ptr += info.stride) 77 | { 78 | for (unsigned int i = 0; i < info.width; i++) 79 | row[i] = ptr[i << 1]; 80 | if (fwrite(&row[0], info.width, 1, fp) != 1) 81 | throw std::runtime_error("failed to write file " + filename); 82 | } 83 | ptr = (uint8_t *)mem[0].data(); 84 | for (unsigned int j = 0; j < info.height; j += 2, ptr += 2 * info.stride) 85 | { 86 | for (unsigned int i = 0; i < info.width / 2; i++) 87 | row[i] = ptr[(i << 2) + 1]; 88 | if (fwrite(&row[0], info.width / 2, 1, fp) != 1) 89 | throw std::runtime_error("failed to write file " + filename); 90 | } 91 | ptr = (uint8_t *)mem[0].data(); 92 | for (unsigned int j = 0; j < info.height; j += 2, ptr += 2 * info.stride) 93 | { 94 | for (unsigned int i = 0; i < info.width / 2; i++) 95 | row[i] = ptr[(i << 2) + 3]; 96 | if (fwrite(&row[0], info.width / 2, 1, fp) != 1) 97 | throw std::runtime_error("failed to write file " + filename); 98 | } 99 | if (fp != stdout) 100 | fclose(fp); 101 | } 102 | catch (std::exception const &e) 103 | { 104 | if (fp != stdout) 105 | fclose(fp); 106 | throw; 107 | } 108 | } 109 | else 110 | throw std::runtime_error("output format " + options->encoding + " not supported"); 111 | } 112 | 113 | static void rgb_save(std::vector> const &mem, StreamInfo const &info, 114 | std::string const &filename, StillOptions const *options) 115 | { 116 | if (options->encoding != "rgb") 117 | throw std::runtime_error("encoding should be set to rgb"); 118 | FILE *fp = filename == "-" ? 
stdout : fopen(filename.c_str(), "w"); 119 | if (!fp) 120 | throw std::runtime_error("failed to open file " + filename); 121 | try 122 | { 123 | uint8_t *ptr = (uint8_t *)mem[0].data(); 124 | for (unsigned int j = 0; j < info.height; j++, ptr += info.stride) 125 | { 126 | if (fwrite(ptr, 3 * info.width, 1, fp) != 1) 127 | throw std::runtime_error("failed to write file " + filename); 128 | } 129 | if (fp != stdout) 130 | fclose(fp); 131 | } 132 | catch (std::exception const &e) 133 | { 134 | if (fp != stdout) 135 | fclose(fp); 136 | throw; 137 | } 138 | } 139 | 140 | void yuv_save(std::vector> const &mem, StreamInfo const &info, 141 | std::string const &filename, StillOptions const *options) 142 | { 143 | if (info.pixel_format == libcamera::formats::YUYV) 144 | yuyv_save(mem, info, filename, options); 145 | else if (info.pixel_format == libcamera::formats::YUV420) 146 | yuv420_save(mem, info, filename, options); 147 | else if (info.pixel_format == libcamera::formats::BGR888 || info.pixel_format == libcamera::formats::RGB888) 148 | rgb_save(mem, info, filename, options); 149 | else 150 | throw std::runtime_error("unrecognised YUV/RGB save format"); 151 | } 152 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2020 2021, Raspberry Pi (Trading) Limited 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 
15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /output/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | include(GNUInstallDirs) 4 | 5 | add_library(outputs output.cpp file_output.cpp net_output.cpp circular_output.cpp) 6 | set_target_properties(outputs PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR}) 7 | 8 | install(TARGETS outputs LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) 9 | 10 | -------------------------------------------------------------------------------- /output/circular_output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * circular_output.cpp - Write output to circular buffer which we save on exit. 
6 | */ 7 | 8 | #include "circular_output.hpp" 9 | 10 | // We're going to align the frames within the buffer to friendly byte boundaries 11 | static constexpr int ALIGN = 16; // power of 2, please 12 | 13 | struct Header 14 | { 15 | unsigned int length; 16 | bool keyframe; 17 | int64_t timestamp; 18 | }; 19 | static_assert(sizeof(Header) % ALIGN == 0, "Header should have aligned size"); 20 | 21 | // Size of buffer (options->circular) is given in megabytes. 22 | CircularOutput::CircularOutput(VideoOptions const *options) : Output(options), cb_(options->circular<<20) 23 | { 24 | // Open this now, so that we can get any complaints out of the way 25 | if (options_->output == "-") 26 | fp_ = stdout; 27 | else if (!options_->output.empty()) 28 | { 29 | fp_ = fopen(options_->output.c_str(), "w"); 30 | } 31 | if (!fp_) 32 | throw std::runtime_error("could not open output file"); 33 | } 34 | 35 | CircularOutput::~CircularOutput() 36 | { 37 | // We do have to skip to the first I frame before dumping stuff to disk. If there are 38 | // no I frames you will get nothing. Caveat emptor, methinks. 
39 | unsigned int total = 0, frames = 0; 40 | bool seen_keyframe = false; 41 | Header header; 42 | FILE *fp = fp_; // can't capture a class member in a lambda 43 | while (!cb_.Empty()) 44 | { 45 | uint8_t *dst = (uint8_t *)&header; 46 | cb_.Read( 47 | [&dst](void *src, int n) { 48 | memcpy(dst, src, n); 49 | dst += n; 50 | }, 51 | sizeof(header)); 52 | seen_keyframe |= header.keyframe; 53 | if (seen_keyframe) 54 | { 55 | cb_.Read([fp](void *src, int n) { fwrite(src, 1, n, fp); }, header.length); 56 | cb_.Skip((ALIGN - header.length) & (ALIGN - 1)); 57 | total += header.length; 58 | if (fp_timestamps_) 59 | { 60 | Output::timestampReady(header.timestamp); 61 | } 62 | frames++; 63 | } 64 | else 65 | cb_.Skip((header.length + ALIGN - 1) & ~(ALIGN - 1)); 66 | } 67 | fclose(fp_); 68 | LOG(1, "Wrote " << total << " bytes (" << frames << " frames)"); 69 | } 70 | 71 | void CircularOutput::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 72 | { 73 | // First make sure there's enough space. 
74 | int pad = (ALIGN - size) & (ALIGN - 1); 75 | while (size + pad + sizeof(Header) > cb_.Available()) 76 | { 77 | if (cb_.Empty()) 78 | throw std::runtime_error("circular buffer too small"); 79 | Header header; 80 | uint8_t *dst = (uint8_t *)&header; 81 | cb_.Read( 82 | [&dst](void *src, int n) { 83 | memcpy(dst, src, n); 84 | dst += n; 85 | }, 86 | sizeof(header)); 87 | cb_.Skip((header.length + ALIGN - 1) & ~(ALIGN - 1)); 88 | } 89 | Header header = { static_cast(size), !!(flags & FLAG_KEYFRAME), timestamp_us }; 90 | cb_.Write(&header, sizeof(header)); 91 | cb_.Write(mem, size); 92 | cb_.Pad(pad); 93 | } 94 | 95 | void CircularOutput::timestampReady(int64_t timestamp) 96 | { 97 | // Don't want to save every timestamp as we go along, only outputs them at the end 98 | } 99 | -------------------------------------------------------------------------------- /output/circular_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * circular_output.hpp - Write output to a circular buffer. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include "output.hpp" 11 | 12 | // A simple circular buffer implementation used by the CircularOutput class. 13 | 14 | class CircularBuffer 15 | { 16 | public: 17 | CircularBuffer(size_t size) : size_(size), buf_(size), rptr_(0), wptr_(0) {} 18 | bool Empty() const { return rptr_ == wptr_; } 19 | size_t Available() const { return wptr_ == rptr_ ? size_ - 1 : (size_ - wptr_ + rptr_) % size_ - 1; } 20 | void Skip(unsigned int n) { rptr_ = (rptr_ + n) % size_; } 21 | // The dst function allows bytes read to go straight to memory or a file etc. 
22 | void Read(std::function dst, unsigned int n) 23 | { 24 | if (rptr_ + n >= size_) 25 | { 26 | dst(&buf_[rptr_], size_ - rptr_); 27 | n -= size_ - rptr_; 28 | rptr_ = 0; 29 | } 30 | dst(&buf_[rptr_], n); 31 | rptr_ += n; 32 | } 33 | void Pad(unsigned int n) { wptr_ = (wptr_ + n) % size_; } 34 | void Write(const void *ptr, unsigned int n) 35 | { 36 | if (wptr_ + n >= size_) 37 | { 38 | memcpy(&buf_[wptr_], ptr, size_ - wptr_); 39 | n -= size_ - wptr_; 40 | ptr = static_cast(ptr) + size_ - wptr_; 41 | wptr_ = 0; 42 | } 43 | memcpy(&buf_[wptr_], ptr, n); 44 | wptr_ += n; 45 | } 46 | 47 | private: 48 | const size_t size_; 49 | std::vector buf_; 50 | size_t rptr_, wptr_; 51 | }; 52 | 53 | // Write frames to a circular buffer, and dump them to disk when we quit. 54 | 55 | class CircularOutput : public Output 56 | { 57 | public: 58 | CircularOutput(VideoOptions const *options); 59 | ~CircularOutput(); 60 | 61 | protected: 62 | void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override; 63 | void timestampReady(int64_t timestamp) override; 64 | 65 | private: 66 | CircularBuffer cb_; 67 | FILE *fp_; 68 | }; 69 | -------------------------------------------------------------------------------- /output/file_output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * file_output.cpp - Write output to file. 
6 | */ 7 | 8 | #include "file_output.hpp" 9 | 10 | FileOutput::FileOutput(VideoOptions const *options) 11 | : Output(options), fp_(nullptr), count_(0), file_start_time_ms_(0) 12 | { 13 | } 14 | 15 | FileOutput::~FileOutput() 16 | { 17 | closeFile(); 18 | } 19 | 20 | void FileOutput::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 21 | { 22 | // We need to open a new file if we're in "segment" mode and our segment is full 23 | // (though we have to wait for the next I frame), or if we're in "split" mode 24 | // and recording is being restarted (this is necessarily an I-frame already). 25 | if (fp_ == nullptr || 26 | (options_->segment && (flags & FLAG_KEYFRAME) && 27 | timestamp_us / 1000 - file_start_time_ms_ > options_->segment) || 28 | (options_->split && (flags & FLAG_RESTART))) 29 | { 30 | closeFile(); 31 | openFile(timestamp_us); 32 | } 33 | 34 | LOG(2, "FileOutput: output buffer " << mem << " size " << size); 35 | if (fp_ && size) 36 | { 37 | if (fwrite(mem, size, 1, fp_) != 1) 38 | throw std::runtime_error("failed to write output bytes"); 39 | if (options_->flush) 40 | fflush(fp_); 41 | } 42 | } 43 | 44 | void FileOutput::openFile(int64_t timestamp_us) 45 | { 46 | if (options_->output == "-") 47 | fp_ = stdout; 48 | else if (!options_->output.empty()) 49 | { 50 | // Generate the next output file name. 
51 | char filename[256]; 52 | int n = snprintf(filename, sizeof(filename), options_->output.c_str(), count_); 53 | count_++; 54 | if (options_->wrap) 55 | count_ = count_ % options_->wrap; 56 | if (n < 0) 57 | throw std::runtime_error("failed to generate filename"); 58 | 59 | fp_ = fopen(filename, "w"); 60 | if (!fp_) 61 | throw std::runtime_error("failed to open output file " + std::string(filename)); 62 | LOG(2, "FileOutput: opened output file " << filename); 63 | 64 | file_start_time_ms_ = timestamp_us / 1000; 65 | } 66 | } 67 | 68 | void FileOutput::closeFile() 69 | { 70 | if (fp_ && fp_ != stdout) 71 | fclose(fp_); 72 | fp_ = nullptr; 73 | } 74 | -------------------------------------------------------------------------------- /output/file_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * file_output.hpp - Write output to file. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include "output.hpp" 11 | 12 | class FileOutput : public Output 13 | { 14 | public: 15 | FileOutput(VideoOptions const *options); 16 | ~FileOutput(); 17 | 18 | protected: 19 | void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override; 20 | 21 | private: 22 | void openFile(int64_t timestamp_us); 23 | void closeFile(); 24 | FILE *fp_; 25 | unsigned int count_; 26 | int64_t file_start_time_ms_; 27 | }; 28 | -------------------------------------------------------------------------------- /output/net_output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * net_output.cpp - send output over network. 
6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include "net_output.hpp" 12 | 13 | NetOutput::NetOutput(VideoOptions const *options) : Output(options) 14 | { 15 | char protocol[4]; 16 | int start, end, a, b, c, d, port; 17 | if (sscanf(options->output.c_str(), "%3s://%n%d.%d.%d.%d%n:%d", protocol, &start, &a, &b, &c, &d, &end, &port) != 6) 18 | throw std::runtime_error("bad network address " + options->output); 19 | std::string address = options->output.substr(start, end - start); 20 | 21 | if (strcmp(protocol, "udp") == 0) 22 | { 23 | saddr_ = {}; 24 | saddr_.sin_family = AF_INET; 25 | saddr_.sin_port = htons(port); 26 | if (inet_aton(address.c_str(), &saddr_.sin_addr) == 0) 27 | throw std::runtime_error("inet_aton failed for " + address); 28 | 29 | fd_ = socket(AF_INET, SOCK_DGRAM, 0); 30 | if (fd_ < 0) 31 | throw std::runtime_error("unable to open udp socket"); 32 | 33 | saddr_ptr_ = (const sockaddr *)&saddr_; // sendto needs these for udp 34 | sockaddr_in_size_ = sizeof(sockaddr_in); 35 | } 36 | else if (strcmp(protocol, "tcp") == 0) 37 | { 38 | // WARNING: I've not actually tried this yet... 39 | if (options->listen) 40 | { 41 | // We are the server. 
42 | int listen_fd = socket(AF_INET, SOCK_STREAM, 0); 43 | if (listen_fd < 0) 44 | throw std::runtime_error("unable to open listen socket"); 45 | 46 | sockaddr_in server_saddr = {}; 47 | server_saddr.sin_family = AF_INET; 48 | server_saddr.sin_addr.s_addr = INADDR_ANY; 49 | server_saddr.sin_port = htons(port); 50 | 51 | int enable = 1; 52 | if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable)) < 0) 53 | throw std::runtime_error("failed to setsockopt listen socket"); 54 | 55 | if (bind(listen_fd, (struct sockaddr *)&server_saddr, sizeof(server_saddr)) < 0) 56 | throw std::runtime_error("failed to bind listen socket"); 57 | listen(listen_fd, 1); 58 | 59 | LOG(2, "Waiting for client to connect..."); 60 | fd_ = accept(listen_fd, (struct sockaddr *)&saddr_, &sockaddr_in_size_); 61 | if (fd_ < 0) 62 | throw std::runtime_error("accept socket failed"); 63 | LOG(2, "Client connection accepted"); 64 | 65 | close(listen_fd); 66 | } 67 | else 68 | { 69 | // We are a client. 70 | saddr_ = {}; 71 | saddr_.sin_family = AF_INET; 72 | saddr_.sin_port = htons(port); 73 | if (inet_aton(address.c_str(), &saddr_.sin_addr) == 0) 74 | throw std::runtime_error("inet_aton failed for " + address); 75 | 76 | fd_ = socket(AF_INET, SOCK_STREAM, 0); 77 | if (fd_ < 0) 78 | throw std::runtime_error("unable to open client socket"); 79 | 80 | LOG(2, "Connecting to server..."); 81 | if (connect(fd_, (struct sockaddr *)&saddr_, sizeof(sockaddr_in)) < 0) 82 | throw std::runtime_error("connect to server failed"); 83 | LOG(2, "Connected"); 84 | } 85 | 86 | saddr_ptr_ = NULL; // sendto doesn't want these for tcp 87 | sockaddr_in_size_ = 0; 88 | } 89 | else 90 | throw std::runtime_error("unrecognised network protocol " + options->output); 91 | } 92 | 93 | NetOutput::~NetOutput() 94 | { 95 | close(fd_); 96 | } 97 | 98 | // Maximum size that sendto will accept. 
99 | constexpr size_t MAX_UDP_SIZE = 65507; 100 | 101 | void NetOutput::outputBuffer(void *mem, size_t size, int64_t /*timestamp_us*/, uint32_t /*flags*/) 102 | { 103 | LOG(2, "NetOutput: output buffer " << mem << " size " << size); 104 | size_t max_size = saddr_ptr_ ? MAX_UDP_SIZE : size; 105 | for (uint8_t *ptr = (uint8_t *)mem; size;) 106 | { 107 | size_t bytes_to_send = std::min(size, max_size); 108 | if (sendto(fd_, ptr, bytes_to_send, 0, saddr_ptr_, sockaddr_in_size_) < 0) 109 | throw std::runtime_error("failed to send data on socket"); 110 | ptr += bytes_to_send; 111 | size -= bytes_to_send; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /output/net_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * net_output.hpp - send output over network. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "output.hpp" 13 | 14 | class NetOutput : public Output 15 | { 16 | public: 17 | NetOutput(VideoOptions const *options); 18 | ~NetOutput(); 19 | 20 | protected: 21 | void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override; 22 | 23 | private: 24 | int fd_; 25 | sockaddr_in saddr_; 26 | const sockaddr *saddr_ptr_; 27 | socklen_t sockaddr_in_size_; 28 | }; 29 | -------------------------------------------------------------------------------- /output/output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * output.cpp - video stream output base class 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include "circular_output.hpp" 12 | #include "file_output.hpp" 13 | #include "net_output.hpp" 14 | #include "output.hpp" 15 | 16 | Output::Output(VideoOptions const *options) 17 | : options_(options), fp_timestamps_(nullptr), state_(WAITING_KEYFRAME), time_offset_(0), last_timestamp_(0), 18 | buf_metadata_(std::cout.rdbuf()), of_metadata_() 19 | { 20 | if (!options->save_pts.empty()) 21 | { 22 | fp_timestamps_ = fopen(options->save_pts.c_str(), "w"); 23 | if (!fp_timestamps_) 24 | throw std::runtime_error("Failed to open timestamp file " + options->save_pts); 25 | fprintf(fp_timestamps_, "# timecode format v2\n"); 26 | } 27 | if (!options->metadata.empty()) 28 | { 29 | const std::string &filename = options_->metadata; 30 | 31 | if (filename.compare("-")) 32 | { 33 | of_metadata_.open(filename, std::ios::out); 34 | buf_metadata_ = of_metadata_.rdbuf(); 35 | start_metadata_output(buf_metadata_, options_->metadata_format); 36 | } 37 | } 38 | 39 | enable_ = !options->pause; 40 | } 41 | 42 | Output::~Output() 43 | { 44 | if (fp_timestamps_) 45 | fclose(fp_timestamps_); 46 | if (!options_->metadata.empty()) 47 | stop_metadata_output(buf_metadata_, options_->metadata_format); 48 | } 49 | 50 | void Output::Signal() 51 | { 52 | enable_ = !enable_; 53 | } 54 | 55 | void Output::OutputReady(void *mem, size_t size, int64_t timestamp_us, bool keyframe) 56 | { 57 | // When output is enabled, we may have to wait for the next keyframe. 58 | uint32_t flags = keyframe ? FLAG_KEYFRAME : FLAG_NONE; 59 | if (!enable_) 60 | state_ = DISABLED; 61 | else if (state_ == DISABLED) 62 | state_ = WAITING_KEYFRAME; 63 | if (state_ == WAITING_KEYFRAME && keyframe) 64 | state_ = RUNNING, flags |= FLAG_RESTART; 65 | if (state_ != RUNNING) 66 | return; 67 | 68 | // Frig the timestamps to be continuous after a pause. 
69 | if (flags & FLAG_RESTART) 70 | time_offset_ = timestamp_us - last_timestamp_; 71 | last_timestamp_ = timestamp_us - time_offset_; 72 | 73 | outputBuffer(mem, size, last_timestamp_, flags); 74 | 75 | // Save timestamps to a file, if that was requested. 76 | if (fp_timestamps_) 77 | { 78 | timestampReady(last_timestamp_); 79 | } 80 | 81 | if (!options_->metadata.empty()) 82 | { 83 | libcamera::ControlList metadata = metadata_queue_.front(); 84 | write_metadata(buf_metadata_, options_->metadata_format, metadata, !metadata_started_); 85 | metadata_started_ = true; 86 | metadata_queue_.pop(); 87 | } 88 | } 89 | 90 | void Output::timestampReady(int64_t timestamp) 91 | { 92 | fprintf(fp_timestamps_, "%" PRId64 ".%03" PRId64 "\n", timestamp / 1000, timestamp % 1000); 93 | if (options_->flush) 94 | fflush(fp_timestamps_); 95 | } 96 | 97 | void Output::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 98 | { 99 | // Supply this so that a vanilla Output gives you an object that outputs no buffers. 
// Factory: choose an Output implementation from the options. The protocol
// prefix on the output name selects a network sink; otherwise circular-buffer,
// plain file, or a do-nothing Output.
Output *Output::Create(VideoOptions const *options)
{
	// The libav codec handles its own output, so a do-nothing Output suffices.
	if (options->codec == "libav")
		return new Output(options);

	if (strncmp(options->output.c_str(), "udp://", 6) == 0 || strncmp(options->output.c_str(), "tcp://", 6) == 0)
		return new NetOutput(options);
	else if (options->circular)
		return new CircularOutput(options);
	else if (!options->output.empty())
		return new FileOutput(options);
	else
		return new Output(options);
}

// Queue a copy of a frame's metadata; OutputReady() consumes one entry per frame.
void Output::MetadataReady(libcamera::ControlList &metadata)
{
	if (options_->metadata.empty())
		return;

	metadata_queue_.push(metadata);
}

// Emit the opening of the metadata stream ("[" for a JSON array; nothing for txt).
void start_metadata_output(std::streambuf *buf, std::string fmt)
{
	std::ostream out(buf);
	if (fmt == "json")
		out << "[" << std::endl;
}

// Append one frame's metadata, either as "key=value" lines (txt) or as one JSON
// object. first_write suppresses the comma separating successive JSON objects.
void write_metadata(std::streambuf *buf, std::string fmt, libcamera::ControlList &metadata, bool first_write)
{
	std::ostream out(buf);
	const libcamera::ControlIdMap *id_map = metadata.idMap();
	if (fmt == "txt")
	{
		for (auto const &[id, val] : metadata)
			out << id_map->at(id)->name() << "=" << val.toString() << std::endl;
		out << std::endl;
	}
	else
	{
		if (!first_write)
			out << "," << std::endl;
		out << "{";
		bool first_done = false;
		for (auto const &[id, val] : metadata)
		{
			// NOTE(review): values are quoted only when they contain a '/'
			// (e.g. rationals); other non-numeric values are emitted unquoted,
			// which may not be strictly valid JSON — confirm downstream parsers.
			std::string arg_quote = (val.toString().find('/') != std::string::npos) ? "\"" : "";
			out << (first_done ? "," : "") << std::endl
				<< "    \"" << id_map->at(id)->name() << "\": " << arg_quote << val.toString() << arg_quote;
			first_done = true;
		}
		out << std::endl << "}";
	}
}

// Emit the closing of the metadata stream ("]" for a JSON array; nothing for txt).
void stop_metadata_output(std::streambuf *buf, std::string fmt)
{
	std::ostream out(buf);
	if (fmt == "json")
		out << std::endl << "]" << std::endl;
}
cmake_minimum_required(VERSION 3.6)

include(GNUInstallDirs)

# Core stages that are always built, with no optional dependencies.
set(SRC post_processing_stage.cpp negate_stage.cpp hdr_stage.cpp pwl.cpp histogram.cpp motion_detect_stage.cpp)
set(TARGET_LIBS images)


# OpenCV support is opt-out: enabled unless the user passes -DENABLE_OPENCV=0.
if (NOT DEFINED ENABLE_OPENCV)
    set(ENABLE_OPENCV 1)
endif()
set(OpenCV_FOUND 0)
if (ENABLE_OPENCV)
    message(STATUS "Checking for OpenCV")
    find_package(OpenCV QUIET)
else()
    message(STATUS "Omitting check for OpenCV")
endif()

if (OpenCV_FOUND)
    # OpenCV has so many libraries, we're going to link only the ones we need.
    # But if you add more OpenCV stages, you may need more libraries here!
    set(OpenCV_LIBS_REDUCED -lopencv_core -lopencv_imgproc -lopencv_objdetect)
    message(STATUS "OpenCV library found:")
    message(STATUS "    version: ${OpenCV_VERSION}")
    message(STATUS "    libraries: ${OpenCV_LIBS_REDUCED}")
    message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")
    include_directories(${OpenCV_INCLUDE_DIRS})
    set(SRC ${SRC} sobel_cv_stage.cpp face_detect_cv_stage.cpp annotate_cv_stage.cpp plot_pose_cv_stage.cpp object_detect_draw_cv_stage.cpp)
    set(TARGET_LIBS ${TARGET_LIBS} ${OpenCV_LIBS_REDUCED})
    message(STATUS "OpenCV support is included")
else()
    if (ENABLE_OPENCV)
        message(WARNING "OpenCV support was enabled but no libraries found!")
    else()
        message(STATUS "OpenCV support not being included")
    endif()
endif()

# TensorFlow Lite support is opt-in: disabled unless -DENABLE_TFLITE=1 is passed.
if (NOT DEFINED ENABLE_TFLITE)
    set(ENABLE_TFLITE 0)
endif()
if (ENABLE_TFLITE)
    set(SRC ${SRC} tf_stage.cpp object_classify_tf_stage.cpp pose_estimation_tf_stage.cpp object_detect_tf_stage.cpp segmentation_tf_stage.cpp)
    set(TARGET_LIBS ${TARGET_LIBS} tensorflow-lite)
    message(STATUS "Adding TFLite support")
else()
    message(STATUS "TFLite support not being included")
endif()

add_library(post_processing_stages ${SRC})
set_target_properties(post_processing_stages PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
target_link_libraries(post_processing_stages ${TARGET_LIBS})
# OPENCV_PRESENT lets sources compile conditionally on OpenCV availability.
target_compile_definitions(post_processing_stages PUBLIC OPENCV_PRESENT=${OpenCV_FOUND})

install(TARGETS post_processing_stages LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
2021, Raspberry Pi (Trading) Limited 4 | * 5 | * annotate_cv_stage.cpp - add text annotation to image 6 | */ 7 | 8 | // The text string can include the % directives supported by FrameInfo. 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include "core/frame_info.hpp" 15 | #include "core/libcamera_app.hpp" 16 | 17 | #include "post_processing_stages/post_processing_stage.hpp" 18 | 19 | #include "opencv2/core.hpp" 20 | #include "opencv2/imgproc.hpp" 21 | 22 | using namespace cv; 23 | 24 | using Stream = libcamera::Stream; 25 | 26 | class AnnotateCvStage : public PostProcessingStage 27 | { 28 | public: 29 | AnnotateCvStage(LibcameraApp *app) : PostProcessingStage(app) {} 30 | 31 | char const *Name() const override; 32 | 33 | void Read(boost::property_tree::ptree const ¶ms) override; 34 | 35 | void Configure() override; 36 | 37 | bool Process(CompletedRequestPtr &completed_request) override; 38 | 39 | private: 40 | Stream *stream_; 41 | StreamInfo info_; 42 | std::string text_; 43 | int fg_; 44 | int bg_; 45 | double scale_; 46 | int thickness_; 47 | double alpha_; 48 | double adjusted_scale_; 49 | int adjusted_thickness_; 50 | }; 51 | 52 | #define NAME "annotate_cv" 53 | 54 | char const *AnnotateCvStage::Name() const 55 | { 56 | return NAME; 57 | } 58 | 59 | void AnnotateCvStage::Read(boost::property_tree::ptree const ¶ms) 60 | { 61 | text_ = params.get("text"); 62 | fg_ = params.get("fg", 255); 63 | bg_ = params.get("bg", 0); 64 | scale_ = params.get("scale", 1.0); 65 | thickness_ = params.get("thickness", 2); 66 | alpha_ = params.get("alpha", 0.5); 67 | } 68 | 69 | void AnnotateCvStage::Configure() 70 | { 71 | stream_ = app_->GetMainStream(); 72 | if (!stream_ || stream_->configuration().pixelFormat != libcamera::formats::YUV420) 73 | throw std::runtime_error("AnnotateCvStage: only YUV420 format supported"); 74 | info_ = app_->GetStreamInfo(stream_); 75 | 76 | // Adjust the scale and thickness according to the image size, so that the relative 77 | // size is preserved 
across different camera modes. Note that the thickness can get 78 | // rather harshly quantised, not much we can do about that. 79 | adjusted_scale_ = scale_ * info_.width / 1200; 80 | adjusted_thickness_ = std::max(thickness_ * info_.width / 700, 1u); 81 | } 82 | 83 | bool AnnotateCvStage::Process(CompletedRequestPtr &completed_request) 84 | { 85 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[stream_])[0]; 86 | FrameInfo info(completed_request->metadata); 87 | info.sequence = completed_request->sequence; 88 | 89 | // Other post-processing stages can supply metadata to update the text. 90 | completed_request->post_process_metadata.Get("annotate.text", text_); 91 | std::string text = info.ToString(text_); 92 | char text_with_date[256]; 93 | time_t t = time(NULL); 94 | tm *tm_ptr = localtime(&t); 95 | if (strftime(text_with_date, sizeof(text_with_date), text.c_str(), tm_ptr) != 0) 96 | text = std::string(text_with_date); 97 | 98 | uint8_t *ptr = (uint8_t *)buffer.data(); 99 | Mat im(info_.height, info_.width, CV_8U, ptr, info_.stride); 100 | int font = FONT_HERSHEY_SIMPLEX; 101 | 102 | int baseline = 0; 103 | Size size = getTextSize(text, font, adjusted_scale_, adjusted_thickness_, &baseline); 104 | 105 | // Can't find a handy "draw rectangle with alpha" function... 
106 | for (int y = 0; y < size.height + baseline; y++, ptr += info_.stride) 107 | { 108 | for (int x = 0; x < size.width; x++) 109 | ptr[x] = bg_ * alpha_ + (1 - alpha_) * ptr[x]; 110 | } 111 | putText(im, text, Point(0, size.height), font, adjusted_scale_, fg_, adjusted_thickness_, 0); 112 | 113 | return false; 114 | } 115 | 116 | static PostProcessingStage *Create(LibcameraApp *app) 117 | { 118 | return new AnnotateCvStage(app); 119 | } 120 | 121 | static RegisterStage reg(NAME, &Create); 122 | -------------------------------------------------------------------------------- /post_processing_stages/histogram.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * histogram.cpp - histogram calculations 6 | */ 7 | #include 8 | #include 9 | 10 | #include "histogram.hpp" 11 | 12 | uint64_t Histogram::CumulativeFreq(double bin) const 13 | { 14 | if (bin <= 0) 15 | return 0; 16 | else if (bin >= Bins()) 17 | return Total(); 18 | int b = (int)bin; 19 | return cumulative_[b] + 20 | (bin - b) * (cumulative_[b + 1] - cumulative_[b]); 21 | } 22 | 23 | double Histogram::Quantile(double q, int first, int last) const 24 | { 25 | if (first == -1) 26 | first = 0; 27 | if (last == -1) 28 | last = cumulative_.size() - 2; 29 | assert(first <= last); 30 | uint64_t items = q * Total(); 31 | while (first < last) // binary search to find the right bin 32 | { 33 | int middle = (first + last) / 2; 34 | if (cumulative_[middle + 1] > items) 35 | last = middle; // between first and middle 36 | else 37 | first = middle + 1; // after middle 38 | } 39 | assert(items >= cumulative_[first] && items <= cumulative_[last + 1]); 40 | double frac = cumulative_[first + 1] == cumulative_[first] ? 
0 41 | : (double)(items - cumulative_[first]) / 42 | (cumulative_[first + 1] - cumulative_[first]); 43 | return first + frac; 44 | } 45 | 46 | double Histogram::InterQuantileMean(double q_lo, double q_hi) const 47 | { 48 | assert(q_hi > q_lo); 49 | double p_lo = Quantile(q_lo); 50 | double p_hi = Quantile(q_hi, (int)p_lo); 51 | double sum_bin_freq = 0, cumul_freq = 0; 52 | for (double p_next = floor(p_lo) + 1.0; p_next <= ceil(p_hi); 53 | p_lo = p_next, p_next += 1.0) { 54 | int bin = floor(p_lo); 55 | double freq = (cumulative_[bin + 1] - cumulative_[bin]) * 56 | (std::min(p_next, p_hi) - p_lo); 57 | sum_bin_freq += bin * freq; 58 | cumul_freq += freq; 59 | } 60 | // add 0.5 to give an average for bin mid-points 61 | return sum_bin_freq / cumul_freq + 0.5; 62 | } 63 | -------------------------------------------------------------------------------- /post_processing_stages/histogram.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * histogram.hpp - histogram calculation interface 6 | */ 7 | #pragma once 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | // A simple histogram class, for use in particular to find "quantiles" and 14 | // averages between "quantiles". 15 | 16 | class Histogram 17 | { 18 | public: 19 | template Histogram(T *histogram, int num) 20 | { 21 | assert(num); 22 | cumulative_.reserve(num + 1); 23 | cumulative_.push_back(0); 24 | for (int i = 0; i < num; i++) 25 | cumulative_.push_back(cumulative_.back() + 26 | histogram[i]); 27 | } 28 | uint32_t Bins() const { return cumulative_.size() - 1; } 29 | uint64_t Total() const { return cumulative_[cumulative_.size() - 1]; } 30 | // Cumulative frequency up to a (fractional) point in a bin. 31 | uint64_t CumulativeFreq(double bin) const; 32 | // Return the (fractional) bin of the point q (0 <= q <= 1) through the 33 | // histogram. 
Optionally provide limits to help. 34 | double Quantile(double q, int first = -1, int last = -1) const; 35 | // Return the average histogram bin value between the two quantiles. 36 | double InterQuantileMean(double q_lo, double q_hi) const; 37 | 38 | private: 39 | std::vector cumulative_; 40 | }; 41 | -------------------------------------------------------------------------------- /post_processing_stages/negate_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * negate_stage.cpp - image negate effect 6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | using Stream = libcamera::Stream; 15 | 16 | class NegateStage : public PostProcessingStage 17 | { 18 | public: 19 | NegateStage(LibcameraApp *app) : PostProcessingStage(app) {} 20 | 21 | char const *Name() const override; 22 | 23 | void Read(boost::property_tree::ptree const ¶ms) override {} 24 | 25 | void Configure() override; 26 | 27 | bool Process(CompletedRequestPtr &completed_request) override; 28 | 29 | private: 30 | Stream *stream_; 31 | }; 32 | 33 | #define NAME "negate" 34 | 35 | char const *NegateStage::Name() const 36 | { 37 | return NAME; 38 | } 39 | 40 | void NegateStage::Configure() 41 | { 42 | stream_ = app_->GetMainStream(); 43 | } 44 | 45 | bool NegateStage::Process(CompletedRequestPtr &completed_request) 46 | { 47 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[stream_])[0]; 48 | uint32_t *ptr = (uint32_t *)buffer.data(); 49 | 50 | // Constraints on the stride mean we always have multiple-of-4 bytes. 
51 | for (unsigned int i = 0; i < buffer.size(); i += 4) 52 | *(ptr++) ^= 0xffffffff; 53 | 54 | return false; 55 | } 56 | 57 | static PostProcessingStage *Create(LibcameraApp *app) 58 | { 59 | return new NegateStage(app); 60 | } 61 | 62 | static RegisterStage reg(NAME, &Create); 63 | -------------------------------------------------------------------------------- /post_processing_stages/object_detect.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * object_detect.hpp - object detector result 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | struct Detection 15 | { 16 | Detection(int c, const std::string &n, float conf, int x, int y, int w, int h) 17 | : category(c), name(n), confidence(conf), box(x, y, w, h) 18 | { 19 | } 20 | int category; 21 | std::string name; 22 | float confidence; 23 | libcamera::Rectangle box; 24 | std::string toString() const 25 | { 26 | std::stringstream output; 27 | output.precision(2); 28 | output << name << "[" << category << "] (" << confidence << ") @ " << box.x << "," << box.y << " " << box.width 29 | << "x" << box.height; 30 | return output.str(); 31 | } 32 | }; 33 | -------------------------------------------------------------------------------- /post_processing_stages/object_detect_draw_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * object_detect_draw_cv_stage.cpp - draw object detection results 6 | */ 7 | 8 | #include "opencv2/imgproc.hpp" 9 | 10 | #include "core/libcamera_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | #include "object_detect.hpp" 15 | 16 | using namespace cv; 17 | 18 | using Rectange = libcamera::Rectangle; 19 | using 
Stream = libcamera::Stream; 20 | 21 | class ObjectDetectDrawCvStage : public PostProcessingStage 22 | { 23 | public: 24 | ObjectDetectDrawCvStage(LibcameraApp *app) : PostProcessingStage(app) {} 25 | 26 | char const *Name() const override; 27 | 28 | void Read(boost::property_tree::ptree const ¶ms) override; 29 | 30 | void Configure() override; 31 | 32 | bool Process(CompletedRequestPtr &completed_request) override; 33 | 34 | private: 35 | Stream *stream_; 36 | int line_thickness_; 37 | double font_size_; 38 | }; 39 | 40 | #define NAME "object_detect_draw_cv" 41 | 42 | char const *ObjectDetectDrawCvStage::Name() const 43 | { 44 | return NAME; 45 | } 46 | 47 | void ObjectDetectDrawCvStage::Configure() 48 | { 49 | // Only draw on image if a low res stream was specified. 50 | stream_ = app_->LoresStream() ? app_->GetMainStream() : nullptr; 51 | } 52 | 53 | void ObjectDetectDrawCvStage::Read(boost::property_tree::ptree const ¶ms) 54 | { 55 | line_thickness_ = params.get("line_thickness", 1); 56 | font_size_ = params.get("font_size", 1.0); 57 | } 58 | 59 | bool ObjectDetectDrawCvStage::Process(CompletedRequestPtr &completed_request) 60 | { 61 | if (!stream_) 62 | return false; 63 | 64 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[stream_])[0]; 65 | uint32_t *ptr = (uint32_t *)buffer.data(); 66 | StreamInfo info = app_->GetStreamInfo(stream_); 67 | 68 | std::vector detections; 69 | 70 | completed_request->post_process_metadata.Get("object_detect.results", detections); 71 | 72 | Mat image(info.height, info.width, CV_8U, ptr, info.stride); 73 | Scalar colour = Scalar(255, 255, 255); 74 | int font = FONT_HERSHEY_SIMPLEX; 75 | 76 | for (auto &detection : detections) 77 | { 78 | Rect r(detection.box.x, detection.box.y, detection.box.width, detection.box.height); 79 | rectangle(image, r, colour, line_thickness_); 80 | std::stringstream text_stream; 81 | text_stream << detection.name << " " << (int)(detection.confidence * 100) << "%"; 82 | std::string text = 
text_stream.str(); 83 | int baseline = 0; 84 | Size size = getTextSize(text, font, font_size_, 2, &baseline); 85 | Point text_origin(detection.box.x + 5, detection.box.y + size.height + 5); 86 | putText(image, text, text_origin, font, font_size_, colour, 2); 87 | } 88 | 89 | return false; 90 | } 91 | 92 | static PostProcessingStage *Create(LibcameraApp *app) 93 | { 94 | return new ObjectDetectDrawCvStage(app); 95 | } 96 | 97 | static RegisterStage reg(NAME, &Create); 98 | -------------------------------------------------------------------------------- /post_processing_stages/object_detect_tf_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * object_detect_tf_stage.cpp - object detector 6 | */ 7 | 8 | #include "object_detect.hpp" 9 | #include "tf_stage.hpp" 10 | 11 | using Rectangle = libcamera::Rectangle; 12 | 13 | constexpr int WIDTH = 300; 14 | constexpr int HEIGHT = 300; 15 | 16 | struct ObjectDetectTfConfig : public TfConfig 17 | { 18 | float confidence_threshold; 19 | float overlap_threshold; 20 | }; 21 | 22 | #define NAME "object_detect_tf" 23 | 24 | class ObjectDetectTfStage : public TfStage 25 | { 26 | public: 27 | // The model we use expects 224x224 images. 28 | ObjectDetectTfStage(LibcameraApp *app) : TfStage(app, WIDTH, HEIGHT) 29 | { 30 | config_ = std::make_unique(); 31 | } 32 | char const *Name() const override { return NAME; } 33 | 34 | protected: 35 | ObjectDetectTfConfig *config() const { return static_cast(config_.get()); } 36 | 37 | // Read the label file, plus some thresholds. 38 | void readExtras(boost::property_tree::ptree const ¶ms) override; 39 | 40 | void checkConfiguration() override; 41 | 42 | // Retrieve the top-n most likely results. 
43 | void interpretOutputs() override; 44 | 45 | // Attach the results as metadata; optionally write the labels too for the annotate_cv 46 | // stage to pick up. 47 | void applyResults(CompletedRequestPtr &completed_request) override; 48 | 49 | private: 50 | void readLabelsFile(const std::string &file_name); 51 | 52 | std::vector output_results_; 53 | std::vector labels_; 54 | size_t label_count_; 55 | }; 56 | 57 | void ObjectDetectTfStage::readExtras(boost::property_tree::ptree const ¶ms) 58 | { 59 | config()->confidence_threshold = params.get("confidence_threshold", 0.5f); 60 | config()->overlap_threshold = params.get("overlap_threshold", 0.5f); 61 | 62 | std::string labels_file = params.get("labels_file", ""); 63 | readLabelsFile(labels_file); 64 | if (config()->verbose) 65 | LOG(1, "Read " << label_count_ << " labels"); 66 | 67 | // Check the tensor outputs and label classes match up. 68 | int output = interpreter_->outputs()[0]; 69 | TfLiteIntArray *output_dims = interpreter_->tensor(output)->dims; 70 | // Causes might include loading the wrong model. 71 | if (output_dims->size != 3 || output_dims->data[0] != 1 || output_dims->data[1] != 10 || output_dims->data[2] != 4) 72 | throw std::runtime_error("ObjectDetectTfStage: unexpected output dimensions"); 73 | } 74 | 75 | void ObjectDetectTfStage::readLabelsFile(const std::string &file_name) 76 | { 77 | std::ifstream file(file_name); 78 | if (!file) 79 | throw std::runtime_error("ObjectDetectTfStage: Failed to load labels file"); 80 | 81 | std::string line; 82 | std::getline(file, line); // discard first line of ??? 
83 | while (std::getline(file, line)) 84 | labels_.push_back(line); 85 | 86 | label_count_ = labels_.size(); 87 | } 88 | 89 | void ObjectDetectTfStage::checkConfiguration() 90 | { 91 | if (!main_stream_) 92 | throw std::runtime_error("ObjectDetectTfStage: Main stream is required"); 93 | } 94 | 95 | void ObjectDetectTfStage::applyResults(CompletedRequestPtr &completed_request) 96 | { 97 | completed_request->post_process_metadata.Set("object_detect.results", output_results_); 98 | } 99 | 100 | static unsigned int area(const Rectangle &r) 101 | { 102 | return r.width * r.height; 103 | } 104 | 105 | void ObjectDetectTfStage::interpretOutputs() 106 | { 107 | int box_index = interpreter_->outputs()[0]; 108 | int class_index = interpreter_->outputs()[1]; 109 | int score_index = interpreter_->outputs()[2]; 110 | int num_detections = interpreter_->tensor(box_index)->dims->data[1]; 111 | float *boxes = interpreter_->tensor(box_index)->data.f; 112 | float *scores = interpreter_->tensor(score_index)->data.f; 113 | float *classes = interpreter_->tensor(class_index)->data.f; 114 | 115 | output_results_.clear(); 116 | 117 | for (int i = 0; i < num_detections; i++) 118 | { 119 | if (scores[i] < config()->confidence_threshold) 120 | continue; 121 | 122 | // The coords in the WIDTH x HEIGHT image fed to the network are: 123 | int y = std::clamp(HEIGHT * boxes[i * 4 + 0], 0, HEIGHT); 124 | int x = std::clamp(WIDTH * boxes[i * 4 + 1], 0, WIDTH); 125 | int h = std::clamp(HEIGHT * boxes[i * 4 + 2] - y, 0, HEIGHT); 126 | int w = std::clamp(WIDTH * boxes[i * 4 + 3] - x, 0, WIDTH); 127 | // The network is fed a crop from the lores (if that was too large), so the coords 128 | // in the full lores image are: 129 | y += (lores_info_.height - HEIGHT) / 2; 130 | x += (lores_info_.width - WIDTH) / 2; 131 | // The lores is a pure scaling of the main image (squishing if the aspect ratios 132 | // don't match), so: 133 | y = y * main_stream_info_.height / lores_info_.height; 134 | x = x * 
main_stream_info_.width / lores_info_.width; 135 | h = h * main_stream_info_.height / lores_info_.height; 136 | w = w * main_stream_info_.width / lores_info_.width; 137 | 138 | int c = classes[i]; 139 | Detection detection(c, labels_[c], scores[i], x, y, w, h); 140 | 141 | // Before adding this detection to the results, see if it overlaps an existing one. 142 | bool overlapped = false; 143 | for (auto &prev_detection : output_results_) 144 | { 145 | if (prev_detection.category == c) 146 | { 147 | unsigned int prev_area = area(prev_detection.box); 148 | unsigned int new_area = area(detection.box); 149 | unsigned int overlap = area(prev_detection.box.boundedTo(detection.box)); 150 | if (overlap > config()->overlap_threshold * prev_area || 151 | overlap > config()->overlap_threshold * new_area) 152 | { 153 | // Take the box with the higher confidence. 154 | if (detection.confidence > prev_detection.confidence) 155 | prev_detection = detection; 156 | overlapped = true; 157 | break; 158 | } 159 | } 160 | } 161 | if (!overlapped) 162 | output_results_.push_back(detection); 163 | } 164 | 165 | if (config()->verbose) 166 | { 167 | for (auto &detection : output_results_) 168 | LOG(1, detection.toString()); 169 | } 170 | } 171 | 172 | static PostProcessingStage *Create(LibcameraApp *app) 173 | { 174 | return new ObjectDetectTfStage(app); 175 | } 176 | 177 | static RegisterStage reg(NAME, &Create); 178 | -------------------------------------------------------------------------------- /post_processing_stages/plot_pose_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * negate_stage.cpp - image negate effect 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | 18 | #include "core/libcamera_app.hpp" 19 | 20 | #include 
"post_processing_stages/post_processing_stage.hpp" 21 | 22 | #include "opencv2/imgproc.hpp" 23 | 24 | using namespace cv; 25 | 26 | using Stream = libcamera::Stream; 27 | 28 | enum Features 29 | { 30 | nose, 31 | leftEye, 32 | rightEye, 33 | leftEar, 34 | rightEar, 35 | leftShoulder, 36 | rightShoulder, 37 | leftElbow, 38 | rightElbow, 39 | leftWrist, 40 | rightWrist, 41 | leftHip, 42 | rightHip, 43 | leftKnee, 44 | rightKnee, 45 | leftAnkle, 46 | rightAnkle 47 | }; 48 | 49 | constexpr int FEATURE_SIZE = 17; 50 | 51 | class PlotPoseCvStage : public PostProcessingStage 52 | { 53 | public: 54 | PlotPoseCvStage(LibcameraApp *app) : PostProcessingStage(app) {} 55 | 56 | char const *Name() const override; 57 | 58 | void Read(boost::property_tree::ptree const ¶ms) override; 59 | 60 | void Configure() override; 61 | 62 | bool Process(CompletedRequestPtr &completed_request) override; 63 | 64 | private: 65 | void drawFeatures(cv::Mat &img, std::vector locations, std::vector confidences); 66 | 67 | Stream *stream_; 68 | float confidence_threshold_; 69 | }; 70 | 71 | #define NAME "plot_pose_cv" 72 | 73 | char const *PlotPoseCvStage::Name() const 74 | { 75 | return NAME; 76 | } 77 | 78 | void PlotPoseCvStage::Configure() 79 | { 80 | stream_ = app_->GetMainStream(); 81 | } 82 | 83 | void PlotPoseCvStage::Read(boost::property_tree::ptree const ¶ms) 84 | { 85 | confidence_threshold_ = params.get("confidence_threshold", -1.0); 86 | } 87 | 88 | bool PlotPoseCvStage::Process(CompletedRequestPtr &completed_request) 89 | { 90 | if (!stream_) 91 | return false; 92 | 93 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[stream_])[0]; 94 | uint32_t *ptr = (uint32_t *)buffer.data(); 95 | StreamInfo info = app_->GetStreamInfo(stream_); 96 | 97 | std::vector rects; 98 | std::vector lib_locations; 99 | std::vector cv_locations; 100 | std::vector confidences; 101 | 102 | completed_request->post_process_metadata.Get("pose_estimation.locations", lib_locations); 103 | 
completed_request->post_process_metadata.Get("pose_estimation.confidences", confidences); 104 | 105 | if (!confidences.empty() && !lib_locations.empty()) 106 | { 107 | Mat image(info.height, info.width, CV_8U, ptr, info.stride); 108 | for (libcamera::Point lib_location : lib_locations) 109 | { 110 | Point cv_location; 111 | cv_location.x = lib_location.x; 112 | cv_location.y = lib_location.y; 113 | cv_locations.push_back(cv_location); 114 | } 115 | drawFeatures(image, cv_locations, confidences); 116 | } 117 | return false; 118 | } 119 | 120 | void PlotPoseCvStage::drawFeatures(Mat &img, std::vector locations, std::vector confidences) 121 | { 122 | Scalar colour = Scalar(255, 255, 255); 123 | int radius = 5; 124 | 125 | for (int i = 0; i < FEATURE_SIZE; i++) 126 | { 127 | if (confidences[i] < confidence_threshold_) 128 | circle(img, locations[i], radius, colour, 2, 8, 0); 129 | } 130 | 131 | if (confidences[leftShoulder] > confidence_threshold_) 132 | { 133 | if (confidences[rightShoulder] > confidence_threshold_) 134 | line(img, locations[leftShoulder], locations[rightShoulder], colour, 2); 135 | 136 | if (confidences[leftElbow] > confidence_threshold_) 137 | line(img, locations[leftShoulder], locations[leftElbow], colour, 2); 138 | 139 | if (confidences[leftHip] > confidence_threshold_) 140 | line(img, locations[leftShoulder], locations[leftHip], colour, 2); 141 | } 142 | if (confidences[rightShoulder] > confidence_threshold_) 143 | { 144 | if (confidences[rightElbow] > confidence_threshold_) 145 | line(img, locations[rightShoulder], locations[rightElbow], colour, 2); 146 | 147 | if (confidences[rightHip] > confidence_threshold_) 148 | line(img, locations[rightShoulder], locations[rightHip], colour, 2); 149 | } 150 | if (confidences[leftElbow] > confidence_threshold_) 151 | { 152 | if (confidences[leftWrist] > confidence_threshold_) 153 | line(img, locations[leftElbow], locations[leftWrist], colour, 2); 154 | } 155 | if (confidences[rightElbow] > 
confidence_threshold_) 156 | { 157 | if (confidences[rightWrist] > confidence_threshold_) 158 | line(img, locations[rightElbow], locations[rightWrist], colour, 2); 159 | } 160 | if (confidences[leftHip] > confidence_threshold_) 161 | { 162 | if (confidences[rightHip] > confidence_threshold_) 163 | line(img, locations[leftHip], locations[rightHip], colour, 2); 164 | 165 | if (confidences[leftKnee] > confidence_threshold_) 166 | line(img, locations[leftHip], locations[leftKnee], colour, 2); 167 | } 168 | if (confidences[leftKnee] > confidence_threshold_) 169 | { 170 | if (confidences[leftAnkle] > confidence_threshold_) 171 | line(img, locations[leftKnee], locations[leftAnkle], colour, 2); 172 | } 173 | if (confidences[rightKnee] > confidence_threshold_) 174 | { 175 | if (confidences[rightHip] > confidence_threshold_) 176 | line(img, locations[rightKnee], locations[rightHip], colour, 2); 177 | 178 | if (confidences[rightAnkle] > confidence_threshold_) 179 | line(img, locations[rightKnee], locations[rightAnkle], colour, 2); 180 | } 181 | } 182 | 183 | static PostProcessingStage *Create(LibcameraApp *app) 184 | { 185 | return new PlotPoseCvStage(app); 186 | } 187 | 188 | static RegisterStage reg(NAME, &Create); 189 | -------------------------------------------------------------------------------- /post_processing_stages/pose_estimation_tf_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * pose_estimation_tf_stage - pose estimator 6 | */ 7 | 8 | #include "tf_stage.hpp" 9 | 10 | constexpr int FEATURE_SIZE = 17; 11 | constexpr int HEATMAP_DIMS = 9; 12 | 13 | #define NAME "pose_estimation_tf" 14 | 15 | class PoseEstimationTfStage : public TfStage 16 | { 17 | public: 18 | // The model we use expects 257x257 images. Really. 
19 | PoseEstimationTfStage(LibcameraApp *app) : TfStage(app, 257, 257) { config_ = std::make_unique(); } 20 | char const *Name() const override { return NAME; } 21 | 22 | protected: 23 | void readExtras(boost::property_tree::ptree const ¶ms) override; 24 | 25 | void checkConfiguration() override; 26 | 27 | // Retrieve the various joint coordinates and confidences from the model. 28 | void interpretOutputs() override; 29 | 30 | // Attach results as metadata. 31 | void applyResults(CompletedRequestPtr &completed_request) override; 32 | 33 | private: 34 | std::vector heats_; 35 | std::vector confidences_; 36 | std::vector locations_; 37 | }; 38 | 39 | void PoseEstimationTfStage::readExtras([[maybe_unused]] boost::property_tree::ptree const ¶ms) 40 | { 41 | // Actually we don't read anything, but we can check the output tensor dimensions. 42 | int output = interpreter_->outputs()[0]; 43 | TfLiteIntArray *dims = interpreter_->tensor(output)->dims; 44 | // Causes might include loading the wrong model. 
45 | if (dims->data[0] != 1 || dims->data[1] != HEATMAP_DIMS || dims->data[2] != HEATMAP_DIMS || 46 | dims->data[3] != FEATURE_SIZE) 47 | throw std::runtime_error("PoseEstimationTfStage: Unexpected output dimensions"); 48 | } 49 | 50 | void PoseEstimationTfStage::checkConfiguration() 51 | { 52 | if (!main_stream_) 53 | throw std::runtime_error("PoseEstimationTfStage: Main stream is required"); 54 | } 55 | 56 | void PoseEstimationTfStage::applyResults(CompletedRequestPtr &completed_request) 57 | { 58 | completed_request->post_process_metadata.Set("pose_estimation.locations", locations_); 59 | completed_request->post_process_metadata.Set("pose_estimation.confidences", confidences_); 60 | } 61 | 62 | void PoseEstimationTfStage::interpretOutputs() 63 | { 64 | // This code has been adapted from the "Qengineering/TensorFlow_Lite_Pose_RPi_32-bits" repository and can be 65 | // found here: "https://github.com/Qengineering/TensorFlow_Lite_Pose_RPi_32-bits/blob/master/Pose_single.cpp" 66 | float *heatmaps = interpreter_->tensor(interpreter_->outputs()[0])->data.f; 67 | float *offsets = interpreter_->tensor(interpreter_->outputs()[1])->data.f; 68 | 69 | heats_.clear(); 70 | confidences_.clear(); 71 | locations_.clear(); 72 | 73 | for (int i = 0; i < FEATURE_SIZE; i++) 74 | { 75 | float confidence_temp = heatmaps[i]; 76 | libcamera::Point heat_coord; 77 | for (int y = 0; y < HEATMAP_DIMS; y++) 78 | { 79 | for (int x = 0; x < HEATMAP_DIMS; x++) 80 | { 81 | int j = FEATURE_SIZE * (HEATMAP_DIMS * y + x) + i; 82 | if (heatmaps[j] > confidence_temp) 83 | { 84 | confidence_temp = heatmaps[j]; 85 | heat_coord.x = x; 86 | heat_coord.y = y; 87 | } 88 | } 89 | } 90 | heats_.push_back(heat_coord); 91 | confidences_.push_back(confidence_temp); 92 | } 93 | 94 | for (int i = 0; i < FEATURE_SIZE; i++) 95 | { 96 | libcamera::Point location_coord; 97 | int x = heats_[i].x, y = heats_[i].y, j = (FEATURE_SIZE * 2) * (HEATMAP_DIMS * y + x) + i; 98 | 99 | location_coord.y = (y * 
main_stream_info_.height) / (HEATMAP_DIMS - 1) + offsets[j]; 100 | location_coord.x = (x * main_stream_info_.width) / (HEATMAP_DIMS - 1) + offsets[j + FEATURE_SIZE]; 101 | 102 | locations_.push_back(location_coord); 103 | } 104 | } 105 | 106 | static PostProcessingStage *Create(LibcameraApp *app) 107 | { 108 | return new PoseEstimationTfStage(app); 109 | } 110 | 111 | static RegisterStage reg(NAME, &Create); 112 | 113 | -------------------------------------------------------------------------------- /post_processing_stages/post_processing_stage.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * post_processing_stage.hpp - Post processing stage base class definition. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | // Prevents compiler warnings in Boost headers with more recent versions of GCC. 13 | #define BOOST_BIND_GLOBAL_PLACEHOLDERS 14 | 15 | #include 16 | #include 17 | 18 | #include "core/completed_request.hpp" 19 | #include "core/stream_info.hpp" 20 | 21 | namespace libcamera 22 | { 23 | struct StreamConfiguration; 24 | } 25 | 26 | class LibcameraApp; 27 | 28 | using StreamConfiguration = libcamera::StreamConfiguration; 29 | 30 | class PostProcessingStage 31 | { 32 | public: 33 | PostProcessingStage(LibcameraApp *app); 34 | 35 | virtual ~PostProcessingStage(); 36 | 37 | virtual char const *Name() const = 0; 38 | 39 | virtual void Read(boost::property_tree::ptree const ¶ms); 40 | 41 | virtual void AdjustConfig(std::string const &use_case, StreamConfiguration *config); 42 | 43 | virtual void Configure(); 44 | 45 | virtual void Start(); 46 | 47 | // Return true if this request is to be dropped. 
48 | virtual bool Process(CompletedRequestPtr &completed_request) = 0; 49 | 50 | virtual void Stop(); 51 | 52 | virtual void Teardown(); 53 | 54 | // Below here are some helpers provided for the convenience of derived classes. 55 | 56 | // Convert YUV420 image to RGB. We crop from the centre of the image if the src 57 | // image is larger than the destination. 58 | static std::vector Yuv420ToRgb(const uint8_t *src, StreamInfo &src_info, StreamInfo &dst_info); 59 | 60 | protected: 61 | // Helper to calculate the execution time of any callable object and return it in as a std::chrono::duration. 62 | // For functions returning a value, the simplest thing would be to wrap the call in a lambda and capture 63 | // the return value. 64 | template 65 | static auto ExecutionTime(F &&f, Args &&... args) 66 | { 67 | auto t1 = T::now(); 68 | std::invoke(std::forward(f), std::forward(args)...); 69 | auto t2 = T::now(); 70 | return std::chrono::duration(t2 - t1); 71 | } 72 | 73 | LibcameraApp *app_; 74 | }; 75 | 76 | typedef PostProcessingStage *(*StageCreateFunc)(LibcameraApp *app); 77 | struct RegisterStage 78 | { 79 | RegisterStage(char const *name, StageCreateFunc create_func); 80 | }; 81 | 82 | std::map const &GetPostProcessingStages(); 83 | -------------------------------------------------------------------------------- /post_processing_stages/pwl.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * pwl.hpp - piecewise linear functions interface 6 | */ 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | class Pwl 17 | { 18 | public: 19 | struct Interval { 20 | Interval(double _start, double _end) : start(_start), end(_end) {} 21 | double start, end; 22 | bool Contains(double value) { return value >= start && value <= end; } 23 | double Clip(double value) { return value < 
start ? start : (value > end ? end : value); } 24 | double Len() const { return end - start; } 25 | }; 26 | struct Point { 27 | Point() : x(0), y(0) {} 28 | Point(double _x, double _y) : x(_x), y(_y) {} 29 | double x, y; 30 | Point operator-(Point const &p) const { return Point(x - p.x, y - p.y); } 31 | Point operator+(Point const &p) const { return Point(x + p.x, y + p.y); } 32 | double operator%(Point const &p) const { return x * p.x + y * p.y; } 33 | Point operator*(double f) const { return Point(x * f, y * f); } 34 | Point operator/(double f) const { return Point(x / f, y / f); } 35 | double Len2() const { return x * x + y * y; } 36 | double Len() const { return sqrt(Len2()); } 37 | }; 38 | Pwl() {} 39 | Pwl(std::vector const &points) : points_(points) {} 40 | void Read(boost::property_tree::ptree const ¶ms); 41 | void Append(double x, double y, const double eps = 1e-6); 42 | void Prepend(double x, double y, const double eps = 1e-6); 43 | Interval Domain() const; 44 | Interval Range() const; 45 | bool Empty() const; 46 | // Evaluate Pwl, optionally supplying an initial guess for the 47 | // "span". The "span" may be optionally be updated. If you want to know 48 | // the "span" value but don't have an initial guess you can set it to 49 | // -1. 50 | double Eval(double x, int *span_ptr = nullptr, bool update_span = true) const; 51 | // Find perpendicular closest to xy, starting from span+1 so you can 52 | // call it repeatedly to check for multiple closest points (set span to 53 | // -1 on the first call). Also returns "pseudo" perpendiculars; see 54 | // PerpType enum. 
55 | enum class PerpType { 56 | NotFound, // no perpendicular found 57 | Start, // start of Pwl is closest point 58 | End, // end of Pwl is closest point 59 | Vertex, // vertex of Pwl is closest point 60 | Perpendicular // true perpendicular found 61 | }; 62 | PerpType Invert(Point const &xy, Point &perp, int &span, const double eps = 1e-6) const; 63 | // Compose two Pwls together, doing "this" first and "other" after. 64 | Pwl Compose(Pwl const &other, const double eps = 1e-6) const; 65 | // Apply function to (x,y) values at every control point. 66 | void Map(std::function f) const; 67 | // Apply function to (x, y0, y1) values wherever either Pwl has a 68 | // control point. 69 | static void Map2(Pwl const &pwl0, Pwl const &pwl1, 70 | std::function f); 71 | // Combine two Pwls, meaning we create a new Pwl where the y values are 72 | // given by running f wherever either has a knot. 73 | static Pwl Combine(Pwl const &pwl0, Pwl const &pwl1, 74 | std::function f, 75 | const double eps = 1e-6); 76 | // Make "this" match (at least) the given domain. Any extension my be 77 | // clipped or linear. 78 | void MatchDomain(Interval const &domain, bool clip = true, const double eps = 1e-6); 79 | // Generate a LUT for this funciton. 
80 | template std::vector GenerateLut() const 81 | { 82 | int end = Domain().end + 1, span = 0; 83 | std::vector lut(end); 84 | for (int x = 0; x < end; x++) 85 | lut[x] = Eval(x, &span); 86 | return lut; 87 | } 88 | Pwl &operator*=(double d); 89 | void Debug(FILE *fp = stderr) const; 90 | 91 | private: 92 | int findSpan(double x, int span) const; 93 | std::vector points_; 94 | }; 95 | -------------------------------------------------------------------------------- /post_processing_stages/segmentation.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * segmentation.hpp - segmentation result 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | struct Segmentation 14 | { 15 | Segmentation(int w, int h, std::vector l, const std::vector &s) 16 | : width(w), height(h), labels(l), segmentation(s) 17 | { 18 | } 19 | int width; 20 | int height; 21 | std::vector labels; 22 | std::vector segmentation; 23 | }; 24 | -------------------------------------------------------------------------------- /post_processing_stages/segmentation_tf_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * segmentation_tf_stage - image segmentation 6 | */ 7 | 8 | #include "segmentation.hpp" 9 | #include "tf_stage.hpp" 10 | 11 | // The neural network expects a 257x257 input. 
12 | 13 | constexpr int WIDTH = 257; 14 | constexpr int HEIGHT = 257; 15 | 16 | struct SegmentationTfConfig : public TfConfig 17 | { 18 | bool draw; 19 | uint32_t threshold; // number of pixels in a category before we print its name 20 | }; 21 | 22 | #define NAME "segmentation_tf" 23 | 24 | class SegmentationTfStage : public TfStage 25 | { 26 | public: 27 | SegmentationTfStage(LibcameraApp *app) : TfStage(app, WIDTH, HEIGHT), segmentation_(WIDTH * HEIGHT) 28 | { 29 | config_ = std::make_unique(); 30 | } 31 | char const *Name() const override { return NAME; } 32 | 33 | protected: 34 | SegmentationTfConfig *config() const { return static_cast(config_.get()); } 35 | 36 | void readExtras(boost::property_tree::ptree const ¶ms) override; 37 | 38 | void checkConfiguration() override; 39 | 40 | // Read out the segmentation map. 41 | void interpretOutputs() override; 42 | 43 | // Add metadata and optionally draw the segmentation in the corner of the image. 44 | void applyResults(CompletedRequestPtr &completed_request) override; 45 | 46 | void readLabelsFile(const std::string &filename); 47 | 48 | private: 49 | std::vector labels_; 50 | std::vector segmentation_; 51 | }; 52 | 53 | void SegmentationTfStage::readLabelsFile(const std::string &file_name) 54 | { 55 | std::ifstream file(file_name); 56 | if (!file) 57 | throw std::runtime_error("SegmentationTfStage: Failed to load labels file"); 58 | 59 | for (std::string line; std::getline(file, line); labels_.push_back(line)) 60 | ; 61 | } 62 | 63 | void SegmentationTfStage::readExtras([[maybe_unused]] boost::property_tree::ptree const ¶ms) 64 | { 65 | config()->draw = params.get("draw", 1); 66 | config()->threshold = params.get("threshold", 5000); 67 | std::string labels_file = params.get("labels_file", ""); 68 | readLabelsFile(labels_file); 69 | 70 | // Check the output dimensions. 
71 | int output = interpreter_->outputs()[0]; 72 | TfLiteIntArray *dims = interpreter_->tensor(output)->dims; 73 | if (dims->size != 4 || dims->data[1] != HEIGHT || dims->data[2] != WIDTH || 74 | dims->data[3] != static_cast(labels_.size())) 75 | throw std::runtime_error("SegmentationTfStage: Unexpected output tensor size"); 76 | } 77 | 78 | void SegmentationTfStage::checkConfiguration() 79 | { 80 | if (!main_stream_ && config()->draw) 81 | throw std::runtime_error("SegmentationTfStage: Main stream is required for drawing"); 82 | } 83 | 84 | void SegmentationTfStage::applyResults(CompletedRequestPtr &completed_request) 85 | { 86 | // Store the segmentation in image metadata. 87 | completed_request->post_process_metadata.Set("segmentation.result", 88 | Segmentation(WIDTH, HEIGHT, labels_, segmentation_)); 89 | 90 | // Optionally, draw the segmentation in the bottom right corner of the main image. 91 | if (!config()->draw) 92 | return; 93 | 94 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[main_stream_])[0]; 95 | int y_offset = main_stream_info_.height - HEIGHT; 96 | int x_offset = main_stream_info_.width - WIDTH; 97 | int scale = 255 / labels_.size(); 98 | 99 | for (int y = 0; y < HEIGHT; y++) 100 | { 101 | uint8_t *src = &segmentation_[y * WIDTH]; 102 | uint8_t *dst = buffer.data() + (y + y_offset) * main_stream_info_.stride + x_offset; 103 | for (int x = 0; x < WIDTH; x++) 104 | *(dst++) = scale * *(src++); 105 | } 106 | 107 | // Also make it greyscale. 
108 | uint8_t *U_start = buffer.data() + main_stream_info_.height * main_stream_info_.stride; 109 | int UV_size = (main_stream_info_.height / 2) * (main_stream_info_.stride / 2); 110 | y_offset /= 2; 111 | x_offset /= 2; 112 | 113 | for (int y = 0; y < HEIGHT / 2; y++) 114 | { 115 | uint8_t *dst = U_start + (y + y_offset) * (main_stream_info_.stride / 2) + x_offset; 116 | memset(dst, 128, WIDTH / 2); 117 | memset(dst + UV_size, 128, WIDTH / 2); 118 | } 119 | } 120 | 121 | void SegmentationTfStage::interpretOutputs() 122 | { 123 | float *output = interpreter_->tensor(interpreter_->outputs()[0])->data.f; 124 | uint8_t *seg_ptr = &segmentation_[0]; 125 | int num_categories = labels_.size(); 126 | std::vector> hist(num_categories); 127 | std::generate(hist.begin(), hist.end(), [i = 0]() mutable { return std::pair(0, i++); }); 128 | 129 | // Extract the segmentation from the output tensor. Also accumulate a histogram. 130 | 131 | for (int y = 0; y < HEIGHT; y++) 132 | { 133 | for (int x = 0; x < WIDTH; x++, output += num_categories) 134 | { 135 | // For each pixel we get a "confidence" value for every category - pick the largest. 136 | int index = std::max_element(output, output + num_categories) - output; 137 | *(seg_ptr++) = index; 138 | hist[index].first++; 139 | } 140 | } 141 | 142 | if (config()->verbose) 143 | { 144 | // Output the category names of the largest histogram bins. 145 | std::sort(hist.begin(), hist.end(), [](const auto &lhs, const auto &rhs) { return lhs.first > rhs.first; }); 146 | for (int i = 0; i < num_categories && hist[i].first >= config()->threshold; i++) 147 | std::cerr << (i ? 
", " : "") << labels_[hist[i].second] << " (" << hist[i].first << ")"; 148 | std::cerr << std::endl; 149 | } 150 | } 151 | 152 | static PostProcessingStage *Create(LibcameraApp *app) 153 | { 154 | return new SegmentationTfStage(app); 155 | } 156 | 157 | static RegisterStage reg(NAME, &Create); 158 | 159 | -------------------------------------------------------------------------------- /post_processing_stages/sobel_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * sobel_cv_stage.cpp - Sobel filter implementation, using OpenCV 6 | */ 7 | 8 | #include 9 | 10 | #include "core/libcamera_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | #include "opencv2/core.hpp" 15 | #include "opencv2/imgproc.hpp" 16 | 17 | using namespace cv; 18 | 19 | using Stream = libcamera::Stream; 20 | 21 | class SobelCvStage : public PostProcessingStage 22 | { 23 | public: 24 | SobelCvStage(LibcameraApp *app) : PostProcessingStage(app) {} 25 | 26 | char const *Name() const override; 27 | 28 | void Read(boost::property_tree::ptree const ¶ms) override; 29 | 30 | void Configure() override; 31 | 32 | bool Process(CompletedRequestPtr &completed_request) override; 33 | 34 | private: 35 | Stream *stream_; 36 | int ksize_ = 3; 37 | }; 38 | 39 | #define NAME "sobel_cv" 40 | 41 | char const *SobelCvStage::Name() const 42 | { 43 | return NAME; 44 | } 45 | 46 | void SobelCvStage::Read(boost::property_tree::ptree const ¶ms) 47 | { 48 | ksize_ = params.get("ksize", 3); 49 | } 50 | 51 | void SobelCvStage::Configure() 52 | { 53 | stream_ = app_->GetMainStream(); 54 | if (!stream_ || stream_->configuration().pixelFormat != libcamera::formats::YUV420) 55 | throw std::runtime_error("SobelCvStage: only YUV420 format supported"); 56 | } 57 | 58 | bool SobelCvStage::Process(CompletedRequestPtr &completed_request) 
59 | { 60 | StreamInfo info = app_->GetStreamInfo(stream_); 61 | libcamera::Span buffer = app_->Mmap(completed_request->buffers[stream_])[0]; 62 | uint8_t *ptr = (uint8_t *)buffer.data(); 63 | 64 | //Everything beyond this point is image processing... 65 | 66 | uint8_t value = 128; 67 | int num = (info.stride * info.height) / 2; 68 | Mat src = Mat(info.height, info.width, CV_8U, ptr, info.stride); 69 | int scale = 1; 70 | int delta = 0; 71 | int ddepth = CV_16S; 72 | 73 | memset(ptr + info.stride * info.height, value, num); 74 | 75 | // Remove noise by blurring with a Gaussian filter ( kernal size = 3 ) 76 | GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT); 77 | 78 | Mat grad_x, grad_y; 79 | 80 | //Scharr(src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT); 81 | Sobel(src, grad_x, ddepth, 1, 0, ksize_, scale, delta, BORDER_DEFAULT); 82 | //Scharr(src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT); 83 | Sobel(src, grad_y, ddepth, 0, 1, ksize_, scale, delta, BORDER_DEFAULT); 84 | 85 | // converting back to CV_8U 86 | convertScaleAbs(grad_x, grad_x); 87 | convertScaleAbs(grad_y, grad_y); 88 | 89 | //weight the x and y gradients and add their magnitudes 90 | addWeighted(grad_x, 0.5, grad_y, 0.5, 0, src); 91 | 92 | return false; 93 | } 94 | 95 | static PostProcessingStage *Create(LibcameraApp *app) 96 | { 97 | return new SobelCvStage(app); 98 | } 99 | 100 | static RegisterStage reg(NAME, &Create); 101 | -------------------------------------------------------------------------------- /post_processing_stages/tf_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * tf_stage.hpp - base class for TensorFlowLite stages 6 | */ 7 | #include "tf_stage.hpp" 8 | 9 | TfStage::TfStage(LibcameraApp *app, int tf_w, int tf_h) : PostProcessingStage(app), tf_w_(tf_w), tf_h_(tf_h) 10 | { 11 | if 
(tf_w_ <= 0 || tf_h_ <= 0)
		throw std::runtime_error("TfStage: Bad TFLite input dimensions");
}

// Read the configuration parameters common to all TFLite stages. The model is
// loaded (initialise()) before readExtras() so derived stages can inspect it.
void TfStage::Read(boost::property_tree::ptree const &params)
{
	config_->number_of_threads = params.get("number_of_threads", 2);
	config_->refresh_rate = params.get("refresh_rate", 5);
	config_->model_file = params.get("model_file", "");
	config_->verbose = params.get("verbose", 0);
	config_->normalisation_offset = params.get("normalisation_offset", 127.5);
	config_->normalisation_scale = params.get("normalisation_scale", 127.5);

	initialise();

	readExtras(params);
}

// Load the TFLite model, build the interpreter and sanity-check that the
// model's input tensor size matches the width/height this stage was built for.
void TfStage::initialise()
{
	model_ = tflite::FlatBufferModel::BuildFromFile(config_->model_file.c_str());
	if (!model_)
		throw std::runtime_error("TfStage: Failed to load model");
	LOG(1, "TfStage: Loaded model " << config_->model_file);

	tflite::ops::builtin::BuiltinOpResolver resolver;
	tflite::InterpreterBuilder(*model_, resolver)(&interpreter_);
	if (!interpreter_)
		throw std::runtime_error("TfStage: Failed to construct interpreter");

	// -1 means "leave TFLite's own default thread count alone".
	if (config_->number_of_threads != -1)
		interpreter_->SetNumThreads(config_->number_of_threads);

	if (interpreter_->AllocateTensors() != kTfLiteOk)
		throw std::runtime_error("TfStage: Failed to allocate tensors");

	// Make an attempt to verify that the model expects this size of input.
	int input = interpreter_->inputs()[0];
	size_t size = interpreter_->tensor(input)->bytes;
	size_t check = tf_w_ * tf_h_ * 3; // assume RGB
	if (interpreter_->tensor(input)->type == kTfLiteUInt8)
		check *= sizeof(uint8_t);
	else if (interpreter_->tensor(input)->type == kTfLiteFloat32)
		check *= sizeof(float);
	else
		throw std::runtime_error("TfStage: Input tensor data type not supported");

	// Causes might include loading the wrong model.
	if (check != size)
		throw std::runtime_error("TfStage: Input tensor size mismatch");
}

// Cache the low resolution and main streams. Inference runs on the lores
// stream; if it is missing or smaller than the model input, lores_stream_ is
// cleared and Process() becomes a no-op.
void TfStage::Configure()
{
	lores_stream_ = app_->LoresStream();
	if (lores_stream_)
	{
		lores_info_ = app_->GetStreamInfo(lores_stream_);
		if (config_->verbose)
			LOG(1, "TfStage: Low resolution stream is " << lores_info_.width << "x" << lores_info_.height);
		if (tf_w_ > lores_info_.width || tf_h_ > lores_info_.height)
		{
			LOG_ERROR("TfStage: WARNING: Low resolution image too small");
			lores_stream_ = nullptr;
		}
	}
	else if (config_->verbose)
		LOG(1, "TfStage: no low resolution stream");

	main_stream_ = app_->GetMainStream();
	if (main_stream_)
	{
		main_stream_info_ = app_->GetStreamInfo(main_stream_);
		if (config_->verbose)
			LOG(1, "TfStage: Main stream is " << main_stream_info_.width << "x" << main_stream_info_.height);
	}
	else if (config_->verbose)
		LOG(1, "TfStage: No main stream");

	// Let the derived stage validate what it got.
	checkConfiguration();
}

// Kick off an asynchronous inference every refresh_rate frames (provided any
// previous run has finished), then synchronously apply the latest results.
bool TfStage::Process(CompletedRequestPtr &completed_request)
{
	if (!lores_stream_)
		return false;

	{
		std::unique_lock lck(future_mutex_);
		// Only start a new inference when the refresh interval has elapsed and
		// the previous async run (if any) has completed.
		if (config_->refresh_rate && completed_request->sequence % config_->refresh_rate == 0 &&
			(!future_ || future_->wait_for(std::chrono::seconds(0)) == std::future_status::ready))
		{
			libcamera::Span<uint8_t> buffer = app_->Mmap(completed_request->buffers[lores_stream_])[0];

			// Copy the lores image here and let the asynchronous thread convert it to RGB.
			// Doing the "extra" copy is in fact hugely beneficial because it turns uncached
			// memory into cached memory, which is then *much* quicker.
			lores_copy_.assign(buffer.data(), buffer.data() + buffer.size());

			// NOTE(review): an exception thrown inside runInference() is stored
			// in the future but never rethrown (Stop() only wait()s), so it is
			// silently discarded - confirm this is intentional.
			future_ = std::make_unique<std::future<void>>();
			*future_ = std::async(std::launch::async, [this] {
				auto time_taken = ExecutionTime(&TfStage::runInference, this).count();

				if (config_->verbose)
					LOG(1, "TfStage: Inference time: " << time_taken << " ms");
			});
		}
	}

	// Results are applied on every frame, under the same lock that protects
	// interpretOutputs() on the async thread.
	std::unique_lock lock(output_mutex_);
	applyResults(completed_request);

	return false;
}

// Runs on the async thread: convert the saved lores YUV420 image to RGB at the
// model's input size, fill the input tensor, invoke the interpreter, then let
// the derived stage interpret the outputs (under output_mutex_).
void TfStage::runInference()
{
	int input = interpreter_->inputs()[0];
	StreamInfo tf_info;
	tf_info.width = tf_w_, tf_info.height = tf_h_, tf_info.stride = tf_w_ * 3;
	std::vector<uint8_t> rgb_image = Yuv420ToRgb(lores_copy_.data(), lores_info_, tf_info);

	if (interpreter_->tensor(input)->type == kTfLiteUInt8)
	{
		uint8_t *tensor = interpreter_->typed_tensor<uint8_t>(input);
		for (unsigned int i = 0; i < rgb_image.size(); i++)
			tensor[i] = rgb_image[i];
	}
	else if (interpreter_->tensor(input)->type == kTfLiteFloat32)
	{
		// Normalise the 8-bit samples into the float range the model expects.
		float *tensor = interpreter_->typed_tensor<float>(input);
		for (unsigned int i = 0; i < rgb_image.size(); i++)
			tensor[i] = (rgb_image[i] - config_->normalisation_offset) / config_->normalisation_scale;
	}

	if (interpreter_->Invoke() != kTfLiteOk)
		throw std::runtime_error("TfStage: Failed to invoke TFLite");

	std::unique_lock lock(output_mutex_);
	interpretOutputs();
}

// Wait for any in-flight inference to finish before shutting down.
void TfStage::Stop()
{
	if (future_)
		future_->wait();
}
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (C) 2021, Raspberry Pi (Trading) Limited
 *
 * tf_stage.hpp - base class for TensorFlowLite stages
 */

#pragma once

#include <future>
#include <memory>
#include <mutex>

#include <libcamera/stream.h>

#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"

#include "core/libcamera_app.hpp"
#include "core/stream_info.hpp"

#include "post_processing_stages/post_processing_stage.hpp"

// The TfStage is a convenient base class from which post processing stages using
// TensorFlowLite can be derived. It provides a certain amount of boiler plate code
// and some other useful functions. Please refer to the examples provided that make
// use of it.

// Configuration common to all TFLite stages, populated by TfStage::Read().
struct TfConfig
{
	// NOTE(review): TfStage::Read() always overwrites this with a default of
	// 2 - confirm which default is actually intended.
	int number_of_threads = 3;
	// Run inference once every this many frames (0 disables refreshing).
	int refresh_rate = 5;
	// Path to the .tflite model file.
	std::string model_file;
	bool verbose = false;
	// Offset/scale applied to 8-bit samples when the input tensor is float32.
	float normalisation_offset = 127.5;
	float normalisation_scale = 127.5;
};

class TfStage : public PostProcessingStage
{
public:
	// The TfStage provides implementations of the PostProcessingStage functions with the
	// exception of Name(), which derived classes must still provide.

	// The constructor supplies the width and height that TFLite wants.
	TfStage(LibcameraApp *app, int tf_w, int tf_h);

	//char const *Name() const override;

	void Read(boost::property_tree::ptree const &params) override;

	void Configure() override;

	bool Process(CompletedRequestPtr &completed_request) override;

	void Stop() override;

protected:
	TfConfig *config() const { return config_.get(); }

	// Instead of redefining the above public interface, derived class should implement
	// the following four virtual methods.

	// Read additional parameters required by the stage. Can also do some model checking.
	virtual void readExtras(boost::property_tree::ptree const &params) {}

	// Check the stream and image configuration. Here the stage should report any errors
	// and/or fail.
	virtual void checkConfiguration() {}

	// This runs asynchronously from the main thread right after the model has run. The
	// outputs should be processed into a form where applyResults can make use of them.
	virtual void interpretOutputs() {}

	// Here we run synchronously again and so should not take too long. The results
	// produced by interpretOutputs can be used now, for example as metadata to attach
	// to the image, or even drawn onto the image itself.
	virtual void applyResults(CompletedRequestPtr &completed_request) {}

	// Owned by the derived class's constructor; read/written by TfStage::Read().
	std::unique_ptr<TfConfig> config_;

	// The width and height that TFLite wants.
	unsigned int tf_w_, tf_h_;

	// We run TFLite on the low resolution image, details of which are here.
	libcamera::Stream *lores_stream_;
	StreamInfo lores_info_;

	// The stage may or may not make use of the larger or "main" image stream.
	libcamera::Stream *main_stream_;
	StreamInfo main_stream_info_;

	std::unique_ptr<tflite::FlatBufferModel> model_;
	std::unique_ptr<tflite::Interpreter> interpreter_;

private:
	void initialise();
	void runInference();

	// Guards future_; output_mutex_ guards the interpreted results.
	std::mutex future_mutex_;
	std::unique_ptr<std::future<void>> future_;
	std::vector<uint8_t> lores_copy_;
	std::mutex output_mutex_;
};
cmake_minimum_required(VERSION 3.6)

include(GNUInstallDirs)

# Each optional preview backend is only built when its libraries are found.
pkg_check_modules(LIBDRM QUIET libdrm)
pkg_check_modules(X11 QUIET x11)
pkg_check_modules(EPOXY QUIET epoxy)
pkg_check_modules(QTCORE QUIET Qt5Core)
pkg_check_modules(QTWIDGETS QUIET Qt5Widgets)

set(SRC "preview.cpp")
set(TARGET_LIBS "")

IF (NOT DEFINED ENABLE_DRM)
    SET(ENABLE_DRM 1)
endif()
set(DRM_FOUND 0)
if (ENABLE_DRM AND LIBDRM_FOUND)
    message(STATUS "LIBDRM_LINK_LIBRARIES=${LIBDRM_LINK_LIBRARIES}")
    include_directories(${LIBDRM_INCLUDE_DIRS})
    set(TARGET_LIBS ${TARGET_LIBS} ${LIBDRM_LIBRARIES})
    set(SRC ${SRC} drm_preview.cpp)
    set(DRM_FOUND 1)
    message(STATUS "LIBDRM display mode enabled")
else()
    message(STATUS "LIBDRM display mode will be unavailable!")
endif()

IF (NOT DEFINED ENABLE_X11)
    set(ENABLE_X11 1)
endif()
set(EGL_FOUND 0)
if (ENABLE_X11 AND X11_FOUND AND EPOXY_FOUND)
    message(STATUS "X11_LINK_LIBRARIES=${X11_LINK_LIBRARIES}")
    message(STATUS "EPOXY_LINK_LIBRARIES=${EPOXY_LINK_LIBRARIES}")
    set(TARGET_LIBS ${TARGET_LIBS} ${X11_LIBRARIES} ${EPOXY_LIBRARIES})
    set(SRC ${SRC} egl_preview.cpp)
    set(EGL_FOUND 1)
    message(STATUS "EGL display mode enabled")
else()
    message(STATUS "EGL display mode will be unavailable!")
endif()

IF (NOT DEFINED ENABLE_QT)
message(STATUS "ENABLE_QT not specified - set to 1")
    set(ENABLE_QT 1)
endif()
set(QT_FOUND 0)
if (ENABLE_QT AND QTCORE_FOUND AND QTWIDGETS_FOUND)
    message(STATUS "QTCORE_LINK_LIBRARIES=${QTCORE_LINK_LIBRARIES}")
    message(STATUS "QTCORE_INCLUDE_DIRS=${QTCORE_INCLUDE_DIRS}")
    include_directories(${QTCORE_INCLUDE_DIRS} ${QTWIDGETS_INCLUDE_DIRS})
    set(TARGET_LIBS ${TARGET_LIBS} ${QTCORE_LIBRARIES} ${QTWIDGETS_LIBRARIES})
    # The qt5/QtCore/qvariant.h header throws a warning, so suppress this.
    # Annoyingly there are two different (incompatible) flags for clang < 10
    # and >= 10, so set both, and suppress unknown options warnings.
    set_source_files_properties(qt_preview.cpp PROPERTIES COMPILE_FLAGS
        "-Wno-unknown-warning-option -Wno-deprecated-copy -Wno-deprecated")
    set(SRC ${SRC} qt_preview.cpp)
    set(QT_FOUND 1)
    message(STATUS "QT display mode enabled")
else()
    message(STATUS "QT display mode will be unavailable!")
endif()

# null_preview.cpp is always built; the optional backends are added via SRC.
add_library(preview null_preview.cpp ${SRC})
set_target_properties(preview PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
target_link_libraries(preview ${TARGET_LIBS})

# Exported so preview.cpp can compile in only the backends that exist.
target_compile_definitions(preview PUBLIC LIBDRM_PRESENT=${DRM_FOUND})
target_compile_definitions(preview PUBLIC LIBEGL_PRESENT=${EGL_FOUND})
target_compile_definitions(preview PUBLIC QT_PRESENT=${QT_FOUND})

install(TARGETS preview LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})

/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (C) 2021, Raspberry Pi (Trading) Ltd.
 *
 * null_preview.cpp - dummy "show nothing" preview window.
 */

#include <iostream>

#include "core/options.hpp"

#include "preview.hpp"

// Preview implementation that displays nothing: each buffer is handed straight
// back through the done callback, so the pipeline keeps running windowless.
class NullPreview : public Preview
{
public:
	NullPreview(Options const *options) : Preview(options) { LOG(2, "Running without preview window"); }
	~NullPreview() {}
	// Display the buffer. You get given the fd back in the BufferDoneCallback
	// once its available for re-use.
	virtual void Show(int fd, libcamera::Span<uint8_t> span, StreamInfo const &info) override { done_callback_(fd); }
	// Reset the preview window, clearing the current buffers and being ready to
	// show new ones.
	void Reset() override {}
	// Return the maximum image size allowed. Zeroes mean "no limit".
	virtual void MaxImageSize(unsigned int &w, unsigned int &h) const override { w = h = 0; }

	// No window to show text in, so just log it.
	void SetInfoText(const std::string &text) override { LOG(1, text); }

private:
};

Preview *make_null_preview(Options const *options)
{
	return new NullPreview(options);
}
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (C) 2021, Raspberry Pi (Trading) Ltd.
4 | * 5 | * preview.cpp - preview window interface 6 | */ 7 | 8 | #include "core/options.hpp" 9 | 10 | #include "preview.hpp" 11 | 12 | Preview *make_null_preview(Options const *options); 13 | Preview *make_egl_preview(Options const *options); 14 | Preview *make_drm_preview(Options const *options); 15 | Preview *make_qt_preview(Options const *options); 16 | 17 | Preview *make_preview(Options const *options) 18 | { 19 | if (options->nopreview) 20 | return make_null_preview(options); 21 | #if QT_PRESENT 22 | else if (options->qt_preview) 23 | { 24 | Preview *p = make_qt_preview(options); 25 | if (p) 26 | LOG(1, "Made QT preview window"); 27 | return p; 28 | } 29 | #endif 30 | else 31 | { 32 | try 33 | { 34 | #if LIBEGL_PRESENT 35 | Preview *p = make_egl_preview(options); 36 | if (p) 37 | LOG(1, "Made X/EGL preview window"); 38 | return p; 39 | #else 40 | throw std::runtime_error("egl libraries unavailable."); 41 | #endif 42 | } 43 | catch (std::exception const &e) 44 | { 45 | try 46 | { 47 | #if LIBDRM_PRESENT 48 | Preview *p = make_drm_preview(options); 49 | if (p) 50 | LOG(1, "Made DRM preview window"); 51 | return p; 52 | #else 53 | throw std::runtime_error("drm libraries unavailable."); 54 | #endif 55 | } 56 | catch (std::exception const &e) 57 | { 58 | LOG(1, "Preview window unavailable"); 59 | return make_null_preview(options); 60 | } 61 | } 62 | } 63 | 64 | return nullptr; // prevents compiler warning in debug builds 65 | } 66 | -------------------------------------------------------------------------------- /preview/preview.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
 *
 * preview.hpp - preview window interface
 */

#pragma once

#include <functional>
#include <string>

#include <libcamera/base/span.h>

#include "core/stream_info.hpp"

struct Options;

// Abstract interface implemented by the EGL, DRM, Qt and null preview windows.
class Preview
{
public:
	// Called with the buffer's fd once the preview no longer needs it.
	typedef std::function<void(int fd)> DoneCallback;

	Preview(Options const *options) : options_(options) {}
	virtual ~Preview() {}
	// This is where the application sets the callback it gets whenever the viewfinder
	// is no longer displaying the buffer and it can be safely recycled.
	void SetDoneCallback(DoneCallback callback) { done_callback_ = callback; }
	// Optionally display an informational string (e.g. in a window title).
	virtual void SetInfoText(const std::string &text) {}
	// Display the buffer. You get given the fd back in the BufferDoneCallback
	// once its available for re-use.
	virtual void Show(int fd, libcamera::Span<uint8_t> span, StreamInfo const &info) = 0;
	// Reset the preview window, clearing the current buffers and being ready to
	// show new ones.
	virtual void Reset() = 0;
	// Check if preview window has been shut down.
	virtual bool Quit() { return false; }
	// Return the maximum image size allowed.
	virtual void MaxImageSize(unsigned int &w, unsigned int &h) const = 0;

protected:
	DoneCallback done_callback_;
	Options const *options_;
};

Preview *make_preview(Options const *options);
cmake_minimum_required(VERSION 3.6)

set(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR})
install(PROGRAMS camera-bug-report DESTINATION bin)
#!/usr/bin/python3
#
# libcamera-apps bug report generator.
4 | # Copyright (C) 2021, Raspberry Pi Ltd. 5 | # 6 | import argparse 7 | from datetime import datetime 8 | import select 9 | import subprocess 10 | import sys 11 | import time 12 | 13 | 14 | class Report: 15 | def __init__(self, id, file): 16 | self._id = id 17 | self._cmds = [] 18 | self._strs = [] 19 | self._file = file 20 | 21 | def __run_cmd(self, cmd): 22 | print(f'** {cmd} **', file=self._file) 23 | try: 24 | p = subprocess.run(cmd, text=True, check=False, shell=True, 25 | stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 26 | print(p.stdout, file=self._file) 27 | except RuntimeError as e: 28 | print(f'Error: {e}', file=self._file) 29 | 30 | def add_cmd(self, c): 31 | self._cmds.append(c) 32 | 33 | def add_str(self, s): 34 | self._strs.append(s) 35 | 36 | def exec(self): 37 | print(f'{"-"*80}\n{self._id}\n{"-"*80}', file=self._file) 38 | 39 | for c in self._cmds: 40 | self.__run_cmd(c) 41 | 42 | for s in self._strs: 43 | print(s, file=self._file) 44 | 45 | 46 | def run_prog(cmd, t): 47 | cmd = cmd.split(' ') 48 | out = [] 49 | try: 50 | start = time.time() 51 | p = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, errors='ignore') 52 | poll = select.poll() 53 | poll.register(p.stdout, select.POLLIN) 54 | 55 | while p.poll() is None: 56 | if poll.poll(0): 57 | line = p.stdout.readline() 58 | print(line, end='', flush=True) 59 | out.append(line) 60 | 61 | if (t != 0) and (time.time() - start > t): 62 | p.kill() 63 | out = out + p.communicate()[0].splitlines(keepends=True) 64 | out.append('Error: ***** TIMEOUT *****') 65 | break 66 | 67 | except KeyboardInterrupt: 68 | p.kill() 69 | out = out + p.communicate()[0].splitlines(keepends=True) 70 | out.append('Error: ***** INTERRUPT *****') 71 | 72 | p.wait() 73 | return ''.join(out) 74 | 75 | 76 | if __name__ == '__main__': 77 | parser = argparse.ArgumentParser(description='libcamera-apps Bug Report Generator') 78 | parser.add_argument('-o', help='Report filename', type=str, 
default='bug-report.txt') 79 | parser.add_argument('-t', help='Timeout (seconds) for the command to run. A value of 0 \ 80 | disables the timeout.', type=float, default=0) 81 | parser.add_argument('-c', help='Command to run, e.g. -c "libcamera-still -t 1000 -o test.jpg"', type=str) 82 | args = parser.parse_args() 83 | 84 | # This is the app the user is actually running. 85 | app = 'libcamera-hello' 86 | if args.c: 87 | app = args.c.split(" ")[0] 88 | # Can we identify the app? If not, use libcamera-hello for version checks. 89 | if not any([s in app for s in ['libcamera-still', 'libcamera-vid', 'libcamera-hello', 'libcamera-raw', 'libcamera-jpeg']]): 90 | app = 'libcamera-hello' 91 | 92 | reports = [] 93 | with open(args.o, 'wt') as file: 94 | title = Report('libcamera-apps Bug Report', file) 95 | title.add_str(f'Date: {datetime.now().strftime("%d-%m-%Y (%H:%M:%S)")}') 96 | title.add_str(f'Command: {" ".join(sys.argv)}\n') 97 | reports.append(title) 98 | 99 | hwinfo = Report('Hardware information', file) 100 | hwinfo.add_cmd('hostname') 101 | hwinfo.add_cmd('cat /proc/cpuinfo') 102 | reports.append(hwinfo) 103 | 104 | config = Report('Configuration', file) 105 | config.add_cmd('cat /boot/cmdline.txt') 106 | config.add_cmd('cat /boot/config.txt') 107 | reports.append(config) 108 | 109 | logs = Report('Logs', file) 110 | logs.add_cmd('dmesg') 111 | logs.add_cmd('sudo vcdbg log msg') 112 | logs.add_cmd('sudo vcdbg log assert') 113 | logs.add_cmd('sudo vcdbg log ex') 114 | reports.append(logs) 115 | 116 | mem = Report('Memory', file) 117 | mem.add_cmd('cat /proc/meminfo') 118 | mem.add_cmd('sudo cat /sys/kernel/debug/dma_buf/bufinfo') 119 | mem.add_cmd('sudo cat /sys/kernel/debug/vcsm-cma/state') 120 | reports.append(mem) 121 | 122 | media = Report('Media Devices', file) 123 | for i in range(5): 124 | media.add_cmd(f'media-ctl -d {i} -p') 125 | reports.append(media) 126 | 127 | # Get the camera list with the same program specified in the run command 128 | cam = 
Report('Cameras', file) 129 | cam.add_cmd(f'{app} --list-cameras') 130 | reports.append(cam) 131 | 132 | # Get the version with the same program specified in the run command 133 | ver = Report('Versions', file) 134 | ver.add_cmd('uname -a') 135 | ver.add_cmd('cat /etc/os-release') 136 | ver.add_cmd('vcgencmd version') 137 | ver.add_cmd(f'{app} --version') 138 | reports.append(ver) 139 | 140 | # Run the actual application before executing the reports! 141 | if args.c: 142 | cmd_out = run_prog(args.c, args.t) 143 | 144 | # Report for the command output 145 | cmd = Report(args.c, file) 146 | cmd.add_str(cmd_out) 147 | reports.append(cmd) 148 | 149 | for r in reports: 150 | r.exec() 151 | 152 | print(f'\nBug report generated to {args.o}') 153 | print('Please upload this file when you create a new bug report at:') 154 | print('https://github.com/raspberrypi/libcamera-apps/issues/') 155 | -------------------------------------------------------------------------------- /utils/timestamp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # libcamera-apps timestamp analysis tool 4 | # Copyright (C) 2021, Raspberry Pi Ltd. 
#
import argparse

try:
    from matplotlib import pyplot as plt
    plot_available = True
except ImportError:
    plot_available = False


def read_times(file):
    """Read frame timestamps (in ms) from a libcamera-vid PTS file.

    Skips the single header line and ignores blank lines (e.g. a trailing
    newline), which would otherwise crash float().
    """
    with open(file) as f:
        f.readline()  # there's one header line we must skip
        return [float(line) for line in f if line.strip()]


def get_differences(items):
    """Return successive differences, i.e. the frame-to-frame times."""
    return [next_item - item for item, next_item in zip(items[:-1], items[1:])]


def outliers(diffs, frac, avg):
    """Count frame times more than the fraction frac away from the average,
    formatted as 'count (percent%)'."""
    return f'{sum(d < (1 - frac) * avg or d > (1 + frac) * avg for d in diffs)} ({frac * 100}%)'


def plot_pts(diffs, avg, title):
    """Plot the per-frame times with the average and maximum marked."""
    fig, ax = plt.subplots()
    ax.plot(diffs, label='Frame times')
    ax.plot([0, len(diffs)], [avg, avg], 'g--', label='Average')
    # Find and plot the max value
    max_val, idx = max((val, idx) for (idx, val) in enumerate(diffs))
    ax.plot([idx], [max_val], 'rx', label='Maximum')
    ax.axis([0, len(diffs), min(diffs) * 0.995, max_val * 1.005])
    ax.legend()
    plt.title(title)
    plt.xlabel('Frame')
    plt.ylabel('Frame time (ms)')
    plt.grid(True)
    plt.show()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='libcamera-apps timestamp analysis tool')
    parser.add_argument('filename', help='PTS file generated from libcamera-vid', type=str)
    parser.add_argument('--plot', help='Plot timestamp graph', action='store_true')
    args = parser.parse_args()

    times = read_times(args.filename)
    diffs = get_differences(times)
    avg = sum(diffs) / len(diffs)
    min_val, min_idx = min((val, idx) for (idx, val) in enumerate(diffs))
    max_val, max_idx = max((val, idx) for (idx, val) in enumerate(diffs))
    print(f'Minimum: {min_val:.3f} ms at frame {min_idx}\nMaximum: {max_val:.3f} ms at frame {max_idx}\nAverage: {avg:.3f} ms')
    print(f'Total: {len(diffs)} samples')
    print('Outliers:', *[outliers(diffs, f, avg) for f in (1, .1, .01, .001)])

    if args.plot:
        if plot_available:
            plot_pts(diffs, avg, f'{args.filename}')
        else:
            print('\nError: matplotlib is not installed, please install with "pip3 install matplotlib"')
#!/usr/bin/python3

# Copyright (C) 2021, Raspberry Pi (Trading) Limited
# Generate version information for libcamera-apps

import subprocess
import sys
from datetime import datetime
from string import hexdigits

# Number of hex digits of the commit sha to keep.
digits = 12


def generate_version():
    """Print '<sha>[-intree][-dirty] <timestamp>' for the build.

    With no argument, queries the local git tree; otherwise validates the sha
    passed as sys.argv[1]. On any failure a '<zeros>-invalid' id is printed.
    """
    try:
        if len(sys.argv) == 1:
            # Check if this is a git directory
            r = subprocess.run(['git', 'rev-parse', '--git-dir'],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, text=True)
            if r.returncode:
                raise RuntimeError('Invalid git directory!')

            # Get commit id
            r = subprocess.run(['git', 'rev-parse', '--verify', 'HEAD'],
                               stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
            if r.returncode:
                raise RuntimeError('Invalid git commit!')

            commit = r.stdout.strip('\n')[0:digits] + '-intree'

            # Check dirty status
            r = subprocess.run(['git', 'diff-index', '--quiet', 'HEAD'],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, text=True)
            if r.returncode:
                commit = commit + '-dirty'
        else:
            commit = sys.argv[1].lower().strip()
            if any(c not in hexdigits for c in commit):
                raise RuntimeError('Invalid git sha!')

            commit = commit[0:digits]

    except (RuntimeError, OSError) as e:
        # OSError covers git not being installed at all (FileNotFoundError),
        # which previously escaped and left 'commit' unbound in the finally.
        print(f'ERR: {e}', file=sys.stderr)
        commit = '0' * digits + '-invalid'

    finally:
        print(f'{commit} {datetime.now().strftime("%d-%m-%Y (%H:%M:%S)")}', end="")


if __name__ == "__main__":
    generate_version()
--------------------------------------------------------------------------------