├── .clang-format ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── gen_orig.yml │ ├── rpicam-apps-style-checker.yml │ ├── rpicam-apps-test.yml │ └── rpicam-test.yml ├── .gitignore ├── README.md ├── apps ├── meson.build ├── rpicam_detect.cpp ├── rpicam_hello.cpp ├── rpicam_jpeg.cpp ├── rpicam_raw.cpp ├── rpicam_still.cpp └── rpicam_vid.cpp ├── assets ├── acoustic_focus.json ├── annotate_cv.json ├── drc.json ├── face_detect_cv.json ├── hailo_classifier.json ├── hailo_pose_inf_fl.json ├── hailo_scrfd.json ├── hailo_yolov5_personface.json ├── hailo_yolov5_segmentation.json ├── hailo_yolov6_inference.json ├── hailo_yolov8_inference.json ├── hailo_yolov8_pose.json ├── hailo_yolox_inference.json ├── hdr.json ├── imx500_mobilenet_ssd.json ├── imx500_posenet.json ├── motion_detect.json ├── negate.json ├── object_classify_tf.json ├── object_detect_tf.json ├── pose_estimation_tf.json ├── segmentation_labels.txt ├── segmentation_tf.json ├── sobel_cv.json ├── yolov5_personface.json └── yolov5seg.json ├── core ├── buffer_sync.cpp ├── buffer_sync.hpp ├── completed_request.hpp ├── dma_heaps.cpp ├── dma_heaps.hpp ├── frame_info.hpp ├── logging.hpp ├── meson.build ├── metadata.hpp ├── options.cpp ├── options.hpp ├── post_processor.cpp ├── post_processor.hpp ├── rpicam_app.cpp ├── rpicam_app.hpp ├── rpicam_encoder.hpp ├── still_options.hpp ├── stream_info.hpp ├── version.cpp.in ├── version.hpp └── video_options.hpp ├── encoder ├── encoder.cpp ├── encoder.hpp ├── h264_encoder.cpp ├── h264_encoder.hpp ├── libav_encoder.cpp ├── libav_encoder.hpp ├── meson.build ├── mjpeg_encoder.cpp ├── mjpeg_encoder.hpp ├── null_encoder.cpp └── null_encoder.hpp ├── image ├── bmp.cpp ├── dng.cpp ├── image.hpp ├── jpeg.cpp ├── meson.build ├── png.cpp └── yuv.cpp ├── license.txt ├── meson.build ├── meson_options.txt ├── output ├── circular_output.cpp ├── circular_output.hpp ├── file_output.cpp ├── file_output.hpp ├── meson.build ├── net_output.cpp ├── net_output.hpp ├── output.cpp 
└── output.hpp ├── post_processing_stages ├── README.md ├── acoustic_focus_stage.cpp ├── annotate_cv_stage.cpp ├── face_detect_cv_stage.cpp ├── hailo │ ├── hailo_classifier.cpp │ ├── hailo_postprocessing_stage.cpp │ ├── hailo_postprocessing_stage.hpp │ ├── hailo_scrfd.cpp │ ├── hailo_yolo_inference.cpp │ ├── hailo_yolov5_segmentation.cpp │ ├── hailo_yolov8_pose.cpp │ └── meson.build ├── hdr_stage.cpp ├── histogram.cpp ├── histogram.hpp ├── imx500 │ ├── imx500_object_detection.cpp │ ├── imx500_posenet.cpp │ ├── imx500_post_processing_stage.cpp │ ├── imx500_post_processing_stage.hpp │ └── meson.build ├── meson.build ├── motion_detect_stage.cpp ├── negate_stage.cpp ├── object_classify_tf_stage.cpp ├── object_detect.hpp ├── object_detect_draw_cv_stage.cpp ├── object_detect_tf_stage.cpp ├── plot_pose_cv_stage.cpp ├── pose_estimation_tf_stage.cpp ├── post_processing_stage.cpp ├── post_processing_stage.hpp ├── pwl.cpp ├── pwl.hpp ├── segmentation.hpp ├── segmentation_tf_stage.cpp ├── sobel_cv_stage.cpp ├── tf_stage.cpp └── tf_stage.hpp ├── preview ├── drm_preview.cpp ├── egl_preview.cpp ├── meson.build ├── null_preview.cpp ├── preview.cpp ├── preview.hpp └── qt_preview.cpp └── utils ├── camera-bug-report ├── checkstyle.py ├── download-hailo-models.sh ├── download-imx500-models.sh ├── gen-dist.sh ├── meson.build ├── test.py ├── timestamp.py └── version.py /.clang-format: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: GPL-2.0-only 2 | # 3 | # clang-format configuration file. Intended for clang-format >= 7. 
4 | # 5 | # For more information, see: 6 | # 7 | # Documentation/process/clang-format.rst 8 | # https://clang.llvm.org/docs/ClangFormat.html 9 | # https://clang.llvm.org/docs/ClangFormatStyleOptions.html 10 | # 11 | --- 12 | Language: Cpp 13 | AccessModifierOffset: -4 14 | AlignAfterOpenBracket: Align 15 | AlignConsecutiveAssignments: false 16 | AlignConsecutiveDeclarations: false 17 | AlignEscapedNewlines: Right 18 | AlignOperands: true 19 | AlignTrailingComments: false 20 | AllowAllParametersOfDeclarationOnNextLine: false 21 | AllowShortBlocksOnASingleLine: false 22 | AllowShortCaseLabelsOnASingleLine: false 23 | AllowShortLambdasOnASingleLine: Inline 24 | AllowShortFunctionsOnASingleLine: InlineOnly 25 | AllowShortIfStatementsOnASingleLine: false 26 | AllowShortLoopsOnASingleLine: false 27 | AlwaysBreakAfterDefinitionReturnType: None 28 | AlwaysBreakAfterReturnType: None 29 | AlwaysBreakBeforeMultilineStrings: false 30 | AlwaysBreakTemplateDeclarations: Yes 31 | BinPackArguments: true 32 | BinPackParameters: true 33 | BreakBeforeBraces: Allman 34 | BraceWrapping: 35 | SplitEmptyFunction: true 36 | SplitEmptyRecord: true 37 | SplitEmptyNamespace: true 38 | BreakBeforeBinaryOperators: None 39 | BreakBeforeInheritanceComma: false 40 | BreakInheritanceList: BeforeColon 41 | BreakBeforeTernaryOperators: true 42 | BreakConstructorInitializers: BeforeColon 43 | BreakAfterJavaFieldAnnotations: false 44 | BreakStringLiterals: false 45 | CommentPragmas: '^ IWYU pragma:' 46 | CompactNamespaces: false 47 | ConstructorInitializerAllOnOneLineOrOnePerLine: false 48 | Cpp11BracedListStyle: false 49 | DerivePointerAlignment: false 50 | DisableFormat: false 51 | ExperimentalAutoDetectBinPacking: false 52 | FixNamespaceComments: true 53 | ForEachMacros: 54 | - 'udev_list_entry_foreach' 55 | IncludeBlocks: Preserve 56 | IncludeCategories: 57 | - Regex: '.*' 58 | Priority: 1 59 | IncludeIsMainRegex: '(_test)?$' 60 | IndentCaseLabels: false 61 | IndentPPDirectives: None 62 | 
IndentWrappedFunctionNames: false 63 | JavaScriptQuotes: Leave 64 | JavaScriptWrapImports: true 65 | KeepEmptyLinesAtTheStartOfBlocks: false 66 | MacroBlockBegin: '' 67 | MacroBlockEnd: '' 68 | MaxEmptyLinesToKeep: 1 69 | NamespaceIndentation: None 70 | ObjCBinPackProtocolList: Auto 71 | ObjCBlockIndentWidth: 8 72 | ObjCSpaceAfterProperty: true 73 | ObjCSpaceBeforeProtocolList: true 74 | 75 | # Taken from git's rules 76 | PenaltyBreakAssignment: 10 77 | PenaltyBreakBeforeFirstCallParameter: 30 78 | PenaltyBreakComment: 10 79 | PenaltyBreakFirstLessLess: 0 80 | PenaltyBreakString: 10 81 | PenaltyBreakTemplateDeclaration: 10 82 | PenaltyExcessCharacter: 100 83 | PenaltyReturnTypeOnItsOwnLine: 60 84 | 85 | PointerAlignment: Right 86 | ReflowComments: false 87 | SortIncludes: true 88 | SortUsingDeclarations: true 89 | SpaceAfterCStyleCast: false 90 | SpaceAfterTemplateKeyword: true 91 | SpaceAfterLogicalNot: false 92 | #SpaceBeforeCaseColon: false 93 | SpaceBeforeAssignmentOperators: true 94 | SpaceBeforeCpp11BracedList: true 95 | SpaceBeforeCtorInitializerColon: true 96 | SpaceBeforeInheritanceColon: true 97 | SpaceBeforeParens: ControlStatements 98 | SpaceBeforeRangeBasedForLoopColon: true 99 | SpaceInEmptyParentheses: false 100 | SpacesBeforeTrailingComments: 1 101 | SpacesInAngles: false 102 | SpacesInContainerLiterals: false 103 | SpacesInCStyleCastParentheses: false 104 | SpacesInParentheses: false 105 | SpacesInSquareBrackets: false 106 | IndentWidth: 4 107 | TabWidth: 4 108 | UseTab: Always 109 | ConstructorInitializerIndentWidth: 4 110 | ContinuationIndentWidth: 4 111 | ColumnLimit: 120 112 | ... 
113 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Bug report** 14 | Please use the ``camera-bug-report`` tool to create a bug report, and upload it here. 15 | 16 | The bug report tool uses the following syntax: 17 | 18 | ``` 19 | camera-bug-report -t <timeout in seconds> -o <output file> -c "<command to run>" 20 | ``` 21 | 22 | For example, 23 | 24 | ``` 25 | camera-bug-report -t 5 -o bug.txt -c "rpicam-still -t 1000 -o test.jpg" 26 | ``` 27 | will attempt to run rpicam-still and timeout after 5 seconds if the application has stalled. The script will generate a ``bug.txt`` file that captures all the output logs and system state to help us debug the issue. 28 | 29 | You can also run without a timeout: 30 | 31 | ``` 32 | camera-bug-report -o bug.txt -c "rpicam-vid -t 0 -o test.264" 33 | ``` 34 | This will run ``rpicam-vid`` indefinitely until either you press ``Ctrl+C`` or the application terminates, after which the necessary output logs and system state will be captured. 35 | 36 | If you cannot run your application through the ``camera-bug-report`` tool, run it without the ``-c`` command line argument **after running the camera application.** In these cases, please also provide the command line used to run the application, as well as any output generated during the run. 37 | 38 | **Additional context** 39 | Add any other context about the problem here. 
40 | -------------------------------------------------------------------------------- /.github/workflows/gen_orig.yml: -------------------------------------------------------------------------------- 1 | name: Generate source release tarball 2 | run-name: Generating source release tarball 3 | on: 4 | push: 5 | tags: # vX.Y.Z 6 | - 'v[0-9]+.[0-9]+.[0-9]+' 7 | workflow_dispatch: 8 | jobs: 9 | publish_tarball: 10 | permissions: 11 | contents: write 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Install dependencies 15 | run: | # Jammy versions of Meson and libcamera are too old 16 | pip3 install --user meson 17 | sudo apt-get update 18 | sudo apt-get install -y ninja-build pkgconf libboost-program-options-dev libcamera-dev libexif-dev libjpeg-dev libpng-dev libtiff-dev 19 | sudo cp /usr/lib/x86_64-linux-gnu/pkgconfig/{,lib}camera.pc ||: 20 | - name: Check out repository code 21 | uses: actions/checkout@v4 22 | - name: Generate tarballs 23 | run: | 24 | PATH="${HOME}/.local/bin:${PATH}" 25 | TARBALL="rpicam-apps-${GITHUB_REF_NAME:1}.tar.xz" 26 | meson setup build 27 | meson dist --no-tests --include-subprojects -C build 28 | if ! [ -f "build/meson-dist/$TARBALL" ]; then 29 | echo "Expected tarball not found - $TARBALL" 30 | echo "Does 'version' in meson.build match the tag?" 
31 | exit 1 32 | fi 33 | mv "build/meson-dist/$TARBALL" rpicam-apps_${GITHUB_REF_NAME:1}.orig.tar.xz 34 | ./utils/download-hailo-models.sh hailo-models 35 | XZ_OPT=-9 tar -cJf rpicam-apps_${GITHUB_REF_NAME:1}.orig-hailo-models.tar.xz hailo-models 36 | ./utils/download-imx500-models.sh imx500-models 37 | XZ_OPT=-9 tar -cJf rpicam-apps_${GITHUB_REF_NAME:1}.orig-imx500-models.tar.xz imx500-models 38 | - name: Release tarballs 39 | uses: softprops/action-gh-release@v2 40 | with: 41 | files: | 42 | *.tar.xz 43 | - if: failure() 44 | run: cat build/meson-logs/meson-log.txt 45 | -------------------------------------------------------------------------------- /.github/workflows/rpicam-apps-style-checker.yml: -------------------------------------------------------------------------------- 1 | name: rpicam-apps style checker 2 | on: 3 | pull_request: 4 | branches: [ main ] 5 | 6 | jobs: 7 | style-check: 8 | 9 | runs-on: [ self-hosted ] 10 | 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | fetch-depth: 0 15 | clean: true 16 | 17 | - name: Check style 18 | run: ${{github.workspace}}/utils/checkstyle.py $(git log --format=%P -1 | awk '{print $1 ".." 
$2}') 19 | -------------------------------------------------------------------------------- /.github/workflows/rpicam-apps-test.yml: -------------------------------------------------------------------------------- 1 | name: rpicam-app smoke tests 2 | 3 | on: 4 | pull_request: 5 | branches: [ main ] 6 | 7 | env: 8 | GCC_COMPILER: "CC=/usr/bin/gcc CXX=/usr/bin/g++" 9 | CLANG_COMPILER: "CC=/usr/bin/clang-13 CXX=/usr/bin/clang++-13" 10 | LIBCAMERA_LKG_DIR: "/home/pi/libcamera_lkg" 11 | 12 | jobs: 13 | build-test: 14 | 15 | runs-on: [ self-hosted ] 16 | 17 | strategy: 18 | matrix: 19 | compiler: [ gcc, clang ] 20 | build_type: [ release, debug ] 21 | 22 | steps: 23 | - uses: actions/checkout@v4 24 | with: 25 | fetch-depth: 1 26 | clean: true 27 | 28 | - name: Configure meson 29 | env: 30 | COMPILER: "${{ matrix.compiler == 'gcc' && env.GCC_COMPILER || env.CLANG_COMPILER }}" 31 | run: ${{env.COMPILER}} meson setup ${{github.workspace}}/build --pkg-config-path=${{env.LIBCAMERA_LKG_DIR}}/lib/aarch64-linux-gnu/pkgconfig/ -Dbuildtype=${{matrix.build_type}} 32 | timeout-minutes: 5 33 | 34 | - name: Build 35 | run: ninja -C ${{github.workspace}}/build 36 | timeout-minutes: 10 37 | 38 | - name: Tar files 39 | run: tar -cvf build-artifacts-${{matrix.compiler}}-${{matrix.build_type}}.tar -C ${{github.workspace}}/build . 
40 | 41 | - name: Upload build files 42 | uses: actions/upload-artifact@v4 43 | with: 44 | name: build-artifacts-${{matrix.compiler}}-${{matrix.build_type}} 45 | path: build-artifacts-${{matrix.compiler}}-${{matrix.build_type}}.tar 46 | retention-days: 21 47 | 48 | build-test-lite: 49 | 50 | runs-on: [ self-hosted ] 51 | 52 | steps: 53 | - uses: actions/checkout@v4 54 | with: 55 | fetch-depth: 1 56 | clean: true 57 | 58 | - name: Configure meson 59 | run: meson setup ${{github.workspace}}/build --pkg-config-path=${{env.LIBCAMERA_LKG_DIR}}/lib/aarch64-linux-gnu/pkgconfig/ -Dbuildtype=release -Denable_drm='disabled' -Denable_egl='disabled' -Denable_qt='disabled' -Denable_opencv='disabled' -Denable_tflite='disabled' -Denable_libav='disabled' 60 | timeout-minutes: 5 61 | 62 | - name: Build 63 | run: ninja -C ${{github.workspace}}/build 64 | timeout-minutes: 10 65 | 66 | - name: Tar files 67 | run: tar -cvf build-artifacts-gcc-lite.tar -C ${{github.workspace}}/build . 68 | 69 | - name: Upload build files 70 | uses: actions/upload-artifact@v4 71 | with: 72 | name: build-artifacts-gcc-lite 73 | path: build-artifacts-gcc-lite.tar 74 | retention-days: 21 75 | 76 | run-test: 77 | 78 | runs-on: ${{matrix.camera}} 79 | needs: build-test 80 | 81 | strategy: 82 | matrix: 83 | camera: [ imx219, imx477, imx708, pi5-imx708-imx477 ] 84 | 85 | steps: 86 | - uses: actions/checkout@v4 87 | with: 88 | fetch-depth: 1 89 | clean: true 90 | 91 | - name: Create test output dir 92 | run: mkdir -p ${{github.workspace}}/test_output 93 | 94 | - name: Download build 95 | uses: actions/download-artifact@v4 96 | with: 97 | name: build-artifacts-gcc-release 98 | path: ${{github.workspace}} 99 | 100 | - name: Untar files 101 | run: tar -xvf build-artifacts-gcc-release.tar --one-top-level=build 102 | 103 | - name: Print version string 104 | run: ${{github.workspace}}/build/apps/rpicam-hello --version 105 | 106 | - name: Print linkage info 107 | run: ldd ${{github.workspace}}/build/apps/rpicam-hello | 
grep libcamera 108 | 109 | - name: Test 110 | run: ${{github.workspace}}/utils/test.py --exe-dir ${{github.workspace}}/build/apps/ --output-dir ${{github.workspace}}/test_output --json-dir ${{github.workspace}}/assets --post-process-libs ${{github.workspace}}/build/post_processing_stages/ 111 | timeout-minutes: 8 112 | 113 | - name: Upload test output 114 | if: ${{ failure() }} 115 | uses: actions/upload-artifact@v4 116 | with: 117 | name: test-artifacts-${{matrix.camera}} 118 | path: ${{github.workspace}}/test_output/ 119 | retention-days: 21 120 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | *~ 3 | .vscode/ 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rpicam-apps 2 | This is a small suite of libcamera-based applications to drive the cameras on a Raspberry Pi platform. 3 | 4 | >[!WARNING] 5 | >These applications and libraries have been renamed from `libcamera-*` to `rpicam-*`. Symbolic links to allow users to keep using the old application names have now been removed. 6 | 7 | Build 8 | ----- 9 | For usage and build instructions, see the official Raspberry Pi documentation pages [here.](https://www.raspberrypi.com/documentation/computers/camera_software.html#building-libcamera-and-rpicam-apps) 10 | 11 | License 12 | ------- 13 | 14 | The source code is made available under the simplified [BSD 2-Clause license](https://spdx.org/licenses/BSD-2-Clause.html). 
15 | 16 | Status 17 | ------ 18 | 19 | [![ToT libcamera build/run test](https://github.com/raspberrypi/rpicam-apps/actions/workflows/rpicam-test.yml/badge.svg)](https://github.com/raspberrypi/rpicam-apps/actions/workflows/rpicam-test.yml) 20 | -------------------------------------------------------------------------------- /apps/meson.build: -------------------------------------------------------------------------------- 1 | rpicam_still = executable('rpicam-still', files('rpicam_still.cpp'), 2 | include_directories : include_directories('..'), 3 | dependencies: [libcamera_dep, boost_dep], 4 | link_with : rpicam_app, 5 | install : true) 6 | 7 | rpicam_vid = executable('rpicam-vid', files('rpicam_vid.cpp'), 8 | include_directories : include_directories('..'), 9 | dependencies: [libcamera_dep, boost_dep], 10 | link_with : rpicam_app, 11 | install : true) 12 | 13 | rpicam_hello = executable('rpicam-hello', files('rpicam_hello.cpp'), 14 | include_directories : include_directories('..'), 15 | dependencies: libcamera_dep, 16 | link_with : rpicam_app, 17 | install : true) 18 | 19 | rpicam_raw = executable('rpicam-raw', files('rpicam_raw.cpp'), 20 | include_directories : include_directories('..'), 21 | dependencies: [libcamera_dep, boost_dep], 22 | link_with : rpicam_app, 23 | install : true) 24 | 25 | rpicam_jpeg = executable('rpicam-jpeg', files('rpicam_jpeg.cpp'), 26 | include_directories : include_directories('..'), 27 | dependencies: [libcamera_dep, boost_dep], 28 | link_with : rpicam_app, 29 | install : true) 30 | 31 | if enable_tflite 32 | rpicam_detect = executable('rpicam-detect', files('rpicam_detect.cpp'), 33 | include_directories : include_directories('..'), 34 | dependencies: [libcamera_dep, boost_dep], 35 | link_with : rpicam_app, 36 | install : true) 37 | endif 38 | -------------------------------------------------------------------------------- /apps/rpicam_detect.cpp: -------------------------------------------------------------------------------- 1 | /* 
SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_detect.cpp - take pictures when objects are detected 6 | */ 7 | 8 | // Example: rpicam-detect --post-process-file object_detect_tf.json --lores-width 400 --lores-height 300 -t 0 --object cat -o cat%03d.jpg 9 | 10 | #include 11 | 12 | #include "core/rpicam_app.hpp" 13 | #include "core/still_options.hpp" 14 | 15 | #include "image/image.hpp" 16 | 17 | #include "post_processing_stages/object_detect.hpp" 18 | 19 | struct DetectOptions : public StillOptions 20 | { 21 | DetectOptions() : StillOptions() 22 | { 23 | using namespace boost::program_options; 24 | options_.add_options() 25 | ("object", value(&object), "Name of object to detect") 26 | ("gap", value(&gap)->default_value(30), "Smallest gap between captures in frames") 27 | ("timeformat", value(&timeformat)->default_value("%m%d%H%M%S"), "Date/Time format string - see C++ strftime()") 28 | ; 29 | } 30 | 31 | std::string object; 32 | unsigned int gap; 33 | std::string timeformat; 34 | 35 | virtual void Print() const override 36 | { 37 | StillOptions::Print(); 38 | std::cerr << "    object: " << object << std::endl; 39 | std::cerr << "    gap: " << gap << std::endl; 40 | std::cerr << "    timeformat: " << timeformat << std::endl; 41 | } 42 | }; 43 | 44 | class RPiCamDetectApp : public RPiCamApp 45 | { 46 | public: 47 | RPiCamDetectApp() : RPiCamApp(std::make_unique()) {} 48 | DetectOptions *GetOptions() const { return static_cast(options_.get()); } 49 | }; 50 | 51 | // The main event loop for the application. 
52 | 53 | static void event_loop(RPiCamDetectApp &app) 54 | { 55 | DetectOptions *options = app.GetOptions(); 56 | app.OpenCamera(); 57 | app.ConfigureViewfinder(); 58 | app.StartCamera(); 59 | auto start_time = std::chrono::high_resolution_clock::now(); 60 | unsigned int last_capture_frame = 0; 61 | 62 | for (unsigned int count = 0;; count++) 63 | { 64 | RPiCamApp::Msg msg = app.Wait(); 65 | if (msg.type == RPiCamApp::MsgType::Timeout) 66 | { 67 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 68 | app.StopCamera(); 69 | app.StartCamera(); 70 | continue; 71 | } 72 | if (msg.type == RPiCamApp::MsgType::Quit) 73 | return; 74 | 75 | // In viewfinder mode, simply run until the timeout, but do a capture if the object 76 | // we're looking for is detected. 77 | CompletedRequestPtr &completed_request = std::get(msg.payload); 78 | if (app.ViewfinderStream()) 79 | { 80 | auto now = std::chrono::high_resolution_clock::now(); 81 | if (options->timeout && (now - start_time) > options->timeout.value) 82 | return; 83 | 84 | std::vector detections; 85 | bool detected = completed_request->sequence - last_capture_frame >= options->gap && 86 | completed_request->post_process_metadata.Get("object_detect.results", detections) == 0 && 87 | std::find_if(detections.begin(), detections.end(), [options](const Detection &d) { 88 | return d.name.find(options->object) != std::string::npos; 89 | }) != detections.end(); 90 | 91 | app.ShowPreview(completed_request, app.ViewfinderStream()); 92 | 93 | if (detected) 94 | { 95 | app.StopCamera(); 96 | app.Teardown(); 97 | app.ConfigureStill(); 98 | app.StartCamera(); 99 | LOG(1, options->object << " detected"); 100 | } 101 | } 102 | // In still capture mode, save a jpeg and go back to preview. 
103 | else if (app.StillStream()) 104 | { 105 | app.StopCamera(); 106 | last_capture_frame = completed_request->sequence; 107 | 108 | StreamInfo info; 109 | libcamera::Stream *stream = app.StillStream(&info); 110 | BufferReadSync r(&app, completed_request->buffers[stream]); 111 | const std::vector> mem = r.Get(); 112 | 113 | // Generate a filename for the output and save it. 114 | char filename[128]; 115 | if (options->datetime) 116 | { 117 | std::time_t raw_time; 118 | std::time(&raw_time); 119 | char time_string[32]; 120 | std::tm *time_info = std::localtime(&raw_time); 121 | std::strftime(time_string, sizeof(time_string), options->timeformat.c_str() , time_info); 122 | snprintf(filename, sizeof(filename), "%s%s.%s", options->output.c_str(), time_string, options->encoding.c_str()); 123 | } 124 | else if (options->timestamp) 125 | snprintf(filename, sizeof(filename), "%s%u.%s", options->output.c_str(), (unsigned)time(NULL), options->encoding.c_str()); 126 | else 127 | snprintf(filename, sizeof(filename), options->output.c_str(), options->framestart); 128 | filename[sizeof(filename) - 1] = 0; 129 | options->framestart++; 130 | LOG(1, "Save image " << filename); 131 | jpeg_save(mem, info, completed_request->metadata, std::string(filename), app.CameraModel(), options); 132 | 133 | // Restart camera in preview mode. 
134 | app.Teardown(); 135 | app.ConfigureViewfinder(); 136 | app.StartCamera(); 137 | } 138 | } 139 | } 140 | 141 | int main(int argc, char *argv[]) 142 | { 143 | try 144 | { 145 | RPiCamDetectApp app; 146 | DetectOptions *options = app.GetOptions(); 147 | if (options->Parse(argc, argv)) 148 | { 149 | if (options->verbose >= 2) 150 | options->Print(); 151 | if (options->output.empty()) 152 | throw std::runtime_error("output file name required"); 153 | 154 | event_loop(app); 155 | } 156 | } 157 | catch (std::exception const &e) 158 | { 159 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 160 | return -1; 161 | } 162 | return 0; 163 | } 164 | -------------------------------------------------------------------------------- /apps/rpicam_hello.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_hello.cpp - libcamera "hello world" app. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/rpicam_app.hpp" 11 | #include "core/options.hpp" 12 | 13 | using namespace std::placeholders; 14 | 15 | // The main event loop for the application. 
16 | 17 | static void event_loop(RPiCamApp &app) 18 | { 19 | Options const *options = app.GetOptions(); 20 | 21 | app.OpenCamera(); 22 | app.ConfigureViewfinder(); 23 | app.StartCamera(); 24 | 25 | auto start_time = std::chrono::high_resolution_clock::now(); 26 | 27 | for (unsigned int count = 0; ; count++) 28 | { 29 | RPiCamApp::Msg msg = app.Wait(); 30 | if (msg.type == RPiCamApp::MsgType::Timeout) 31 | { 32 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 33 | app.StopCamera(); 34 | app.StartCamera(); 35 | continue; 36 | } 37 | if (msg.type == RPiCamApp::MsgType::Quit) 38 | return; 39 | else if (msg.type != RPiCamApp::MsgType::RequestComplete) 40 | throw std::runtime_error("unrecognised message!"); 41 | 42 | LOG(2, "Viewfinder frame " << count); 43 | auto now = std::chrono::high_resolution_clock::now(); 44 | if (options->timeout && (now - start_time) > options->timeout.value) 45 | return; 46 | 47 | CompletedRequestPtr &completed_request = std::get(msg.payload); 48 | app.ShowPreview(completed_request, app.ViewfinderStream()); 49 | } 50 | } 51 | 52 | int main(int argc, char *argv[]) 53 | { 54 | try 55 | { 56 | RPiCamApp app; 57 | Options *options = app.GetOptions(); 58 | if (options->Parse(argc, argv)) 59 | { 60 | if (options->verbose >= 2) 61 | options->Print(); 62 | 63 | event_loop(app); 64 | } 65 | } 66 | catch (std::exception const &e) 67 | { 68 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 69 | return -1; 70 | } 71 | return 0; 72 | } 73 | -------------------------------------------------------------------------------- /apps/rpicam_jpeg.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_jpeg.cpp - minimal libcamera jpeg capture app. 
6 | */ 7 | 8 | #include 9 | 10 | #include "core/rpicam_app.hpp" 11 | #include "core/still_options.hpp" 12 | 13 | #include "image/image.hpp" 14 | 15 | using namespace std::placeholders; 16 | using libcamera::Stream; 17 | 18 | class RPiCamJpegApp : public RPiCamApp 19 | { 20 | public: 21 | RPiCamJpegApp() 22 | : RPiCamApp(std::make_unique()) 23 | { 24 | } 25 | 26 | StillOptions *GetOptions() const 27 | { 28 | return static_cast(options_.get()); 29 | } 30 | }; 31 | 32 | // The main event loop for the application. 33 | 34 | static void event_loop(RPiCamJpegApp &app) 35 | { 36 | StillOptions const *options = app.GetOptions(); 37 | app.OpenCamera(); 38 | app.ConfigureViewfinder(); 39 | app.StartCamera(); 40 | auto start_time = std::chrono::high_resolution_clock::now(); 41 | 42 | for (;;) 43 | { 44 | RPiCamApp::Msg msg = app.Wait(); 45 | if (msg.type == RPiCamApp::MsgType::Timeout) 46 | { 47 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 48 | app.StopCamera(); 49 | app.StartCamera(); 50 | continue; 51 | } 52 | if (msg.type == RPiCamApp::MsgType::Quit) 53 | return; 54 | else if (msg.type != RPiCamApp::MsgType::RequestComplete) 55 | throw std::runtime_error("unrecognised message!"); 56 | 57 | // In viewfinder mode, simply run until the timeout. When that happens, switch to 58 | // capture mode. 59 | if (app.ViewfinderStream()) 60 | { 61 | auto now = std::chrono::high_resolution_clock::now(); 62 | if (options->timeout && (now - start_time) > options->timeout.value) 63 | { 64 | app.StopCamera(); 65 | app.Teardown(); 66 | app.ConfigureStill(); 67 | app.StartCamera(); 68 | } 69 | else 70 | { 71 | CompletedRequestPtr &completed_request = std::get(msg.payload); 72 | app.ShowPreview(completed_request, app.ViewfinderStream()); 73 | } 74 | } 75 | // In still capture mode, save a jpeg and quit. 
76 | else if (app.StillStream()) 77 | { 78 | app.StopCamera(); 79 | LOG(1, "Still capture image received"); 80 | 81 | Stream *stream = app.StillStream(); 82 | StreamInfo info = app.GetStreamInfo(stream); 83 | CompletedRequestPtr &payload = std::get(msg.payload); 84 | BufferReadSync r(&app, payload->buffers[stream]); 85 | const std::vector> mem = r.Get(); 86 | jpeg_save(mem, info, payload->metadata, options->output, app.CameraModel(), options); 87 | return; 88 | } 89 | } 90 | } 91 | 92 | int main(int argc, char *argv[]) 93 | { 94 | try 95 | { 96 | RPiCamJpegApp app; 97 | StillOptions *options = app.GetOptions(); 98 | if (options->Parse(argc, argv)) 99 | { 100 | if (options->verbose >= 2) 101 | options->Print(); 102 | if (options->output.empty()) 103 | throw std::runtime_error("output file name required"); 104 | 105 | event_loop(app); 106 | } 107 | } 108 | catch (std::exception const &e) 109 | { 110 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 111 | return -1; 112 | } 113 | return 0; 114 | } 115 | -------------------------------------------------------------------------------- /apps/rpicam_raw.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_raw.cpp - libcamera raw video record app. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/rpicam_encoder.hpp" 11 | #include "encoder/null_encoder.hpp" 12 | #include "output/output.hpp" 13 | 14 | using namespace std::placeholders; 15 | 16 | class LibcameraRaw : public RPiCamEncoder 17 | { 18 | public: 19 | LibcameraRaw() : RPiCamEncoder() {} 20 | 21 | protected: 22 | // Force the use of "null" encoder. 23 | void createEncoder() { encoder_ = std::unique_ptr(new NullEncoder(GetOptions())); } 24 | }; 25 | 26 | // The main event loop for the application. 
27 | 28 | static void event_loop(LibcameraRaw &app) 29 | { 30 | VideoOptions const *options = app.GetOptions(); 31 | std::unique_ptr output = std::unique_ptr(Output::Create(options)); 32 | app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4)); 33 | app.SetMetadataReadyCallback(std::bind(&Output::MetadataReady, output.get(), _1)); 34 | 35 | app.OpenCamera(); 36 | app.ConfigureVideo(LibcameraRaw::FLAG_VIDEO_RAW); 37 | app.StartEncoder(); 38 | app.StartCamera(); 39 | auto start_time = std::chrono::high_resolution_clock::now(); 40 | 41 | for (unsigned int count = 0; ; count++) 42 | { 43 | LibcameraRaw::Msg msg = app.Wait(); 44 | 45 | if (msg.type == RPiCamApp::MsgType::Timeout) 46 | { 47 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 48 | app.StopCamera(); 49 | app.StartCamera(); 50 | continue; 51 | } 52 | if (msg.type != LibcameraRaw::MsgType::RequestComplete) 53 | throw std::runtime_error("unrecognised message!"); 54 | if (count == 0) 55 | { 56 | libcamera::StreamConfiguration const &cfg = app.RawStream()->configuration(); 57 | LOG(1, "Raw stream: " << cfg.size.width << "x" << cfg.size.height << " stride " << cfg.stride << " format " 58 | << cfg.pixelFormat.toString()); 59 | } 60 | 61 | LOG(2, "Viewfinder frame " << count); 62 | auto now = std::chrono::high_resolution_clock::now(); 63 | if (options->timeout && (now - start_time) > options->timeout.value) 64 | { 65 | app.StopCamera(); 66 | app.StopEncoder(); 67 | return; 68 | } 69 | 70 | if (!app.EncodeBuffer(std::get(msg.payload), app.RawStream())) 71 | { 72 | // Keep advancing our "start time" if we're still waiting to start recording (e.g. 73 | // waiting for synchronisation with another camera). 
74 | start_time = now; 75 | } 76 | } 77 | } 78 | 79 | int main(int argc, char *argv[]) 80 | { 81 | try 82 | { 83 | LibcameraRaw app; 84 | VideoOptions *options = app.GetOptions(); 85 | if (options->Parse(argc, argv)) 86 | { 87 | // Disable any codec (h.264/libav) based operations. 88 | options->codec = "yuv420"; 89 | options->denoise = "cdn_off"; 90 | options->nopreview = true; 91 | if (options->verbose >= 2) 92 | options->Print(); 93 | 94 | event_loop(app); 95 | } 96 | } 97 | catch (std::exception const &e) 98 | { 99 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 100 | return -1; 101 | } 102 | return 0; 103 | } 104 | -------------------------------------------------------------------------------- /apps/rpicam_vid.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_vid.cpp - libcamera video record app. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "core/rpicam_encoder.hpp" 15 | #include "output/output.hpp" 16 | 17 | using namespace std::placeholders; 18 | 19 | // Some keypress/signal handling. 
20 | 21 | static int signal_received; 22 | static void default_signal_handler(int signal_number) 23 | { 24 | signal_received = signal_number; 25 | LOG(1, "Received signal " << signal_number); 26 | } 27 | 28 | static int get_key_or_signal(VideoOptions const *options, pollfd p[1]) 29 | { 30 | int key = 0; 31 | if (signal_received == SIGINT) 32 | return 'x'; 33 | if (options->keypress) 34 | { 35 | poll(p, 1, 0); 36 | if (p[0].revents & POLLIN) 37 | { 38 | char *user_string = nullptr; 39 | size_t len; 40 | [[maybe_unused]] size_t r = getline(&user_string, &len, stdin); 41 | key = user_string[0]; 42 | } 43 | } 44 | if (options->signal) 45 | { 46 | if (signal_received == SIGUSR1) 47 | key = '\n'; 48 | else if ((signal_received == SIGUSR2) || (signal_received == SIGPIPE)) 49 | key = 'x'; 50 | signal_received = 0; 51 | } 52 | return key; 53 | } 54 | 55 | static int get_colourspace_flags(std::string const &codec) 56 | { 57 | if (codec == "mjpeg" || codec == "yuv420") 58 | return RPiCamEncoder::FLAG_VIDEO_JPEG_COLOURSPACE; 59 | else 60 | return RPiCamEncoder::FLAG_VIDEO_NONE; 61 | } 62 | 63 | // The main even loop for the application. 64 | 65 | static void event_loop(RPiCamEncoder &app) 66 | { 67 | VideoOptions const *options = app.GetOptions(); 68 | std::unique_ptr output = std::unique_ptr(Output::Create(options)); 69 | app.SetEncodeOutputReadyCallback(std::bind(&Output::OutputReady, output.get(), _1, _2, _3, _4)); 70 | app.SetMetadataReadyCallback(std::bind(&Output::MetadataReady, output.get(), _1)); 71 | 72 | app.OpenCamera(); 73 | app.ConfigureVideo(get_colourspace_flags(options->codec)); 74 | app.StartEncoder(); 75 | app.StartCamera(); 76 | auto start_time = std::chrono::high_resolution_clock::now(); 77 | 78 | // Monitoring for keypresses and signals. 79 | signal(SIGUSR1, default_signal_handler); 80 | signal(SIGUSR2, default_signal_handler); 81 | signal(SIGINT, default_signal_handler); 82 | // SIGPIPE gets raised when trying to write to an already closed socket. 
This can happen, when 83 | // you're using TCP to stream to VLC and the user presses the stop button in VLC. Catching the 84 | // signal to be able to react on it, otherwise the app terminates. 85 | signal(SIGPIPE, default_signal_handler); 86 | pollfd p[1] = { { STDIN_FILENO, POLLIN, 0 } }; 87 | 88 | for (unsigned int count = 0; ; count++) 89 | { 90 | RPiCamEncoder::Msg msg = app.Wait(); 91 | if (msg.type == RPiCamApp::MsgType::Timeout) 92 | { 93 | LOG_ERROR("ERROR: Device timeout detected, attempting a restart!!!"); 94 | app.StopCamera(); 95 | app.StartCamera(); 96 | continue; 97 | } 98 | if (msg.type == RPiCamEncoder::MsgType::Quit) 99 | return; 100 | else if (msg.type != RPiCamEncoder::MsgType::RequestComplete) 101 | throw std::runtime_error("unrecognised message!"); 102 | int key = get_key_or_signal(options, p); 103 | if (key == '\n') 104 | output->Signal(); 105 | 106 | LOG(2, "Viewfinder frame " << count); 107 | auto now = std::chrono::high_resolution_clock::now(); 108 | bool timeout = !options->frames && options->timeout && 109 | ((now - start_time) > options->timeout.value); 110 | bool frameout = options->frames && count >= options->frames; 111 | if (timeout || frameout || key == 'x' || key == 'X') 112 | { 113 | if (timeout) 114 | LOG(1, "Halting: reached timeout of " << options->timeout.get() 115 | << " milliseconds."); 116 | app.StopCamera(); // stop complains if encoder very slow to close 117 | app.StopEncoder(); 118 | return; 119 | } 120 | CompletedRequestPtr &completed_request = std::get(msg.payload); 121 | if (!app.EncodeBuffer(completed_request, app.VideoStream())) 122 | { 123 | // Keep advancing our "start time" if we're still waiting to start recording (e.g. 124 | // waiting for synchronisation with another camera). 
125 | start_time = now; 126 | count = 0; // reset the "frames encoded" counter too 127 | } 128 | app.ShowPreview(completed_request, app.VideoStream()); 129 | } 130 | } 131 | 132 | int main(int argc, char *argv[]) 133 | { 134 | try 135 | { 136 | RPiCamEncoder app; 137 | VideoOptions *options = app.GetOptions(); 138 | if (options->Parse(argc, argv)) 139 | { 140 | if (options->verbose >= 2) 141 | options->Print(); 142 | 143 | event_loop(app); 144 | } 145 | } 146 | catch (std::exception const &e) 147 | { 148 | LOG_ERROR("ERROR: *** " << e.what() << " ***"); 149 | return -1; 150 | } 151 | return 0; 152 | } 153 | -------------------------------------------------------------------------------- /assets/acoustic_focus.json: -------------------------------------------------------------------------------- 1 | { 2 | "acoustic_focus": 3 | { 4 | "stage": "acoustic_focus", 5 | "minFoM": 1, 6 | "maxFoM": 2000, 7 | "minFreq": 300, 8 | "maxFreq": 5000, 9 | "duration": 0.1, 10 | "mapping": "log", 11 | "description": "mapping values are log (logarithmic) or linear" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /assets/annotate_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotate_cv" : 3 | { 4 | "text" : "Frame %frame exp %exp ag %ag dg %dg", 5 | "fg" : 255, 6 | "bg" : 0, 7 | "scale" : 1.0, 8 | "thickness" : 2, 9 | "alpha" : 0.3 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /assets/drc.json: -------------------------------------------------------------------------------- 1 | { 2 | "hdr" : 3 | { 4 | "num_frames" : 1, 5 | "lp_filter_strength" : 0.2, 6 | "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ], 7 | "global_tonemap_points" : 8 | [ 9 | { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 1.5, "max_down": 0.7 }, 10 | { "q": 0.5, "width": 0.05, "target": 0.5, "max_up": 1.5, "max_down": 0.7 }, 11 | { "q": 
0.8, "width": 0.05, "target": 0.8, "max_up": 1.5, "max_down": 0.7 } 12 | ], 13 | "global_tonemap_strength" : 1.0, 14 | "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ], 15 | "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ], 16 | "local_tonemap_strength" : 1.0, 17 | "local_colour_scale" : 0.9 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /assets/face_detect_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "face_detect_cv": 3 | { 4 | "cascade_name" : "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml", 5 | "scaling_factor" : 1.1, 6 | "min_neighbors" : 2, 7 | "min_size" : 32, 8 | "max_size" : 256, 9 | "refresh_rate" : 1, 10 | "draw_features" : 1 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /assets/hailo_classifier.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 224, 7 | "height": 224, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_classifier": 13 | { 14 | "hef_file": "/usr/share/hailo-models/resnet_v1_50_h8l.hef" 15 | }, 16 | 17 | "annotate_cv" : 18 | { 19 | "text": "", 20 | "fg" : 255, 21 | "bg" : 0, 22 | "scale" : 1.0, 23 | "thickness" : 2, 24 | "alpha" : 0.3 25 | } 26 | } -------------------------------------------------------------------------------- /assets/hailo_pose_inf_fl.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb", 9 | "par": true 10 | } 11 | }, 12 | 13 | "hailo_yolo_pose": 14 | { 15 | "hef_file": "/usr/share/hailo-models/yolov8s_pose_h8.hef" 16 | }, 17 | 18 | "hailo_yolo_inference": 19 | { 20 | "hef_file": "/usr/share/hailo-models/yolov8s_h8.hef", 21 | "max_detections": 20, 22 | "threshold": 0.4, 23 | 24 | 
"temporal_filter": 25 | { 26 | "tolerance": 0.1, 27 | "factor": 0.75, 28 | "visible_frames": 6, 29 | "hidden_frames": 3 30 | } 31 | }, 32 | 33 | "hailo_scrfd": 34 | { 35 | "hef_file": "/usr/share/hailo-models/scrfd_2.5g_h8l.hef" 36 | }, 37 | 38 | "object_detect_draw_cv": 39 | { 40 | "line_thickness" : 2 41 | } 42 | } -------------------------------------------------------------------------------- /assets/hailo_scrfd.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb", 9 | "par": true 10 | } 11 | }, 12 | 13 | "hailo_scrfd": 14 | { 15 | "hef_file": "/usr/share/hailo-models/scrfd_2.5g_h8l.hef" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /assets/hailo_yolov5_personface.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_inference": 13 | { 14 | "hef_file": "/usr/share/hailo-models/yolov5s_personface_h8l.hef", 15 | "hailopp_config_file": "/usr/share/hailo-models/yolov5_personface.json", 16 | "max_detections": 5, 17 | "threshold": 0.5, 18 | 19 | "temporal_filter": 20 | { 21 | "tolerance": 0.1, 22 | "factor": 0.70, 23 | "visible_frames": 10, 24 | "hidden_frames": 5 25 | } 26 | }, 27 | 28 | "object_detect_draw_cv": 29 | { 30 | "line_thickness" : 2 31 | } 32 | } -------------------------------------------------------------------------------- /assets/hailo_yolov5_segmentation.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_segmentation": 13 | { 14 | "show_results": true, 15 | "flush_results": true, 16 | "confidence_threshold": 
0.4, 17 | 18 | "hef_file_8L": "/usr/share/hailo-models/yolov5n_seg_h8l_mz.hef", 19 | "hef_file_8": "/usr/share/hailo-models/yolov8m_seg_h8.hef", 20 | "hailopp_config_file": "/usr/share/hailo-models/yolov5seg.json" 21 | } 22 | } -------------------------------------------------------------------------------- /assets/hailo_yolov6_inference.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_inference": 13 | { 14 | "hef_file_8L": "/usr/share/hailo-models/yolov6n_h8l.hef", 15 | "hef_file_8": "/usr/share/hailo-models/yolov6n_h8.hef", 16 | "max_detections": 20, 17 | "threshold": 0.4, 18 | 19 | "temporal_filter": 20 | { 21 | "tolerance": 0.1, 22 | "factor": 0.75, 23 | "visible_frames": 6, 24 | "hidden_frames": 3 25 | } 26 | }, 27 | 28 | "object_detect_draw_cv": 29 | { 30 | "line_thickness" : 2 31 | } 32 | } -------------------------------------------------------------------------------- /assets/hailo_yolov8_inference.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_inference": 13 | { 14 | "hef_file_8L": "/usr/share/hailo-models/yolov8s_h8l.hef", 15 | "hef_file_8": "/usr/share/hailo-models/yolov8s_h8.hef", 16 | "max_detections": 20, 17 | "threshold": 0.4, 18 | 19 | "temporal_filter": 20 | { 21 | "tolerance": 0.1, 22 | "factor": 0.75, 23 | "visible_frames": 6, 24 | "hidden_frames": 3 25 | } 26 | }, 27 | 28 | "object_detect_draw_cv": 29 | { 30 | "line_thickness" : 2 31 | } 32 | } -------------------------------------------------------------------------------- /assets/hailo_yolov8_pose.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | 
"width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_pose": 13 | { 14 | "hef_file_8L": "/usr/share/hailo-models/yolov8s_pose_h8l_pi.hef", 15 | "hef_file_8": "/usr/share/hailo-models/yolov8s_pose_h8.hef" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /assets/hailo_yolox_inference.json: -------------------------------------------------------------------------------- 1 | { 2 | "rpicam-apps": 3 | { 4 | "lores": 5 | { 6 | "width": 640, 7 | "height": 640, 8 | "format": "rgb" 9 | } 10 | }, 11 | 12 | "hailo_yolo_inference": 13 | { 14 | "hef_file": "/usr/share/hailo-models/yolox_s_leaky_h8l_rpi.hef", 15 | "max_detections": 20, 16 | "threshold": 0.4, 17 | 18 | "temporal_filter": 19 | { 20 | "tolerance": 0.1, 21 | "factor": 0.8, 22 | "visible_frames": 6, 23 | "hidden_frames": 3 24 | } 25 | }, 26 | 27 | "object_detect_draw_cv": 28 | { 29 | "line_thickness" : 2 30 | } 31 | } -------------------------------------------------------------------------------- /assets/hdr.json: -------------------------------------------------------------------------------- 1 | { 2 | "hdr" : 3 | { 4 | "num_frames" : 8, 5 | "lp_filter_strength" : 0.2, 6 | "lp_filter_threshold" : [ 0, 10.0 , 2048, 205.0, 4095, 205.0 ], 7 | "global_tonemap_points" : 8 | [ 9 | { "q": 0.1, "width": 0.05, "target": 0.15, "max_up": 5.0, "max_down": 0.5 }, 10 | { "q": 0.5, "width": 0.05, "target": 0.45, "max_up": 5.0, "max_down": 0.5 }, 11 | { "q": 0.8, "width": 0.05, "target": 0.7, "max_up": 5.0, "max_down": 0.5 } 12 | ], 13 | "global_tonemap_strength" : 1.0, 14 | "local_pos_strength" : [ 0, 6.0, 1024, 2.0, 4095, 2.0 ], 15 | "local_neg_strength" : [ 0, 4.0, 1024, 1.5, 4095, 1.5 ], 16 | "local_tonemap_strength" : 1.0, 17 | "local_colour_scale" : 0.8 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /assets/imx500_mobilenet_ssd.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "imx500_object_detection": 3 | { 4 | "max_detections" : 5, 5 | "threshold" : 0.6, 6 | "network_file": "/usr/share/imx500-models/imx500_network_ssd_mobilenetv2_fpnlite_320x320_pp.rpk", 7 | 8 | "save_input_tensor": 9 | { 10 | "filename": "/home/pi/input_tensor.raw", 11 | "num_tensors": 10, 12 | "norm_val": [384, 384, 384, 0], 13 | "norm_shift": [0, 0, 0, 0] 14 | }, 15 | 16 | "temporal_filter": 17 | { 18 | "tolerance": 0.1, 19 | "factor": 0.2, 20 | "visible_frames": 4, 21 | "hidden_frames": 2 22 | }, 23 | 24 | "classes": 25 | [ 26 | "person", 27 | "bicycle", 28 | "car", 29 | "motorcycle", 30 | "airplane", 31 | "bus", 32 | "train", 33 | "truck", 34 | "boat", 35 | "traffic light", 36 | "fire hydrant", 37 | "-", 38 | "stop sign", 39 | "parking meter", 40 | "bench", 41 | "bird", 42 | "cat", 43 | "dog", 44 | "horse", 45 | "sheep", 46 | "cow", 47 | "elephant", 48 | "bear", 49 | "zebra", 50 | "giraffe", 51 | "-", 52 | "backpack", 53 | "umbrella", 54 | "-", 55 | "-", 56 | "handbag", 57 | "tie", 58 | "suitcase", 59 | "frisbee", 60 | "skis", 61 | "snowboard", 62 | "sports ball", 63 | "kite", 64 | "baseball bat", 65 | "baseball glove", 66 | "skateboard", 67 | "surfboard", 68 | "tennis racket", 69 | "bottle", 70 | "-", 71 | "wine glass", 72 | "cup", 73 | "fork", 74 | "knife", 75 | "spoon", 76 | "bowl", 77 | "banana", 78 | "apple", 79 | "sandwich", 80 | "orange", 81 | "broccoli", 82 | "carrot", 83 | "hot dog", 84 | "pizza", 85 | "donut", 86 | "cake", 87 | "chair", 88 | "couch", 89 | "potted plant", 90 | "bed", 91 | "-", 92 | "dining table", 93 | "-", 94 | "-", 95 | "toilet", 96 | "-", 97 | "tv", 98 | "laptop", 99 | "mouse", 100 | "remote", 101 | "keyboard", 102 | "cell phone", 103 | "microwave", 104 | "oven", 105 | "toaster", 106 | "sink", 107 | "refrigerator", 108 | "-", 109 | "book", 110 | "clock", 111 | "vase", 112 | "scissors", 113 | "teddy bear", 114 | "hair drier", 115 | "toothbrush" 
116 | ] 117 | }, 118 | 119 | "object_detect_draw_cv": 120 | { 121 | "line_thickness" : 2 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /assets/imx500_posenet.json: -------------------------------------------------------------------------------- 1 | { 2 | "imx500_posenet": 3 | { 4 | "max_detections" : 5, 5 | "threshold" : 0.4, 6 | "offset_refinement_steps": 5, 7 | "nms_radius": 10.0, 8 | "network_file": "/usr/share/imx500-models/imx500_network_posenet.rpk", 9 | 10 | "save_input_tensor": 11 | { 12 | "filename": "/home/pi/posenet_input_tensor.raw", 13 | "num_tensors": 10 14 | }, 15 | 16 | "temporal_filter": 17 | { 18 | "tolerance": 0.3, 19 | "factor": 0.3, 20 | "visible_frames": 8, 21 | "hidden_frames": 2 22 | } 23 | }, 24 | 25 | "plot_pose_cv": 26 | { 27 | "confidence_threshold" : 0.2 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /assets/motion_detect.json: -------------------------------------------------------------------------------- 1 | { 2 | "motion_detect" : 3 | { 4 | "roi_x" : 0.1, 5 | "roi_y" : 0.1, 6 | "roi_width" : 0.8, 7 | "roi_height" : 0.8, 8 | "difference_m" : 0.1, 9 | "difference_c" : 10, 10 | "region_threshold" : 0.005, 11 | "frame_period" : 5, 12 | "hskip" : 2, 13 | "vskip" : 2, 14 | "verbose" : 0 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /assets/negate.json: -------------------------------------------------------------------------------- 1 | { 2 | "negate": 3 | { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /assets/object_classify_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "object_classify_tf": 3 | { 4 | "number_of_results" : 2, 5 | "number_of_threads" : 2, 6 | "refresh_rate" : 30, 7 | "threshold_high" : 0.1, 8 | "threshold_low" : 0.05, 9 | "model_file" : 
"/home/pi/models/mobilenet_v1_1.0_224_quant.tflite", 10 | "labels_file" : "/home/pi/models/labels.txt", 11 | "display_labels" : 1, 12 | "verbose" : 1 13 | }, 14 | "annotate_cv" : 15 | { 16 | "text" : "Frame %frame exp %exp ag %ag dg %dg", 17 | "fg" : 255, 18 | "bg" : 0, 19 | "scale" : 1.0, 20 | "thickness" : 2, 21 | "alpha" : 0.3 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /assets/object_detect_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "object_detect_tf": 3 | { 4 | "number_of_threads" : 2, 5 | "refresh_rate" : 10, 6 | "confidence_threshold" : 0.5, 7 | "overlap_threshold" : 0.5, 8 | "model_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/detect.tflite", 9 | "labels_file" : "/home/pi/models/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29/labelmap.txt", 10 | "verbose" : 1 11 | }, 12 | "object_detect_draw_cv": 13 | { 14 | "line_thickness" : 2 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /assets/pose_estimation_tf.json: -------------------------------------------------------------------------------- 1 | { 2 | "pose_estimation_tf": 3 | { 4 | "refresh_rate" : 5, 5 | "model_file" : "/home/pi/models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite" 6 | }, 7 | "plot_pose_cv": 8 | { 9 | "confidence_threshold" : -0.5 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /assets/segmentation_labels.txt: -------------------------------------------------------------------------------- 1 | background 2 | aeroplane 3 | bicycle 4 | bird 5 | boat 6 | bottle 7 | bus 8 | car 9 | cat 10 | chair 11 | cow 12 | diningtable 13 | dog 14 | horse 15 | motorbike 16 | person 17 | pottedplant 18 | sheep 19 | sofa 20 | train 21 | tv 22 | -------------------------------------------------------------------------------- /assets/segmentation_tf.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "segmentation_tf": 3 | { 4 | "number_of_threads" : 2, 5 | "refresh_rate" : 10, 6 | "model_file" : "/home/pi/models/lite-model_deeplabv3_1_metadata_2.tflite", 7 | "labels_file" : "/home/pi/models/segmentation_labels.txt", 8 | "verbose" : 1 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /assets/sobel_cv.json: -------------------------------------------------------------------------------- 1 | { 2 | "sobel_cv": 3 | { 4 | "ksize":5 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /assets/yolov5_personface.json: -------------------------------------------------------------------------------- 1 | { 2 | "iou_threshold": 0.5, 3 | "detection_threshold": 0.5, 4 | "output_activation": "none", 5 | "label_offset":1, 6 | "max_boxes":10000, 7 | "anchors": [ 8 | [ 9 | 116, 10 | 90, 11 | 156, 12 | 198, 13 | 373, 14 | 326 15 | ], 16 | [ 17 | 30, 18 | 61, 19 | 62, 20 | 45, 21 | 59, 22 | 119 23 | ], 24 | [ 25 | 10, 26 | 13, 27 | 16, 28 | 30, 29 | 33, 30 | 23 31 | ] 32 | ], 33 | "labels": [ 34 | "unlabeled", 35 | "person", 36 | "face" 37 | ] 38 | } -------------------------------------------------------------------------------- /assets/yolov5seg.json: -------------------------------------------------------------------------------- 1 | { 2 | "iou_threshold": 0.6, 3 | "score_threshold": 0.25, 4 | "outputs_size": [20, 40, 80], 5 | "outputs_name": ["yolov5n_seg/conv63", "yolov5n_seg/conv48", "yolov5n_seg/conv55", "yolov5n_seg/conv61"], 6 | "anchors": [ 7 | [ 8 | 116, 9 | 90, 10 | 156, 11 | 198, 12 | 373, 13 | 326 14 | ], 15 | [ 16 | 30, 17 | 61, 18 | 62, 19 | 45, 20 | 59, 21 | 119 22 | ], 23 | [ 24 | 10, 25 | 13, 26 | 16, 27 | 30, 28 | 33, 29 | 23 30 | ] 31 | ], 32 | "input_shape": [640, 640], 33 | "strides": [32, 16, 8] 34 | } 
-------------------------------------------------------------------------------- /core/buffer_sync.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2023, Raspberry Pi Ltd 4 | * 5 | * buffer_sync.cpp - Buffer coherency handling 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "core/buffer_sync.hpp" 13 | #include "core/rpicam_app.hpp" 14 | #include "core/logging.hpp" 15 | 16 | BufferWriteSync::BufferWriteSync(RPiCamApp *app, libcamera::FrameBuffer *fb) 17 | : fb_(fb) 18 | { 19 | struct dma_buf_sync dma_sync {}; 20 | dma_sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW; 21 | 22 | auto it = app->mapped_buffers_.find(fb_); 23 | if (it == app->mapped_buffers_.end()) 24 | { 25 | LOG_ERROR("failed to find buffer in BufferWriteSync"); 26 | return; 27 | } 28 | 29 | int ret = ::ioctl(fb_->planes()[0].fd.get(), DMA_BUF_IOCTL_SYNC, &dma_sync); 30 | if (ret) 31 | { 32 | LOG_ERROR("failed to lock-sync-write dma buf"); 33 | return; 34 | } 35 | 36 | planes_ = it->second; 37 | } 38 | 39 | BufferWriteSync::~BufferWriteSync() 40 | { 41 | struct dma_buf_sync dma_sync {}; 42 | dma_sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW; 43 | 44 | int ret = ::ioctl(fb_->planes()[0].fd.get(), DMA_BUF_IOCTL_SYNC, &dma_sync); 45 | if (ret) 46 | LOG_ERROR("failed to unlock-sync-write dma buf"); 47 | } 48 | 49 | const std::vector> &BufferWriteSync::Get() const 50 | { 51 | return planes_; 52 | } 53 | 54 | BufferReadSync::BufferReadSync(RPiCamApp *app, libcamera::FrameBuffer *fb) 55 | { 56 | auto it = app->mapped_buffers_.find(fb); 57 | if (it == app->mapped_buffers_.end()) 58 | { 59 | LOG_ERROR("failed to find buffer in BufferReadSync"); 60 | return; 61 | } 62 | 63 | // DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ happens when the request completes, 64 | // so nothing to do here but cache the planes map. 
65 | planes_ = it->second; 66 | } 67 | 68 | BufferReadSync::~BufferReadSync() 69 | { 70 | // DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ happens when we resend the buffer 71 | // in the next request, so nothing to do here. 72 | } 73 | 74 | const std::vector> &BufferReadSync::Get() const 75 | { 76 | return planes_; 77 | } 78 | -------------------------------------------------------------------------------- /core/buffer_sync.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2023 Raspberry Pi Ltd 4 | * 5 | * buffer_sync.hpp - Buffer coherency handling 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | class RPiCamApp; 13 | 14 | class BufferWriteSync 15 | { 16 | public: 17 | BufferWriteSync(RPiCamApp *app, libcamera::FrameBuffer *fb); 18 | ~BufferWriteSync(); 19 | 20 | const std::vector> &Get() const; 21 | 22 | private: 23 | libcamera::FrameBuffer *fb_; 24 | std::vector> planes_; 25 | }; 26 | 27 | class BufferReadSync 28 | { 29 | public: 30 | BufferReadSync(RPiCamApp *app, libcamera::FrameBuffer *fb); 31 | ~BufferReadSync(); 32 | 33 | const std::vector> &Get() const; 34 | 35 | private: 36 | std::vector> planes_; 37 | }; 38 | -------------------------------------------------------------------------------- /core/completed_request.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * completed_request.hpp - structure holding request results. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | #include "core/metadata.hpp" 16 | 17 | struct CompletedRequest 18 | { 19 | using BufferMap = libcamera::Request::BufferMap; 20 | using ControlList = libcamera::ControlList; 21 | using Request = libcamera::Request; 22 | 23 | CompletedRequest(unsigned int seq, Request *r) 24 | : sequence(seq), buffers(r->buffers()), metadata(r->metadata()), request(r) 25 | { 26 | r->reuse(); 27 | } 28 | unsigned int sequence; 29 | BufferMap buffers; 30 | ControlList metadata; 31 | Request *request; 32 | float framerate; 33 | Metadata post_process_metadata; 34 | }; 35 | 36 | using CompletedRequestPtr = std::shared_ptr; 37 | -------------------------------------------------------------------------------- /core/dma_heaps.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2023, Raspberry Pi Ltd 4 | * 5 | * dma_heaps.cpp - Helper class for dma-heap allocations. 6 | */ 7 | 8 | #include "dma_heaps.hpp" 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "core/logging.hpp" 18 | 19 | namespace 20 | { 21 | /* 22 | * /dev/dma-heap/vidbuf_cached sym links to either the system heap (Pi 5) or the 23 | * CMA allocator (Pi 4 and below). If missing, fallback to the CMA allocator. 
24 | */ 25 | const std::vector heapNames 26 | { 27 | "/dev/dma_heap/vidbuf_cached", 28 | "/dev/dma_heap/linux,cma", 29 | }; 30 | 31 | } // namespace 32 | 33 | DmaHeap::DmaHeap() 34 | { 35 | for (const char *name : heapNames) 36 | { 37 | int ret = ::open(name, O_RDWR | O_CLOEXEC, 0); 38 | if (ret < 0) 39 | { 40 | LOG(2, "Failed to open " << name << ": " << ret); 41 | continue; 42 | } 43 | 44 | dmaHeapHandle_ = libcamera::UniqueFD(ret); 45 | break; 46 | } 47 | 48 | if (!dmaHeapHandle_.isValid()) 49 | LOG_ERROR("Could not open any dmaHeap device"); 50 | } 51 | 52 | DmaHeap::~DmaHeap() 53 | { 54 | } 55 | 56 | libcamera::UniqueFD DmaHeap::alloc(const char *name, std::size_t size) const 57 | { 58 | int ret; 59 | 60 | if (!name) 61 | return {}; 62 | 63 | struct dma_heap_allocation_data alloc = {}; 64 | 65 | alloc.len = size; 66 | alloc.fd_flags = O_CLOEXEC | O_RDWR; 67 | 68 | ret = ::ioctl(dmaHeapHandle_.get(), DMA_HEAP_IOCTL_ALLOC, &alloc); 69 | if (ret < 0) 70 | { 71 | LOG_ERROR("dmaHeap allocation failure for " << name); 72 | return {}; 73 | } 74 | 75 | libcamera::UniqueFD allocFd(alloc.fd); 76 | ret = ::ioctl(allocFd.get(), DMA_BUF_SET_NAME, name); 77 | if (ret < 0) 78 | { 79 | LOG_ERROR("dmaHeap naming failure for " << name); 80 | return {}; 81 | } 82 | 83 | return allocFd; 84 | } 85 | -------------------------------------------------------------------------------- /core/dma_heaps.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2023, Raspberry Pi Ltd 4 | * 5 | * dma_heaps.h - Helper class for dma-heap allocations. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | class DmaHeap 15 | { 16 | public: 17 | DmaHeap(); 18 | ~DmaHeap(); 19 | bool isValid() const { return dmaHeapHandle_.isValid(); } 20 | libcamera::UniqueFD alloc(const char *name, std::size_t size) const; 21 | 22 | private: 23 | libcamera::UniqueFD dmaHeapHandle_; 24 | }; 25 | -------------------------------------------------------------------------------- /core/frame_info.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * frame_info.hpp - Frame info class for libcamera apps 6 | */ 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | #include "completed_request.hpp" 16 | 17 | struct FrameInfo 18 | { 19 | FrameInfo(const CompletedRequestPtr &completed_request) 20 | : exposure_time(0.0), digital_gain(0.0), colour_gains({ { 0.0f, 0.0f } }), focus(0.0), aelock(false), 21 | lens_position(-1.0), af_state(0), sensor_temp(0.0) 22 | { 23 | const libcamera::ControlList &ctrls = completed_request->metadata; 24 | 25 | sequence = completed_request->sequence; 26 | fps = completed_request->framerate; 27 | 28 | auto exp = ctrls.get(libcamera::controls::ExposureTime); 29 | if (exp) 30 | exposure_time = *exp; 31 | 32 | auto ag = ctrls.get(libcamera::controls::AnalogueGain); 33 | if (ag) 34 | analogue_gain = *ag; 35 | 36 | auto dg = ctrls.get(libcamera::controls::DigitalGain); 37 | if (dg) 38 | digital_gain = *dg; 39 | 40 | auto cg = ctrls.get(libcamera::controls::ColourGains); 41 | if (cg) 42 | { 43 | colour_gains[0] = (*cg)[0], colour_gains[1] = (*cg)[1]; 44 | } 45 | 46 | auto fom = ctrls.get(libcamera::controls::FocusFoM); 47 | if (fom) 48 | focus = *fom; 49 | 50 | auto ae = ctrls.get(libcamera::controls::AeState); 51 | if (ae) 52 | aelock = *ae == libcamera::controls::AeStateConverged; 53 | 54 | auto lp = 
ctrls.get(libcamera::controls::LensPosition); 55 | if (lp) 56 | lens_position = *lp; 57 | 58 | auto temp = ctrls.get(libcamera::controls::SensorTemperature); 59 | if (temp) 60 | sensor_temp = *temp; 61 | 62 | auto afs = ctrls.get(libcamera::controls::AfState); 63 | if (afs) 64 | af_state = *afs; 65 | } 66 | 67 | std::string ToString(const std::string &info_string) const 68 | { 69 | std::string parsed(info_string); 70 | 71 | for (auto const &t : tokens) 72 | { 73 | std::size_t pos = parsed.find(t); 74 | if (pos != std::string::npos) 75 | { 76 | std::stringstream value; 77 | value << std::fixed << std::setprecision(2); 78 | 79 | if (t == "%frame") 80 | value << sequence; 81 | else if (t == "%fps") 82 | value << fps; 83 | else if (t == "%exp") 84 | value << exposure_time; 85 | else if (t == "%ag") 86 | value << analogue_gain; 87 | else if (t == "%dg") 88 | value << digital_gain; 89 | else if (t == "%rg") 90 | value << colour_gains[0]; 91 | else if (t == "%bg") 92 | value << colour_gains[1]; 93 | else if (t == "%focus") 94 | value << focus; 95 | else if (t == "%aelock") 96 | value << aelock; 97 | else if (t == "%lp") 98 | value << lens_position; 99 | else if (t == "%temp") 100 | value << sensor_temp; 101 | else if (t == "%afstate") 102 | { 103 | switch (af_state) 104 | { 105 | case libcamera::controls::AfStateIdle: 106 | value << "idle"; 107 | break; 108 | case libcamera::controls::AfStateScanning: 109 | value << "scanning"; 110 | break; 111 | case libcamera::controls::AfStateFocused: 112 | value << "focused"; 113 | break; 114 | default: 115 | value << "failed"; 116 | } 117 | } 118 | 119 | parsed.replace(pos, t.length(), value.str()); 120 | } 121 | } 122 | 123 | return parsed; 124 | } 125 | 126 | unsigned int sequence; 127 | float exposure_time; 128 | float analogue_gain; 129 | float digital_gain; 130 | std::array colour_gains; 131 | float focus; 132 | float fps; 133 | bool aelock; 134 | float lens_position; 135 | int af_state; 136 | float sensor_temp; 137 | 138 | 
private: 139 | // Info text tokens. 140 | inline static const std::string tokens[] = { 141 | "%frame", "%fps", "%exp", "%ag", "%dg", "%rg", "%bg", "%focus", 142 | "%aelock","%lp", "%temp", "%afstate" 143 | }; 144 | }; 145 | -------------------------------------------------------------------------------- /core/logging.hpp: -------------------------------------------------------------------------------- 1 | #include "core/rpicam_app.hpp" 2 | 3 | #define LOG(level, text) \ 4 | do \ 5 | { \ 6 | if (RPiCamApp::GetVerbosity() >= level) \ 7 | std::cerr << text << std::endl; \ 8 | } while (0) 9 | #define LOG_ERROR(text) std::cerr << text << std::endl 10 | -------------------------------------------------------------------------------- /core/meson.build: -------------------------------------------------------------------------------- 1 | boost_dep = dependency('boost', modules : ['program_options'], required : true) 2 | thread_dep = dependency('threads', required : true) 3 | 4 | rpicam_app_dep += [boost_dep, thread_dep] 5 | 6 | rpicam_app_src += files([ 7 | 'buffer_sync.cpp', 8 | 'dma_heaps.cpp', 9 | 'rpicam_app.cpp', 10 | 'options.cpp', 11 | 'post_processor.cpp', 12 | ]) 13 | 14 | core_headers = files([ 15 | 'buffer_sync.hpp', 16 | 'completed_request.hpp', 17 | 'dma_heaps.hpp', 18 | 'frame_info.hpp', 19 | 'rpicam_app.hpp', 20 | 'rpicam_encoder.hpp', 21 | 'logging.hpp', 22 | 'metadata.hpp', 23 | 'options.hpp', 24 | 'post_processor.hpp', 25 | 'still_options.hpp', 26 | 'stream_info.hpp', 27 | 'version.hpp', 28 | 'video_options.hpp', 29 | ]) 30 | 31 | install_headers(core_headers, subdir: meson.project_name() / 'core') 32 | -------------------------------------------------------------------------------- /core/metadata.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019-2021, Raspberry Pi (Trading) Limited 4 | * 5 | * metadata.hpp - general metadata class 6 | */ 7 | 
#pragma once 8 | 9 | // A simple class for carrying arbitrary metadata, for example about an image. 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | class Metadata 17 | { 18 | public: 19 | Metadata() = default; 20 | 21 | Metadata(Metadata const &other) 22 | { 23 | std::scoped_lock other_lock(other.mutex_); 24 | data_ = other.data_; 25 | } 26 | 27 | Metadata(Metadata &&other) 28 | { 29 | std::scoped_lock other_lock(other.mutex_); 30 | data_ = std::move(other.data_); 31 | other.data_.clear(); 32 | } 33 | 34 | template 35 | void Set(std::string const &tag, T &&value) 36 | { 37 | std::scoped_lock lock(mutex_); 38 | data_.insert_or_assign(tag, std::forward(value)); 39 | } 40 | 41 | template 42 | int Get(std::string const &tag, T &value) const 43 | { 44 | std::scoped_lock lock(mutex_); 45 | auto it = data_.find(tag); 46 | if (it == data_.end()) 47 | return -1; 48 | value = std::any_cast(it->second); 49 | return 0; 50 | } 51 | 52 | void Clear() 53 | { 54 | std::scoped_lock lock(mutex_); 55 | data_.clear(); 56 | } 57 | 58 | Metadata &operator=(Metadata const &other) 59 | { 60 | std::scoped_lock lock(mutex_, other.mutex_); 61 | data_ = other.data_; 62 | return *this; 63 | } 64 | 65 | Metadata &operator=(Metadata &&other) 66 | { 67 | std::scoped_lock lock(mutex_, other.mutex_); 68 | data_ = std::move(other.data_); 69 | other.data_.clear(); 70 | return *this; 71 | } 72 | 73 | void Merge(Metadata &other) 74 | { 75 | std::scoped_lock lock(mutex_, other.mutex_); 76 | data_.merge(other.data_); 77 | } 78 | 79 | template 80 | T *GetLocked(std::string const &tag) 81 | { 82 | // This allows in-place access to the Metadata contents, 83 | // for which you should be holding the lock. 84 | auto it = data_.find(tag); 85 | if (it == data_.end()) 86 | return nullptr; 87 | return std::any_cast(&it->second); 88 | } 89 | 90 | template 91 | void SetLocked(std::string const &tag, T &&value) 92 | { 93 | // Use this only if you're holding the lock yourself. 
94 | data_.insert_or_assign(tag, std::forward(value)); 95 | } 96 | 97 | // Note: use of (lowercase) lock and unlock means you can create scoped 98 | // locks with the standard lock classes. 99 | // e.g. std::lock_guard lock(metadata) 100 | void lock() { mutex_.lock(); } 101 | void unlock() { mutex_.unlock(); } 102 | 103 | private: 104 | mutable std::mutex mutex_; 105 | std::map data_; 106 | }; 107 | -------------------------------------------------------------------------------- /core/options.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * options.hpp - common program options 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include "core/logging.hpp" 24 | #include "core/version.hpp" 25 | 26 | static constexpr double DEFAULT_FRAMERATE = 30.0; 27 | 28 | struct Mode 29 | { 30 | Mode() : Mode(0, 0, 0, true) {} 31 | Mode(unsigned int w, unsigned int h, unsigned int b, bool p) : width(w), height(h), bit_depth(b), packed(p), framerate(0) {} 32 | Mode(std::string const &mode_string); 33 | unsigned int width; 34 | unsigned int height; 35 | unsigned int bit_depth; 36 | bool packed; 37 | double framerate; 38 | libcamera::Size Size() const { return libcamera::Size(width, height); } 39 | std::string ToString() const; 40 | void update(const libcamera::Size &size, const std::optional &fps); 41 | }; 42 | 43 | template 44 | struct TimeVal 45 | { 46 | TimeVal() : value(0) {} 47 | 48 | void set(const std::string &s) 49 | { 50 | static const std::map match 51 | { 52 | { "min", std::chrono::minutes(1) }, 53 | { "sec", std::chrono::seconds(1) }, 54 | { "s", std::chrono::seconds(1) }, 55 | { "ms", std::chrono::milliseconds(1) }, 56 | { "us", std::chrono::microseconds(1) }, 57 | { "ns", 
std::chrono::nanoseconds(1) }, 58 | }; 59 | 60 | try 61 | { 62 | std::size_t end_pos; 63 | float f = std::stof(s, &end_pos); 64 | value = std::chrono::duration_cast(f * DEFAULT { 1 }); 65 | 66 | for (const auto &m : match) 67 | { 68 | auto found = s.find(m.first, end_pos); 69 | if (found != end_pos || found + m.first.length() != s.length()) 70 | continue; 71 | value = std::chrono::duration_cast(f * m.second); 72 | break; 73 | } 74 | } 75 | catch (std::exception const &e) 76 | { 77 | throw std::runtime_error("Invalid time string provided"); 78 | } 79 | } 80 | 81 | template 82 | int64_t get() const 83 | { 84 | return std::chrono::duration_cast(value).count(); 85 | } 86 | 87 | explicit constexpr operator bool() const 88 | { 89 | return !!value.count(); 90 | } 91 | 92 | std::chrono::nanoseconds value; 93 | }; 94 | 95 | enum class Platform 96 | { 97 | MISSING, 98 | UNKNOWN, 99 | LEGACY, 100 | VC4, 101 | PISP, 102 | }; 103 | 104 | struct Options 105 | { 106 | Options(); 107 | virtual ~Options() {} 108 | 109 | bool help; 110 | bool version; 111 | bool list_cameras; 112 | unsigned int verbose; 113 | TimeVal timeout; 114 | std::string config_file; 115 | std::string output; 116 | std::string post_process_file; 117 | std::string post_process_libs; 118 | unsigned int width; 119 | unsigned int height; 120 | bool nopreview; 121 | std::string preview; 122 | bool fullscreen; 123 | unsigned int preview_x, preview_y, preview_width, preview_height; 124 | libcamera::Transform transform; 125 | std::string roi; 126 | float roi_x, roi_y, roi_width, roi_height; 127 | TimeVal shutter; 128 | float gain; 129 | std::string metering; 130 | int metering_index; 131 | std::string exposure; 132 | int exposure_index; 133 | float ev; 134 | std::string awb; 135 | int awb_index; 136 | std::string awbgains; 137 | float awb_gain_r; 138 | float awb_gain_b; 139 | bool flush; 140 | unsigned int wrap; 141 | float brightness; 142 | float contrast; 143 | float saturation; 144 | float sharpness; 145 | 
std::optional framerate; 146 | std::string denoise; 147 | std::string info_text; 148 | unsigned int viewfinder_width; 149 | unsigned int viewfinder_height; 150 | std::string tuning_file; 151 | bool qt_preview; 152 | unsigned int lores_width; 153 | unsigned int lores_height; 154 | bool lores_par; 155 | unsigned int camera; 156 | std::string mode_string; 157 | Mode mode; 158 | std::string viewfinder_mode_string; 159 | Mode viewfinder_mode; 160 | unsigned int buffer_count; 161 | unsigned int viewfinder_buffer_count; 162 | std::string afMode; 163 | int afMode_index; 164 | std::string afRange; 165 | int afRange_index; 166 | std::string afSpeed; 167 | int afSpeed_index; 168 | std::string afWindow; 169 | float afWindow_x, afWindow_y, afWindow_width, afWindow_height; 170 | std::optional lens_position; 171 | bool set_default_lens_position; 172 | bool af_on_capture; 173 | std::string metadata; 174 | std::string metadata_format; 175 | std::string hdr; 176 | TimeVal flicker_period; 177 | bool no_raw; 178 | 179 | virtual bool Parse(int argc, char *argv[]); 180 | virtual void Print() const; 181 | 182 | void SetApp(RPiCamApp *app) { app_ = app; } 183 | Platform GetPlatform() const { return platform_; }; 184 | 185 | protected: 186 | boost::program_options::options_description options_; 187 | 188 | private: 189 | bool hflip_; 190 | bool vflip_; 191 | int rotation_; 192 | float framerate_; 193 | std::string lens_position_; 194 | std::string timeout_; 195 | std::string shutter_; 196 | std::string flicker_period_; 197 | RPiCamApp *app_; 198 | Platform platform_ = Platform::UNKNOWN; 199 | }; 200 | -------------------------------------------------------------------------------- /core/post_processor.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * post_processor.hpp - Post processor definition. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include "core/completed_request.hpp" 17 | #include "core/logging.hpp" 18 | 19 | namespace libcamera 20 | { 21 | struct StreamConfiguration; 22 | } 23 | 24 | class RPiCamApp; 25 | 26 | using namespace std::chrono_literals; 27 | class PostProcessingStage; 28 | using PostProcessorCallback = std::function; 29 | using StreamConfiguration = libcamera::StreamConfiguration; 30 | typedef std::unique_ptr StagePtr; 31 | 32 | // Dynamic postprocessing library helper. 33 | class PostProcessingLib 34 | { 35 | public: 36 | PostProcessingLib(const std::string &lib); 37 | PostProcessingLib(PostProcessingLib &&other); 38 | PostProcessingLib(const PostProcessingLib &other) = delete; 39 | PostProcessingLib &operator=(const PostProcessingLib &other) = delete; 40 | ~PostProcessingLib(); 41 | 42 | const void *GetSymbol(const std::string &symbol); 43 | 44 | private: 45 | void *lib_ = nullptr; 46 | std::map symbol_map_; 47 | std::mutex lock_; 48 | }; 49 | 50 | class PostProcessor 51 | { 52 | public: 53 | PostProcessor(RPiCamApp *app); 54 | 55 | ~PostProcessor(); 56 | 57 | void LoadModules(const std::string &lib_dir); 58 | 59 | void Read(std::string const &filename); 60 | 61 | void SetCallback(PostProcessorCallback callback); 62 | 63 | void AdjustConfig(std::string const &use_case, StreamConfiguration *config); 64 | 65 | void Configure(); 66 | 67 | void Start(); 68 | 69 | void Process(CompletedRequestPtr &request); 70 | 71 | void Stop(); 72 | 73 | void Teardown(); 74 | 75 | private: 76 | PostProcessingStage *createPostProcessingStage(char const *name); 77 | 78 | RPiCamApp *app_; 79 | std::vector stages_; 80 | std::vector dynamic_stages_; 81 | void outputThread(); 82 | 83 | std::queue requests_; 84 | std::queue> futures_; 85 | std::thread output_thread_; 86 | bool quit_; 87 | PostProcessorCallback callback_; 88 | std::mutex mutex_; 89 | std::condition_variable cv_; 90 | }; 91 | 
-------------------------------------------------------------------------------- /core/rpicam_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * rpicam_encoder.cpp - libcamera video encoding class. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include "core/rpicam_app.hpp" 11 | #include "core/stream_info.hpp" 12 | #include "core/video_options.hpp" 13 | 14 | #include "encoder/encoder.hpp" 15 | 16 | typedef std::function EncodeOutputReadyCallback; 17 | typedef std::function MetadataReadyCallback; 18 | 19 | class RPiCamEncoder : public RPiCamApp 20 | { 21 | public: 22 | using Stream = libcamera::Stream; 23 | using FrameBuffer = libcamera::FrameBuffer; 24 | 25 | RPiCamEncoder() : RPiCamApp(std::make_unique()) {} 26 | 27 | void StartEncoder() 28 | { 29 | createEncoder(); 30 | encoder_->SetInputDoneCallback(std::bind(&RPiCamEncoder::encodeBufferDone, this, std::placeholders::_1)); 31 | encoder_->SetOutputReadyCallback(encode_output_ready_callback_); 32 | 33 | #ifndef DISABLE_RPI_FEATURES 34 | // Set up the encode function to wait for synchronisation with another camera system, 35 | // when this has been requested in the options. 36 | VideoOptions const *options = GetOptions(); 37 | libcamera::ControlList cl; 38 | if (options->sync == 0) 39 | cl.set(libcamera::controls::rpi::SyncMode, libcamera::controls::rpi::SyncModeOff); 40 | else if (options->sync == 1) 41 | cl.set(libcamera::controls::rpi::SyncMode, libcamera::controls::rpi::SyncModeServer); 42 | else if (options->sync == 2) 43 | cl.set(libcamera::controls::rpi::SyncMode, libcamera::controls::rpi::SyncModeClient); 44 | SetControls(cl); 45 | #endif 46 | } 47 | // This is callback when the encoder gives you the encoded output data. 
48 | void SetEncodeOutputReadyCallback(EncodeOutputReadyCallback callback) { encode_output_ready_callback_ = callback; } 49 | void SetMetadataReadyCallback(MetadataReadyCallback callback) { metadata_ready_callback_ = callback; } 50 | bool EncodeBuffer(CompletedRequestPtr &completed_request, Stream *stream) 51 | { 52 | assert(encoder_); 53 | 54 | #ifndef DISABLE_RPI_FEATURES 55 | // If sync was enabled, and SyncReady is still "false" then we must skip this frame. Tell our 56 | // caller through the return value that we're not yet encoding anything. 57 | if (GetOptions()->sync && !completed_request->metadata.get(controls::rpi::SyncReady).value_or(false)) 58 | return false; 59 | #endif 60 | 61 | StreamInfo info = GetStreamInfo(stream); 62 | FrameBuffer *buffer = completed_request->buffers[stream]; 63 | BufferReadSync r(this, buffer); 64 | libcamera::Span span = r.Get()[0]; 65 | void *mem = span.data(); 66 | if (!buffer || !mem) 67 | throw std::runtime_error("no buffer to encode"); 68 | auto ts = completed_request->metadata.get(controls::SensorTimestamp); 69 | int64_t timestamp_ns = ts ? *ts : buffer->metadata().timestamp; 70 | { 71 | std::lock_guard lock(encode_buffer_queue_mutex_); 72 | encode_buffer_queue_.push(completed_request); // creates a new reference 73 | } 74 | encoder_->EncodeBuffer(buffer->planes()[0].fd.get(), span.size(), mem, info, timestamp_ns / 1000); 75 | 76 | // Tell our caller that encoding is underway. 
77 | return true; 78 | } 79 | VideoOptions *GetOptions() const { return static_cast(options_.get()); } 80 | void StopEncoder() { encoder_.reset(); } 81 | 82 | protected: 83 | virtual void createEncoder() 84 | { 85 | StreamInfo info; 86 | VideoStream(&info); 87 | if (!info.width || !info.height || !info.stride) 88 | throw std::runtime_error("video steam is not configured"); 89 | encoder_ = std::unique_ptr(Encoder::Create(GetOptions(), info)); 90 | } 91 | std::unique_ptr encoder_; 92 | 93 | private: 94 | void encodeBufferDone(void *mem) 95 | { 96 | // If non-NULL, mem would indicate which buffer has been completed, but 97 | // currently we're just assuming everything is done in order. (We could 98 | // handle this by replacing the queue with a vector of 99 | // pairs.) 100 | assert(mem == nullptr); 101 | { 102 | std::lock_guard lock(encode_buffer_queue_mutex_); 103 | if (encode_buffer_queue_.empty()) 104 | throw std::runtime_error("no buffer available to return"); 105 | CompletedRequestPtr &completed_request = encode_buffer_queue_.front(); 106 | if (metadata_ready_callback_ && !GetOptions()->metadata.empty()) 107 | metadata_ready_callback_(completed_request->metadata); 108 | encode_buffer_queue_.pop(); // drop shared_ptr reference 109 | } 110 | } 111 | 112 | std::queue encode_buffer_queue_; 113 | std::mutex encode_buffer_queue_mutex_; 114 | EncodeOutputReadyCallback encode_output_ready_callback_; 115 | MetadataReadyCallback metadata_ready_callback_; 116 | }; 117 | -------------------------------------------------------------------------------- /core/still_options.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * still_options.hpp - still capture program options 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "options.hpp" 13 | 14 | struct StillOptions : public Options 15 | { 16 | StillOptions() : Options() 17 | { 18 | using namespace boost::program_options; 19 | // clang-format off 20 | options_.add_options() 21 | ("quality,q", value(&quality)->default_value(93), 22 | "Set the JPEG quality parameter") 23 | ("exif,x", value>(&exif), 24 | "Add these extra EXIF tags to the output file") 25 | ("timelapse", value(&timelapse_)->default_value("0ms"), 26 | "Time interval between timelapse captures. If no units are provided default to ms.") 27 | ("framestart", value(&framestart)->default_value(0), 28 | "Initial frame counter value for timelapse captures") 29 | ("datetime", value(&datetime)->default_value(false)->implicit_value(true), 30 | "Use date format for output file names") 31 | ("timestamp", value(×tamp)->default_value(false)->implicit_value(true), 32 | "Use system timestamps for output file names") 33 | ("restart", value(&restart)->default_value(0), 34 | "Set JPEG restart interval") 35 | ("keypress,k", value(&keypress)->default_value(false)->implicit_value(true), 36 | "Perform capture when ENTER pressed") 37 | ("signal,s", value(&signal)->default_value(false)->implicit_value(true), 38 | "Perform capture when signal received") 39 | ("thumb", value(&thumb)->default_value("320:240:70"), 40 | "Set thumbnail parameters as width:height:quality, or none") 41 | ("encoding,e", value(&encoding)->default_value("jpg"), 42 | "Set the desired output encoding, either jpg, png, rgb/rgb24, rgb48, bmp or yuv420") 43 | ("raw,r", value(&raw)->default_value(false)->implicit_value(true), 44 | "Also save raw file in DNG format") 45 | ("latest", value(&latest), 46 | "Create a symbolic link with this name to most recent saved file") 47 | ("immediate", value(&immediate)->default_value(false)->implicit_value(true), 48 | "Perform first capture immediately, with no preview 
phase") 49 | ("autofocus-on-capture", value(&af_on_capture)->default_value(false)->implicit_value(true), 50 | "Switch to AfModeAuto and trigger a scan just before capturing a still") 51 | ("zsl", value(&zsl)->default_value(false)->implicit_value(true), 52 | "Switch to AfModeAuto and trigger a scan just before capturing a still") 53 | ; 54 | // clang-format on 55 | } 56 | 57 | int quality; 58 | std::vector exif; 59 | TimeVal timelapse; 60 | uint32_t framestart; 61 | bool datetime; 62 | bool timestamp; 63 | unsigned int restart; 64 | bool keypress; 65 | bool signal; 66 | std::string thumb; 67 | unsigned int thumb_width, thumb_height, thumb_quality; 68 | std::string encoding; 69 | bool raw; 70 | std::string latest; 71 | bool immediate; 72 | bool zsl; 73 | 74 | virtual bool Parse(int argc, char *argv[]) override 75 | { 76 | if (Options::Parse(argc, argv) == false) 77 | return false; 78 | 79 | timelapse.set(timelapse_); 80 | 81 | if ((keypress || signal) && timelapse) 82 | throw std::runtime_error("keypress/signal and timelapse options are mutually exclusive"); 83 | if (strcasecmp(thumb.c_str(), "none") == 0) 84 | thumb_quality = 0; 85 | else if (sscanf(thumb.c_str(), "%u:%u:%u", &thumb_width, &thumb_height, &thumb_quality) != 3) 86 | throw std::runtime_error("bad thumbnail parameters " + thumb); 87 | if (strcasecmp(encoding.c_str(), "jpg") == 0) 88 | encoding = "jpg"; 89 | else if (strcasecmp(encoding.c_str(), "yuv420") == 0) 90 | encoding = "yuv420"; 91 | else if (strcasecmp(encoding.c_str(), "rgb") == 0 || strcasecmp(encoding.c_str(), "rgb24") == 0) 92 | encoding = "rgb24"; 93 | else if (strcasecmp(encoding.c_str(), "rgb48") == 0) 94 | encoding = "rgb48"; 95 | else if (strcasecmp(encoding.c_str(), "png") == 0) 96 | encoding = "png"; 97 | else if (strcasecmp(encoding.c_str(), "bmp") == 0) 98 | encoding = "bmp"; 99 | else 100 | throw std::runtime_error("invalid encoding format " + encoding); 101 | return true; 102 | } 103 | virtual void Print() const override 104 | { 
105 | Options::Print(); 106 | std::cerr << " encoding: " << encoding << std::endl; 107 | std::cerr << " quality: " << quality << std::endl; 108 | std::cerr << " raw: " << raw << std::endl; 109 | std::cerr << " restart: " << restart << std::endl; 110 | std::cerr << " timelapse: " << timelapse.get() << "ms" << std::endl; 111 | std::cerr << " framestart: " << framestart << std::endl; 112 | std::cerr << " datetime: " << datetime << std::endl; 113 | std::cerr << " timestamp: " << timestamp << std::endl; 114 | std::cerr << " keypress: " << keypress << std::endl; 115 | std::cerr << " signal: " << signal << std::endl; 116 | std::cerr << " thumbnail width: " << thumb_width << std::endl; 117 | std::cerr << " thumbnail height: " << thumb_height << std::endl; 118 | std::cerr << " thumbnail quality: " << thumb_quality << std::endl; 119 | std::cerr << " latest: " << latest << std::endl; 120 | std::cerr << " immediate " << immediate << std::endl; 121 | std::cerr << " AF on capture: " << af_on_capture << std::endl; 122 | std::cerr << " Zero shutter lag: " << zsl << std::endl; 123 | for (auto &s : exif) 124 | std::cerr << " EXIF: " << s << std::endl; 125 | } 126 | 127 | private: 128 | std::string timelapse_; 129 | }; 130 | -------------------------------------------------------------------------------- /core/stream_info.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * stream_info.hpp - structure holding details about a libcamera Stream. 
6 | */ 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | struct StreamInfo 15 | { 16 | StreamInfo() : width(0), height(0), stride(0) {} 17 | unsigned int width; 18 | unsigned int height; 19 | unsigned int stride; 20 | libcamera::PixelFormat pixel_format; 21 | std::optional colour_space; 22 | }; 23 | -------------------------------------------------------------------------------- /core/version.cpp.in: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * AUTO-GENERATED, DO NOT MODIFY! 6 | */ 7 | #include 8 | 9 | #include "core/version.hpp" 10 | 11 | #if LIBEGL_PRESENT 12 | static constexpr int egl = 1; 13 | #else 14 | static constexpr int egl = 0; 15 | #endif 16 | 17 | #if QT_PRESENT 18 | static constexpr int qt = 1; 19 | #else 20 | static constexpr int qt = 0; 21 | #endif 22 | 23 | #if LIBDRM_PRESENT 24 | static constexpr int drm = 1; 25 | #else 26 | static constexpr int drm = 0; 27 | #endif 28 | 29 | #if LIBAV_PRESENT 30 | static int libav = 1; 31 | #else 32 | static int libav = 0; 33 | #endif 34 | 35 | static const std::string version {"@VER@"}; 36 | 37 | static const std::string caps {"egl:" + std::to_string(egl) + 38 | " qt:" + std::to_string(qt) + 39 | " drm:" + std::to_string(drm) + 40 | " libav:" + std::to_string(libav)}; 41 | 42 | extern "C" { 43 | 44 | const char *RPiCamAppsVersion() 45 | { 46 | return version.c_str(); 47 | } 48 | 49 | const char *RPiCamAppsCapabilities() 50 | { 51 | return caps.c_str(); 52 | } 53 | 54 | } 55 | -------------------------------------------------------------------------------- /core/version.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 
4 | * 5 | */ 6 | #pragma once 7 | 8 | extern "C" 9 | { 10 | const char *RPiCamAppsVersion(); 11 | const char *RPiCamAppsCapabilities(); 12 | } 13 | -------------------------------------------------------------------------------- /encoder/encoder.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * encoder.cpp - Video encoder class. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include "encoder.hpp" 16 | #include "h264_encoder.hpp" 17 | #include "mjpeg_encoder.hpp" 18 | #include "null_encoder.hpp" 19 | 20 | #if LIBAV_PRESENT 21 | #include "libav_encoder.hpp" 22 | #endif 23 | 24 | static Encoder *h264_codec_select(VideoOptions *options, const StreamInfo &info) 25 | { 26 | if (options->GetPlatform() == Platform::VC4) 27 | return new H264Encoder(options, info); 28 | 29 | #if LIBAV_PRESENT 30 | // No hardware codec available, use x264 through libav. 31 | options->libav_video_codec = "libx264"; 32 | return new LibAvEncoder(options, info); 33 | #endif 34 | 35 | throw std::runtime_error("Unable to find an appropriate H.264 codec"); 36 | } 37 | 38 | #if LIBAV_PRESENT 39 | static Encoder *libav_codec_select(VideoOptions *options, const StreamInfo &info) 40 | { 41 | if (options->libav_video_codec == "h264_v4l2m2m") 42 | { 43 | if (options->GetPlatform() == Platform::VC4) 44 | return new LibAvEncoder(options, info); 45 | // No h264_v4l2m2m libav codec available, use libx264 if nothing else is provided. 
46 | options->libav_video_codec = "libx264"; 47 | } 48 | return new LibAvEncoder(options, info); 49 | } 50 | #endif 51 | 52 | Encoder *Encoder::Create(VideoOptions *options, const StreamInfo &info) 53 | { 54 | if (strcasecmp(options->codec.c_str(), "yuv420") == 0) 55 | return new NullEncoder(options); 56 | else if (strcasecmp(options->codec.c_str(), "h264") == 0) 57 | return h264_codec_select(options, info); 58 | #if LIBAV_PRESENT 59 | else if (strcasecmp(options->codec.c_str(), "libav") == 0) 60 | return libav_codec_select(options, info); 61 | #endif 62 | else if (strcasecmp(options->codec.c_str(), "mjpeg") == 0) 63 | return new MjpegEncoder(options); 64 | throw std::runtime_error("Unrecognised codec " + options->codec); 65 | } 66 | -------------------------------------------------------------------------------- /encoder/encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * encoder.hpp - Video encoder class. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "core/stream_info.hpp" 13 | #include "core/video_options.hpp" 14 | 15 | typedef std::function InputDoneCallback; 16 | typedef std::function OutputReadyCallback; 17 | 18 | class Encoder 19 | { 20 | public: 21 | static Encoder *Create(VideoOptions *options, StreamInfo const &info); 22 | 23 | Encoder(VideoOptions const *options) : options_(options) {} 24 | virtual ~Encoder() {} 25 | // This is where the application sets the callback it gets whenever the encoder 26 | // has finished with an input buffer, so the application can re-use it. 27 | void SetInputDoneCallback(InputDoneCallback callback) { input_done_callback_ = callback; } 28 | // This callback is how the application is told that an encoded buffer is 29 | // available. 
The application may not hang on to the memory once it returns 30 | // (but the callback is already running in its own thread). 31 | void SetOutputReadyCallback(OutputReadyCallback callback) { output_ready_callback_ = callback; } 32 | // Encode the given buffer. The buffer is specified both by an fd and size 33 | // describing a DMABUF, and by a mmapped userland pointer. 34 | virtual void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) = 0; 35 | 36 | protected: 37 | InputDoneCallback input_done_callback_; 38 | OutputReadyCallback output_ready_callback_; 39 | VideoOptions const *options_; 40 | }; 41 | -------------------------------------------------------------------------------- /encoder/h264_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * h264_encoder.hpp - h264 video encoder. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "encoder.hpp" 16 | 17 | class H264Encoder : public Encoder 18 | { 19 | public: 20 | H264Encoder(VideoOptions const *options, StreamInfo const &info); 21 | ~H264Encoder(); 22 | // Encode the given DMABUF. 23 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 24 | 25 | private: 26 | // We want at least as many output buffers as there are in the camera queue 27 | // (we always want to be able to queue them when they arrive). Make loads 28 | // of capture buffers, as this is our buffering mechanism in case of delays 29 | // dealing with the output bitstream. 30 | static constexpr int NUM_OUTPUT_BUFFERS = 6; 31 | static constexpr int NUM_CAPTURE_BUFFERS = 12; 32 | 33 | // This thread just sits waiting for the encoder to finish stuff. 
It will either: 34 | // * receive "output" buffers (codec inputs), which we must return to the caller 35 | // * receive encoded buffers, which we pass to the application. 36 | void pollThread(); 37 | 38 | // Handle the output buffers in another thread so as not to block the encoder. The 39 | // application can take its time, after which we return this buffer to the encoder for 40 | // re-use. 41 | void outputThread(); 42 | 43 | bool abortPoll_; 44 | bool abortOutput_; 45 | int fd_; 46 | struct BufferDescription 47 | { 48 | void *mem; 49 | size_t size; 50 | }; 51 | BufferDescription buffers_[NUM_CAPTURE_BUFFERS]; 52 | int num_capture_buffers_; 53 | std::thread poll_thread_; 54 | std::mutex input_buffers_available_mutex_; 55 | std::queue input_buffers_available_; 56 | struct OutputItem 57 | { 58 | void *mem; 59 | size_t bytes_used; 60 | size_t length; 61 | unsigned int index; 62 | bool keyframe; 63 | int64_t timestamp_us; 64 | }; 65 | std::queue output_queue_; 66 | std::mutex output_mutex_; 67 | std::condition_variable output_cond_var_; 68 | std::thread output_thread_; 69 | }; 70 | -------------------------------------------------------------------------------- /encoder/libav_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2022, Raspberry Pi Ltd 4 | * 5 | * libav_encoder.hpp - libav video encoder. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | extern "C" 18 | { 19 | #include "libavcodec/avcodec.h" 20 | #include "libavcodec/codec_desc.h" 21 | #include "libavcodec/version.h" 22 | #include "libavdevice/avdevice.h" 23 | #include "libavformat/avformat.h" 24 | #include "libavutil/audio_fifo.h" 25 | #include "libavutil/hwcontext.h" 26 | #include "libavutil/hwcontext_drm.h" 27 | #include "libavutil/imgutils.h" 28 | #include "libavutil/timestamp.h" 29 | #include "libavutil/version.h" 30 | #include "libswresample/swresample.h" 31 | } 32 | 33 | #include "encoder.hpp" 34 | 35 | class LibAvEncoder : public Encoder 36 | { 37 | public: 38 | LibAvEncoder(VideoOptions const *options, StreamInfo const &info); 39 | ~LibAvEncoder(); 40 | // Encode the given DMABUF. 41 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 42 | 43 | private: 44 | void initVideoCodec(VideoOptions const *options, StreamInfo const &info); 45 | void initAudioInCodec(VideoOptions const *options, StreamInfo const &info); 46 | void initAudioOutCodec(VideoOptions const *options, StreamInfo const &info); 47 | 48 | void initOutput(); 49 | void deinitOutput(); 50 | void encode(AVPacket *pkt, unsigned int stream_id); 51 | 52 | void videoThread(); 53 | void audioThread(); 54 | 55 | static void releaseBuffer(void *opaque, uint8_t *data); 56 | 57 | std::atomic output_ready_; 58 | bool abort_video_; 59 | bool abort_audio_; 60 | uint64_t video_start_ts_; 61 | uint64_t audio_samples_; 62 | 63 | std::queue frame_queue_; 64 | std::mutex video_mutex_; 65 | std::mutex output_mutex_; 66 | std::condition_variable video_cv_; 67 | std::thread video_thread_; 68 | std::thread audio_thread_; 69 | 70 | // The ordering in the enum below must not change! 
71 | enum Context { Video = 0, AudioOut = 1, AudioIn = 2 }; 72 | AVCodecContext *codec_ctx_[3]; 73 | AVStream *stream_[3]; 74 | AVFormatContext *in_fmt_ctx_; 75 | AVFormatContext *out_fmt_ctx_; 76 | 77 | std::mutex drm_queue_lock_; 78 | std::queue> drm_frame_queue_; 79 | 80 | std::string output_file_; 81 | bool output_initialised_; 82 | }; 83 | -------------------------------------------------------------------------------- /encoder/meson.build: -------------------------------------------------------------------------------- 1 | rpicam_app_src += files([ 2 | 'encoder.cpp', 3 | 'h264_encoder.cpp', 4 | 'mjpeg_encoder.cpp', 5 | 'null_encoder.cpp', 6 | ]) 7 | 8 | encoder_headers = files([ 9 | 'encoder.hpp', 10 | 'h264_encoder.hpp', 11 | 'mjpeg_encoder.hpp', 12 | 'null_encoder.hpp', 13 | ]) 14 | 15 | libav_dep_names = ['libavcodec', 'libavdevice', 'libavformat', 'libavutil', 'libswresample'] 16 | libav_deps = [] 17 | 18 | enable_libav = true 19 | foreach name : libav_dep_names 20 | dep = dependency(name, required : get_option('enable_libav')) 21 | if not dep.found() 22 | enable_libav = false 23 | break 24 | endif 25 | libav_deps += dep 26 | endforeach 27 | 28 | if enable_libav 29 | rpicam_app_src += files('libav_encoder.cpp') 30 | encoder_headers += files('libav_encoder.hpp') 31 | rpicam_app_dep += libav_deps 32 | cpp_arguments += '-DLIBAV_PRESENT=1' 33 | endif 34 | 35 | install_headers(encoder_headers, subdir: meson.project_name() / 'encoder') 36 | -------------------------------------------------------------------------------- /encoder/mjpeg_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * mjpeg_encoder.hpp - mjpeg video encoder. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "encoder.hpp" 16 | 17 | struct jpeg_compress_struct; 18 | 19 | class MjpegEncoder : public Encoder 20 | { 21 | public: 22 | MjpegEncoder(VideoOptions const *options); 23 | ~MjpegEncoder(); 24 | // Encode the given buffer. 25 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 26 | 27 | private: 28 | // How many threads to use. Whichever thread is idle will pick up the next frame. 29 | static const int NUM_ENC_THREADS = 4; 30 | 31 | // These threads do the actual encoding. 32 | void encodeThread(int num); 33 | 34 | // Handle the output buffers in another thread so as not to block the encoders. The 35 | // application can take its time, after which we return this buffer to the encoder for 36 | // re-use. 37 | void outputThread(); 38 | 39 | bool abortEncode_; 40 | bool abortOutput_; 41 | uint64_t index_; 42 | 43 | struct EncodeItem 44 | { 45 | void *mem; 46 | StreamInfo info; 47 | int64_t timestamp_us; 48 | uint64_t index; 49 | }; 50 | std::queue encode_queue_; 51 | std::mutex encode_mutex_; 52 | std::condition_variable encode_cond_var_; 53 | std::thread encode_thread_[NUM_ENC_THREADS]; 54 | void encodeJPEG(struct jpeg_compress_struct &cinfo, EncodeItem &item, uint8_t *&encoded_buffer, size_t &buffer_len); 55 | 56 | struct OutputItem 57 | { 58 | void *mem; 59 | size_t bytes_used; 60 | int64_t timestamp_us; 61 | uint64_t index; 62 | }; 63 | std::queue output_queue_[NUM_ENC_THREADS]; 64 | std::mutex output_mutex_; 65 | std::condition_variable output_cond_var_; 66 | std::thread output_thread_; 67 | }; 68 | -------------------------------------------------------------------------------- /encoder/null_encoder.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * null_encoder.cpp - dummy "do nothing" video encoder. 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "null_encoder.hpp" 13 | 14 | NullEncoder::NullEncoder(VideoOptions const *options) : Encoder(options), abort_(false) 15 | { 16 | LOG(2, "Opened NullEncoder"); 17 | output_thread_ = std::thread(&NullEncoder::outputThread, this); 18 | } 19 | 20 | NullEncoder::~NullEncoder() 21 | { 22 | abort_ = true; 23 | output_thread_.join(); 24 | LOG(2, "NullEncoder closed"); 25 | } 26 | 27 | // Push the buffer onto the output queue to be "encoded" and returned. 28 | void NullEncoder::EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) 29 | { 30 | std::lock_guard lock(output_mutex_); 31 | OutputItem item = { mem, size, timestamp_us }; 32 | output_queue_.push(item); 33 | output_cond_var_.notify_one(); 34 | } 35 | 36 | // Realistically we would probably want more of a queue as the caller's number 37 | // of buffers limits the amount of queueing possible here... 38 | void NullEncoder::outputThread() 39 | { 40 | OutputItem item; 41 | while (true) 42 | { 43 | { 44 | std::unique_lock lock(output_mutex_); 45 | while (true) 46 | { 47 | using namespace std::chrono_literals; 48 | if (!output_queue_.empty()) 49 | { 50 | item = output_queue_.front(); 51 | output_queue_.pop(); 52 | break; 53 | } 54 | else 55 | output_cond_var_.wait_for(lock, 200ms); 56 | if (abort_) 57 | return; 58 | } 59 | } 60 | // Ensure the input done callback happens before the output ready callback. 61 | // This is needed as the metadata queue gets pushed in the former, and popped 62 | // in the latter. 
63 | input_done_callback_(nullptr); 64 | output_ready_callback_(item.mem, item.length, item.timestamp_us, true); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /encoder/null_encoder.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * null_encoder.hpp - dummy "do nothing" video encoder. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "core/video_options.hpp" 16 | #include "encoder.hpp" 17 | 18 | class NullEncoder : public Encoder 19 | { 20 | public: 21 | NullEncoder(VideoOptions const *options); 22 | ~NullEncoder(); 23 | void EncodeBuffer(int fd, size_t size, void *mem, StreamInfo const &info, int64_t timestamp_us) override; 24 | 25 | private: 26 | void outputThread(); 27 | 28 | bool abort_; 29 | VideoOptions options_; 30 | struct OutputItem 31 | { 32 | void *mem; 33 | size_t length; 34 | int64_t timestamp_us; 35 | }; 36 | std::queue output_queue_; 37 | std::mutex output_mutex_; 38 | std::condition_variable output_cond_var_; 39 | std::thread output_thread_; 40 | }; 41 | -------------------------------------------------------------------------------- /image/bmp.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * bmp.cpp - Encode image as bmp and write to file. 
6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include "core/still_options.hpp" 14 | #include "core/stream_info.hpp" 15 | 16 | struct ImageHeader 17 | { 18 | uint32_t size = sizeof(ImageHeader); 19 | uint32_t width; 20 | int32_t height; 21 | uint16_t planes = 1; 22 | uint16_t bitcount = 24; 23 | uint32_t compression = 0; 24 | uint32_t imagesize = 0; 25 | uint32_t xpels = 100000; 26 | uint32_t ypels = 100000; 27 | uint32_t clrused = 0; 28 | uint32_t clrimportant = 0; 29 | }; 30 | static_assert(sizeof(ImageHeader) == 40, "ImageHeader size wrong"); 31 | 32 | struct FileHeader 33 | { 34 | uint16_t dummy; // 2 dummy bytes so that our uint32_ts line up 35 | uint8_t type1 = 'B'; 36 | uint8_t type2 = 'M'; 37 | uint32_t filesize; 38 | uint16_t reserved1 = 0; 39 | uint16_t reserved2 = 0; 40 | uint32_t offset = sizeof(FileHeader) - 2 + sizeof(ImageHeader); 41 | }; 42 | static_assert(sizeof(FileHeader) == 16, "FileHeader size wrong"); 43 | 44 | void bmp_save(std::vector> const &mem, StreamInfo const &info, 45 | std::string const &filename, StillOptions const *options) 46 | { 47 | if (info.pixel_format != libcamera::formats::RGB888) 48 | throw std::runtime_error("pixel format for bmp should be RGB"); 49 | 50 | FILE *fp = filename == "-" ? 
stdout : fopen(filename.c_str(), "wb"); 51 | 52 | if (fp == NULL) 53 | throw std::runtime_error("failed to open file " + filename); 54 | 55 | try 56 | { 57 | unsigned int line = info.width * 3; 58 | unsigned int pitch = (line + 3) & ~3; // lines are multiples of 4 bytes 59 | unsigned int pad = pitch - line; 60 | uint8_t padding[3] = {}; 61 | uint8_t *ptr = (uint8_t *)mem[0].data(); 62 | 63 | FileHeader file_header; 64 | ImageHeader image_header; 65 | file_header.filesize = file_header.offset + info.height * pitch; 66 | image_header.width = info.width; 67 | image_header.height = -info.height; // make image come out the right way up 68 | 69 | // Don't write the file header's 2 dummy bytes 70 | if (fwrite((uint8_t *)&file_header + 2, sizeof(file_header) - 2, 1, fp) != 1 || 71 | fwrite(&image_header, sizeof(image_header), 1, fp) != 1) 72 | throw std::runtime_error("failed to write BMP file"); 73 | 74 | for (unsigned int i = 0; i < info.height; i++, ptr += info.stride) 75 | { 76 | if (fwrite(ptr, line, 1, fp) != 1 || (pad != 0 && fwrite(padding, pad, 1, fp) != 1)) 77 | throw std::runtime_error("failed to write BMP file, row " + std::to_string(i)); 78 | } 79 | 80 | LOG(2, "Wrote " << file_header.filesize << " bytes to BMP file"); 81 | 82 | if (fp != stdout) 83 | fclose(fp); 84 | } 85 | catch (std::exception const &e) 86 | { 87 | if (fp && fp != stdout) 88 | fclose(fp); 89 | throw; 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /image/image.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * image.hpp - still image encoder declarations 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include 15 | 16 | #include "core/stream_info.hpp" 17 | 18 | struct StillOptions; 19 | 20 | // In jpeg.cpp: 21 | void jpeg_save(std::vector> const &mem, StreamInfo const &info, 22 | libcamera::ControlList const &metadata, std::string const &filename, std::string const &cam_model, 23 | StillOptions const *options); 24 | 25 | // In yuv.cpp: 26 | void yuv_save(std::vector> const &mem, StreamInfo const &info, 27 | std::string const &filename, StillOptions const *options); 28 | 29 | // In dng.cpp: 30 | void dng_save(std::vector> const &mem, StreamInfo const &info, 31 | libcamera::ControlList const &metadata, std::string const &filename, std::string const &cam_model, 32 | StillOptions const *options); 33 | 34 | // In png.cpp: 35 | void png_save(std::vector> const &mem, StreamInfo const &info, 36 | std::string const &filename, StillOptions const *options); 37 | 38 | // In bmp.cpp: 39 | void bmp_save(std::vector> const &mem, StreamInfo const &info, 40 | std::string const &filename, StillOptions const *options); 41 | -------------------------------------------------------------------------------- /image/meson.build: -------------------------------------------------------------------------------- 1 | rpicam_app_src += files([ 2 | 'bmp.cpp', 3 | 'dng.cpp', 4 | 'jpeg.cpp', 5 | 'png.cpp', 6 | 'yuv.cpp', 7 | ]) 8 | 9 | image_headers = files([ 10 | 'image.hpp', 11 | ]) 12 | 13 | exif_dep = dependency('libexif', required : true) 14 | jpeg_dep = dependency('libjpeg', required : true) 15 | tiff_dep = dependency('libtiff-4', required : true) 16 | png_dep = dependency('libpng', required : true) 17 | 18 | rpicam_app_dep += [exif_dep, jpeg_dep, tiff_dep, png_dep] 19 | 20 | install_headers(image_headers, subdir: meson.project_name() / 'image') 21 | -------------------------------------------------------------------------------- /image/png.cpp: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * png.cpp - Encode image as png and write to file. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #include 14 | 15 | #include "core/still_options.hpp" 16 | #include "core/stream_info.hpp" 17 | 18 | void png_save(std::vector> const &mem, StreamInfo const &info, 19 | std::string const &filename, StillOptions const *options) 20 | { 21 | if (info.pixel_format != libcamera::formats::BGR888) 22 | throw std::runtime_error("pixel format for png should be BGR"); 23 | 24 | FILE *fp = filename == "-" ? stdout : fopen(filename.c_str(), "wb"); 25 | png_structp png_ptr = NULL; 26 | png_infop info_ptr = NULL; 27 | 28 | if (fp == NULL) 29 | throw std::runtime_error("failed to open file " + filename); 30 | 31 | try 32 | { 33 | // Open everything up. 34 | png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); 35 | if (png_ptr == NULL) 36 | throw std::runtime_error("failed to create png write struct"); 37 | 38 | info_ptr = png_create_info_struct(png_ptr); 39 | if (info_ptr == NULL) 40 | throw std::runtime_error("failed to create png info struct"); 41 | 42 | if (setjmp(png_jmpbuf(png_ptr))) 43 | throw std::runtime_error("failed to set png error handling"); 44 | 45 | // Set image attributes. 46 | png_set_IHDR(png_ptr, info_ptr, info.width, info.height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, 47 | PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); 48 | // These settings get us most of the compression, but are much faster. 49 | png_set_filter(png_ptr, 0, PNG_FILTER_AVG); 50 | png_set_compression_level(png_ptr, 1); 51 | 52 | // Set up the image data. 
53 | png_byte **row_ptrs = (png_byte **)png_malloc(png_ptr, info.height * sizeof(png_byte *)); 54 | png_byte *row = (uint8_t *)mem[0].data(); 55 | for (unsigned int i = 0; i < info.height; i++, row += info.stride) 56 | row_ptrs[i] = row; 57 | 58 | png_init_io(png_ptr, fp); 59 | png_set_rows(png_ptr, info_ptr, row_ptrs); 60 | png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL); 61 | 62 | long int size = ftell(fp); 63 | LOG(2, "Wrote PNG file of " << size << " bytes"); 64 | 65 | // Free and close everything and we're done. 66 | png_free(png_ptr, row_ptrs); 67 | png_destroy_write_struct(&png_ptr, &info_ptr); 68 | if (fp != stdout) 69 | fclose(fp); 70 | } 71 | catch (std::exception const &e) 72 | { 73 | if (png_ptr) 74 | png_destroy_write_struct(&png_ptr, &info_ptr); 75 | if (fp && fp != stdout) 76 | fclose(fp); 77 | throw; 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2020 2021, Raspberry Pi (Trading) Limited 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /meson.build: -------------------------------------------------------------------------------- 1 | project('rpicam-apps', 'c', 'cpp', 2 | meson_version : '>= 0.64.0', 3 | version : '1.7.0', 4 | default_options : [ 5 | 'werror=true', 6 | 'warning_level=3', 7 | 'cpp_std=c++17', 8 | 'c_std=c11', 9 | 'buildtype=release', 10 | ], 11 | license : 'BSD-2-Clause') 12 | 13 | meson.add_dist_script('utils' / 'gen-dist.sh') 14 | 15 | fs = import('fs') 16 | 17 | cpp_arguments = ['-pedantic', '-Wno-unused-parameter', '-faligned-new'] 18 | 19 | # Needed for file sizes > 32-bits. 
20 | cpp_arguments += '-D_FILE_OFFSET_BITS=64' 21 | 22 | cxx = meson.get_compiler('cpp') 23 | cpu = host_machine.cpu() 24 | neon = get_option('neon_flags') 25 | 26 | if cxx.get_id() == 'gcc' 27 | cpp_arguments += '-Wno-psabi' 28 | endif 29 | 30 | if cpu == 'aarch64' or neon == 'arm64' 31 | cpp_arguments += '-ftree-vectorize' 32 | elif neon == 'armv8-neon' 33 | cpp_arguments += ['-mfpu=neon-fp-armv8', '-ftree-vectorize'] 34 | endif 35 | 36 | dl_dep = dependency('dl', required : true) 37 | libcamera_dep = dependency('libcamera', required : true) 38 | 39 | if get_option('disable_rpi_features') == true 40 | cpp_arguments += '-DDISABLE_RPI_FEATURES' 41 | endif 42 | 43 | summary({ 44 | 'location' : libcamera_dep.get_variable('libdir'), 45 | 'version' : libcamera_dep.version() 46 | }, 47 | section : 'libcamera') 48 | 49 | rpicam_app_src = [] 50 | rpicam_app_dep = [libcamera_dep, dl_dep] 51 | 52 | subdir('core') 53 | subdir('encoder') 54 | subdir('image') 55 | subdir('output') 56 | subdir('preview') 57 | subdir('utils') 58 | 59 | add_project_arguments(cpp_arguments, language : 'cpp') 60 | 61 | # Must be put after add_project_arguments as it defines shared library targets. 62 | subdir('post_processing_stages') 63 | 64 | # Generate a version string. 65 | version_cmd = [meson.project_source_root() / 'utils' / 'version.py', meson.project_version()] 66 | 67 | # Check if a version.gen file is present. 68 | # This would have been generated from the meson dist command. 
69 | dist_version_file = meson.project_source_root() / 'version.gen' 70 | if fs.is_file(dist_version_file) 71 | version_cmd += fs.read(dist_version_file) 72 | endif 73 | 74 | version_cpp = vcs_tag(command : version_cmd, 75 | replace_string: '@VER@', 76 | input : meson.project_source_root() / 'core' / 'version.cpp.in', 77 | output : 'version.cpp', 78 | fallback : meson.project_version()) 79 | 80 | rpicam_app_src += version_cpp 81 | 82 | rpicam_app = library( 83 | 'rpicam_app', 84 | rpicam_app_src, 85 | soversion : meson.project_version(), 86 | include_directories : include_directories('.'), 87 | install : true, 88 | name_prefix : '', 89 | dependencies : rpicam_app_dep, 90 | ) 91 | 92 | pkg = import('pkgconfig') 93 | pkg.generate(rpicam_app, 94 | version: meson.project_version()) 95 | 96 | subdir('apps') 97 | 98 | summary({ 99 | 'libav encoder' : enable_libav, 100 | 'drm preview' : enable_drm, 101 | 'egl preview' : enable_egl, 102 | 'qt preview' : enable_qt, 103 | 'OpenCV postprocessing' : enable_opencv, 104 | 'TFLite postprocessing' : enable_tflite, 105 | 'Hailo postprocessing' : enable_hailo, 106 | 'IMX500 postprocessing' : get_option('enable_imx500'), 107 | }, 108 | bool_yn : true, section : 'Build configuration') 109 | -------------------------------------------------------------------------------- /meson_options.txt: -------------------------------------------------------------------------------- 1 | option('enable_libav', 2 | type : 'feature', 3 | value : 'auto', 4 | description : 'Enable the libav encoder for video/audio capture') 5 | 6 | option('enable_drm', 7 | type : 'feature', 8 | value : 'auto', 9 | description : 'Enable DRM preview window support') 10 | 11 | option('enable_egl', 12 | type : 'feature', 13 | value : 'auto', 14 | description : 'Enable EGL preview window support') 15 | 16 | option('enable_qt', 17 | type : 'feature', 18 | value : 'auto', 19 | description : 'Enable QT preview window support') 20 | 21 | option('enable_opencv', 22 | type : 
'feature', 23 | value : 'disabled', 24 | description : 'Enable OpenCV postprocessing support') 25 | 26 | option('enable_tflite', 27 | type : 'feature', 28 | value : 'disabled', 29 | description : 'Enable Tensorflow Lite postprocessing support') 30 | 31 | option('neon_flags', 32 | type : 'combo', 33 | choices: ['arm64', 'armv8-neon', 'auto'], 34 | value : 'auto', 35 | description : 'User selectable arm-neon optimisation flags') 36 | 37 | option('enable_hailo', 38 | type : 'feature', 39 | value : 'auto', 40 | description : 'Enable Hailo postprocessing support') 41 | 42 | option('download_hailo_models', 43 | type : 'boolean', 44 | value : true, 45 | description : 'Download and install the Hailo postprocessing models') 46 | 47 | option('enable_imx500', 48 | type : 'boolean', 49 | value : false, 50 | description : 'Enable IMX500 postprocessing support') 51 | 52 | option('download_imx500_models', 53 | type : 'boolean', 54 | value : false, 55 | description : 'Download and install the IMX500 postprocessing models') 56 | 57 | option('disable_rpi_features', 58 | type : 'boolean', 59 | value : false, 60 | description : 'Disable use Raspberry Pi specific extensions in the build') 61 | -------------------------------------------------------------------------------- /output/circular_output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * circular_output.cpp - Write output to circular buffer which we save on exit. 
6 | */ 7 | 8 | #include "circular_output.hpp" 9 | 10 | // We're going to align the frames within the buffer to friendly byte boundaries 11 | static constexpr int ALIGN = 16; // power of 2, please 12 | 13 | struct Header 14 | { 15 | unsigned int length; 16 | bool keyframe; 17 | int64_t timestamp; 18 | }; 19 | static_assert(sizeof(Header) % ALIGN == 0, "Header should have aligned size"); 20 | 21 | // Size of buffer (options->circular) is given in megabytes. 22 | CircularOutput::CircularOutput(VideoOptions const *options) : Output(options), cb_(options->circular<<20) 23 | { 24 | // Open this now, so that we can get any complaints out of the way 25 | if (options_->output == "-") 26 | fp_ = stdout; 27 | else if (!options_->output.empty()) 28 | { 29 | fp_ = fopen(options_->output.c_str(), "w"); 30 | } 31 | if (!fp_) 32 | throw std::runtime_error("could not open output file"); 33 | } 34 | 35 | CircularOutput::~CircularOutput() 36 | { 37 | // We do have to skip to the first I frame before dumping stuff to disk. If there are 38 | // no I frames you will get nothing. Caveat emptor, methinks. 
39 | unsigned int total = 0, frames = 0; 40 | bool seen_keyframe = false; 41 | Header header; 42 | FILE *fp = fp_; // can't capture a class member in a lambda 43 | while (!cb_.Empty()) 44 | { 45 | uint8_t *dst = (uint8_t *)&header; 46 | cb_.Read( 47 | [&dst](void *src, int n) { 48 | memcpy(dst, src, n); 49 | dst += n; 50 | }, 51 | sizeof(header)); 52 | seen_keyframe |= header.keyframe; 53 | if (seen_keyframe) 54 | { 55 | cb_.Read([fp](void *src, int n) { fwrite(src, 1, n, fp); }, header.length); 56 | cb_.Skip((ALIGN - header.length) & (ALIGN - 1)); 57 | total += header.length; 58 | if (fp_timestamps_) 59 | { 60 | Output::timestampReady(header.timestamp); 61 | } 62 | frames++; 63 | } 64 | else 65 | cb_.Skip((header.length + ALIGN - 1) & ~(ALIGN - 1)); 66 | } 67 | fclose(fp_); 68 | LOG(1, "Wrote " << total << " bytes (" << frames << " frames)"); 69 | } 70 | 71 | void CircularOutput::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 72 | { 73 | // First make sure there's enough space. 
74 | int pad = (ALIGN - size) & (ALIGN - 1); 75 | while (size + pad + sizeof(Header) > cb_.Available()) 76 | { 77 | if (cb_.Empty()) 78 | throw std::runtime_error("circular buffer too small"); 79 | Header header; 80 | uint8_t *dst = (uint8_t *)&header; 81 | cb_.Read( 82 | [&dst](void *src, int n) { 83 | memcpy(dst, src, n); 84 | dst += n; 85 | }, 86 | sizeof(header)); 87 | cb_.Skip((header.length + ALIGN - 1) & ~(ALIGN - 1)); 88 | } 89 | Header header = { static_cast(size), !!(flags & FLAG_KEYFRAME), timestamp_us }; 90 | cb_.Write(&header, sizeof(header)); 91 | cb_.Write(mem, size); 92 | cb_.Pad(pad); 93 | } 94 | 95 | void CircularOutput::timestampReady(int64_t timestamp) 96 | { 97 | // Don't want to save every timestamp as we go along, only outputs them at the end 98 | } 99 | -------------------------------------------------------------------------------- /output/circular_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * circular_output.hpp - Write output to a circular buffer. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include "output.hpp" 11 | 12 | // A simple circular buffer implementation used by the CircularOutput class. 13 | 14 | class CircularBuffer 15 | { 16 | public: 17 | CircularBuffer(size_t size) : size_(size), buf_(size), rptr_(0), wptr_(0) {} 18 | bool Empty() const { return rptr_ == wptr_; } 19 | size_t Available() const { return wptr_ == rptr_ ? size_ - 1 : (size_ - wptr_ + rptr_) % size_ - 1; } 20 | void Skip(unsigned int n) { rptr_ = (rptr_ + n) % size_; } 21 | // The dst function allows bytes read to go straight to memory or a file etc. 
22 | void Read(std::function dst, unsigned int n) 23 | { 24 | if (rptr_ + n >= size_) 25 | { 26 | dst(&buf_[rptr_], size_ - rptr_); 27 | n -= size_ - rptr_; 28 | rptr_ = 0; 29 | } 30 | dst(&buf_[rptr_], n); 31 | rptr_ += n; 32 | } 33 | void Pad(unsigned int n) { wptr_ = (wptr_ + n) % size_; } 34 | void Write(const void *ptr, unsigned int n) 35 | { 36 | if (wptr_ + n >= size_) 37 | { 38 | memcpy(&buf_[wptr_], ptr, size_ - wptr_); 39 | n -= size_ - wptr_; 40 | ptr = static_cast(ptr) + size_ - wptr_; 41 | wptr_ = 0; 42 | } 43 | memcpy(&buf_[wptr_], ptr, n); 44 | wptr_ += n; 45 | } 46 | 47 | private: 48 | const size_t size_; 49 | std::vector buf_; 50 | size_t rptr_, wptr_; 51 | }; 52 | 53 | // Write frames to a circular buffer, and dump them to disk when we quit. 54 | 55 | class CircularOutput : public Output 56 | { 57 | public: 58 | CircularOutput(VideoOptions const *options); 59 | ~CircularOutput(); 60 | 61 | protected: 62 | void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override; 63 | void timestampReady(int64_t timestamp) override; 64 | 65 | private: 66 | CircularBuffer cb_; 67 | FILE *fp_; 68 | }; 69 | -------------------------------------------------------------------------------- /output/file_output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * file_output.cpp - Write output to file. 
6 | */ 7 | 8 | #include "file_output.hpp" 9 | 10 | FileOutput::FileOutput(VideoOptions const *options) 11 | : Output(options), fp_(nullptr), count_(0), file_start_time_ms_(0) 12 | { 13 | } 14 | 15 | FileOutput::~FileOutput() 16 | { 17 | closeFile(); 18 | } 19 | 20 | void FileOutput::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 21 | { 22 | // We need to open a new file if we're in "segment" mode and our segment is full 23 | // (though we have to wait for the next I frame), or if we're in "split" mode 24 | // and recording is being restarted (this is necessarily an I-frame already). 25 | if (fp_ == nullptr || 26 | (options_->segment && (flags & FLAG_KEYFRAME) && 27 | timestamp_us / 1000 - file_start_time_ms_ > options_->segment) || 28 | (options_->split && (flags & FLAG_RESTART))) 29 | { 30 | closeFile(); 31 | openFile(timestamp_us); 32 | } 33 | 34 | LOG(2, "FileOutput: output buffer " << mem << " size " << size); 35 | if (fp_ && size) 36 | { 37 | if (fwrite(mem, size, 1, fp_) != 1) 38 | throw std::runtime_error("failed to write output bytes"); 39 | if (options_->flush) 40 | fflush(fp_); 41 | } 42 | } 43 | 44 | void FileOutput::openFile(int64_t timestamp_us) 45 | { 46 | if (options_->output == "-") 47 | fp_ = stdout; 48 | else if (!options_->output.empty()) 49 | { 50 | // Generate the next output file name. 
51 | char filename[256]; 52 | int n = snprintf(filename, sizeof(filename), options_->output.c_str(), count_); 53 | count_++; 54 | if (options_->wrap) 55 | count_ = count_ % options_->wrap; 56 | if (n < 0) 57 | throw std::runtime_error("failed to generate filename"); 58 | 59 | fp_ = fopen(filename, "w"); 60 | if (!fp_) 61 | throw std::runtime_error("failed to open output file " + std::string(filename)); 62 | LOG(2, "FileOutput: opened output file " << filename); 63 | 64 | file_start_time_ms_ = timestamp_us / 1000; 65 | } 66 | } 67 | 68 | void FileOutput::closeFile() 69 | { 70 | if (fp_) 71 | { 72 | if (options_->flush) 73 | fflush(fp_); 74 | if (fp_ != stdout) 75 | fclose(fp_); 76 | fp_ = nullptr; 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /output/file_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * file_output.hpp - Write output to file. 
/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * file_output.hpp - Write output to file.
 */

#pragma once

#include "output.hpp"

// Writes the encoded stream to a file (or stdout), optionally starting a new
// file per time segment or on recording restart ("split" mode).
class FileOutput : public Output
{
public:
	FileOutput(VideoOptions const *options);
	~FileOutput();

protected:
	// Appends one encoded buffer, opening a fresh file first when required.
	void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override;

private:
	void openFile(int64_t timestamp_us);
	void closeFile();
	FILE *fp_; // current output file; may be stdout, nullptr when closed
	unsigned int count_; // index substituted into the output name pattern
	int64_t file_start_time_ms_; // timestamp of the first frame in the current file
};
6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include "net_output.hpp" 12 | 13 | NetOutput::NetOutput(VideoOptions const *options) : Output(options) 14 | { 15 | char protocol[4]; 16 | int start, end, a, b, c, d, port; 17 | if (sscanf(options->output.c_str(), "%3s://%n%d.%d.%d.%d%n:%d", protocol, &start, &a, &b, &c, &d, &end, &port) != 6) 18 | throw std::runtime_error("bad network address " + options->output); 19 | std::string address = options->output.substr(start, end - start); 20 | 21 | if (strcmp(protocol, "udp") == 0) 22 | { 23 | saddr_ = {}; 24 | saddr_.sin_family = AF_INET; 25 | saddr_.sin_port = htons(port); 26 | if (inet_aton(address.c_str(), &saddr_.sin_addr) == 0) 27 | throw std::runtime_error("inet_aton failed for " + address); 28 | 29 | fd_ = socket(AF_INET, SOCK_DGRAM, 0); 30 | if (fd_ < 0) 31 | throw std::runtime_error("unable to open udp socket"); 32 | 33 | saddr_ptr_ = (const sockaddr *)&saddr_; // sendto needs these for udp 34 | sockaddr_in_size_ = sizeof(sockaddr_in); 35 | } 36 | else if (strcmp(protocol, "tcp") == 0) 37 | { 38 | // WARNING: I've not actually tried this yet... 39 | if (options->listen) 40 | { 41 | // We are the server. 
42 | int listen_fd = socket(AF_INET, SOCK_STREAM, 0); 43 | if (listen_fd < 0) 44 | throw std::runtime_error("unable to open listen socket"); 45 | 46 | sockaddr_in server_saddr = {}; 47 | server_saddr.sin_family = AF_INET; 48 | server_saddr.sin_addr.s_addr = INADDR_ANY; 49 | server_saddr.sin_port = htons(port); 50 | 51 | int enable = 1; 52 | if (setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable)) < 0) 53 | throw std::runtime_error("failed to setsockopt listen socket"); 54 | 55 | if (bind(listen_fd, (struct sockaddr *)&server_saddr, sizeof(server_saddr)) < 0) 56 | throw std::runtime_error("failed to bind listen socket"); 57 | listen(listen_fd, 1); 58 | 59 | LOG(2, "Waiting for client to connect..."); 60 | fd_ = accept(listen_fd, (struct sockaddr *)&saddr_, &sockaddr_in_size_); 61 | if (fd_ < 0) 62 | throw std::runtime_error("accept socket failed"); 63 | LOG(2, "Client connection accepted"); 64 | 65 | close(listen_fd); 66 | } 67 | else 68 | { 69 | // We are a client. 70 | saddr_ = {}; 71 | saddr_.sin_family = AF_INET; 72 | saddr_.sin_port = htons(port); 73 | if (inet_aton(address.c_str(), &saddr_.sin_addr) == 0) 74 | throw std::runtime_error("inet_aton failed for " + address); 75 | 76 | fd_ = socket(AF_INET, SOCK_STREAM, 0); 77 | if (fd_ < 0) 78 | throw std::runtime_error("unable to open client socket"); 79 | 80 | LOG(2, "Connecting to server..."); 81 | if (connect(fd_, (struct sockaddr *)&saddr_, sizeof(sockaddr_in)) < 0) 82 | throw std::runtime_error("connect to server failed"); 83 | LOG(2, "Connected"); 84 | } 85 | 86 | saddr_ptr_ = NULL; // sendto doesn't want these for tcp 87 | sockaddr_in_size_ = 0; 88 | } 89 | else 90 | throw std::runtime_error("unrecognised network protocol " + options->output); 91 | } 92 | 93 | NetOutput::~NetOutput() 94 | { 95 | close(fd_); 96 | } 97 | 98 | // Maximum size that sendto will accept. 
99 | constexpr size_t MAX_UDP_SIZE = 65507; 100 | 101 | void NetOutput::outputBuffer(void *mem, size_t size, int64_t /*timestamp_us*/, uint32_t /*flags*/) 102 | { 103 | LOG(2, "NetOutput: output buffer " << mem << " size " << size); 104 | size_t max_size = saddr_ptr_ ? MAX_UDP_SIZE : size; 105 | for (uint8_t *ptr = (uint8_t *)mem; size;) 106 | { 107 | size_t bytes_to_send = std::min(size, max_size); 108 | if (sendto(fd_, ptr, bytes_to_send, 0, saddr_ptr_, sockaddr_in_size_) < 0) 109 | throw std::runtime_error("failed to send data on socket"); 110 | ptr += bytes_to_send; 111 | size -= bytes_to_send; 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /output/net_output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * net_output.hpp - send output over network. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include "output.hpp" 13 | 14 | class NetOutput : public Output 15 | { 16 | public: 17 | NetOutput(VideoOptions const *options); 18 | ~NetOutput(); 19 | 20 | protected: 21 | void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) override; 22 | 23 | private: 24 | int fd_; 25 | sockaddr_in saddr_; 26 | const sockaddr *saddr_ptr_; 27 | socklen_t sockaddr_in_size_; 28 | }; 29 | -------------------------------------------------------------------------------- /output/output.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 
4 | * 5 | * output.cpp - video stream output base class 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include "circular_output.hpp" 12 | #include "file_output.hpp" 13 | #include "net_output.hpp" 14 | #include "output.hpp" 15 | 16 | Output::Output(VideoOptions const *options) 17 | : options_(options), fp_timestamps_(nullptr), state_(WAITING_KEYFRAME), time_offset_(0), last_timestamp_(0), 18 | buf_metadata_(std::cout.rdbuf()), of_metadata_() 19 | { 20 | if (!options->save_pts.empty()) 21 | { 22 | fp_timestamps_ = fopen(options->save_pts.c_str(), "w"); 23 | if (!fp_timestamps_) 24 | throw std::runtime_error("Failed to open timestamp file " + options->save_pts); 25 | fprintf(fp_timestamps_, "# timecode format v2\n"); 26 | } 27 | if (!options->metadata.empty()) 28 | { 29 | const std::string &filename = options_->metadata; 30 | 31 | if (filename.compare("-")) 32 | { 33 | of_metadata_.open(filename, std::ios::out); 34 | buf_metadata_ = of_metadata_.rdbuf(); 35 | start_metadata_output(buf_metadata_, options_->metadata_format); 36 | } 37 | } 38 | 39 | enable_ = !options->pause; 40 | } 41 | 42 | Output::~Output() 43 | { 44 | if (fp_timestamps_) 45 | fclose(fp_timestamps_); 46 | if (!options_->metadata.empty()) 47 | stop_metadata_output(buf_metadata_, options_->metadata_format); 48 | } 49 | 50 | void Output::Signal() 51 | { 52 | enable_ = !enable_; 53 | } 54 | 55 | void Output::OutputReady(void *mem, size_t size, int64_t timestamp_us, bool keyframe) 56 | { 57 | // When output is enabled, we may have to wait for the next keyframe. 58 | uint32_t flags = keyframe ? FLAG_KEYFRAME : FLAG_NONE; 59 | if (!enable_) 60 | state_ = DISABLED; 61 | else if (state_ == DISABLED) 62 | state_ = WAITING_KEYFRAME; 63 | if (state_ == WAITING_KEYFRAME && keyframe) 64 | state_ = RUNNING, flags |= FLAG_RESTART; 65 | if (state_ != RUNNING) 66 | return; 67 | 68 | // Frig the timestamps to be continuous after a pause. 
69 | if (flags & FLAG_RESTART) 70 | time_offset_ = timestamp_us - last_timestamp_; 71 | last_timestamp_ = timestamp_us - time_offset_; 72 | 73 | outputBuffer(mem, size, last_timestamp_, flags); 74 | 75 | // Save timestamps to a file, if that was requested. 76 | if (fp_timestamps_) 77 | { 78 | timestampReady(last_timestamp_); 79 | } 80 | 81 | if (!options_->metadata.empty()) 82 | { 83 | libcamera::ControlList metadata = metadata_queue_.front(); 84 | write_metadata(buf_metadata_, options_->metadata_format, metadata, !metadata_started_); 85 | metadata_started_ = true; 86 | metadata_queue_.pop(); 87 | } 88 | } 89 | 90 | void Output::timestampReady(int64_t timestamp) 91 | { 92 | fprintf(fp_timestamps_, "%" PRId64 ".%03" PRId64 "\n", timestamp / 1000, timestamp % 1000); 93 | if (options_->flush) 94 | fflush(fp_timestamps_); 95 | } 96 | 97 | void Output::outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags) 98 | { 99 | // Supply this so that a vanilla Output gives you an object that outputs no buffers. 
100 | } 101 | 102 | Output *Output::Create(VideoOptions const *options) 103 | { 104 | if (options->codec == "libav" || (options->codec == "h264" && options->GetPlatform() != Platform::VC4)) 105 | return new Output(options); 106 | 107 | if (strncmp(options->output.c_str(), "udp://", 6) == 0 || strncmp(options->output.c_str(), "tcp://", 6) == 0) 108 | return new NetOutput(options); 109 | else if (options->circular) 110 | return new CircularOutput(options); 111 | else if (!options->output.empty()) 112 | return new FileOutput(options); 113 | else 114 | return new Output(options); 115 | } 116 | 117 | void Output::MetadataReady(libcamera::ControlList &metadata) 118 | { 119 | if (options_->metadata.empty()) 120 | return; 121 | 122 | metadata_queue_.push(metadata); 123 | } 124 | 125 | void start_metadata_output(std::streambuf *buf, std::string fmt) 126 | { 127 | std::ostream out(buf); 128 | if (fmt == "json") 129 | out << "[" << std::endl; 130 | } 131 | 132 | void write_metadata(std::streambuf *buf, std::string fmt, libcamera::ControlList &metadata, bool first_write) 133 | { 134 | std::ostream out(buf); 135 | const libcamera::ControlIdMap *id_map = metadata.idMap(); 136 | if (fmt == "txt") 137 | { 138 | for (auto const &[id, val] : metadata) 139 | out << id_map->at(id)->name() << "=" << val.toString() << std::endl; 140 | out << std::endl; 141 | } 142 | else 143 | { 144 | if (!first_write) 145 | out << "," << std::endl; 146 | out << "{"; 147 | bool first_done = false; 148 | for (auto const &[id, val] : metadata) 149 | { 150 | std::string arg_quote = (val.toString().find('/') != std::string::npos) ? "\"" : ""; 151 | out << (first_done ? 
"," : "") << std::endl 152 | << " \"" << id_map->at(id)->name() << "\": " << arg_quote << val.toString() << arg_quote; 153 | first_done = true; 154 | } 155 | out << std::endl << "}"; 156 | } 157 | } 158 | 159 | void stop_metadata_output(std::streambuf *buf, std::string fmt) 160 | { 161 | std::ostream out(buf); 162 | if (fmt == "json") 163 | out << std::endl << "]" << std::endl; 164 | } 165 | -------------------------------------------------------------------------------- /output/output.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * output.hpp - video stream output base class 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include "core/video_options.hpp" 15 | 16 | class Output 17 | { 18 | public: 19 | static Output *Create(VideoOptions const *options); 20 | 21 | Output(VideoOptions const *options); 22 | virtual ~Output(); 23 | virtual void Signal(); // a derived class might redefine what this means 24 | void OutputReady(void *mem, size_t size, int64_t timestamp_us, bool keyframe); 25 | void MetadataReady(libcamera::ControlList &metadata); 26 | 27 | protected: 28 | enum Flag 29 | { 30 | FLAG_NONE = 0, 31 | FLAG_KEYFRAME = 1, 32 | FLAG_RESTART = 2 33 | }; 34 | virtual void outputBuffer(void *mem, size_t size, int64_t timestamp_us, uint32_t flags); 35 | virtual void timestampReady(int64_t timestamp); 36 | VideoOptions const *options_; 37 | FILE *fp_timestamps_; 38 | 39 | private: 40 | enum State 41 | { 42 | DISABLED = 0, 43 | WAITING_KEYFRAME = 1, 44 | RUNNING = 2 45 | }; 46 | State state_; 47 | std::atomic enable_; 48 | int64_t time_offset_; 49 | int64_t last_timestamp_; 50 | std::streambuf *buf_metadata_; 51 | std::ofstream of_metadata_; 52 | bool metadata_started_ = false; 53 | std::queue metadata_queue_; 54 | }; 55 | 56 | void start_metadata_output(std::streambuf *buf, 
std::string fmt); 57 | void write_metadata(std::streambuf *buf, std::string fmt, libcamera::ControlList &metadata, bool first_write); 58 | void stop_metadata_output(std::streambuf *buf, std::string fmt); 59 | -------------------------------------------------------------------------------- /post_processing_stages/README.md: -------------------------------------------------------------------------------- 1 | ## AcousticFocusStage – Documentation (English) 2 | 3 | **Purpose:** 4 | The AcousticFocusStage is a post-processing stage for rpicam-apps that provides acoustic feedback based on the libcamera Focus Figure of Merit (FoM). 5 | This allows you to find the optimal focus point of a manual lens **without needing to look at or interpret the preview image**. 6 | 7 | ### Features 8 | 9 | - Plays a sine tone via the Raspberry Pi’s audio output. 10 | - The tone’s frequency is mapped to the current Focus FoM value (frequency rises or falls as FoM rises or falls). 11 | - No visual contact with the preview is required. 12 | - The tone is triggered once per second for a configurable duration. 13 | - All parameters (frequency range, mapping type, duration, etc.) can be configured via JSON. 14 | - **Note:** You must use an external USB sound card, HDMI audio, or another supported audio device for this stage to function. 15 | 16 | ### Dependencies 17 | 18 | Install the following packages: 19 | 20 | ```sh 21 | sudo apt update 22 | sudo apt install sox libsox-fmt-all 23 | ``` 24 | 25 | ### Build Instructions 26 | 27 | 1. Add `acoustic_focus_stage.cpp` to your `post_processing_stages` directory. 28 | 2. Add the stage to your `meson.build`: 29 | ```meson 30 | core_postproc_src = files([ 31 | ... 32 | 'acoustic_focus_stage.cpp', 33 | ]) 34 | postproc_assets += files([ 35 | ... 36 | assets_dir / 'acoustic_focus.json', 37 | ]) 38 | ``` 39 | 3. 
Rebuild and install:
   ```sh
   meson compile -C build
   sudo meson install -C build
   ```

### Configuration

Create a config file `acoustic_focus.json` (example):

```json
{
    "acoustic_focus": [
        {
            "stage": "acoustic_focus",
            "minFoM": 1,
            "maxFoM": 2000,
            "minFreq": 300,
            "maxFreq": 5000,
            "duration": 0.1,
            "mapping": "log",
            "description": "mapping values are log (logarithmic) or linear"
        }
    ]
}
```

- `minFoM`, `maxFoM`: Range of Figure of Merit values to map.
- `minFreq`, `maxFreq`: Frequency range for the output tone (Hz).
- `duration`: Tone duration in seconds.
- `mapping`: `"log"` for logarithmic mapping, `"linear"` for linear mapping.

### Usage

1. Start rpicam-vid with:
   ```sh
   rpicam-vid --post-process-file assets/acoustic_focus.json
   ```
2. Adjust focus on your manual lens. The tone’s pitch will rise or fall as the focus improves or worsens.

---

## AcousticFocusStage – Dokumentation (Deutsch)

**Zweck:**
Die AcousticFocusStage ist eine Post-Processing-Stage für rpicam-apps, die akustisches Feedback auf Basis des libcamera Focus Figure of Merit (FoM) gibt.
Damit findest du den optimalen Fokuspunkt einer manuellen Linse **ohne auf die Vorschau schauen oder diese interpretieren zu müssen**.

### Funktionen

- Gibt einen Sinuston über den Audio-Ausgang des Raspberry Pi aus.
- Die Tonhöhe wird aus dem aktuellen Focus FoM-Wert berechnet (steigt oder fällt mit dem FoM).
- Kein Sichtkontakt zur Vorschau erforderlich.
- Der Ton wird einmal pro Sekunde für eine konfigurierbare Dauer ausgegeben.
- Alle Parameter (Frequenzbereich, Mapping-Typ, Dauer usw.) sind per JSON konfigurierbar.
- **Hinweis:** Es muss eine Soundausgabe-Hardware vorhanden sein.

### Abhängigkeiten

Installiere folgende Pakete:

```sh
sudo apt update
sudo apt install sox libsox-fmt-all
```

### Kompilierung

1. Lege `acoustic_focus_stage.cpp` im Verzeichnis `post_processing_stages` ab.
2. Ergänze die Stage in deiner `meson.build`:
   ```meson
   core_postproc_src = files([
       ...
       'acoustic_focus_stage.cpp',
   ])
   postproc_assets += files([
       ...
       assets_dir / 'acoustic_focus.json',
   ])
   ```
3. Baue und installiere neu:
   ```sh
   meson compile -C build
   sudo meson install -C build
   ```

### Konfiguration

Beispiel für `acoustic_focus.json`:

```json
{
    "acoustic_focus": [
        {
            "stage": "acoustic_focus",
            "minFoM": 1,
            "maxFoM": 2000,
            "minFreq": 400,
            "maxFreq": 2000,
            "duration": 0.1,
            "mapping": "log",
            "description": "mapping values are log (logarithmic) or linear"
        }
    ]
}
```

- `minFoM`, `maxFoM`: Bereich der Figure of Merit-Werte.
- `minFreq`, `maxFreq`: Frequenzbereich für den Ton (Hz).
- `duration`: Tondauer in Sekunden.
- `mapping`: `"log"` für logarithmisch, `"linear"` für linear.

### Verwendung

1. Starte rpicam-vid mit:
   ```sh
   rpicam-vid --post-process-file assets/acoustic_focus.json
   ```
2. Drehe am Fokusring deiner manuellen Linse. Die Tonhöhe steigt oder fällt, je nach Fokusqualität.

---

**Hinweis:**
Die Stage ist ein reines Hilfsmittel für das manuelle Fokussieren und benötigt keinen Blickkontakt zum Monitor!
-------------------------------------------------------------------------------- /post_processing_stages/acoustic_focus_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2025, Kletternaut 4 | * 5 | * acoustic_focus_stage.cpp - acoustic feedback for autofocus FoM 6 | * 7 | * This stage provides acoustic feedback based on the libcamera Autofocus Figure of Merit (FoM). 8 | * The FoM is mapped to an audible frequency, allowing users to hear focus quality changes in real time. 9 | * No visual contact with the preview is required, and the preview does not need to be interpreted. 10 | * As the FoM rises or falls, the tone frequency also rises or falls accordingly. 11 | * Various parameters (e.g. frequency range, mapping type, duration) can be configured via JSON. 12 | * Note: Sound output hardware must be present for this stage to function. 13 | */ 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | 25 | #include "core/rpicam_app.hpp" 26 | #include "post_processing_stages/post_processing_stage.hpp" 27 | 28 | class AcousticFocusStage : public PostProcessingStage 29 | { 30 | public: 31 | AcousticFocusStage(RPiCamApp *app) : PostProcessingStage(app) 32 | { 33 | } 34 | 35 | char const *Name() const override { return "acoustic_focus"; } 36 | 37 | void Read(boost::property_tree::ptree const ¶ms) override 38 | { 39 | min_fom_ = params.get("minFoM", 1); 40 | max_fom_ = params.get("maxFoM", 3000); 41 | min_freq_ = params.get("minFreq", 300); 42 | max_freq_ = params.get("maxFreq", 3000); 43 | duration_ = params.get("duration", 0.1); 44 | mapping_ = params.get("mapping", "log"); 45 | } 46 | 47 | bool Process(CompletedRequestPtr &completed_request) override 48 | { 49 | static auto last = std::chrono::steady_clock::now(); 50 | 51 | auto now = std::chrono::steady_clock::now(); 52 | auto 
ms = std::chrono::duration_cast(now - last).count(); 53 | 54 | if (ms >= 1000) 55 | { 56 | last = now; 57 | 58 | auto fom = completed_request->metadata.get(libcamera::controls::FocusFoM); 59 | if (fom) 60 | { 61 | int freq = min_freq_; 62 | if (mapping_ == "log") 63 | { 64 | double norm = std::log(std::max(*fom, min_fom_)) - std::log(min_fom_); 65 | double denom = std::log(max_fom_) - std::log(min_fom_); 66 | freq = min_freq_ + static_cast(norm / denom * (max_freq_ - min_freq_)); 67 | } 68 | else 69 | { // linear 70 | double norm = std::max(*fom, min_fom_) - min_fom_; 71 | double denom = max_fom_ - min_fom_; 72 | freq = min_freq_ + static_cast(norm / denom * (max_freq_ - min_freq_)); 73 | } 74 | freq = std::min(max_freq_, std::max(min_freq_, freq)); 75 | 76 | std::ostringstream oss; 77 | oss << std::fixed << std::setprecision(6) << duration_; 78 | std::string duration_str = oss.str(); 79 | 80 | std::string cmd = "/usr/bin/play -nq -t alsa synth " + duration_str + " sine " + std::to_string(freq); 81 | std::thread([](std::string cmd) { [[maybe_unused]] int i = system(cmd.c_str()); }, cmd).detach(); 82 | } 83 | } 84 | return false; 85 | } 86 | 87 | private: 88 | int min_fom_ = 1, max_fom_ = 2000; 89 | int min_freq_ = 400, max_freq_ = 2000; 90 | double duration_ = 0.1; 91 | std::string mapping_ = "log"; 92 | }; 93 | 94 | static PostProcessingStage *Create(RPiCamApp *app) 95 | { 96 | return new AcousticFocusStage(app); 97 | } 98 | 99 | static RegisterStage reg("acoustic_focus", &Create); 100 | -------------------------------------------------------------------------------- /post_processing_stages/annotate_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * annotate_cv_stage.cpp - add text annotation to image 6 | */ 7 | 8 | // The text string can include the % directives supported by FrameInfo. 
9 | 10 | #include 11 | 12 | #include 13 | 14 | #include "core/frame_info.hpp" 15 | #include "core/rpicam_app.hpp" 16 | 17 | #include "post_processing_stages/post_processing_stage.hpp" 18 | 19 | #include "opencv2/core.hpp" 20 | #include "opencv2/imgproc.hpp" 21 | 22 | using namespace cv; 23 | 24 | using Stream = libcamera::Stream; 25 | 26 | class AnnotateCvStage : public PostProcessingStage 27 | { 28 | public: 29 | AnnotateCvStage(RPiCamApp *app) : PostProcessingStage(app) {} 30 | 31 | char const *Name() const override; 32 | 33 | void Read(boost::property_tree::ptree const ¶ms) override; 34 | 35 | void Configure() override; 36 | 37 | bool Process(CompletedRequestPtr &completed_request) override; 38 | 39 | private: 40 | Stream *stream_; 41 | StreamInfo info_; 42 | std::string text_; 43 | int fg_; 44 | int bg_; 45 | double scale_; 46 | int thickness_; 47 | double alpha_; 48 | double adjusted_scale_; 49 | int adjusted_thickness_; 50 | }; 51 | 52 | #define NAME "annotate_cv" 53 | 54 | char const *AnnotateCvStage::Name() const 55 | { 56 | return NAME; 57 | } 58 | 59 | void AnnotateCvStage::Read(boost::property_tree::ptree const ¶ms) 60 | { 61 | text_ = params.get("text"); 62 | fg_ = params.get("fg", 255); 63 | bg_ = params.get("bg", 0); 64 | scale_ = params.get("scale", 1.0); 65 | thickness_ = params.get("thickness", 2); 66 | alpha_ = params.get("alpha", 0.5); 67 | } 68 | 69 | void AnnotateCvStage::Configure() 70 | { 71 | stream_ = app_->GetMainStream(); 72 | if (!stream_ || stream_->configuration().pixelFormat != libcamera::formats::YUV420) 73 | throw std::runtime_error("AnnotateCvStage: only YUV420 format supported"); 74 | info_ = app_->GetStreamInfo(stream_); 75 | 76 | // Adjust the scale and thickness according to the image size, so that the relative 77 | // size is preserved across different camera modes. Note that the thickness can get 78 | // rather harshly quantised, not much we can do about that. 
79 | adjusted_scale_ = scale_ * info_.width / 1200; 80 | adjusted_thickness_ = std::max(thickness_ * info_.width / 700, 1u); 81 | } 82 | 83 | bool AnnotateCvStage::Process(CompletedRequestPtr &completed_request) 84 | { 85 | BufferWriteSync w(app_, completed_request->buffers[stream_]); 86 | libcamera::Span buffer = w.Get()[0]; 87 | FrameInfo info(completed_request); 88 | 89 | // Other post-processing stages can supply metadata to update the text. 90 | completed_request->post_process_metadata.Get("annotate.text", text_); 91 | std::string text = info.ToString(text_); 92 | char text_with_date[256]; 93 | time_t t = time(NULL); 94 | tm *tm_ptr = localtime(&t); 95 | if (strftime(text_with_date, sizeof(text_with_date), text.c_str(), tm_ptr) != 0) 96 | text = std::string(text_with_date); 97 | 98 | uint8_t *ptr = (uint8_t *)buffer.data(); 99 | Mat im(info_.height, info_.width, CV_8U, ptr, info_.stride); 100 | int font = FONT_HERSHEY_SIMPLEX; 101 | 102 | int baseline = 0; 103 | Size size = getTextSize(text, font, adjusted_scale_, adjusted_thickness_, &baseline); 104 | 105 | // Can't find a handy "draw rectangle with alpha" function... 
106 | for (int y = 0; y < size.height + baseline; y++, ptr += info_.stride) 107 | { 108 | for (int x = 0; x < size.width; x++) 109 | ptr[x] = bg_ * alpha_ + (1 - alpha_) * ptr[x]; 110 | } 111 | putText(im, text, Point(0, size.height), font, adjusted_scale_, fg_, adjusted_thickness_, 0); 112 | 113 | return false; 114 | } 115 | 116 | static PostProcessingStage *Create(RPiCamApp *app) 117 | { 118 | return new AnnotateCvStage(app); 119 | } 120 | 121 | static RegisterStage reg(NAME, &Create); 122 | -------------------------------------------------------------------------------- /post_processing_stages/hailo/hailo_classifier.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2024, Raspberry Pi Ltd 4 | * 5 | * hailo_classifier.cpp - Hailo inference for classifier network 6 | */ 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | #include "classification/classification.hpp" 18 | 19 | #include "core/rpicam_app.hpp" 20 | 21 | #include "hailo_postprocessing_stage.hpp" 22 | 23 | using Size = libcamera::Size; 24 | using PostProcFuncPtr = void (*)(HailoROIPtr); 25 | 26 | #define NAME "hailo_classifier" 27 | #define POSTPROC_LIB "libclassification.so" 28 | 29 | class HailoClassifier : public HailoPostProcessingStage 30 | { 31 | public: 32 | HailoClassifier(RPiCamApp *app); 33 | 34 | char const *Name() const override; 35 | 36 | void Read(boost::property_tree::ptree const ¶ms) override; 37 | 38 | void Configure() override; 39 | 40 | bool Process(CompletedRequestPtr &completed_request) override; 41 | 42 | private: 43 | std::vector runInference(uint8_t *frame); 44 | 45 | PostProcessingLib postproc_; 46 | 47 | // Config params 48 | float threshold_; 49 | bool do_softmax_; 50 | }; 51 | 52 | HailoClassifier::HailoClassifier(RPiCamApp *app) 53 | : HailoPostProcessingStage(app), postproc_(PostProcLibDir(POSTPROC_LIB)) 
54 | { 55 | } 56 | 57 | char const *HailoClassifier::Name() const 58 | { 59 | return NAME; 60 | } 61 | 62 | void HailoClassifier::Read(boost::property_tree::ptree const ¶ms) 63 | { 64 | threshold_ = params.get("threshold", 0.5f); 65 | do_softmax_ = params.get("do_softmax", true); 66 | 67 | HailoPostProcessingStage::Read(params); 68 | } 69 | 70 | void HailoClassifier::Configure() 71 | { 72 | HailoPostProcessingStage::Configure(); 73 | } 74 | 75 | bool HailoClassifier::Process(CompletedRequestPtr &completed_request) 76 | { 77 | if (!HailoPostProcessingStage::Ready()) 78 | { 79 | LOG_ERROR("HailoRT not ready!"); 80 | return false; 81 | } 82 | 83 | BufferReadSync r(app_, completed_request->buffers[low_res_stream_]); 84 | libcamera::Span buffer = r.Get()[0]; 85 | std::shared_ptr input; 86 | uint8_t *input_ptr; 87 | 88 | if (low_res_info_.pixel_format == libcamera::formats::YUV420) 89 | { 90 | StreamInfo rgb_info; 91 | rgb_info.width = InputTensorSize().width; 92 | rgb_info.height = InputTensorSize().height; 93 | rgb_info.stride = rgb_info.width * 3; 94 | 95 | input = allocator_.Allocate(rgb_info.stride * rgb_info.height); 96 | input_ptr = input.get(); 97 | 98 | Yuv420ToRgb(input.get(), buffer.data(), low_res_info_, rgb_info); 99 | } 100 | else if (low_res_info_.pixel_format == libcamera::formats::RGB888 || 101 | low_res_info_.pixel_format == libcamera::formats::BGR888) 102 | { 103 | unsigned int stride = low_res_info_.width * 3; 104 | 105 | // If the stride shows we have padding on the right edge of the buffer, we must copy it out to another buffer 106 | // without padding. 
107 | if (low_res_info_.stride != stride) 108 | { 109 | input = allocator_.Allocate(stride * low_res_info_.height); 110 | input_ptr = input.get(); 111 | 112 | for (unsigned int i = 0; i < low_res_info_.height; i++) 113 | memcpy(input_ptr + i * stride, buffer.data() + i * low_res_info_.stride, stride); 114 | } 115 | else 116 | input_ptr = buffer.data(); 117 | } 118 | else 119 | { 120 | LOG_ERROR("Unexpected lores format " << low_res_info_.pixel_format); 121 | return false; 122 | } 123 | 124 | std::vector results = runInference(input_ptr); 125 | if (results.size()) 126 | { 127 | LOG(2, "Result: " << results[0]->get_label()); 128 | completed_request->post_process_metadata.Set("annotate.text", results[0]->get_label()); 129 | } 130 | 131 | return false; 132 | } 133 | 134 | std::vector HailoClassifier::runInference(uint8_t *frame) 135 | { 136 | hailort::AsyncInferJob job; 137 | std::vector output_tensors; 138 | hailo_status status; 139 | 140 | status = HailoPostProcessingStage::DispatchJob(frame, job, output_tensors); 141 | if (status != HAILO_SUCCESS) 142 | return {}; 143 | 144 | // Wait for job completion. 
145 | status = job.wait(1s); 146 | if (status != HAILO_SUCCESS) 147 | { 148 | LOG_ERROR("Failed to wait for inference to finish, status = " << status); 149 | return {}; 150 | } 151 | 152 | // Postprocess tensor 153 | PostProcFuncPtr filter = reinterpret_cast(postproc_.GetSymbol("resnet_v1_50")); 154 | if (!filter) 155 | return {}; 156 | 157 | HailoROIPtr roi = MakeROI(output_tensors); 158 | filter(roi); 159 | std::vector detections = hailo_common::get_hailo_classifications(roi); 160 | 161 | return detections; 162 | } 163 | 164 | static PostProcessingStage *Create(RPiCamApp *app) 165 | { 166 | return new HailoClassifier(app); 167 | } 168 | 169 | static RegisterStage reg(NAME, &Create); 170 | -------------------------------------------------------------------------------- /post_processing_stages/hailo/hailo_postprocessing_stage.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2024, Raspberry Pi Ltd 4 | * 5 | * hailo_postprocessing.hpp - Hailo inference postprocessing stage base class. 
6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | 20 | #include 21 | #include "hailo_objects.hpp" 22 | 23 | #include "core/rpicam_app.hpp" 24 | #include "post_processing_stages/post_processing_stage.hpp" 25 | 26 | #include "hailo_postproc_lib.h" 27 | 28 | class Allocator 29 | { 30 | public: 31 | Allocator(); 32 | ~Allocator(); 33 | 34 | void Reset(); 35 | 36 | std::shared_ptr Allocate(unsigned int size); 37 | 38 | private: 39 | void free(uint8_t *ptr); 40 | 41 | struct AllocInfo 42 | { 43 | AllocInfo(uint8_t *_ptr, unsigned int _size, bool _free) 44 | : ptr(_ptr), size(_size), free(_free) 45 | { 46 | } 47 | 48 | uint8_t *ptr; 49 | unsigned int size; 50 | bool free; 51 | }; 52 | 53 | std::vector alloc_info_; 54 | std::mutex lock_; 55 | }; 56 | 57 | class OutTensor 58 | { 59 | public: 60 | std::shared_ptr data; 61 | std::string name; 62 | hailo_quant_info_t quant_info; 63 | hailo_3d_image_shape_t shape; 64 | hailo_format_t format; 65 | 66 | OutTensor(std::shared_ptr data, const std::string &name, const hailo_quant_info_t &quant_info, 67 | const hailo_3d_image_shape_t &shape, hailo_format_t format) 68 | : data(std::move(data)), name(name), quant_info(quant_info), shape(shape), format(format) 69 | { 70 | } 71 | 72 | ~OutTensor() 73 | { 74 | } 75 | 76 | friend std::ostream &operator<<(std::ostream &os, const OutTensor &t) 77 | { 78 | os << "OutTensor: h " << t.shape.height << ", w " << t.shape.width << ", c " << t.shape.features; 79 | return os; 80 | } 81 | 82 | static bool SortFunction(const OutTensor &l, const OutTensor &r) 83 | { 84 | return l.shape.width < r.shape.width; 85 | } 86 | }; 87 | 88 | enum class MsgType 89 | { 90 | Display, 91 | Quit 92 | }; 93 | 94 | using RgbImagePtr = std::shared_ptr; 95 | 96 | struct Msg 97 | { 98 | Msg(MsgType const &t) : type(t) {} 99 | template 100 | Msg(MsgType const &t, T p, const libcamera::Size &sz, const std::string &title) 
101 | : type(t), payload(std::forward(p)), size(sz), window_title(title) 102 | { 103 | } 104 | MsgType type; 105 | RgbImagePtr payload; 106 | libcamera::Size size; 107 | std::string window_title; 108 | }; 109 | 110 | class MessageQueue 111 | { 112 | public: 113 | template 114 | void Post(U &&msg) 115 | { 116 | std::unique_lock lock(mutex_); 117 | queue_.push_back(std::forward(msg)); 118 | cond_.notify_one(); 119 | } 120 | Msg Wait() 121 | { 122 | std::unique_lock lock(mutex_); 123 | cond_.wait(lock, [this] { return !queue_.empty(); }); 124 | Msg msg = std::move(queue_.front()); 125 | queue_.pop_front(); 126 | return msg; 127 | } 128 | void Clear() 129 | { 130 | std::unique_lock lock(mutex_); 131 | queue_ = {}; 132 | } 133 | void Clear(const std::string &display) 134 | { 135 | std::unique_lock lock(mutex_); 136 | auto it = std::remove_if(queue_.begin(), queue_.end(), 137 | [&display](const Msg &m) { return m.window_title == display; }); 138 | queue_.erase(it, queue_.end()); 139 | } 140 | private: 141 | std::deque queue_; 142 | std::mutex mutex_; 143 | std::condition_variable cond_; 144 | }; 145 | 146 | class HailoPostProcessingStage : public PostProcessingStage 147 | { 148 | public: 149 | HailoPostProcessingStage(RPiCamApp *app); 150 | ~HailoPostProcessingStage(); 151 | 152 | void Read(boost::property_tree::ptree const ¶ms) override; 153 | 154 | void Configure() override; 155 | 156 | protected: 157 | bool Ready() const 158 | { 159 | return init_ && low_res_stream_ && output_stream_; 160 | } 161 | 162 | static std::string PostProcLibDir(const std::string &lib) 163 | { 164 | return std::string(HAILO_POSTPROC_LIB_DIR) + "/" + lib; 165 | } 166 | 167 | const libcamera::Size &InputTensorSize() const 168 | { 169 | return input_tensor_size_; 170 | } 171 | 172 | hailo_status DispatchJob(const uint8_t *input, hailort::AsyncInferJob &job, std::vector &output_tensors); 173 | HailoROIPtr MakeROI(const std::vector &output_tensors) const; 174 | 175 | libcamera::Rectangle 
ConvertInferenceCoordinates(const std::vector &coords, 176 | const std::vector &scaler_crops) const; 177 | 178 | libcamera::Stream *low_res_stream_; 179 | libcamera::Stream *output_stream_; 180 | libcamera::Stream *raw_stream_; 181 | StreamInfo low_res_info_; 182 | StreamInfo output_stream_info_; 183 | 184 | Allocator allocator_; 185 | 186 | hailort::VDevice *vdevice_; 187 | std::shared_ptr infer_model_; 188 | std::shared_ptr configured_infer_model_; 189 | MessageQueue &msg_queue_; 190 | 191 | private: 192 | int configureHailoRT(); 193 | void displayThread(); 194 | 195 | std::mutex lock_; 196 | bool init_ = false; 197 | std::string hef_file_, hef_file_8_, hef_file_8L_; 198 | hailort::ConfiguredInferModel::Bindings bindings_; 199 | std::chrono::time_point last_frame_; 200 | libcamera::Size input_tensor_size_; 201 | hailo_device_identity_t device_id_; 202 | }; 203 | -------------------------------------------------------------------------------- /post_processing_stages/hailo/meson.build: -------------------------------------------------------------------------------- 1 | hailo_tappas_lib_dir = hailo_tappas_dep.get_variable('tappas_libdir') 2 | 3 | # Find the Tappas postprocessing *.so lib dir - this is different depending on the Tappas version. 
4 | hailo_tappas_posproc_libdir = '' 5 | pp_dirs = ['post-process', 'post_processes'] 6 | foreach dir : pp_dirs 7 | if fs.is_dir(hailo_tappas_lib_dir / dir) 8 | hailo_tappas_posproc_libdir = hailo_tappas_lib_dir / dir 9 | break 10 | endif 11 | endforeach 12 | 13 | if hailo_tappas_posproc_libdir == '' 14 | error('Cannot find Hailo Tappas postprocessing libdir') 15 | endif 16 | 17 | hailo_conf_data = configuration_data() 18 | hailo_conf_data.set('HAILO_POSTPROC_LIB_DIR', '"' + hailo_tappas_posproc_libdir + '"') 19 | hailo_postproc_lib = configure_file(output : 'hailo_postproc_lib.h', configuration : hailo_conf_data) 20 | 21 | hailo_deps = [hailort_dep, hailo_tappas_dep, libcamera_dep, opencv_dep] 22 | 23 | # Hailo Tappas PP config files to be installed 24 | hailopp_config_files = files([ 25 | assets_dir / 'yolov5_personface.json', 26 | ]) 27 | 28 | hailo_postprocessing_src = files([ 29 | # Base stage 30 | 'hailo_postprocessing_stage.cpp', 31 | # Yolo 5/6/8/x inference 32 | 'hailo_yolo_inference.cpp', 33 | # Image classifier 34 | 'hailo_classifier.cpp', 35 | # Pose estimation 36 | 'hailo_yolov8_pose.cpp', 37 | # Instance segmentation 38 | 'hailo_yolov5_segmentation.cpp', 39 | # Face landmarking 40 | 'hailo_scrfd.cpp', 41 | ]) 42 | 43 | postproc_assets += files([ 44 | assets_dir / 'hailo_classifier.json', 45 | assets_dir / 'hailo_yolov5_personface.json', 46 | assets_dir / 'hailo_yolov6_inference.json', 47 | assets_dir / 'hailo_yolov8_inference.json', 48 | assets_dir / 'hailo_yolox_inference.json', 49 | assets_dir / 'hailo_yolov8_pose.json', 50 | assets_dir / 'hailo_yolov5_segmentation.json', 51 | assets_dir / 'hailo_scrfd.json', 52 | assets_dir / 'hailo_pose_inf_fl.json', 53 | ]) 54 | 55 | hailo_cpp_arguments = ['-Wno-ignored-qualifiers', '-Wno-unused-parameter', '-Wno-extra'] 56 | 57 | hailo_postprocessing_lib = shared_module('hailo-postproc', hailo_postprocessing_src, 58 | dependencies : hailo_deps, 59 | cpp_args : hailo_cpp_arguments, 60 | include_directories : 
'../..', 61 | install : true, 62 | install_dir : posproc_libdir, 63 | name_prefix : '', 64 | ) 65 | 66 | install_data(hailopp_config_files, 67 | install_dir : get_option('datadir') / 'hailo-models') 68 | 69 | if get_option('download_hailo_models') 70 | download_script = meson.project_source_root() / 'utils' / 'download-hailo-models.sh' 71 | custom_target('hailo-models', 72 | command : [ download_script, '@OUTPUT@' ], 73 | output : 'hailo-models', 74 | install : true, 75 | install_dir : get_option('datadir'), 76 | ) 77 | endif 78 | -------------------------------------------------------------------------------- /post_processing_stages/histogram.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * histogram.cpp - histogram calculations 6 | */ 7 | #include 8 | #include 9 | 10 | #include "histogram.hpp" 11 | 12 | uint64_t Histogram::CumulativeFreq(double bin) const 13 | { 14 | if (bin <= 0) 15 | return 0; 16 | else if (bin >= Bins()) 17 | return Total(); 18 | int b = (int)bin; 19 | return cumulative_[b] + 20 | (bin - b) * (cumulative_[b + 1] - cumulative_[b]); 21 | } 22 | 23 | double Histogram::Quantile(double q, int first, int last) const 24 | { 25 | if (first == -1) 26 | first = 0; 27 | if (last == -1) 28 | last = cumulative_.size() - 2; 29 | assert(first <= last); 30 | uint64_t items = q * Total(); 31 | while (first < last) // binary search to find the right bin 32 | { 33 | int middle = (first + last) / 2; 34 | if (cumulative_[middle + 1] > items) 35 | last = middle; // between first and middle 36 | else 37 | first = middle + 1; // after middle 38 | } 39 | assert(items >= cumulative_[first] && items <= cumulative_[last + 1]); 40 | double frac = cumulative_[first + 1] == cumulative_[first] ? 
0 41 | : (double)(items - cumulative_[first]) / 42 | (cumulative_[first + 1] - cumulative_[first]); 43 | return first + frac; 44 | } 45 | 46 | double Histogram::InterQuantileMean(double q_lo, double q_hi) const 47 | { 48 | assert(q_hi > q_lo); 49 | double p_lo = Quantile(q_lo); 50 | double p_hi = Quantile(q_hi, (int)p_lo); 51 | double sum_bin_freq = 0, cumul_freq = 0; 52 | for (double p_next = floor(p_lo) + 1.0; p_next <= ceil(p_hi); 53 | p_lo = p_next, p_next += 1.0) { 54 | int bin = floor(p_lo); 55 | double freq = (cumulative_[bin + 1] - cumulative_[bin]) * 56 | (std::min(p_next, p_hi) - p_lo); 57 | sum_bin_freq += bin * freq; 58 | cumul_freq += freq; 59 | } 60 | // add 0.5 to give an average for bin mid-points 61 | return sum_bin_freq / cumul_freq + 0.5; 62 | } 63 | -------------------------------------------------------------------------------- /post_processing_stages/histogram.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * histogram.hpp - histogram calculation interface 6 | */ 7 | #pragma once 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | // A simple histogram class, for use in particular to find "quantiles" and 14 | // averages between "quantiles". 15 | 16 | class Histogram 17 | { 18 | public: 19 | template Histogram(T *histogram, int num) 20 | { 21 | assert(num); 22 | cumulative_.reserve(num + 1); 23 | cumulative_.push_back(0); 24 | for (int i = 0; i < num; i++) 25 | cumulative_.push_back(cumulative_.back() + 26 | histogram[i]); 27 | } 28 | uint32_t Bins() const { return cumulative_.size() - 1; } 29 | uint64_t Total() const { return cumulative_[cumulative_.size() - 1]; } 30 | // Cumulative frequency up to a (fractional) point in a bin. 31 | uint64_t CumulativeFreq(double bin) const; 32 | // Return the (fractional) bin of the point q (0 <= q <= 1) through the 33 | // histogram. 
Optionally provide limits to help. 34 | double Quantile(double q, int first = -1, int last = -1) const; 35 | // Return the average histogram bin value between the two quantiles. 36 | double InterQuantileMean(double q_lo, double q_hi) const; 37 | 38 | private: 39 | std::vector cumulative_; 40 | }; 41 | -------------------------------------------------------------------------------- /post_processing_stages/imx500/imx500_post_processing_stage.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2024, Raspberry Pi Ltd 4 | * 5 | * imx500_post_rpocessing_stage.hpp - IMX500 post processing stage base class 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | #include 17 | 18 | #include "core/completed_request.hpp" 19 | #include "core/rpicam_app.hpp" 20 | #include "post_processing_stages/post_processing_stage.hpp" 21 | 22 | class IMX500PostProcessingStage : public PostProcessingStage 23 | { 24 | public: 25 | static constexpr unsigned int Max_Num_Tensors = 16; 26 | static constexpr unsigned int Max_Num_Dimensions = 16; 27 | static constexpr unsigned int Network_Name_Len = 64; 28 | 29 | struct OutputTensorInfo 30 | { 31 | uint32_t tensor_data_num; 32 | uint32_t num_dimensions; 33 | uint16_t size[Max_Num_Dimensions]; 34 | }; 35 | 36 | struct CnnOutputTensorInfo 37 | { 38 | char network_name[Network_Name_Len]; 39 | uint32_t num_tensors; 40 | OutputTensorInfo info[Max_Num_Tensors]; 41 | }; 42 | 43 | IMX500PostProcessingStage(RPiCamApp *app); 44 | ~IMX500PostProcessingStage(); 45 | 46 | void Read(boost::property_tree::ptree const ¶ms) override; 47 | 48 | void Configure() override; 49 | 50 | bool Process(CompletedRequestPtr &completed_request) override; 51 | 52 | libcamera::Rectangle ConvertInferenceCoordinates(const std::vector &coords, 53 | const libcamera::Rectangle &scalerCrop) const; 54 | void SetInferenceRoiAbs(const 
libcamera::Rectangle &roi_) const; 55 | void SetInferenceRoiAuto(const unsigned int width, const unsigned int height) const; 56 | void ShowFwProgressBar(); 57 | 58 | protected: 59 | libcamera::Rectangle full_sensor_resolution_ = libcamera::Rectangle(0, 0, 4056, 3040); 60 | libcamera::Stream *output_stream_; 61 | libcamera::Stream *raw_stream_; 62 | 63 | private: 64 | void doProgressBar(); 65 | 66 | int device_fd_; 67 | std::ifstream fw_progress_; 68 | std::ifstream fw_progress_chunk_; 69 | 70 | std::ofstream input_tensor_file_; 71 | unsigned int num_input_tensors_saved_; 72 | unsigned int save_frames_; 73 | std::vector norm_val_; 74 | std::vector norm_shift_; 75 | std::vector div_val_; 76 | unsigned int div_shift_; 77 | std::mutex lock_; 78 | }; 79 | -------------------------------------------------------------------------------- /post_processing_stages/imx500/meson.build: -------------------------------------------------------------------------------- 1 | imx500_postprocessing_src = files([ 2 | # Base stage 3 | 'imx500_post_processing_stage.cpp', 4 | # Object detection 5 | 'imx500_object_detection.cpp', 6 | # Posenet 7 | 'imx500_posenet.cpp', 8 | ]) 9 | 10 | postproc_assets += files([ 11 | assets_dir / 'imx500_mobilenet_ssd.json', 12 | assets_dir / 'imx500_posenet.json', 13 | ]) 14 | 15 | imx500_postprocessing_lib = shared_module('imx500-postproc', imx500_postprocessing_src, 16 | dependencies : libcamera_dep, 17 | include_directories : '../..', 18 | install : true, 19 | install_dir : posproc_libdir, 20 | name_prefix : '', 21 | ) 22 | 23 | if get_option('download_imx500_models') 24 | download_script = meson.project_source_root() / 'utils' / 'download-imx500-models.sh' 25 | custom_target('imx500-models', 26 | command : [ download_script, '@OUTPUT@' ], 27 | output : 'imx500-models', 28 | install : true, 29 | install_dir : get_option('datadir'), 30 | ) 31 | endif 32 | -------------------------------------------------------------------------------- 
/post_processing_stages/meson.build: -------------------------------------------------------------------------------- 1 | posproc_libdir = get_option('prefix') / get_option('libdir') / 'rpicam-apps-postproc' 2 | 3 | conf_data = configuration_data() 4 | conf_data.set('POSTPROC_LIB_DIR', '"' + posproc_libdir + '"') 5 | configure_file(output : 'postproc_lib.h', configuration : conf_data) 6 | 7 | # JSON (and other assets) 8 | assets_dir = meson.project_source_root() / 'assets' 9 | postproc_assets = [] 10 | 11 | # Core postprocessing framework files. 12 | rpicam_app_src += files([ 13 | 'histogram.cpp', 14 | 'post_processing_stage.cpp', 15 | 'pwl.cpp', 16 | ]) 17 | 18 | # Core postprocessing stages. 19 | core_postproc_src = files([ 20 | 'hdr_stage.cpp', 21 | 'motion_detect_stage.cpp', 22 | 'negate_stage.cpp', 23 | 'acoustic_focus_stage.cpp', 24 | ]) 25 | 26 | # Core assets 27 | postproc_assets += files([ 28 | assets_dir / 'hdr.json', 29 | assets_dir / 'motion_detect.json', 30 | assets_dir / 'negate.json', 31 | assets_dir / 'acoustic_focus.json', 32 | ]) 33 | 34 | core_postproc_lib = shared_module('core-postproc', core_postproc_src, 35 | include_directories : '../', 36 | dependencies : libcamera_dep, 37 | cpp_args : cpp_arguments, 38 | install : true, 39 | install_dir : posproc_libdir, 40 | name_prefix : '', 41 | ) 42 | 43 | # OpenCV based postprocessing stages. 
44 | enable_opencv = false 45 | opencv_dep = dependency('opencv4', required : get_option('enable_opencv')) 46 | if opencv_dep.found() 47 | opencv_postproc_src = files([ 48 | 'sobel_cv_stage.cpp', 49 | 'face_detect_cv_stage.cpp', 50 | 'annotate_cv_stage.cpp', 51 | 'plot_pose_cv_stage.cpp', 52 | 'object_detect_draw_cv_stage.cpp', 53 | ]) 54 | 55 | # OpenCV assets 56 | postproc_assets += files([ 57 | assets_dir / 'sobel_cv.json', 58 | assets_dir / 'face_detect_cv.json', 59 | assets_dir / 'annotate_cv.json', 60 | ]) 61 | 62 | opencv_postproc_lib = shared_module('opencv-postproc', opencv_postproc_src, 63 | include_directories : '../', 64 | dependencies : [libcamera_dep, opencv_dep], 65 | cpp_args : cpp_arguments, 66 | install : true, 67 | install_dir : posproc_libdir, 68 | name_prefix : '', 69 | ) 70 | enable_opencv = true 71 | endif 72 | 73 | # TFlite based postprocessing stages. 74 | enable_tflite = false 75 | tflite_dep = dependency('tensorflow-lite', required : get_option('enable_tflite')) 76 | if tflite_dep.found() 77 | tflite_postproc_src = files([ 78 | 'tf_stage.cpp', 79 | 'object_classify_tf_stage.cpp', 80 | 'object_detect_tf_stage.cpp', 81 | 'pose_estimation_tf_stage.cpp', 82 | 'segmentation_tf_stage.cpp', 83 | ]) 84 | 85 | # TFlite assets 86 | postproc_assets += files([ 87 | assets_dir / 'object_classify_tf.json', 88 | assets_dir / 'object_detect_tf.json', 89 | assets_dir / 'pose_estimation_tf.json', 90 | assets_dir / 'segmentation_tf.json', 91 | ]) 92 | 93 | tflite_postproc_lib = shared_module('tflite-postproc', tflite_postproc_src, 94 | include_directories : '../', 95 | dependencies : [libcamera_dep, tflite_dep], 96 | cpp_args : cpp_arguments, 97 | install : true, 98 | install_dir : posproc_libdir, 99 | name_prefix : '', 100 | ) 101 | enable_tflite = true 102 | endif 103 | 104 | # Hailo postprocessing stages. 
105 | enable_hailo = false 106 | hailort_dep = dependency('HailoRT', modules : ['HailoRT::libhailort'], version: '>=4.18.0', 107 | required : get_option('enable_hailo')) 108 | hailo_tappas_dep = dependency('hailo-tappas-core', version: '>=3.31.0', 109 | required : get_option('enable_hailo')) 110 | if hailort_dep.found() and hailo_tappas_dep.found() and opencv_dep.found() 111 | subdir('hailo') 112 | enable_hailo = true 113 | endif 114 | 115 | # IMX500 postprocessing stages. 116 | if get_option('enable_imx500') 117 | subdir('imx500') 118 | endif 119 | 120 | post_processing_headers = files([ 121 | 'histogram.hpp', 122 | 'object_detect.hpp', 123 | 'post_processing_stage.hpp', 124 | 'pwl.hpp', 125 | 'segmentation.hpp', 126 | 'tf_stage.hpp', 127 | ]) 128 | 129 | install_headers(post_processing_headers, subdir: meson.project_name() / 'post_processing_stages') 130 | install_data(postproc_assets, install_dir : get_option('datadir') / 'rpi-camera-assets') 131 | -------------------------------------------------------------------------------- /post_processing_stages/negate_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * negate_stage.cpp - image negate effect 6 | */ 7 | 8 | #include 9 | 10 | #include "core/rpicam_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | using Stream = libcamera::Stream; 15 | 16 | class NegateStage : public PostProcessingStage 17 | { 18 | public: 19 | NegateStage(RPiCamApp *app) : PostProcessingStage(app) {} 20 | 21 | char const *Name() const override; 22 | 23 | void Read(boost::property_tree::ptree const ¶ms) override {} 24 | 25 | void Configure() override; 26 | 27 | bool Process(CompletedRequestPtr &completed_request) override; 28 | 29 | private: 30 | Stream *stream_; 31 | }; 32 | 33 | #define NAME "negate" 34 | 35 | char const *NegateStage::Name() 
const 36 | { 37 | return NAME; 38 | } 39 | 40 | void NegateStage::Configure() 41 | { 42 | stream_ = app_->GetMainStream(); 43 | } 44 | 45 | bool NegateStage::Process(CompletedRequestPtr &completed_request) 46 | { 47 | BufferWriteSync w(app_, completed_request->buffers[stream_]); 48 | libcamera::Span buffer = w.Get()[0]; 49 | uint32_t *ptr = (uint32_t *)buffer.data(); 50 | 51 | // Constraints on the stride mean we always have multiple-of-4 bytes. 52 | for (unsigned int i = 0; i < buffer.size(); i += 4) 53 | *(ptr++) ^= 0xffffffff; 54 | 55 | return false; 56 | } 57 | 58 | static PostProcessingStage *Create(RPiCamApp *app) 59 | { 60 | return new NegateStage(app); 61 | } 62 | 63 | static RegisterStage reg(NAME, &Create); 64 | -------------------------------------------------------------------------------- /post_processing_stages/object_detect.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * object_detect.hpp - object detector result 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | 12 | #include 13 | 14 | struct Detection 15 | { 16 | Detection(int c, const std::string &n, float conf, int x, int y, int w, int h) 17 | : category(c), name(n), confidence(conf), box(x, y, w, h) 18 | { 19 | } 20 | int category; 21 | std::string name; 22 | float confidence; 23 | libcamera::Rectangle box; 24 | std::string toString() const 25 | { 26 | std::stringstream output; 27 | output.precision(2); 28 | output << name << "[" << category << "] (" << confidence << ") @ " << box.x << "," << box.y << " " << box.width 29 | << "x" << box.height; 30 | return output.str(); 31 | } 32 | }; 33 | -------------------------------------------------------------------------------- /post_processing_stages/object_detect_draw_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: 
BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * object_detect_draw_cv_stage.cpp - draw object detection results 6 | */ 7 | 8 | #include "opencv2/imgproc.hpp" 9 | 10 | #include "core/rpicam_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | #include "object_detect.hpp" 15 | 16 | using namespace cv; 17 | 18 | using Rectange = libcamera::Rectangle; 19 | using Stream = libcamera::Stream; 20 | 21 | class ObjectDetectDrawCvStage : public PostProcessingStage 22 | { 23 | public: 24 | ObjectDetectDrawCvStage(RPiCamApp *app) : PostProcessingStage(app) {} 25 | 26 | char const *Name() const override; 27 | 28 | void Read(boost::property_tree::ptree const ¶ms) override; 29 | 30 | void Configure() override; 31 | 32 | bool Process(CompletedRequestPtr &completed_request) override; 33 | 34 | private: 35 | Stream *stream_; 36 | int line_thickness_; 37 | double font_size_; 38 | }; 39 | 40 | #define NAME "object_detect_draw_cv" 41 | 42 | char const *ObjectDetectDrawCvStage::Name() const 43 | { 44 | return NAME; 45 | } 46 | 47 | void ObjectDetectDrawCvStage::Configure() 48 | { 49 | stream_ = app_->GetMainStream(); 50 | } 51 | 52 | void ObjectDetectDrawCvStage::Read(boost::property_tree::ptree const ¶ms) 53 | { 54 | line_thickness_ = params.get("line_thickness", 1); 55 | font_size_ = params.get("font_size", 1.0); 56 | } 57 | 58 | bool ObjectDetectDrawCvStage::Process(CompletedRequestPtr &completed_request) 59 | { 60 | if (!stream_) 61 | return false; 62 | 63 | BufferWriteSync w(app_, completed_request->buffers[stream_]); 64 | libcamera::Span buffer = w.Get()[0]; 65 | uint32_t *ptr = (uint32_t *)buffer.data(); 66 | StreamInfo info = app_->GetStreamInfo(stream_); 67 | 68 | std::vector detections; 69 | 70 | completed_request->post_process_metadata.Get("object_detect.results", detections); 71 | 72 | Mat image(info.height, info.width, CV_8U, ptr, info.stride); 73 | Scalar colour = Scalar(255, 255, 255); 74 | int 
font = FONT_HERSHEY_SIMPLEX; 75 | 76 | for (auto &detection : detections) 77 | { 78 | Rect r(detection.box.x, detection.box.y, detection.box.width, detection.box.height); 79 | rectangle(image, r, colour, line_thickness_); 80 | std::stringstream text_stream; 81 | text_stream << detection.name << " " << (int)(detection.confidence * 100) << "%"; 82 | std::string text = text_stream.str(); 83 | int baseline = 0; 84 | Size size = getTextSize(text, font, font_size_, 2, &baseline); 85 | Point text_origin(detection.box.x + 5, detection.box.y + size.height + 5); 86 | putText(image, text, text_origin, font, font_size_, colour, 2); 87 | } 88 | 89 | return false; 90 | } 91 | 92 | static PostProcessingStage *Create(RPiCamApp *app) 93 | { 94 | return new ObjectDetectDrawCvStage(app); 95 | } 96 | 97 | static RegisterStage reg(NAME, &Create); 98 | -------------------------------------------------------------------------------- /post_processing_stages/pose_estimation_tf_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * pose_estimation_tf_stage - pose estimator 6 | */ 7 | 8 | #include "tf_stage.hpp" 9 | 10 | constexpr int FEATURE_SIZE = 17; 11 | constexpr int HEATMAP_DIMS = 9; 12 | 13 | #define NAME "pose_estimation_tf" 14 | 15 | class PoseEstimationTfStage : public TfStage 16 | { 17 | public: 18 | // The model we use expects 257x257 images. Really. 19 | PoseEstimationTfStage(RPiCamApp *app) : TfStage(app, 257, 257) { config_ = std::make_unique(); } 20 | char const *Name() const override { return NAME; } 21 | 22 | protected: 23 | void readExtras(boost::property_tree::ptree const ¶ms) override; 24 | 25 | void checkConfiguration() override; 26 | 27 | // Retrieve the various joint coordinates and confidences from the model. 28 | void interpretOutputs() override; 29 | 30 | // Attach results as metadata. 
31 | void applyResults(CompletedRequestPtr &completed_request) override; 32 | 33 | private: 34 | std::vector heats_; 35 | std::vector confidences_; 36 | std::vector locations_; 37 | }; 38 | 39 | void PoseEstimationTfStage::readExtras([[maybe_unused]] boost::property_tree::ptree const ¶ms) 40 | { 41 | // Actually we don't read anything, but we can check the output tensor dimensions. 42 | int output = interpreter_->outputs()[0]; 43 | TfLiteIntArray *dims = interpreter_->tensor(output)->dims; 44 | // Causes might include loading the wrong model. 45 | if (dims->data[0] != 1 || dims->data[1] != HEATMAP_DIMS || dims->data[2] != HEATMAP_DIMS || 46 | dims->data[3] != FEATURE_SIZE) 47 | throw std::runtime_error("PoseEstimationTfStage: Unexpected output dimensions"); 48 | } 49 | 50 | void PoseEstimationTfStage::checkConfiguration() 51 | { 52 | if (!main_stream_) 53 | throw std::runtime_error("PoseEstimationTfStage: Main stream is required"); 54 | } 55 | 56 | void PoseEstimationTfStage::applyResults(CompletedRequestPtr &completed_request) 57 | { 58 | std::vector> lib_locations { locations_ }; 59 | std::vector> confidences { confidences_ }; 60 | 61 | completed_request->post_process_metadata.Set("pose_estimation.locations", lib_locations); 62 | completed_request->post_process_metadata.Set("pose_estimation.confidences", confidences); 63 | } 64 | 65 | void PoseEstimationTfStage::interpretOutputs() 66 | { 67 | // This code has been adapted from the "Qengineering/TensorFlow_Lite_Pose_RPi_32-bits" repository and can be 68 | // found here: "https://github.com/Qengineering/TensorFlow_Lite_Pose_RPi_32-bits/blob/master/Pose_single.cpp" 69 | float *heatmaps = interpreter_->tensor(interpreter_->outputs()[0])->data.f; 70 | float *offsets = interpreter_->tensor(interpreter_->outputs()[1])->data.f; 71 | 72 | heats_.clear(); 73 | confidences_.clear(); 74 | locations_.clear(); 75 | 76 | for (int i = 0; i < FEATURE_SIZE; i++) 77 | { 78 | float confidence_temp = heatmaps[i]; 79 | 
libcamera::Point heat_coord; 80 | for (int y = 0; y < HEATMAP_DIMS; y++) 81 | { 82 | for (int x = 0; x < HEATMAP_DIMS; x++) 83 | { 84 | int j = FEATURE_SIZE * (HEATMAP_DIMS * y + x) + i; 85 | if (heatmaps[j] > confidence_temp) 86 | { 87 | confidence_temp = heatmaps[j]; 88 | heat_coord.x = x; 89 | heat_coord.y = y; 90 | } 91 | } 92 | } 93 | heats_.push_back(heat_coord); 94 | confidences_.push_back(confidence_temp); 95 | } 96 | 97 | for (int i = 0; i < FEATURE_SIZE; i++) 98 | { 99 | libcamera::Point location_coord; 100 | int x = heats_[i].x, y = heats_[i].y, j = (FEATURE_SIZE * 2) * (HEATMAP_DIMS * y + x) + i; 101 | 102 | location_coord.y = (y * main_stream_info_.height) / (HEATMAP_DIMS - 1) + offsets[j]; 103 | location_coord.x = (x * main_stream_info_.width) / (HEATMAP_DIMS - 1) + offsets[j + FEATURE_SIZE]; 104 | 105 | locations_.push_back(location_coord); 106 | } 107 | } 108 | 109 | static PostProcessingStage *Create(RPiCamApp *app) 110 | { 111 | return new PoseEstimationTfStage(app); 112 | } 113 | 114 | static RegisterStage reg(NAME, &Create); 115 | 116 | -------------------------------------------------------------------------------- /post_processing_stages/post_processing_stage.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * post_processing_stage.hpp - Post processing stage base class definition. 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | // Prevents compiler warnings in Boost headers with more recent versions of GCC. 
16 | #define BOOST_BIND_GLOBAL_PLACEHOLDERS 17 | 18 | #include 19 | #include 20 | 21 | #include "core/completed_request.hpp" 22 | #include "core/stream_info.hpp" 23 | 24 | namespace libcamera 25 | { 26 | struct StreamConfiguration; 27 | } 28 | 29 | class RPiCamApp; 30 | 31 | using StreamConfiguration = libcamera::StreamConfiguration; 32 | 33 | class PostProcessingStage 34 | { 35 | public: 36 | PostProcessingStage(RPiCamApp *app); 37 | 38 | virtual ~PostProcessingStage(); 39 | 40 | virtual char const *Name() const = 0; 41 | 42 | virtual void Read(boost::property_tree::ptree const ¶ms); 43 | 44 | virtual void AdjustConfig(std::string const &use_case, StreamConfiguration *config); 45 | 46 | virtual void Configure(); 47 | 48 | virtual void Start(); 49 | 50 | // Return true if this request is to be dropped. 51 | virtual bool Process(CompletedRequestPtr &completed_request) = 0; 52 | 53 | virtual void Stop(); 54 | 55 | virtual void Teardown(); 56 | 57 | // Below here are some helpers provided for the convenience of derived classes. 58 | 59 | // Convert YUV420 image to RGB. We crop from the centre of the image if the src 60 | // image is larger than the destination. 61 | static std::vector Yuv420ToRgb(const uint8_t *src, StreamInfo &src_info, StreamInfo &dst_info); 62 | static void Yuv420ToRgb(uint8_t *dst, const uint8_t *src, StreamInfo &src_info, StreamInfo &dst_info); 63 | 64 | protected: 65 | // Helper to calculate the execution time of any callable object and return it in as a std::chrono::duration. 66 | // For functions returning a value, the simplest thing would be to wrap the call in a lambda and capture 67 | // the return value. 68 | template 69 | static auto ExecutionTime(F &&f, Args &&... 
args) 70 | { 71 | auto t1 = T::now(); 72 | std::invoke(std::forward(f), std::forward(args)...); 73 | auto t2 = T::now(); 74 | return std::chrono::duration(t2 - t1); 75 | } 76 | 77 | template 78 | static std::vector GetJsonArray(const boost::property_tree::ptree &pt, const std::string &key, 79 | const std::vector &default_value = {}) 80 | { 81 | std::vector vec; 82 | 83 | if (pt.find(key) != pt.not_found()) 84 | { 85 | for (auto &v : pt.get_child(key)) 86 | vec.push_back(v.second.get_value()); 87 | } 88 | 89 | for (unsigned int i = vec.size(); i < default_value.size(); i++) 90 | vec.push_back(default_value[i]); 91 | 92 | return vec; 93 | } 94 | 95 | RPiCamApp *app_; 96 | }; 97 | 98 | typedef PostProcessingStage *(*StageCreateFunc)(RPiCamApp *app); 99 | struct RegisterStage 100 | { 101 | RegisterStage(char const *name, StageCreateFunc create_func); 102 | }; 103 | 104 | std::map const &GetPostProcessingStages(); 105 | -------------------------------------------------------------------------------- /post_processing_stages/pwl.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2019, Raspberry Pi (Trading) Limited 4 | * 5 | * pwl.hpp - piecewise linear functions interface 6 | */ 7 | #pragma once 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | class Pwl 17 | { 18 | public: 19 | struct Interval { 20 | Interval(double _start, double _end) : start(_start), end(_end) {} 21 | double start, end; 22 | bool Contains(double value) { return value >= start && value <= end; } 23 | double Clip(double value) { return value < start ? start : (value > end ? 
end : value); } 24 | double Len() const { return end - start; } 25 | }; 26 | struct Point { 27 | Point() : x(0), y(0) {} 28 | Point(double _x, double _y) : x(_x), y(_y) {} 29 | double x, y; 30 | Point operator-(Point const &p) const { return Point(x - p.x, y - p.y); } 31 | Point operator+(Point const &p) const { return Point(x + p.x, y + p.y); } 32 | double operator%(Point const &p) const { return x * p.x + y * p.y; } 33 | Point operator*(double f) const { return Point(x * f, y * f); } 34 | Point operator/(double f) const { return Point(x / f, y / f); } 35 | double Len2() const { return x * x + y * y; } 36 | double Len() const { return sqrt(Len2()); } 37 | }; 38 | Pwl() {} 39 | Pwl(std::vector const &points) : points_(points) {} 40 | void Read(boost::property_tree::ptree const ¶ms); 41 | void Append(double x, double y, const double eps = 1e-6); 42 | void Prepend(double x, double y, const double eps = 1e-6); 43 | Interval Domain() const; 44 | Interval Range() const; 45 | bool Empty() const; 46 | // Evaluate Pwl, optionally supplying an initial guess for the 47 | // "span". The "span" may be optionally be updated. If you want to know 48 | // the "span" value but don't have an initial guess you can set it to 49 | // -1. 50 | double Eval(double x, int *span_ptr = nullptr, bool update_span = true) const; 51 | // Find perpendicular closest to xy, starting from span+1 so you can 52 | // call it repeatedly to check for multiple closest points (set span to 53 | // -1 on the first call). Also returns "pseudo" perpendiculars; see 54 | // PerpType enum. 55 | enum class PerpType { 56 | NotFound, // no perpendicular found 57 | Start, // start of Pwl is closest point 58 | End, // end of Pwl is closest point 59 | Vertex, // vertex of Pwl is closest point 60 | Perpendicular // true perpendicular found 61 | }; 62 | PerpType Invert(Point const &xy, Point &perp, int &span, const double eps = 1e-6) const; 63 | // Compose two Pwls together, doing "this" first and "other" after. 
64 | Pwl Compose(Pwl const &other, const double eps = 1e-6) const; 65 | // Apply function to (x,y) values at every control point. 66 | void Map(std::function f) const; 67 | // Apply function to (x, y0, y1) values wherever either Pwl has a 68 | // control point. 69 | static void Map2(Pwl const &pwl0, Pwl const &pwl1, 70 | std::function f); 71 | // Combine two Pwls, meaning we create a new Pwl where the y values are 72 | // given by running f wherever either has a knot. 73 | static Pwl Combine(Pwl const &pwl0, Pwl const &pwl1, 74 | std::function f, 75 | const double eps = 1e-6); 76 | // Make "this" match (at least) the given domain. Any extension my be 77 | // clipped or linear. 78 | void MatchDomain(Interval const &domain, bool clip = true, const double eps = 1e-6); 79 | // Generate a LUT for this funciton. 80 | template std::vector GenerateLut() const 81 | { 82 | int end = Domain().end + 1, span = 0; 83 | std::vector lut(end); 84 | for (int x = 0; x < end; x++) 85 | lut[x] = Eval(x, &span); 86 | return lut; 87 | } 88 | Pwl &operator*=(double d); 89 | void Debug(FILE *fp = stderr) const; 90 | 91 | private: 92 | int findSpan(double x, int span) const; 93 | std::vector points_; 94 | }; 95 | -------------------------------------------------------------------------------- /post_processing_stages/segmentation.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * segmentation.hpp - segmentation result 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | struct Segmentation 14 | { 15 | Segmentation(int w, int h, std::vector l, const std::vector &s) 16 | : width(w), height(h), labels(l), segmentation(s) 17 | { 18 | } 19 | int width; 20 | int height; 21 | std::vector labels; 22 | std::vector segmentation; 23 | }; 24 | -------------------------------------------------------------------------------- 
/post_processing_stages/sobel_cv_stage.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * sobel_cv_stage.cpp - Sobel filter implementation, using OpenCV 6 | */ 7 | 8 | #include 9 | 10 | #include "core/rpicam_app.hpp" 11 | 12 | #include "post_processing_stages/post_processing_stage.hpp" 13 | 14 | #include "opencv2/core.hpp" 15 | #include "opencv2/imgproc.hpp" 16 | 17 | using namespace cv; 18 | 19 | using Stream = libcamera::Stream; 20 | 21 | class SobelCvStage : public PostProcessingStage 22 | { 23 | public: 24 | SobelCvStage(RPiCamApp *app) : PostProcessingStage(app) {} 25 | 26 | char const *Name() const override; 27 | 28 | void Read(boost::property_tree::ptree const ¶ms) override; 29 | 30 | void Configure() override; 31 | 32 | bool Process(CompletedRequestPtr &completed_request) override; 33 | 34 | private: 35 | Stream *stream_; 36 | int ksize_ = 3; 37 | }; 38 | 39 | #define NAME "sobel_cv" 40 | 41 | char const *SobelCvStage::Name() const 42 | { 43 | return NAME; 44 | } 45 | 46 | void SobelCvStage::Read(boost::property_tree::ptree const ¶ms) 47 | { 48 | ksize_ = params.get("ksize", 3); 49 | } 50 | 51 | void SobelCvStage::Configure() 52 | { 53 | stream_ = app_->GetMainStream(); 54 | if (!stream_ || stream_->configuration().pixelFormat != libcamera::formats::YUV420) 55 | throw std::runtime_error("SobelCvStage: only YUV420 format supported"); 56 | } 57 | 58 | bool SobelCvStage::Process(CompletedRequestPtr &completed_request) 59 | { 60 | StreamInfo info = app_->GetStreamInfo(stream_); 61 | BufferWriteSync w(app_, completed_request->buffers[stream_]); 62 | libcamera::Span buffer = w.Get()[0]; 63 | uint8_t *ptr = (uint8_t *)buffer.data(); 64 | 65 | //Everything beyond this point is image processing... 
66 | 67 | uint8_t value = 128; 68 | int num = (info.stride * info.height) / 2; 69 | Mat src = Mat(info.height, info.width, CV_8U, ptr, info.stride); 70 | int scale = 1; 71 | int delta = 0; 72 | int ddepth = CV_16S; 73 | 74 | memset(ptr + info.stride * info.height, value, num); 75 | 76 | // Remove noise by blurring with a Gaussian filter ( kernal size = 3 ) 77 | GaussianBlur(src, src, Size(3, 3), 0, 0, BORDER_DEFAULT); 78 | 79 | Mat grad_x, grad_y; 80 | 81 | //Scharr(src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT); 82 | Sobel(src, grad_x, ddepth, 1, 0, ksize_, scale, delta, BORDER_DEFAULT); 83 | //Scharr(src_gray, grad_y, ddepth, 0, 1, scale, delta, BORDER_DEFAULT); 84 | Sobel(src, grad_y, ddepth, 0, 1, ksize_, scale, delta, BORDER_DEFAULT); 85 | 86 | // converting back to CV_8U 87 | convertScaleAbs(grad_x, grad_x); 88 | convertScaleAbs(grad_y, grad_y); 89 | 90 | //weight the x and y gradients and add their magnitudes 91 | addWeighted(grad_x, 0.5, grad_y, 0.5, 0, src); 92 | 93 | return false; 94 | } 95 | 96 | static PostProcessingStage *Create(RPiCamApp *app) 97 | { 98 | return new SobelCvStage(app); 99 | } 100 | 101 | static RegisterStage reg(NAME, &Create); 102 | -------------------------------------------------------------------------------- /post_processing_stages/tf_stage.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Limited 4 | * 5 | * tf_stage.hpp - base class for TensorFlowLite stages 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | #include "tensorflow/lite/builtin_op_data.h" 17 | #include "tensorflow/lite/c/common.h" 18 | #include "tensorflow/lite/interpreter.h" 19 | #include "tensorflow/lite/kernels/register.h" 20 | 21 | #include "core/rpicam_app.hpp" 22 | #include "core/stream_info.hpp" 23 | 24 | #include 
"post_processing_stages/post_processing_stage.hpp" 25 | 26 | // The TfStage is a convenient base class from which post processing stages using 27 | // TensorFlowLite can be derived. It provides a certain amount of boiler plate code 28 | // and some other useful functions. Please refer to the examples provided that make 29 | // use of it. 30 | 31 | struct TfConfig 32 | { 33 | int number_of_threads = 3; 34 | int refresh_rate = 5; 35 | std::string model_file; 36 | bool verbose = false; 37 | float normalisation_offset = 127.5; 38 | float normalisation_scale = 127.5; 39 | }; 40 | 41 | class TfStage : public PostProcessingStage 42 | { 43 | public: 44 | // The TfStage provides implementations of the PostProcessingStage functions with the 45 | // exception of Name(), which derived classes must still provide. 46 | 47 | // The constructor supplies the width and height that TFLite wants. 48 | TfStage(RPiCamApp *app, int tf_w, int tf_h); 49 | 50 | //char const *Name() const override; 51 | 52 | void Read(boost::property_tree::ptree const ¶ms) override; 53 | 54 | void Configure() override; 55 | 56 | bool Process(CompletedRequestPtr &completed_request) override; 57 | 58 | void Stop() override; 59 | 60 | protected: 61 | TfConfig *config() const { return config_.get(); } 62 | 63 | // Instead of redefining the above public interface, derived class should implement 64 | // the following four virtual methods. 65 | 66 | // Read additional parameters required by the stage. Can also do some model checking. 67 | virtual void readExtras(boost::property_tree::ptree const ¶ms) {} 68 | 69 | // Check the stream and image configuration. Here the stage should report any errors 70 | // and/or fail. 71 | virtual void checkConfiguration() {} 72 | 73 | // This runs asynchronously from the main thread right after the model has run. The 74 | // outputs should be processed into a form where applyResults can make use of them. 
75 | virtual void interpretOutputs() {} 76 | 77 | // Here we run synchronously again and so should not take too long. The results 78 | // produced by interpretOutputs can be used now, for example as metadata to attach 79 | // to the image, or even drawn onto the image itself. 80 | virtual void applyResults(CompletedRequestPtr &completed_request) {} 81 | 82 | std::unique_ptr config_; 83 | 84 | // The width and height that TFLite wants. 85 | unsigned int tf_w_, tf_h_; 86 | 87 | // We run TFLite on the low resolution image, details of which are here. 88 | libcamera::Stream *lores_stream_; 89 | StreamInfo lores_info_; 90 | 91 | // The stage may or may not make use of the larger or "main" image stream. 92 | libcamera::Stream *main_stream_; 93 | StreamInfo main_stream_info_; 94 | 95 | std::unique_ptr model_; 96 | std::unique_ptr interpreter_; 97 | 98 | private: 99 | void initialise(); 100 | void runInference(); 101 | 102 | std::mutex future_mutex_; 103 | std::unique_ptr> future_; 104 | std::vector lores_copy_; 105 | std::mutex output_mutex_; 106 | }; 107 | -------------------------------------------------------------------------------- /preview/meson.build: -------------------------------------------------------------------------------- 1 | rpicam_app_src += files([ 2 | 'null_preview.cpp', 3 | 'preview.cpp', 4 | ]) 5 | 6 | preview_headers = files([ 7 | 'preview.hpp', 8 | ]) 9 | 10 | enable_drm = false 11 | drm_deps = dependency('libdrm', required : get_option('enable_drm')) 12 | 13 | if drm_deps.found() 14 | rpicam_app_dep += drm_deps 15 | rpicam_app_src += files('drm_preview.cpp') 16 | cpp_arguments += '-DLIBDRM_PRESENT=1' 17 | enable_drm = true 18 | endif 19 | 20 | enable_egl = false 21 | x11_deps = dependency('x11', required : get_option('enable_egl')) 22 | epoxy_deps = dependency('epoxy', required : get_option('enable_egl')) 23 | 24 | if x11_deps.found() and epoxy_deps.found() 25 | rpicam_app_dep += [x11_deps, epoxy_deps] 26 | rpicam_app_src += 
files('egl_preview.cpp') 27 | cpp_arguments += '-DLIBEGL_PRESENT=1' 28 | enable_egl = true 29 | endif 30 | 31 | enable_qt = false 32 | qt_dep = dependency('qt5', modules : ['Core', 'Widgets'], required : get_option('enable_qt')) 33 | if qt_dep.found() 34 | rpicam_app_dep += qt_dep 35 | rpicam_app_src += files('qt_preview.cpp') 36 | cpp_arguments += '-DQT_PRESENT=1' 37 | enable_qt = true 38 | endif 39 | 40 | install_headers(preview_headers, subdir: meson.project_name() / 'preview') 41 | -------------------------------------------------------------------------------- /preview/null_preview.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * null_preview.cpp - dummy "show nothing" preview window. 6 | */ 7 | 8 | #include 9 | 10 | #include "core/options.hpp" 11 | 12 | #include "preview.hpp" 13 | 14 | class NullPreview : public Preview 15 | { 16 | public: 17 | NullPreview(Options const *options) : Preview(options) { LOG(2, "Running without preview window"); } 18 | ~NullPreview() {} 19 | // Display the buffer. You get given the fd back in the BufferDoneCallback 20 | // once its available for re-use. 21 | virtual void Show(int fd, libcamera::Span span, StreamInfo const &info) override { done_callback_(fd); } 22 | // Reset the preview window, clearing the current buffers and being ready to 23 | // show new ones. 24 | void Reset() override {} 25 | // Return the maximum image size allowed. Zeroes mean "no limit". 
26 | virtual void MaxImageSize(unsigned int &w, unsigned int &h) const override { w = h = 0; } 27 | 28 | void SetInfoText(const std::string &text) override { LOG(1, text); } 29 | 30 | private: 31 | }; 32 | 33 | Preview *make_null_preview(Options const *options) 34 | { 35 | return new NullPreview(options); 36 | } 37 | -------------------------------------------------------------------------------- /preview/preview.cpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2021, Raspberry Pi (Trading) Ltd. 4 | * 5 | * preview.cpp - preview window interface 6 | */ 7 | 8 | #include "core/options.hpp" 9 | 10 | #include "preview.hpp" 11 | 12 | Preview *make_null_preview(Options const *options); 13 | Preview *make_egl_preview(Options const *options); 14 | Preview *make_drm_preview(Options const *options); 15 | Preview *make_qt_preview(Options const *options); 16 | 17 | Preview *make_preview(Options const *options) 18 | { 19 | if (options->nopreview) 20 | return make_null_preview(options); 21 | #if QT_PRESENT 22 | else if (options->qt_preview) 23 | { 24 | Preview *p = make_qt_preview(options); 25 | if (p) 26 | LOG(1, "Made QT preview window"); 27 | return p; 28 | } 29 | #endif 30 | else 31 | { 32 | try 33 | { 34 | #if LIBEGL_PRESENT 35 | Preview *p = make_egl_preview(options); 36 | if (p) 37 | LOG(1, "Made X/EGL preview window"); 38 | return p; 39 | #else 40 | throw std::runtime_error("egl libraries unavailable."); 41 | #endif 42 | } 43 | catch (std::exception const &e) 44 | { 45 | try 46 | { 47 | #if LIBDRM_PRESENT 48 | Preview *p = make_drm_preview(options); 49 | if (p) 50 | LOG(1, "Made DRM preview window"); 51 | return p; 52 | #else 53 | throw std::runtime_error("drm libraries unavailable."); 54 | #endif 55 | } 56 | catch (std::exception const &e) 57 | { 58 | LOG(1, "Preview window unavailable"); 59 | return make_null_preview(options); 60 | } 61 | } 62 | } 63 | 64 | 
return nullptr; // prevents compiler warning in debug builds 65 | } 66 | -------------------------------------------------------------------------------- /preview/preview.hpp: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: BSD-2-Clause */ 2 | /* 3 | * Copyright (C) 2020, Raspberry Pi (Trading) Ltd. 4 | * 5 | * preview.hpp - preview window interface 6 | */ 7 | 8 | #pragma once 9 | 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include "core/stream_info.hpp" 16 | 17 | struct Options; 18 | 19 | class Preview 20 | { 21 | public: 22 | typedef std::function DoneCallback; 23 | 24 | Preview(Options const *options) : options_(options) {} 25 | virtual ~Preview() {} 26 | // This is where the application sets the callback it gets whenever the viewfinder 27 | // is no longer displaying the buffer and it can be safely recycled. 28 | void SetDoneCallback(DoneCallback callback) { done_callback_ = callback; } 29 | virtual void SetInfoText(const std::string &text) {} 30 | // Display the buffer. You get given the fd back in the BufferDoneCallback 31 | // once its available for re-use. 32 | virtual void Show(int fd, libcamera::Span span, StreamInfo const &info) = 0; 33 | // Reset the preview window, clearing the current buffers and being ready to 34 | // show new ones. 35 | virtual void Reset() = 0; 36 | // Check if preview window has been shut down. 37 | virtual bool Quit() { return false; } 38 | // Return the maximum image size allowed. 
39 | virtual void MaxImageSize(unsigned int &w, unsigned int &h) const = 0; 40 | 41 | protected: 42 | DoneCallback done_callback_; 43 | Options const *options_; 44 | }; 45 | 46 | Preview *make_preview(Options const *options); 47 | -------------------------------------------------------------------------------- /utils/camera-bug-report: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # rpicam-apps bug report generator. 4 | # Copyright (C) 2021, Raspberry Pi Ltd. 5 | # 6 | import argparse 7 | from datetime import datetime 8 | import select 9 | import subprocess 10 | import sys 11 | import time 12 | 13 | 14 | class Report: 15 | def __init__(self, id, file): 16 | self._id = id 17 | self._cmds = [] 18 | self._strs = [] 19 | self._file = file 20 | 21 | def __run_cmd(self, cmd): 22 | print(f'** {cmd} **', file=self._file) 23 | try: 24 | p = subprocess.run(cmd, text=True, check=False, shell=True, 25 | stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 26 | print(p.stdout, file=self._file) 27 | except RuntimeError as e: 28 | print(f'Error: {e}', file=self._file) 29 | 30 | def add_cmd(self, c): 31 | self._cmds.append(c) 32 | 33 | def add_str(self, s): 34 | self._strs.append(s) 35 | 36 | def exec(self): 37 | print(f'{"-"*80}\n{self._id}\n{"-"*80}', file=self._file) 38 | 39 | for c in self._cmds: 40 | self.__run_cmd(c) 41 | 42 | for s in self._strs: 43 | print(s, file=self._file) 44 | 45 | 46 | def run_prog(cmd, t): 47 | cmd = cmd.split(' ') 48 | out = [] 49 | try: 50 | start = time.time() 51 | p = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, errors='ignore') 52 | poll = select.poll() 53 | poll.register(p.stdout, select.POLLIN) 54 | 55 | while p.poll() is None: 56 | if poll.poll(0): 57 | line = p.stdout.readline() 58 | print(line, end='', flush=True) 59 | out.append(line) 60 | 61 | if (t != 0) and (time.time() - start > t): 62 | p.kill() 63 | out = out + 
p.communicate()[0].splitlines(keepends=True) 64 | out.append('Error: ***** TIMEOUT *****') 65 | break 66 | 67 | except KeyboardInterrupt: 68 | p.kill() 69 | out = out + p.communicate()[0].splitlines(keepends=True) 70 | out.append('Error: ***** INTERRUPT *****') 71 | 72 | p.wait() 73 | return ''.join(out) 74 | 75 | 76 | if __name__ == '__main__': 77 | parser = argparse.ArgumentParser(description='rpicam-apps Bug Report Generator') 78 | parser.add_argument('-o', help='Report filename', type=str, default='bug-report.txt') 79 | parser.add_argument('-t', help='Timeout (seconds) for the command to run. A value of 0 \ 80 | disables the timeout.', type=float, default=0) 81 | parser.add_argument('-c', help='Command to run, e.g. -c "rpicam-still -t 1000 -o test.jpg"', type=str) 82 | args = parser.parse_args() 83 | 84 | # This is the app the user is actually running. 85 | app = 'rpicam-hello' 86 | if args.c: 87 | app = args.c.split(" ")[0] 88 | # Can we identify the app? If not, use rpicam-hello for version checks. 
89 | if not any([s in app for s in ['rpicam-still', 'rpicam-vid', 'rpicam-hello', 'rpicam-raw', 'rpicam-jpeg']]): 90 | app = 'rpicam-hello' 91 | 92 | reports = [] 93 | with open(args.o, 'wt') as file: 94 | title = Report('rpicam-apps Bug Report', file) 95 | title.add_str(f'Date: {datetime.now().strftime("%d-%m-%Y (%H:%M:%S)")}') 96 | title.add_str(f'Command: {" ".join(sys.argv)}\n') 97 | reports.append(title) 98 | 99 | hwinfo = Report('Hardware information', file) 100 | hwinfo.add_cmd('hostname') 101 | hwinfo.add_cmd('cat /proc/cpuinfo') 102 | reports.append(hwinfo) 103 | 104 | config = Report('Configuration', file) 105 | config.add_cmd('cat /boot/firmware/cmdline.txt') 106 | config.add_cmd('cat /boot/firmware/config.txt') 107 | reports.append(config) 108 | 109 | logs = Report('Logs', file) 110 | logs.add_cmd('dmesg') 111 | logs.add_cmd('sudo vclog log --msg') 112 | logs.add_cmd('sudo vclog log --assert') 113 | reports.append(logs) 114 | 115 | mem = Report('Memory', file) 116 | mem.add_cmd('cat /proc/meminfo') 117 | mem.add_cmd('sudo cat /sys/kernel/debug/dma_buf/bufinfo') 118 | mem.add_cmd('sudo cat /sys/kernel/debug/vcsm-cma/state') 119 | reports.append(mem) 120 | 121 | media = Report('Media Devices', file) 122 | for i in range(5): 123 | media.add_cmd(f'media-ctl -d {i} -p') 124 | reports.append(media) 125 | 126 | # Get the camera list with the same program specified in the run command 127 | cam = Report('Cameras', file) 128 | cam.add_cmd(f'{app} --list-cameras') 129 | reports.append(cam) 130 | 131 | # Get the version with the same program specified in the run command 132 | ver = Report('Versions', file) 133 | ver.add_cmd('uname -a') 134 | ver.add_cmd('cat /etc/os-release') 135 | ver.add_cmd('vcgencmd version') 136 | ver.add_cmd(f'{app} --version') 137 | reports.append(ver) 138 | 139 | # Run the actual application before executing the reports! 
140 | if args.c: 141 | cmd_out = run_prog(args.c, args.t) 142 | 143 | # Report for the command output 144 | cmd = Report(args.c, file) 145 | cmd.add_str(cmd_out) 146 | reports.append(cmd) 147 | 148 | for r in reports: 149 | r.exec() 150 | 151 | print(f'\nBug report generated to {args.o}') 152 | print('Please upload this file when you create a new bug report at:') 153 | print('https://github.com/raspberrypi/rpicam-apps/issues/') 154 | -------------------------------------------------------------------------------- /utils/download-hailo-models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | declare -A networks=( 5 | # Image classification 6 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/resnet_v1_50_h8l.hef"]="resnet_v1_50_h8l.hef" 7 | # Yolov6 inference 8 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolov6n.hef"]="yolov6n_h8l.hef" 9 | ["https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.13.0/hailo8/yolov6n.hef"]="yolov6n_h8.hef" 10 | # Yolov8 inference 11 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolov8s_h8l.hef"]="yolov8s_h8l.hef" 12 | ["https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.13.0/hailo8/yolov8s.hef"]="yolov8s_h8.hef" 13 | # Yolov5 segmentation 14 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolov5n_seg_h8l_mz.hef"]="yolov5n_seg_h8l_mz.hef" 15 | ["https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.13.0/hailo8/yolov5n_seg.hef"]="yolov5n_seg_h8.hef" 16 | # YoloX inference 17 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolox_s_leaky_h8l_mz.hef"]="yolox_s_leaky_h8l_rpi.hef" 18 | # Yolov8 pose 19 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolov8s_pose_h8l_pi.hef"]="yolov8s_pose_h8l_pi.hef" 20 | 
["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8/yolov8s_pose.hef"]="yolov8s_pose_h8.hef" 21 | # Yolov5 person/face inference 22 | ["https://hailo-csdata.s3.eu-west-2.amazonaws.com/resources/hefs/h8l_rpi/yolov5s_personface_h8l.hef"]="yolov5s_personface_h8l.hef" 23 | # Face landmarking 24 | ["https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.13.0/hailo8l/scrfd_2.5g.hef"]="scrfd_2.5g_h8l.hef" 25 | ) 26 | 27 | if [ $# -ne 1 ]; then 28 | echo "Usage: $0 " 29 | exit 1 30 | fi 31 | 32 | dir=$1 33 | mkdir -p $dir 34 | 35 | for url in "${!networks[@]}"; do 36 | filename="${networks[$url]}" 37 | wget -nv -O "$dir/$filename" "$url" 38 | done 39 | -------------------------------------------------------------------------------- /utils/download-imx500-models.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | networks=( 5 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_deeplabv3plus.rpk 6 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientdet_lite0_pp.rpk 7 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientnet_bo.rpk 8 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientnet_lite0.rpk 9 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientnetv2_b0.rpk 10 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientnetv2_b1.rpk 11 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_efficientnetv2_b2.rpk 12 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_higherhrnet_coco.rpk 13 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_inputtensoronly.rpk 14 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_mnasnet1.0.rpk 15 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_mobilenet_v2.rpk 16 | 
https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_mobilevit_xs.rpk 17 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_mobilevit_xxs.rpk 18 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_nanodet_plus_416x416_pp.rpk 19 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_nanodet_plus_416x416.rpk 20 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_posenet.rpk 21 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_regnetx_002.rpk 22 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_regnety_002.rpk 23 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_regnety_004.rpk 24 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_resnet18.rpk 25 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_shufflenet_v2_x1_5.rpk 26 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_squeezenet1.0.rpk 27 | https://github.com/raspberrypi/imx500-models/raw/main/imx500_network_ssd_mobilenetv2_fpnlite_320x320_pp.rpk 28 | ) 29 | 30 | if [ $# -ne 1 ]; then 31 | echo "Usage: $0 " 32 | exit 1 33 | fi 34 | 35 | dir=$1 36 | mkdir -p $dir 37 | 38 | for url in ${networks[@]}; do 39 | wget -nv -N -P $dir $url 40 | done 41 | -------------------------------------------------------------------------------- /utils/gen-dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cd "$MESON_SOURCE_ROOT" || return 4 | git rev-parse HEAD > "$MESON_DIST_ROOT"/version.gen 5 | -------------------------------------------------------------------------------- /utils/meson.build: -------------------------------------------------------------------------------- 1 | install_data('camera-bug-report', install_dir : get_option('bindir')) 2 | -------------------------------------------------------------------------------- /utils/timestamp.py: 
#!/usr/bin/python3
#
# rpicam-apps timestamp analysis tool
# Copyright (C) 2021, Raspberry Pi Ltd.
#
import argparse
import json
import subprocess

# matplotlib is optional: without it we can still print the statistics,
# we just cannot plot them.
try:
    from matplotlib import pyplot as plt
    plot_available = True
except ImportError:
    plot_available = False


def read_times_pts(file):
    """Return the list of frame timestamps (ms) from an rpicam-vid PTS file."""
    with open(file) as f:
        if f.readline().strip() != '# timecode format v2':
            raise RuntimeError('PTS file format unknown')
        return [float(line) for line in f.readlines()]


def read_times_container(file):
    """Return the list of frame timestamps (ms) from a video container, using ffprobe."""
    cmd = ['ffprobe', file, '-hide_banner', '-select_streams', 'v', '-show_entries', 'frame', '-of', 'json']
    r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    if r.returncode:
        raise RuntimeError(f'ffprobe failed to run with command:\n{" ".join(cmd)}')

    frame_data = json.loads(r.stdout)['frames']
    if not frame_data:
        raise RuntimeError(f'No frames found in {file}')

    # Different ffprobe versions name the timestamp field differently, so try
    # them in order of preference.
    keys = ['pkt_pts_time', 'pts_time', 'pkt_dts_time', 'dts_time']
    key = [k for k in keys if k in frame_data[0].keys()]

    if len(key) == 0:
        raise RuntimeError(f'Timestamp keys not found in {file}')

    # ffprobe reports seconds; convert to milliseconds.
    ts_list = [float(frame[key[0]]) * 1000 for frame in frame_data]
    return ts_list


def get_differences(items):
    """Return the list of differences between consecutive items."""
    return [next_item - item for item, next_item in zip(items[:-1], items[1:])]


def outliers(diffs, frac, avg):
    """Return a summary string counting frame times more than frac away from avg."""
    return f'{sum(d < (1 - frac) * avg or d > (1 + frac) * avg for d in diffs)} ({frac * 100}%)'


def plot_pts(diffs, avg, title, narrow):
    """Plot the frame time graph, optionally narrowing the y-axis to hide outliers."""
    fig, ax = plt.subplots()
    ax.plot(diffs, label='Frame times')
    ax.plot([0, len(diffs)], [avg, avg], 'g--', label='Average')
    # Find and plot the max value
    max_val, idx = max((val, idx) for (idx, val) in enumerate(diffs))
    ax.plot([idx], [max_val], 'rx', label='Maximum')
    if narrow:
        # Hide outliers by capping the y-axis at twice the minimum frame time.
        # default= guards against every value being an outlier.
        cap = 2 * min(diffs)
        max_val = max((val for val in diffs if val < cap), default=max_val)
        ax.axis([0, len(diffs), min(diffs) * 0.995, max_val * 1.005])
    ax.legend()
    plt.title(title)
    plt.xlabel('Frame')
    plt.ylabel('Frame time (ms)')
    plt.grid(True)
    plt.show()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='rpicam-apps timestamp analysis tool')
    parser.add_argument('filename', help='PTS file generated from rpicam-vid (with a .txt or .pts extension)'
                        ' or an avi/mkv/mp4 container file', type=str)
    parser.add_argument('--plot', help='Plot timestamp graph', action='store_true')
    parser.add_argument('--narrow', help='Narrow the y-axis by hiding outliers', action='store_true')
    args = parser.parse_args()

    if args.filename.lower().endswith(('.txt', '.pts')):
        times = read_times_pts(args.filename)
    elif args.filename.lower().endswith(('.avi', '.mkv', '.mp4')):
        times = read_times_container(args.filename)
    else:
        raise RuntimeError('Unknown file format')

    # With fewer than two timestamps there are no frame time samples, and the
    # average below would divide by zero.
    if len(times) < 2:
        raise RuntimeError('Need at least two timestamps to analyse')

    diffs = get_differences(times)
    avg = sum(diffs) / len(diffs)
    min_val, min_idx = min((val, idx) for (idx, val) in enumerate(diffs))
    max_val, max_idx = max((val, idx) for (idx, val) in enumerate(diffs))
    print(f'Total: {len(diffs) + 1} frames ({len(diffs)} samples)')
    print(f'Average: {avg:.3f} ms / {1e3/avg:.3f} fps')
    print(f'Minimum: {min_val:.3f} ms at frame {min_idx}')
    print(f'Maximum: {max_val:.3f} ms at frame {max_idx}')
    print('Outliers:', *[outliers(diffs, f, avg) for f in (1, .1, .01, .001)])

    if args.plot:
        if plot_available:
            plot_pts(diffs, avg, f'{args.filename}', args.narrow)
        else:
            print('\nError: matplotlib is not installed, please install with "pip3 install matplotlib"')
#!/usr/bin/python3

# Copyright (C) 2021, Raspberry Pi (Trading) Limited
# Generate version information for rpicam-apps

import subprocess
import sys
from datetime import datetime
from string import hexdigits

# Number of hex digits of the git commit id to include in the version string.
digits = 12


def generate_version():
    """Print a version string of the form 'v<version> <commit-id> <date>'.

    Usage: version.py <version> [git-sha]. With one argument the commit id is
    read from the enclosing git repository (with a '-dirty' suffix if the
    working tree has uncommitted changes); with two arguments the supplied sha
    is validated and truncated to `digits` characters. On any error a
    placeholder commit id of '000000000000-invalid' is used instead, with a
    diagnostic printed to stderr.
    """
    try:
        if len(sys.argv) == 2:
            # Check if this is a git directory
            r = subprocess.run(['git', 'rev-parse', '--git-dir'],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, universal_newlines=True)
            if r.returncode:
                raise RuntimeError('Invalid git directory!')

            # Get commit id
            r = subprocess.run(['git', 'rev-parse', '--verify', 'HEAD'],
                               stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, universal_newlines=True)
            if r.returncode:
                raise RuntimeError('Invalid git commit!')

            commit = r.stdout.strip('\n')[0:digits]

            # Check dirty status
            r = subprocess.run(['git', 'diff-index', '--quiet', 'HEAD'],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, universal_newlines=True)
            if r.returncode:
                commit = commit + '-dirty'

        elif len(sys.argv) == 3:
            commit = sys.argv[2].lower().strip()
            # Reject an empty string explicitly: any() over an empty string is
            # False, so it would otherwise pass the hex check below.
            if not commit or any(c not in hexdigits for c in commit):
                raise RuntimeError('Invalid git sha!')

            commit = commit[0:digits]

        else:
            raise RuntimeError('Invalid number of command line arguments')

    except RuntimeError as e:
        print(f'ERR: {e}', file=sys.stderr)
        commit = '0' * digits + '-invalid'

    finally:
        print(f'v{sys.argv[1]} {commit} {datetime.now().strftime("%d-%m-%Y (%H:%M:%S)")}', end="")


if __name__ == "__main__":
    generate_version()