├── .gitignore ├── src ├── sys │ ├── linux_sys │ │ ├── mod.rs │ │ └── bindgen.sh │ ├── windows_sys │ │ ├── mod.rs │ │ └── bindgen.ps1 │ ├── mod.rs │ ├── version.rs │ ├── guid.rs │ └── headers │ │ └── nvcuvid.h ├── safe │ ├── mod.rs │ ├── builders.rs │ ├── session.rs │ ├── result.rs │ ├── api.rs │ ├── encoder.rs │ └── buffer.rs └── lib.rs ├── rustfmt.toml ├── Cargo.toml ├── LICENSE ├── README.md ├── .github └── workflows │ ├── lints.yml │ └── docs.yml ├── tests └── blanks.rs └── examples └── importing_vulkan_buffers.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /Cargo.lock 3 | .vscode/ 4 | *.bin 5 | -------------------------------------------------------------------------------- /src/sys/linux_sys/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cuviddec; 2 | pub mod nvEncodeAPI; 3 | pub mod nvcuvid; 4 | -------------------------------------------------------------------------------- /src/sys/windows_sys/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cuviddec; 2 | pub mod nvEncodeAPI; 3 | pub mod nvcuvid; 4 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | condense_wildcard_suffixes = true 2 | error_on_line_overflow = true 3 | # error_on_unformatted = true 4 | format_code_in_doc_comments = true 5 | format_macro_matchers = true 6 | format_macro_bodies = true 7 | format_strings = true 8 | imports_layout = "HorizontalVertical" 9 | imports_granularity = "Crate" 10 | normalize_comments = true 11 | overflow_delimited_expr = true 12 | reorder_impl_items = true 13 | group_imports = "StdExternalCrate" 14 | use_field_init_shorthand = true 15 | wrap_comments = true 16 | -------------------------------------------------------------------------------- /src/sys/mod.rs: -------------------------------------------------------------------------------- 1 | //! Auto-generated bindings to NVIDIA Video Codec SDK. 2 | //! 3 | //! The bindings were generated using [bindgen](https://github.com/rust-lang/rust-bindgen) 4 | //! using the scripts `sys/linux_sys/bindgen.sh` and 5 | //! `sys/windows_sys/bindgen.ps1` for the respective operating system. 6 | 7 | mod guid; 8 | mod version; 9 | 10 | #[allow(warnings)] 11 | #[rustfmt::skip] 12 | #[cfg(target_os = "linux")] 13 | mod linux_sys; 14 | #[cfg(target_os = "linux")] 15 | pub use linux_sys::*; 16 | 17 | #[allow(warnings)] 18 | #[rustfmt::skip] 19 | #[cfg(target_os = "windows")] 20 | mod windows_sys; 21 | #[cfg(target_os = "windows")] 22 | pub use windows_sys::*; 23 | -------------------------------------------------------------------------------- /src/safe/mod.rs: -------------------------------------------------------------------------------- 1 | //! Safe wrapper around the raw bindings. 2 | //! 3 | //! Largely unfinished, so you might still have to dip into 4 | //! [`sys`](crate::sys) for the missing functionality. 
5 | 6 | mod api; 7 | mod buffer; 8 | mod builders; 9 | mod encoder; 10 | mod result; 11 | mod session; 12 | 13 | pub use api::{EncodeAPI, ENCODE_API}; 14 | pub use buffer::{ 15 | Bitstream, 16 | BitstreamLock, 17 | Buffer, 18 | BufferLock, 19 | EncoderInput, 20 | EncoderOutput, 21 | RegisteredResource, 22 | }; 23 | pub use encoder::{Encoder, EncoderInitParams}; 24 | pub use result::{EncodeError, ErrorKind}; 25 | pub use session::{CodecPictureParams, EncodePictureParams, Session}; 26 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nvidia-video-codec-sdk" 3 | version = "0.4.0" 4 | edition = "2021" 5 | license = "MIT" 6 | rust-version = "1.70" 7 | 8 | description = "Bindings for NVIDIA Video Codec SDK" 9 | homepage = "https://github.com/ViliamVadocz/nvidia-video-codec-sdk" 10 | documentation = "https://docs.rs/nvidia-video-codec-sdk" 11 | repository = "https://github.com/ViliamVadocz/nvidia-video-codec-sdk" 12 | readme = "README.md" 13 | 14 | keywords = ["encoding", "decoding", "bindings", "nvidia", "cuda"] 15 | 16 | [package.metadata.docs.rs] 17 | cargo-args = ["-Zunstable-options", "-Zrustdoc-scrape-examples"] 18 | features = ["ci-check"] 19 | 20 | [dependencies] 21 | cudarc = { version = "0.16.4", features = ["cuda-version-from-build-system"] } 22 | lazy_static = "1.5.0" 23 | 24 | [dev-dependencies] 25 | vulkano = "0.35.0" 26 | libc = "0.2" 27 | 28 | [features] 29 | default = [] 30 | # workaround to make the ci similar to cudarc 31 | ci-check = ["cudarc/cuda-12020", "cudarc/dynamic-loading"] 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 Viliam Vadocz 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NVIDIA Video Codec SDK 2 | 3 | [![crates.io](https://img.shields.io/crates/v/nvidia-video-codec-sdk?style=for-the-badge)](https://crates.io/crates/nvidia-video-codec-sdk) 4 | [![docs.rs](https://img.shields.io/docsrs/nvidia-video-codec-sdk?label=docs.rs%20latest&style=for-the-badge)](https://docs.rs/nvidia-video-codec-sdk) 5 | 6 | Rust bindings for [NVIDIA Video Codec SDK](https://developer.nvidia.com/video-codec-sdk). 
7 | 8 | The documentation is also hosted on GitHub Pages 9 | [here](https://viliamvadocz.github.io/nvidia-video-codec-sdk/nvidia_video_codec_sdk/). 10 | 11 | Versions: 12 | - NVIDIA Video Codec SDK 12.1.14 13 | - CUDA 12.2 (older CUDA versions should also work) 14 | 15 | ## Installation 16 | 17 | The build script will try to automatically locate your NVIDIA Video Codec SDK installation. 18 | You can help it by setting the environment variable `NVIDIA_VIDEO_CODEC_SDK_PATH` to the directory containing the library files. 19 | - `nvEncodeAPI.lib` and `nvcuvid.lib` on Windows, 20 | - `libnvidia-encode.so` and `libnvcuvid.so` on Linux. 21 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Bindings for the [NVIDIA Video Codec SDK](https://developer.nvidia.com/video-codec-sdk). 2 | //! 3 | //! The raw bindings can be found in [`sys`]. 4 | //! Parts of the API have been wrapped in [`safe`]. 5 | //! 6 | //! Feel free to contribute! 7 | //! 8 | //! --- 9 | //! 10 | //! # Encoding 11 | //! 12 | //! See [NVIDIA Video Codec SDK - Video Encoder API Programming Guide](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html). 13 | //! 14 | //! The main entrypoint for the encoder API is the [`Encoder`] type. 15 | //! 16 | //! Usage follows this structure: 17 | //! 1. Initialize an [`Encoder`] with an encode device (such as CUDA). 18 | //! 2. Configure the encoder and start a [`Session`]. 19 | //! 3. Create input [`Buffer`]s (or [`RegisteredResource`]) and output 20 | //! [`Bitstream`]s. 21 | //! 4. Encode frames with [`Session::encode_picture`]. 22 | //! 23 | //! See the mentioned types for more info on how to use each. 24 | //! 25 | //! # Decoding 26 | //! 27 | //! There is no safe wrapper yet. 
28 | 29 | #![warn( 30 | missing_docs, 31 | clippy::pedantic, 32 | clippy::style, 33 | clippy::unwrap_used, 34 | missing_debug_implementations, 35 | missing_copy_implementations 36 | )] 37 | 38 | pub mod safe; 39 | pub mod sys; 40 | 41 | #[macro_use] 42 | extern crate lazy_static; 43 | 44 | pub use safe::*; 45 | -------------------------------------------------------------------------------- /.github/workflows/lints.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | env: 4 | CARGO_TERM_COLOR: always 5 | 6 | name: lints 7 | jobs: 8 | clippy: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v5 12 | - uses: actions-rs/toolchain@v1 13 | with: 14 | profile: minimal 15 | toolchain: nightly 16 | components: clippy 17 | override: true 18 | # - uses: actions-rs/clippy-check@v1 19 | # with: 20 | # token: ${{ secrets.GITHUB_TOKEN }} 21 | # args: --features ci-check -- -D warnings 22 | - uses: actions-rs/cargo@v1 23 | with: 24 | command: clippy 25 | args: --features ci-check -- --deny warnings 26 | fmt: 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v5 30 | - uses: actions-rs/toolchain@v1 31 | with: 32 | profile: minimal 33 | toolchain: nightly 34 | components: rustfmt 35 | override: true 36 | - uses: actions-rs/cargo@v1 37 | with: 38 | command: fmt 39 | args: --all --check 40 | docs: 41 | runs-on: ubuntu-latest 42 | steps: 43 | - uses: actions/checkout@v5 44 | - uses: actions-rs/toolchain@v1 45 | with: 46 | profile: minimal 47 | toolchain: nightly 48 | override: true 49 | - uses: actions-rs/cargo@v1 50 | with: 51 | command: doc 52 | args: --features ci-check 53 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ "master" ] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | 8 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages 9 | permissions: 10 | contents: read 11 | pages: write 12 | id-token: write 13 | 14 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 15 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
16 | concurrency: 17 | group: "pages" 18 | cancel-in-progress: false 19 | 20 | name: docs 21 | jobs: 22 | build: 23 | runs-on: ubuntu-latest 24 | steps: 25 | - uses: actions/checkout@v5 26 | - uses: actions-rs/toolchain@v1 27 | with: 28 | profile: minimal 29 | toolchain: nightly 30 | override: true 31 | - uses: actions-rs/cargo@v1 32 | with: 33 | command: doc 34 | args: --features ci-check --no-deps 35 | - shell: sh 36 | run: | 37 | chmod -c -R +rX "target/doc" | 38 | while read line; do 39 | echo "::warning title=Invalid file permissions automatically fixed::$line" 40 | done 41 | - uses: actions/upload-pages-artifact@v4 42 | with: 43 | path: ./target/doc 44 | deploy: 45 | runs-on: ubuntu-latest 46 | environment: 47 | name: github-pages 48 | url: ${{ steps.deployment.outputs.page_url }} 49 | needs: build 50 | steps: 51 | - uses: actions/deploy-pages@v4 52 | id: deployment 53 | -------------------------------------------------------------------------------- /src/sys/linux_sys/bindgen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | bindgen \ 3 | --allowlist-type cudaVideo.* \ 4 | --allowlist-type cuvid.* \ 5 | --allowlist-type CUVID.* \ 6 | --allowlist-function cuvid.* \ 7 | --allowlist-var \[IPBS\]_VOP \ 8 | --allowlist-var cuvid.* \ 9 | --blocklist-file .*/cuda\\.h \ 10 | --blocklist-file .*/std.*\\.h \ 11 | --must-use-type CUresult \ 12 | --must-use-type cuvidDecodeStatus \ 13 | \ 14 | --default-enum-style=rust \ 15 | --no-doc-comments \ 16 | --with-derive-default \ 17 | --with-derive-eq \ 18 | --with-derive-hash \ 19 | --with-derive-ord \ 20 | --use-core \ 21 | --merge-extern-blocks \ 22 | --sort-semantically \ 23 | --output cuviddec.rs ../headers/cuviddec.h 24 | 25 | bindgen \ 26 | --allowlist-type CU.* \ 27 | --allowlist-type cudaVideo.* \ 28 | --allowlist-type cudaAudio.* \ 29 | --allowlist-type HEVC.* \ 30 | --allowlist-function cuvid.* \ 31 | --allowlist-var MAX_CLOCK_TS \ 32 | --blocklist-file .*/cuda\\.h \ 33 | --blocklist-file .*/std.*\\.h \ 34 | --blocklist-file .*/cuviddec\\.h \ 35 | --must-use-type CUresult \ 36 | \ 37 | --default-enum-style=rust \ 38 | --no-doc-comments \ 39 | --with-derive-default \ 40 | --with-derive-eq \ 41 | --with-derive-hash \ 42 | --with-derive-ord \ 43 | --use-core \ 44 | --merge-extern-blocks \ 45 | --sort-semantically \ 46 | --output nvcuvid.rs ../headers/nvcuvid.h 47 | 48 | bindgen \ 49 | --allowlist-type NVENC.* \ 50 | --allowlist-type NV_ENC.* \ 51 | --allowlist-type NV_ENCODE.* \ 52 | --allowlist-type GUID \ 53 | --allowlist-type PENV.* \ 54 | --allowlist-function NvEncodeAPI.* \ 55 | --allowlist-function NvEnc.* \ 56 | --allowlist-var NVENC.* \ 57 | --allowlist-var NV_ENC.* \ 58 | --allowlist-var NV_MAX.* \ 59 | --blocklist-item NV_ENC_\\w+_GUID \ 60 | --blocklist-file .*/cuda\\.h \ 61 | --blocklist-file .*/std.*\\.h \ 62 | --blocklist-file .*/cuviddec\\.h \ 63 | --must-use-type NVENCSTATUS \ 64 | \ 65 | --default-enum-style=rust \ 66 | --no-doc-comments \ 67 | --with-derive-default \ 68 | --with-derive-eq \ 69 | --with-derive-hash \ 70 | --with-derive-ord \ 71 | --use-core \ 72 | --merge-extern-blocks \ 73 | --sort-semantically \ 74 | --output nvEncodeAPI.rs ../headers/nvEncodeAPI.h 75 | 76 | # Additional preludes to make sure the bindings compile. 
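# The generated files reference CUDA types such as `CUresult` and `CUdeviceptr`
# whose definitions are blocklisted above (they come from cuda.h), and
# `nvEncodeAPI.rs` additionally relies on the hand-written `version` and `guid`
# modules of this crate. The commands below therefore prepend the `use`
# statements (and a `wchar_t` alias for `nvcuvid.rs`) that each generated file
# needs in order to compile as a module of the crate.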
77 | echo -e "use cudarc::driver::sys::*;\n$(cat cuviddec.rs)" > cuviddec.rs
78 | echo -e "use super::cuviddec::*;\nuse cudarc::driver::sys::*;\ntype wchar_t = i32;\n$(cat nvcuvid.rs)" > nvcuvid.rs
79 | echo -e "pub use super::super::version::*;\npub use super::super::guid::*;\n$(cat nvEncodeAPI.rs)" > nvEncodeAPI.rs
80 |
-------------------------------------------------------------------------------- /src/sys/version.rs: --------------------------------------------------------------------------------
1 | #![allow(missing_docs)]
2 | //! Constants from `nvEncodeAPI` that bindgen fails to generate.
3 |
4 | #[must_use]
5 | #[allow(non_snake_case)]
6 | /// Macro to generate the per-structure version for use with the API.
7 | pub const fn NVENCAPI_STRUCT_VERSION(ver: u32) -> u32 {
8 |     super::nvEncodeAPI::NVENCAPI_VERSION | (ver << 16) | (0x7 << 28)
9 | }
10 |
11 | // Search for `#define \w+_VER` and copy the whole line.
12 | // Then remove constants which are already defined in `nvEncodeAPI.rs`.
13 | // Finally convert to Rust syntax by swapping `#define` for `pub const`,
14 | // adding type, equals, and semicolon.
15 |
16 | pub const NV_ENC_CAPS_PARAM_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
17 | pub const NV_ENC_RESTORE_ENCODER_STATE_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
18 | pub const NV_ENC_OUTPUT_STATS_BLOCK_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
19 | pub const NV_ENC_OUTPUT_STATS_ROW_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
20 | pub const NV_ENC_ENCODE_OUT_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
21 | pub const NV_ENC_LOOKAHEAD_PIC_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
22 | pub const NV_ENC_CREATE_INPUT_BUFFER_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
23 | pub const NV_ENC_CREATE_BITSTREAM_BUFFER_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
24 | pub const NV_ENC_CREATE_MV_BUFFER_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
25 | pub const NV_ENC_RC_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
26 | pub const NV_ENC_CONFIG_VER: u32 = NVENCAPI_STRUCT_VERSION(8) | (1 << 31);
27 | pub const NV_ENC_INITIALIZE_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(6) | (1 << 31);
28 | pub const NV_ENC_RECONFIGURE_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1) | (1 << 31);
29 | pub const NV_ENC_PRESET_CONFIG_VER: u32 = NVENCAPI_STRUCT_VERSION(4) | (1 << 31);
30 | pub const NV_ENC_PIC_PARAMS_MVC_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
31 | pub const NV_ENC_PIC_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(6) | (1 << 31);
32 | pub const NV_ENC_MEONLY_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(3);
33 | pub const NV_ENC_LOCK_BITSTREAM_VER: u32 = NVENCAPI_STRUCT_VERSION(1) | (1 << 31);
34 | pub const NV_ENC_LOCK_INPUT_BUFFER_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
35 | pub const NV_ENC_MAP_INPUT_RESOURCE_VER: u32 = NVENCAPI_STRUCT_VERSION(4);
36 | pub const NV_ENC_FENCE_POINT_D3D12_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
37 | pub const NV_ENC_INPUT_RESOURCE_D3D12_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
38 | pub const NV_ENC_OUTPUT_RESOURCE_D3D12_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
39 | pub const NV_ENC_REGISTER_RESOURCE_VER: u32 = NVENCAPI_STRUCT_VERSION(4);
40 | pub const NV_ENC_STAT_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
41 | pub const NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
42 | pub const NV_ENC_EVENT_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
43 | pub const NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER: u32 = NVENCAPI_STRUCT_VERSION(1);
44 | pub const NV_ENCODE_API_FUNCTION_LIST_VER: u32 = NVENCAPI_STRUCT_VERSION(2);
45 |
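// Illustrative sketch (module and test names are ours, not from the SDK):
// how a packed struct version decomposes. This assumes, as in SDK 12.x, that
// the API major version stays below 2^16 and the minor version below 16, so
// the checked bits only carry the struct revision and the fixed 0x7 marker.
#[cfg(test)]
mod struct_version_layout_sketch {
    use super::*;

    #[test]
    fn packs_revision_and_marker_bits() {
        let ver = NVENCAPI_STRUCT_VERSION(4);
        assert_eq!((ver >> 16) & 0xF, 4); // struct revision in bits 16..20
        assert_eq!(ver >> 28, 0x7); // fixed marker in bits 28..32
    }
}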
-------------------------------------------------------------------------------- /src/sys/windows_sys/bindgen.ps1: -------------------------------------------------------------------------------- 1 | # Powershell script 2 | bindgen ` 3 | --allowlist-type cudaVideo.* ` 4 | --allowlist-type cuvid.* ` 5 | --allowlist-type CUVID.* ` 6 | --allowlist-function cuvid.* ` 7 | --allowlist-var \[IPBS\]_VOP ` 8 | --allowlist-var cuvid.* ` 9 | --blocklist-file .*cuda\.h ` 10 | --blocklist-file .*std.*\.h ` 11 | --must-use-type CUresult ` 12 | --must-use-type cuvidDecodeStatus ` 13 | ` 14 | --default-enum-style=rust ` 15 | --no-doc-comments ` 16 | --with-derive-default ` 17 | --with-derive-eq ` 18 | --with-derive-hash ` 19 | --with-derive-ord ` 20 | --use-core ` 21 | --merge-extern-blocks ` 22 | --sort-semantically ` 23 | --output cuviddec.rs ..\headers\cuviddec.h ` 24 | -- -I "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.0\include" 25 | 26 | bindgen ` 27 | --allowlist-type CU.* ` 28 | --allowlist-type cudaVideo.* ` 29 | --allowlist-type cudaAudio.* ` 30 | --allowlist-type HEVC.* ` 31 | --allowlist-function cuvid.* ` 32 | --allowlist-var MAX_CLOCK_TS ` 33 | --blocklist-file .*cuda\.h ` 34 | --blocklist-file .*std.*\.h ` 35 | --blocklist-file .*cuviddec\.h ` 36 | --must-use-type CUresult ` 37 | ` 38 | --default-enum-style=rust ` 39 | --no-doc-comments ` 40 | --with-derive-default ` 41 | --with-derive-eq ` 42 | --with-derive-hash ` 43 | --with-derive-ord ` 44 | --use-core ` 45 | --merge-extern-blocks ` 46 | --sort-semantically ` 47 | --output nvcuvid.rs ..\headers\nvcuvid.h ` 48 | -- -I "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.0\include" 49 | 50 | bindgen ` 51 | --allowlist-type NVENC.* ` 52 | --allowlist-type NV_ENC.* ` 53 | --allowlist-type NV_ENCODE.* ` 54 | --allowlist-type GUID ` 55 | --allowlist-type PENV.* ` 56 | --allowlist-function NvEncodeAPI.* ` 57 | --allowlist-function NvEnc.* ` 58 | --allowlist-var NVENC.* ` 59 | --allowlist-var NV_ENC.* ` 60 | --allowlist-var NV_MAX.* ` 61 | --blocklist-item NV_ENC_\w+_GUID ` 62 | --blocklist-file .*cuda\.h ` 63 | --blocklist-file .*std.*\.h ` 64 | --must-use-type NVENCSTATUS ` 65 | ` 66 | --default-enum-style=rust ` 67 | --no-doc-comments ` 68 | --with-derive-default ` 69 | --with-derive-eq ` 70 | --with-derive-hash ` 71 | --with-derive-ord ` 72 | --use-core ` 73 | --merge-extern-blocks ` 74 | --sort-semantically ` 75 | --output nvEncodeAPI.rs ..\headers\nvEncodeAPI.h ` 76 | -- -I "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.0\include" 77 | 78 | # Additional preludes to make sure the bindings compile. 
79 | $(
80 |     "use cudarc::driver::sys::*;"
81 |     Get-Content cuviddec.rs -Raw
82 | ) | Set-Content cuviddec.rs
83 | $(
84 |     "use super::cuviddec::*;"
85 |     "use cudarc::driver::sys::*;"
86 |     Get-Content nvcuvid.rs -Raw
87 | ) | Set-Content nvcuvid.rs
88 | $(
89 |     "pub use super::super::version::*;"
90 |     "pub use super::super::guid::*;"
91 |     Get-Content nvEncodeAPI.rs -Raw
92 | ) | Set-Content nvEncodeAPI.rs
93 |
-------------------------------------------------------------------------------- /tests/blanks.rs: --------------------------------------------------------------------------------
1 | use std::{
2 |     collections::VecDeque,
3 |     fs::OpenOptions,
4 |     io::Write,
5 |     path::Path,
6 |     sync::Arc,
7 |     thread,
8 |     time::Duration,
9 | };
10 |
11 | use cudarc::driver::CudaContext;
12 | use nvidia_video_codec_sdk::{
13 |     sys::nvEncodeAPI::{GUID, NV_ENC_BUFFER_FORMAT, NV_ENC_CODEC_H264_GUID},
14 |     EncodeError,
15 |     Encoder,
16 |     EncoderInitParams,
17 |     ErrorKind,
18 | };
19 |
20 | fn encode_blanks<P: AsRef<Path>>(
21 |     cuda_ctx: Arc<CudaContext>,
22 |     file_path: Option<P>,
23 | ) -> Result<(), EncodeError> {
24 |     const FRAMES: usize = 128;
25 |     const BUFFERS: usize = 16;
26 |     const WIDTH: u32 = 1920;
27 |     const HEIGHT: u32 = 1080;
28 |     const FRAMERATE: u32 = 30;
29 |     const BUFFER_FORMAT: NV_ENC_BUFFER_FORMAT = NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB;
30 |     const ENCODE_GUID: GUID = NV_ENC_CODEC_H264_GUID;
31 |     // The size should be adjusted depending on the buffer format and pitch/stride.
32 |     #[allow(clippy::large_stack_arrays)]
33 |     const FRAME: [u8; (WIDTH * HEIGHT * 4) as usize] = [255; (WIDTH * HEIGHT * 4) as usize];
34 |
35 |     let mut output = file_path.map(|path| {
36 |         OpenOptions::new()
37 |             .create(true)
38 |             .write(true)
39 |             .open(path)
40 |             .expect("Path should be valid.")
41 |     });
42 |
43 |     // Initialize encoder.
44 |     let encoder = Encoder::initialize_with_cuda(cuda_ctx)?;
45 |     let mut initialize_params = EncoderInitParams::new(ENCODE_GUID, WIDTH, HEIGHT);
46 |     initialize_params
47 |         .enable_picture_type_decision()
48 |         .framerate(FRAMERATE, 1);
49 |     let session = encoder.start_session(BUFFER_FORMAT, initialize_params)?;
50 |
51 |     // Create input and output buffers.
52 |     let mut input_buffers = (0..BUFFERS)
53 |         .map(|_| session.create_input_buffer())
54 |         .collect::<Result<Vec<_>, _>>()?;
55 |     let mut output_bitstreams = (0..BUFFERS)
56 |         .map(|_| session.create_output_bitstream())
57 |         .collect::<Result<Vec<_>, _>>()?;
58 |     // We will use this queue to mark which buffers are in-use.
59 |     let mut in_use = VecDeque::with_capacity(BUFFERS);
60 |
61 |     // Encode frames.
62 |     'next_frame: for _ in 0..FRAMES {
63 |         assert_eq!(input_buffers.len() + in_use.len(), BUFFERS);
64 |         assert_eq!(output_bitstreams.len() + in_use.len(), BUFFERS);
65 |
66 |         // Get an input and output buffer.
67 |         let mut input_buffer = input_buffers
68 |             .pop()
69 |             .expect("There should be enough buffers.");
70 |         let mut output_bitstream = output_bitstreams
71 |             .pop()
72 |             .expect("There should be enough buffers.");
73 |
74 |         // Write a frame to the input buffer.
75 |         unsafe { input_buffer.lock()?.write(&FRAME) };
76 |
77 |         // Encode the frame.
78 |         'encode: loop {
79 |             match session.encode_picture(
80 |                 &mut input_buffer,
81 |                 &mut output_bitstream,
82 |                 Default::default(),
83 |             ) {
84 |                 Ok(()) => {
85 |                     // Success! Mark that these buffers are in-use.
86 |                     in_use.push_back((input_buffer, output_bitstream));
87 |                     break 'encode;
88 |                 }
89 |                 Err(e) if e.kind() == ErrorKind::EncoderBusy => {
90 |                     // Encoder is busy, so let's just wait for a bit.
91 |                     thread::sleep(Duration::from_millis(10));
92 |                 }
93 |                 Err(e) if e.kind() == ErrorKind::NeedMoreInput => {
94 |                     // Encoder needs more input; mark that these buffers are in-use
95 |                     // and skip to the next frame.
96 |                     in_use.push_back((input_buffer, output_bitstream));
97 |                     continue 'next_frame;
98 |                 }
99 |                 Err(e) => return Err(e),
100 |             }
101 |         }
102 |
103 |         // In an attempt to speed things up, don't try to lock output bitstreams
104 |         // immediately, but instead delay as long as possible.
105 |         if in_use.len() < BUFFERS {
106 |             continue;
107 |         }
108 |
109 |         // Get data out of bitstream, and put buffers back.
110 |         let (in_buf, mut out_buf) = in_use
111 |             .pop_front()
112 |             .expect("There should be at least one element since that was just checked.");
113 |         let lock = out_buf.lock()?;
114 |         if let Some(file) = output.as_mut() {
115 |             file.write_all(lock.data()).unwrap();
116 |         }
117 |         drop(lock);
118 |         input_buffers.push(in_buf);
119 |         output_bitstreams.push(out_buf);
120 |     }
121 |
122 |     // Finish reading the rest of the bitstream buffers.
123 |     for (_, mut out_buf) in in_use {
124 |         let lock = out_buf.lock()?;
125 |         if let Some(file) = output.as_mut() {
126 |             file.write_all(lock.data()).unwrap();
127 |         }
128 |     }
129 |
130 |     Ok(())
131 | }
132 |
133 | #[test]
134 | fn encoder_works() {
135 |     encode_blanks::<&str>(
136 |         CudaContext::new(0).expect("CUDA should be installed."),
137 |         None,
138 |     )
139 |     .unwrap();
140 | }
141 |
142 | #[test]
143 | fn encode_in_parallel() {
144 |     std::thread::scope(|scope| {
145 |         let cuda_ctx = CudaContext::new(0).expect("CUDA should be installed.");
146 |         for _ in 0..4 {
147 |             let thread_cuda_ctx = cuda_ctx.clone();
148 |             scope.spawn(|| encode_blanks::<&str>(thread_cuda_ctx, None).unwrap());
149 |         }
150 |     });
151 | }
152 |
-------------------------------------------------------------------------------- /src/safe/builders.rs: --------------------------------------------------------------------------------
1 | //! Builders for large params structs from autogenerated bindings.
2 | //!
3 | //! The bindings contain many massive structs which are used as parameters to
4 | //! functions in the API. This module provides builders to simplify creating
5 | //! these structs. They each have a `new()` method which has the minimum required
6 | //! parameters for the struct. Other options can be set through the additional builder methods.
7 |
8 | use std::ffi::c_void;
9 |
10 | use crate::sys::nvEncodeAPI::{
11 |     GUID,
12 |     NV_ENC_BUFFER_FORMAT,
13 |     NV_ENC_CONFIG,
14 |     NV_ENC_INITIALIZE_PARAMS,
15 |     NV_ENC_INITIALIZE_PARAMS_VER,
16 |     NV_ENC_INPUT_RESOURCE_TYPE,
17 |     NV_ENC_PIC_FLAGS,
18 |     NV_ENC_PIC_PARAMS,
19 |     NV_ENC_PIC_PARAMS_VER,
20 |     NV_ENC_REGISTER_RESOURCE,
21 |     NV_ENC_REGISTER_RESOURCE_VER,
22 | };
23 |
24 | #[deprecated(note = "use the safe wrapper `EncoderInitParams`")]
25 | impl NV_ENC_INITIALIZE_PARAMS {
26 |     /// Builder for [`NV_ENC_INITIALIZE_PARAMS`].
27 |     #[must_use]
28 |     pub fn new(encode_guid: GUID, width: u32, height: u32) -> Self {
29 |         NV_ENC_INITIALIZE_PARAMS {
30 |             version: NV_ENC_INITIALIZE_PARAMS_VER,
31 |             encodeGUID: encode_guid,
32 |             encodeWidth: width,
33 |             encodeHeight: height,
34 |             ..Default::default()
35 |         }
36 |     }
37 |
38 |     /// Specifies the preset for encoding. If the preset GUID is set then
39 |     /// the preset configuration will be applied before any other parameter.
40 |     pub fn preset_guid(&mut self, preset_guid: GUID) -> &mut Self {
41 |         self.presetGUID = preset_guid;
42 |         self
43 |     }
44 |
45 |     /// Specifies the advanced codec specific structure. If the client has sent a
46 |     /// valid codec config structure, it will override parameters set by the
47 |     /// [`NV_ENC_INITIALIZE_PARAMS::preset_guid`].
48 |     ///
49 |     /// The client can query the interface for codec-specific parameters
50 |     /// using [`Encoder::get_preset_config`](super::encoder::Encoder::get_preset_config).
51 |     /// It can then modify (if required) some of the codec config parameters and
52 |     /// send down a custom config structure using this method. Even in this
53 |     /// case the client is recommended to pass the same preset GUID it has
54 |     /// used to get the config.
55 |     pub fn encode_config(&mut self, encode_config: &mut NV_ENC_CONFIG) -> &mut Self {
56 |         self.encodeConfig = encode_config;
57 |         self
58 |     }
59 |
60 |     /// Specifies the display aspect ratio (H264/HEVC) or the render
61 |     /// width/height (AV1).
62 | pub fn display_aspect_ratio(&mut self, width: u32, height: u32) -> &mut Self { 63 | self.darWidth = width; 64 | self.darHeight = height; 65 | self 66 | } 67 | 68 | /// Specifies the framerate in frames per second as a fraction 69 | /// `numerator / denominator`. 70 | pub fn framerate(&mut self, numerator: u32, denominator: u32) -> &mut Self { 71 | self.frameRateNum = numerator; 72 | self.frameRateDen = denominator; 73 | self 74 | } 75 | 76 | /// Enable the Picture Type Decision to be taken by the 77 | /// `NvEncodeAPI` interface. 78 | pub fn enable_picture_type_decision(&mut self) -> &mut Self { 79 | self.enablePTD = 1; 80 | self 81 | } 82 | 83 | // TODO: Add other options 84 | } 85 | 86 | impl NV_ENC_PIC_PARAMS { 87 | /// Create an EOS empty frame that is used at the 88 | /// end of encoding to flush the encoder. 89 | #[must_use] 90 | pub fn end_of_stream() -> Self { 91 | NV_ENC_PIC_PARAMS { 92 | version: NV_ENC_PIC_PARAMS_VER, 93 | encodePicFlags: NV_ENC_PIC_FLAGS::NV_ENC_PIC_FLAG_EOS as u32, 94 | ..Default::default() 95 | } 96 | } 97 | } 98 | 99 | impl NV_ENC_REGISTER_RESOURCE { 100 | /// Builder for [`NV_ENC_REGISTER_RESOURCE`]. 101 | /// 102 | /// # Arguments 103 | /// 104 | /// * `resource_type` - Specifies the type of resource to be registered. 105 | /// Supported values are: 106 | /// - [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX`], 107 | /// - [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR`], 108 | /// - [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX`] 109 | /// * `width` - Input frame width. 110 | /// * `height` - Input frame height. 111 | /// * `resource_to_register` - Handle to the resource that is being 112 | /// registered. In the case of 113 | /// [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR`], 114 | /// this should be a `CUdeviceptr` which you can get from 115 | /// `cuExternalMemoryGetMappedBuffer`. 116 | /// * `buffer_format` - Buffer format of resource to be registered. 117 | #[must_use] 118 | pub fn new( 119 | resource_type: NV_ENC_INPUT_RESOURCE_TYPE, 120 | width: u32, 121 | height: u32, 122 | resource_to_register: *mut c_void, 123 | buffer_format: NV_ENC_BUFFER_FORMAT, 124 | ) -> Self { 125 | NV_ENC_REGISTER_RESOURCE { 126 | version: NV_ENC_REGISTER_RESOURCE_VER, 127 | resourceType: resource_type, 128 | width, 129 | height, 130 | pitch: width, 131 | resourceToRegister: resource_to_register, 132 | registeredResource: std::ptr::null_mut(), 133 | bufferFormat: buffer_format, 134 | ..Default::default() 135 | } 136 | } 137 | 138 | /// Set the input buffer pitch. 139 | /// 140 | /// - For [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX`] 141 | /// resources, set this to 0. 142 | /// - For [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR`] 143 | /// resources, set this to the pitch as obtained from `cuMemAllocPitch()`, 144 | /// or to the width in **bytes** (if this resource was created by using 145 | /// `cuMemAlloc()`). This value must be a multiple of 4. 146 | /// - For [`NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX`] 147 | /// resources, set this to the texture width multiplied by the number of 148 | /// components in the texture format. 
149 | #[must_use] 150 | pub fn pitch(mut self, pitch: u32) -> Self { 151 | self.pitch = pitch; 152 | self 153 | } 154 | 155 | // TODO: Add other options 156 | } 157 | -------------------------------------------------------------------------------- /src/sys/guid.rs: -------------------------------------------------------------------------------- 1 | //! Static GUIDs that bindgen generates incorrectly. 2 | //! See [relevant issue](https://github.com/rust-lang/rust-bindgen/issues/1888). 3 | 4 | use super::nvEncodeAPI::GUID; 5 | 6 | // Search for `//.*\nstatic const\s+GUID\s+NV_ENC_\w+_GUID\s+=\n\{.*\};` 7 | 8 | // ========================================================================================= 9 | // Encode Codec GUIDS supported by the NvEncodeAPI interface. 10 | // ========================================================================================= 11 | 12 | /// GUID for the H.264 encoding. 13 | /// {6BC82762-4E63-4ca4-AA85-1E50F321F6BF} 14 | pub const NV_ENC_CODEC_H264_GUID: GUID = GUID { 15 | Data1: 0x6bc8_2762, 16 | Data2: 0x4e63, 17 | Data3: 0x4ca4, 18 | Data4: [0xaa, 0x85, 0x1e, 0x50, 0xf3, 0x21, 0xf6, 0xbf], 19 | }; 20 | 21 | /// GUID for the H.265 encoding. 22 | /// {790CDC88-4522-4d7b-9425-BDA9975F7603} 23 | pub const NV_ENC_CODEC_HEVC_GUID: GUID = GUID { 24 | Data1: 0x790c_dc88, 25 | Data2: 0x4522, 26 | Data3: 0x4d7b, 27 | Data4: [0x94, 0x25, 0xbd, 0xa9, 0x97, 0x5f, 0x76, 0x3], 28 | }; 29 | 30 | /// GUID for the AV1 encoding. 31 | /// {0A352289-0AA7-4759-862D-5D15CD16D254} 32 | pub const NV_ENC_CODEC_AV1_GUID: GUID = GUID { 33 | Data1: 0x0a35_2289, 34 | Data2: 0x0aa7, 35 | Data3: 0x4759, 36 | Data4: [0x86, 0x2d, 0x5d, 0x15, 0xcd, 0x16, 0xd2, 0x54], 37 | }; 38 | 39 | // ========================================================================================= 40 | // * Encode Profile GUIDS supported by the NvEncodeAPI interface. 41 | // ========================================================================================= 42 | 43 | /// GUID for the autoselect profile. 44 | // {BFD6F8E7-233C-4341-8B3E-4818523803F4} 45 | pub const NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID: GUID = GUID { 46 | Data1: 0xbfd6_f8e7, 47 | Data2: 0x233c, 48 | Data3: 0x4341, 49 | Data4: [0x8b, 0x3e, 0x48, 0x18, 0x52, 0x38, 0x3, 0xf4], 50 | }; 51 | 52 | /// GUID for the H.264 encoding baseline profile. 53 | /// {0727BCAA-78C4-4c83-8C2F-EF3DFF267C6A} 54 | pub const NV_ENC_H264_PROFILE_BASELINE_GUID: GUID = GUID { 55 | Data1: 0x0727_bcaa, 56 | Data2: 0x78c4, 57 | Data3: 0x4c83, 58 | Data4: [0x8c, 0x2f, 0xef, 0x3d, 0xff, 0x26, 0x7c, 0x6a], 59 | }; 60 | 61 | /// GUID for the H.264 encoding main profile. 62 | /// {60B5C1D4-67FE-4790-94D5-C4726D7B6E6D} 63 | pub const NV_ENC_H264_PROFILE_MAIN_GUID: GUID = GUID { 64 | Data1: 0x60b5_c1d4, 65 | Data2: 0x67fe, 66 | Data3: 0x4790, 67 | Data4: [0x94, 0xd5, 0xc4, 0x72, 0x6d, 0x7b, 0x6e, 0x6d], 68 | }; 69 | 70 | /// GUID for the H.264 encoding, high quality profile. 71 | /// {E7CBC309-4F7A-4b89-AF2A-D537C92BE310} 72 | pub const NV_ENC_H264_PROFILE_HIGH_GUID: GUID = GUID { 73 | Data1: 0xe7cb_c309, 74 | Data2: 0x4f7a, 75 | Data3: 0x4b89, 76 | Data4: [0xaf, 0x2a, 0xd5, 0x37, 0xc9, 0x2b, 0xe3, 0x10], 77 | }; 78 | 79 | /// GUID for the H.264 high quality, `YCbCR444` digital color format profile. 
80 | /// {7AC663CB-A598-4960-B844-339B261A7D52} 81 | pub const NV_ENC_H264_PROFILE_HIGH_444_GUID: GUID = GUID { 82 | Data1: 0x7ac6_63cb, 83 | Data2: 0xa598, 84 | Data3: 0x4960, 85 | Data4: [0xb8, 0x44, 0x33, 0x9b, 0x26, 0x1a, 0x7d, 0x52], 86 | }; 87 | 88 | /// GUID for the H.264, stereo encoding profile. 89 | /// {40847BF5-33F7-4601-9084-E8FE3C1DB8B7} 90 | pub const NV_ENC_H264_PROFILE_STEREO_GUID: GUID = GUID { 91 | Data1: 0x4084_7bf5, 92 | Data2: 0x33f7, 93 | Data3: 0x4601, 94 | Data4: [0x90, 0x84, 0xe8, 0xfe, 0x3c, 0x1d, 0xb8, 0xb7], 95 | }; 96 | 97 | /// GUID for the H.264, progressive encoding profile. 98 | /// {B405AFAC-F32B-417B-89C4-9ABEED3E5978} 99 | pub const NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID: GUID = GUID { 100 | Data1: 0xb405_afac, 101 | Data2: 0xf32b, 102 | Data3: 0x417b, 103 | Data4: [0x89, 0xc4, 0x9a, 0xbe, 0xed, 0x3e, 0x59, 0x78], 104 | }; 105 | 106 | /// GUID for the H.264, constrained encoding profile. 107 | /// {AEC1BD87-E85B-48f2-84C3-98BCA6285072} 108 | pub const NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID: GUID = GUID { 109 | Data1: 0xaec1_bd87, 110 | Data2: 0xe85b, 111 | Data3: 0x48f2, 112 | Data4: [0x84, 0xc3, 0x98, 0xbc, 0xa6, 0x28, 0x50, 0x72], 113 | }; 114 | 115 | /// GUID for the H.265 main (8-bit) encoding profile. 116 | /// {B514C39A-B55B-40fa-878F-F1253B4DFDEC} 117 | pub const NV_ENC_HEVC_PROFILE_MAIN_GUID: GUID = GUID { 118 | Data1: 0xb514_c39a, 119 | Data2: 0xb55b, 120 | Data3: 0x40fa, 121 | Data4: [0x87, 0x8f, 0xf1, 0x25, 0x3b, 0x4d, 0xfd, 0xec], 122 | }; 123 | 124 | /// GUID for H.265 Main10 (10-bit) encoding profile. 125 | /// {fa4d2b6c-3a5b-411a-8018-0a3f5e3c9be5} 126 | pub const NV_ENC_HEVC_PROFILE_MAIN10_GUID: GUID = GUID { 127 | Data1: 0xfa4d_2b6c, 128 | Data2: 0x3a5b, 129 | Data3: 0x411a, 130 | Data4: [0x80, 0x18, 0x0a, 0x3f, 0x5e, 0x3c, 0x9b, 0xe5], 131 | }; 132 | 133 | /// GUID for H.265, JM 16 (`FRExt`) encoding profile. 134 | /// {51ec32b5-1b4c-453c-9cbd-b616bd621341} 135 | pub const NV_ENC_HEVC_PROFILE_FREXT_GUID: GUID = GUID { 136 | Data1: 0x51ec_32b5, 137 | Data2: 0x1b4c, 138 | Data3: 0x453c, 139 | Data4: [0x9c, 0xbd, 0xb6, 0x16, 0xbd, 0x62, 0x13, 0x41], 140 | }; 141 | 142 | /// GUID for the AV1 main encoding preset. 143 | /// {5f2a39f5-f14e-4f95-9a9e-b76d568fcf97} 144 | pub const NV_ENC_AV1_PROFILE_MAIN_GUID: GUID = GUID { 145 | Data1: 0x5f2a_39f5, 146 | Data2: 0xf14e, 147 | Data3: 0x4f95, 148 | Data4: [0x9a, 0x9e, 0xb7, 0x6d, 0x56, 0x8f, 0xcf, 0x97], 149 | }; 150 | 151 | // ========================================================================================= 152 | // * Preset GUIDS supported by the NvEncodeAPI interface. 153 | // ========================================================================================= 154 | 155 | // Performance degrades and quality improves as we move from P1 to P7. Presets 156 | // P3 to P7 for H264 and Presets P2 to P7 for HEVC have B frames enabled by 157 | // default for HIGH_QUALITY and LOSSLESS tuning info, and will not work with 158 | // Weighted Prediction enabled. In case Weighted Prediction is required, disable 159 | // B frames by setting frameIntervalP = 1 160 | 161 | /// GUID for the P1 (highest performance) encoding preset. 162 | /// {FC0A8D3E-45F8-4CF8-80C7-298871590EBF} 163 | pub const NV_ENC_PRESET_P1_GUID: GUID = GUID { 164 | Data1: 0xfc0a_8d3e, 165 | Data2: 0x45f8, 166 | Data3: 0x4cf8, 167 | Data4: [0x80, 0xc7, 0x29, 0x88, 0x71, 0x59, 0xe, 0xbf], 168 | }; 169 | 170 | /// GUID for the P2 (higher performance) encoding preset. 
171 | /// Has B-frames enabled by default for H.265 `HIGH_QUALITY` and `LOSSLESS`
172 | /// tuning info, and will not work with Weighted Prediction enabled.
173 | /// {F581CFB8-88D6-4381-93F0-DF13F9C27DAB}
174 | pub const NV_ENC_PRESET_P2_GUID: GUID = GUID {
175 |     Data1: 0xf581_cfb8,
176 |     Data2: 0x88d6,
177 |     Data3: 0x4381,
178 |     Data4: [0x93, 0xf0, 0xdf, 0x13, 0xf9, 0xc2, 0x7d, 0xab],
179 | };
180 |
181 | /// GUID for the P3 (high performance) encoding preset.
182 | /// Has B-frames enabled by default for H.264 and H.265: `HIGH_QUALITY` and
183 | /// `LOSSLESS` tuning info, and will not work with Weighted Prediction enabled.
184 | /// {36850110-3A07-441F-94D5-3670631F91F6}
185 | pub const NV_ENC_PRESET_P3_GUID: GUID = GUID {
186 |     Data1: 0x3685_0110,
187 |     Data2: 0x3a07,
188 |     Data3: 0x441f,
189 |     Data4: [0x94, 0xd5, 0x36, 0x70, 0x63, 0x1f, 0x91, 0xf6],
190 | };
191 |
192 | /// GUID for the P4 (balanced) encoding preset.
193 | /// Has B-frames enabled by default for H.264 and H.265: `HIGH_QUALITY` and
194 | /// `LOSSLESS` tuning info, and will not work with Weighted Prediction enabled.
195 | /// {90A7B826-DF06-4862-B9D2-CD6D73A08681}
196 | pub const NV_ENC_PRESET_P4_GUID: GUID = GUID {
197 |     Data1: 0x90a7_b826,
198 |     Data2: 0xdf06,
199 |     Data3: 0x4862,
200 |     Data4: [0xb9, 0xd2, 0xcd, 0x6d, 0x73, 0xa0, 0x86, 0x81],
201 | };
202 |
203 | /// GUID for the P5 (high quality) encoding preset.
204 | /// Has B-frames enabled by default for H.264 and H.265: `HIGH_QUALITY` and
205 | /// `LOSSLESS` tuning info, and will not work with Weighted Prediction enabled.
206 | /// {21C6E6B4-297A-4CBA-998F-B6CBDE72ADE3}
207 | pub const NV_ENC_PRESET_P5_GUID: GUID = GUID {
208 |     Data1: 0x21c6_e6b4,
209 |     Data2: 0x297a,
210 |     Data3: 0x4cba,
211 |     Data4: [0x99, 0x8f, 0xb6, 0xcb, 0xde, 0x72, 0xad, 0xe3],
212 | };
213 |
214 | /// GUID for the P6 (higher quality) encoding preset.
215 | /// Has B-frames enabled by default for H.264 and H.265: `HIGH_QUALITY` and
216 | /// `LOSSLESS` tuning info, and will not work with Weighted Prediction enabled.
217 | /// {8E75C279-6299-4AB6-8302-0B215A335CF5}
218 | pub const NV_ENC_PRESET_P6_GUID: GUID = GUID {
219 |     Data1: 0x8e75_c279,
220 |     Data2: 0x6299,
221 |     Data3: 0x4ab6,
222 |     Data4: [0x83, 0x2, 0xb, 0x21, 0x5a, 0x33, 0x5c, 0xf5],
223 | };
224 |
225 | /// GUID for the P7 (highest quality) encoding preset.
226 | /// Has B-frames enabled by default for H.264 and H.265: `HIGH_QUALITY` and
227 | /// `LOSSLESS` tuning info, and will not work with Weighted Prediction enabled.
228 | /// {84848C12-6F71-4C13-931B-53E283F57974}
229 | pub const NV_ENC_PRESET_P7_GUID: GUID = GUID {
230 |     Data1: 0x8484_8c12,
231 |     Data2: 0x6f71,
232 |     Data3: 0x4c13,
233 |     Data4: [0x93, 0x1b, 0x53, 0xe2, 0x83, 0xf5, 0x79, 0x74],
234 | };
235 |
-------------------------------------------------------------------------------- /src/safe/session.rs: --------------------------------------------------------------------------------
1 | //! Defines [`Session`] which represents an ongoing encoder session.
2 | //!
3 | //! You need to start a session using [`Encoder::start_session`] before
4 | //! you can initialize input or output buffers, and before you can encode
5 | //! frames. The [`Session`] also stores some information such as the encode
6 | //! width and height so that you do not have to keep repeating it each time.
7 |
8 | use std::fmt::Debug;
9 |
10 | use super::{api::ENCODE_API, encoder::Encoder, result::EncodeError};
11 | use crate::{
12 |     sys::nvEncodeAPI::{
13 |         GUID,
14 |         NV_ENC_BUFFER_FORMAT,
15 |         NV_ENC_CODEC_AV1_GUID,
16 |         NV_ENC_CODEC_H264_GUID,
17 |         NV_ENC_CODEC_HEVC_GUID,
18 |         NV_ENC_CODEC_PIC_PARAMS,
19 |         NV_ENC_PIC_PARAMS,
20 |         NV_ENC_PIC_PARAMS_AV1,
21 |         NV_ENC_PIC_PARAMS_H264,
22 |         NV_ENC_PIC_PARAMS_HEVC,
23 |         NV_ENC_PIC_PARAMS_VER,
24 |         NV_ENC_PIC_STRUCT,
25 |         NV_ENC_PIC_TYPE,
26 |     },
27 |     EncoderInput,
28 |     EncoderOutput,
29 | };
30 |
31 | /// An encoding session to create input/output buffers and encode frames.
32 | ///
33 | /// You need to call [`Encoder::start_session`] before you can
34 | /// encode frames using the session. On drop, the session will automatically
35 | /// send an empty EOS frame to flush the encoder.
36 | #[derive(Debug)]
37 | pub struct Session {
38 |     pub(crate) encoder: Encoder,
39 |     pub(crate) width: u32,
40 |     pub(crate) height: u32,
41 |     pub(crate) buffer_format: NV_ENC_BUFFER_FORMAT,
42 |     pub(crate) encode_guid: GUID,
43 | }
44 |
45 | impl Session {
46 |     /// Get the encoder used for this session.
47 |     ///
48 |     /// This might be useful if you want to use some of
49 |     /// the functions on [`Encoder`].
50 |     ///
51 |     /// # Examples
52 |     ///
53 |     /// ```
54 |     /// # use cudarc::driver::CudaContext;
55 |     /// # use nvidia_video_codec_sdk::{
56 |     /// #     sys::nvEncodeAPI::{
57 |     /// #         NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB,
58 |     /// #         NV_ENC_CODEC_H264_GUID,
59 |     /// #     },
60 |     /// #     Encoder, EncoderInitParams
61 |     /// # };
62 |     /// //* Create encoder. *//
63 |     /// # let cuda_ctx = CudaContext::new(0).unwrap();
64 |     /// # let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap();
65 |     ///
66 |     /// //* Set `encode_guid` and check that H.264 encoding is supported. *//
67 |     /// # let encode_guid = NV_ENC_CODEC_H264_GUID;
68 |     /// # let encode_guids = encoder.get_encode_guids().unwrap();
69 |     /// # assert!(encode_guids.contains(&encode_guid));
70 |     ///
71 |     /// let session = encoder
72 |     ///     .start_session(
73 |     ///         NV_ENC_BUFFER_FORMAT_ARGB,
74 |     ///         EncoderInitParams::new(encode_guid, 1920, 1080),
75 |     ///     )
76 |     ///     .unwrap();
77 |     /// // We can still use the encoder like this:
78 |     /// let _input_formats = session
79 |     ///     .get_encoder()
80 |     ///     .get_supported_input_formats(encode_guid);
81 |     /// ```
82 |     #[must_use]
83 |     pub fn get_encoder(&self) -> &Encoder {
84 |         &self.encoder
85 |     }
86 |
87 |     /// Encode a frame.
88 |     ///
89 |     /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#submitting-input-frame-for-encoding).
90 |     ///
91 |     /// # Errors
92 |     ///
93 |     /// Could error if the encode picture parameters were invalid or otherwise
94 |     /// incorrect, or if we run out of memory.
95 |     ///
96 |     /// There are two recoverable errors:
97 |     /// - If this returns an error with
98 |     ///   [`ErrorKind::EncoderBusy`](super::ErrorKind::EncoderBusy) then you
99 |     ///   should retry after a few milliseconds.
100 |     /// - If this returns an error with
101 |     ///   [`ErrorKind::NeedMoreInput`](super::ErrorKind::NeedMoreInput), the
102 |     ///   client should not lock the output bitstream yet. They should continue
103 |     ///   encoding until this function returns `Ok`, and then lock the
104 |     ///   bitstreams in the order in which they were originally used.
105 | /// 106 | /// # Panics 107 | /// 108 | /// Panics if codec specific parameters are provided for a different codec 109 | /// than the one used in the session. 110 | /// 111 | /// # Examples 112 | /// 113 | /// ``` 114 | /// # use cudarc::driver::CudaContext; 115 | /// # use nvidia_video_codec_sdk::{ 116 | /// # sys::nvEncodeAPI::{ 117 | /// # NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB, 118 | /// # NV_ENC_CODEC_H264_GUID, 119 | /// # NV_ENC_PIC_PARAMS, 120 | /// # NV_ENC_PIC_STRUCT, 121 | /// # }, 122 | /// # Encoder, EncoderInitParams, 123 | /// # EncodePictureParams 124 | /// # }; 125 | /// # const WIDTH: u32 = 1920; 126 | /// # const HEIGHT: u32 = 1080; 127 | /// # const DATA_LEN: usize = (WIDTH * HEIGHT * 4) as usize; 128 | /// //* Create encoder. *// 129 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 130 | /// # let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 131 | /// 132 | /// //* Set `encode_guid` and `buffer_format`, and check that H.264 encoding and the ARGB format are supported. *// 133 | /// # let encode_guid = NV_ENC_CODEC_H264_GUID; 134 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 135 | /// # assert!(encode_guids.contains(&encode_guid)); 136 | /// # let buffer_format = NV_ENC_BUFFER_FORMAT_ARGB; 137 | /// # let input_formats = encoder.get_supported_input_formats(encode_guid).unwrap(); 138 | /// # assert!(input_formats.contains(&buffer_format)); 139 | /// 140 | /// // Begin encoder session. 141 | /// let mut initialize_params = EncoderInitParams::new(encode_guid, WIDTH, HEIGHT); 142 | /// initialize_params.display_aspect_ratio(16, 9) 143 | /// .framerate(30, 1) 144 | /// .enable_picture_type_decision(); 145 | /// let session = encoder.start_session( 146 | /// buffer_format, 147 | /// initialize_params, 148 | /// ).unwrap(); 149 | /// 150 | /// //* Create input and output buffers. *// 151 | /// # let mut input_buffer = session 152 | /// # .create_input_buffer() 153 | /// # .unwrap(); 154 | /// # let mut output_bitstream = session.create_output_bitstream().unwrap(); 155 | /// 156 | /// // Encode frame. 157 | /// unsafe { input_buffer.lock().unwrap().write(&[0; DATA_LEN]) }; 158 | /// session 159 | /// .encode_picture( 160 | /// &mut input_buffer, 161 | /// &mut output_bitstream, 162 | /// // Optional picture parameters 163 | /// EncodePictureParams { 164 | /// input_timestamp: 42, 165 | /// ..Default::default() 166 | /// } 167 | /// ) 168 | /// .unwrap(); 169 | /// # // TODO: check that output is correct. 
170 |     /// let _data = output_bitstream.lock().unwrap().data();
171 |     /// ```
172 |     pub fn encode_picture<I: EncoderInput, O: EncoderOutput>(
173 |         &self,
174 |         input_buffer: &mut I,
175 |         output_bitstream: &mut O,
176 |         params: EncodePictureParams,
177 |     ) -> Result<(), EncodeError> {
178 |         if let Some(codec_params) = &params.codec_params {
179 |             assert_eq!(
180 |                 codec_params.get_codec_guid(),
181 |                 self.encode_guid,
182 |                 "The provided codec specific params must match the codec used"
183 |             );
184 |         }
185 |         let mut encode_pic_params = NV_ENC_PIC_PARAMS {
186 |             version: NV_ENC_PIC_PARAMS_VER,
187 |             inputWidth: self.width,
188 |             inputHeight: self.height,
189 |             inputPitch: input_buffer.pitch(),
190 |             inputBuffer: input_buffer.handle(),
191 |             outputBitstream: output_bitstream.handle(),
192 |             bufferFmt: self.buffer_format,
193 |             pictureStruct: NV_ENC_PIC_STRUCT::NV_ENC_PIC_STRUCT_FRAME,
194 |             inputTimeStamp: params.input_timestamp,
195 |             codecPicParams: params.codec_params.map(Into::into).unwrap_or_default(),
196 |             pictureType: params.picture_type,
197 |             ..Default::default()
198 |         };
199 |         unsafe { (ENCODE_API.encode_picture)(self.encoder.ptr, &mut encode_pic_params) }
200 |             .result(&self.encoder)
201 |     }
202 |
203 |     /// Send an EOS notification to flush the encoder.
204 |     ///
205 |     /// This function is called automatically on drop, but if you wish to
206 |     /// get the data after flushing, you should call this function yourself.
207 |     ///
208 |     /// # Errors
209 |     ///
210 |     /// Could error if we run out of memory.
211 |     ///
212 |     /// If this returns an error with
213 |     /// [`ErrorKind::EncoderBusy`](super::ErrorKind::EncoderBusy) then you
214 |     /// should retry after a few milliseconds.
215 |     pub fn end_of_stream(&self) -> Result<(), EncodeError> {
216 |         let mut encode_pic_params = NV_ENC_PIC_PARAMS::end_of_stream();
217 |         unsafe { (ENCODE_API.encode_picture)(self.encoder.ptr, &mut encode_pic_params) }
218 |             .result(&self.encoder)
219 |     }
220 | }
221 |
222 | /// Send an EOS notification on drop to flush the encoder.
223 | impl Drop for Session {
224 |     fn drop(&mut self) {
225 |         if !std::thread::panicking() {
226 |             self.end_of_stream()
227 |                 .expect("The encoder should not be busy.");
228 |         }
229 |     }
230 | }
231 |
232 | /// Optional parameters for [`Session::encode_picture`].
233 | #[allow(missing_debug_implementations)] // CodecPictureParams doesn't implement Debug
234 | pub struct EncodePictureParams {
235 |     /// Opaque data used for identifying the corresponding encoded frame
236 |     pub input_timestamp: u64,
237 |     /// The picture type to use, if picture type decision is disabled in the
238 |     /// encoder
239 |     pub picture_type: NV_ENC_PIC_TYPE,
240 |     /// Codec-specific parameters
241 |     pub codec_params: Option<CodecPictureParams>,
242 | }
243 |
244 | impl Default for EncodePictureParams {
245 |     fn default() -> Self {
246 |         Self {
247 |             input_timestamp: 0,
248 |             picture_type: NV_ENC_PIC_TYPE::NV_ENC_PIC_TYPE_UNKNOWN,
249 |             codec_params: None,
250 |         }
251 |     }
252 | }
253 |
254 | /// Codec specific picture parameters
255 | #[allow(missing_debug_implementations)] // NV_ENC_PIC_PARAMS_H264 contains a union, thus doesn't derive Debug
256 | pub enum CodecPictureParams {
257 |     /// Parameters for H.264
258 |     H264(NV_ENC_PIC_PARAMS_H264),
259 |     /// Parameters for HEVC or H.265
260 |     Hevc(NV_ENC_PIC_PARAMS_HEVC),
261 |     /// Parameters for AV1
262 |     Av1(NV_ENC_PIC_PARAMS_AV1),
263 | }
264 |
265 | impl CodecPictureParams {
266 |     /// Returns the GUID representing the codec for which the parameters are
267 |     /// specified.
268 |     #[must_use]
269 |     pub fn get_codec_guid(&self) -> GUID {
270 |         match self {
271 |             Self::H264(_) => NV_ENC_CODEC_H264_GUID,
272 |             Self::Hevc(_) => NV_ENC_CODEC_HEVC_GUID,
273 |             Self::Av1(_) => NV_ENC_CODEC_AV1_GUID,
274 |         }
275 |     }
276 | }
277 |
278 | impl From<CodecPictureParams> for NV_ENC_CODEC_PIC_PARAMS {
279 |     fn from(value: CodecPictureParams) -> Self {
280 |         match value {
281 |             CodecPictureParams::H264(params) => Self {
282 |                 h264PicParams: params,
283 |             },
284 |             CodecPictureParams::Hevc(params) => Self {
285 |                 hevcPicParams: params,
286 |             },
287 |             CodecPictureParams::Av1(params) => Self {
288 |                 av1PicParams: params,
289 |             },
290 |         }
291 |     }
292 | }
293 |
-------------------------------------------------------------------------------- /src/safe/result.rs: --------------------------------------------------------------------------------
1 | //! Defines a wrapper around
2 | //! [`NVENCSTATUS`](crate::sys::nvEncodeAPI::NVENCSTATUS) to provide ergonomic
3 | //! error handling.
4 |
5 | use std::{error::Error, ffi::CStr, fmt};
6 |
7 | use super::{api::ENCODE_API, encoder::Encoder};
8 | use crate::sys::nvEncodeAPI::NVENCSTATUS;
9 |
10 | /// Wrapper enum around [`NVENCSTATUS`].
11 | #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]
12 | pub enum ErrorKind {
13 |     /// No encode capable devices were detected.
14 |     NoEncodeDevice = 1,
15 |     /// The device passed by the client is not supported.
16 |     UnsupportedDevice = 2,
17 |     /// The encoder device supplied by the client is not valid.
18 |     InvalidEncoderDevice = 3,
19 |     /// The device passed to the API call is invalid.
20 |     InvalidDevice = 4,
21 |     /// The device passed to the API call is no longer available
22 |     /// and needs to be reinitialized. The client needs to destroy the
23 |     /// current encoder session by freeing the allocated input/output
24 |     /// buffers and destroying the device, and then create a new encoding session.
25 |     DeviceNotExist = 5,
26 |     /// One or more of the pointers passed to the API call is invalid.
27 |     InvalidPtr = 6,
28 |     /// The completion event passed in the [`EncodeAPI.encode_picture`]
29 |     /// call is invalid.
30 |     InvalidEvent = 7,
31 |     /// One or more of the parameters passed to the API call is invalid.
32 |     InvalidParam = 8,
33 |     /// An API call was made in the wrong sequence or order.
34 |     InvalidCall = 9,
35 |     /// The API call failed because it was unable to allocate enough memory
36 |     /// to perform the requested operation.
37 |     OutOfMemory = 10,
38 |     /// The encoder has not been initialized with
39 |     /// [`EncodeAPI.initialize_encoder`] or that initialization has failed.
40 |     /// The client cannot allocate input or output buffers or do any encoding
41 |     /// related operation before successfully initializing the encoder.
42 |     EncoderNotInitialized = 11,
43 |     /// An unsupported parameter was passed by the client.
44 |     UnsupportedParam = 12,
45 |     /// The [`EncodeAPI.lock_bitstream`] failed to lock the output
46 |     /// buffer. This happens when the client makes a non-blocking lock call
47 |     /// to access the output bitstream by passing the `doNotWait` flag.
48 |     /// This is not a fatal error and the client should retry the same operation
49 |     /// after a few milliseconds.
50 |     LockBusy = 13,
51 |     /// The size of the user buffer passed by the client is insufficient for
52 |     /// the requested operation.
53 |     NotEnoughBuffer = 14,
54 |     /// An invalid struct version was used by the client.
55 |     InvalidVersion = 15,
56 |     /// [`EncodeAPI.map_input_resource`] failed to map the client provided
57 |     /// input resource.
58 |     MapFailed = 16,
59 |     /// The encode driver requires more input buffers to produce an output
60 |     /// bitstream. If this error is returned from [`EncodeAPI.encode_picture`],
61 |     /// this is not a fatal error. If the client is encoding with B frames,
62 |     /// then [`EncodeAPI.encode_picture`] might be buffering the input
63 |     /// frame for re-ordering.
64 |     ///
65 |     /// A client operating in synchronous mode cannot call
66 |     /// [`EncodeAPI.lock_bitstream`] on the output bitstream buffer if
67 |     /// [`EncodeAPI.encode_picture`] returned this variant. The client must
68 |     /// continue providing input frames until the encode driver returns
69 |     /// successfully. After a success, the client
70 |     /// can call [`EncodeAPI.lock_bitstream`] on the output buffers in the
71 |     /// same order in which it has called [`EncodeAPI.encode_picture`].
72 |     NeedMoreInput = 17,
73 |     /// The hardware encoder is busy encoding and is unable to encode
74 |     /// the input. The client should call [`EncodeAPI.encode_picture`] again
75 |     /// after a few milliseconds.
76 |     EncoderBusy = 18,
77 |     /// The completion event passed in [`EncodeAPI.encode_picture`]
78 |     /// has not been registered with the encoder driver using
79 |     /// [`EncodeAPI.register_async_event`].
80 |     EventNotRegistered = 19,
81 |     /// An unknown internal error has occurred.
82 |     Generic = 20,
83 |     /// The client is attempting to use a feature
84 |     /// that is not available for the license type for the current system.
85 |     IncompatibleClientKey = 21,
86 |     /// The client is attempting to use a feature
87 |     /// that is not implemented for the current version.
88 |     Unimplemented = 22,
89 |     /// [`EncodeAPI.register_resource`] failed to register the resource.
90 |     ResourceRegisterFailed = 23,
91 |     /// The client is attempting to unregister a resource
92 |     /// that has not been successfully registered.
93 |     ResourceNotRegistered = 24,
94 |     /// The client is attempting to unmap a resource
95 |     /// that has not been successfully mapped.
96 |     ResourceNotMapped = 25,
97 |     /// The encode driver requires more output buffers to write an
98 |     /// output bitstream. If this error is returned from
99 |     /// [`EncodeAPI.restore_encoder_state`], this is not a fatal error. If the
100 |     /// client is encoding with B frames, then the
101 |     /// [`EncodeAPI.restore_encoder_state`] API might require an extra
102 |     /// output buffer to accommodate the overlay frame output in a separate
103 |     /// buffer for the AV1 codec. In this case, the client must call the
104 |     /// [`EncodeAPI.restore_encoder_state`] API again with
105 |     /// an output bitstream as input along with the parameters in the previous
106 |     /// call. When operating in asynchronous mode of encoding, the client must
107 |     /// also specify the completion event.
108 |     NeedMoreOutput = 26,
109 | }
110 |
111 | /// Wrapper struct around [`NVENCSTATUS`].
112 | ///
113 | /// This struct also contains a string with additional info
114 | /// when it is relevant and available.
115 | #[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
116 | pub struct EncodeError {
117 |     kind: ErrorKind,
118 |     string: Option<String>,
119 | }
120 |
121 | impl EncodeError {
122 |     /// Getter for the error kind.
123 |     #[must_use]
124 |     pub fn kind(&self) -> ErrorKind {
125 |         self.kind
126 |     }
127 |
128 |     /// Getter for the error string.
129 | #[must_use] 130 | pub fn string(&self) -> Option<&str> { 131 | self.string.as_deref() 132 | } 133 | } 134 | 135 | impl fmt::Display for EncodeError { 136 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 137 | match &self.string { 138 | Some(s) => write!(f, "{:?}: {s}", self.kind), 139 | None => write!(f, "{:?}", self.kind), 140 | } 141 | } 142 | } 143 | 144 | impl Error for EncodeError {} 145 | 146 | impl From<NVENCSTATUS> for ErrorKind { 147 | fn from(status: NVENCSTATUS) -> Self { 148 | match status { 149 | NVENCSTATUS::NV_ENC_SUCCESS => { 150 | unreachable!("Success should not be converted to an error.") 151 | } 152 | NVENCSTATUS::NV_ENC_ERR_NO_ENCODE_DEVICE => Self::NoEncodeDevice, 153 | NVENCSTATUS::NV_ENC_ERR_UNSUPPORTED_DEVICE => Self::UnsupportedDevice, 154 | NVENCSTATUS::NV_ENC_ERR_INVALID_ENCODERDEVICE => Self::InvalidEncoderDevice, 155 | NVENCSTATUS::NV_ENC_ERR_INVALID_DEVICE => Self::InvalidDevice, 156 | NVENCSTATUS::NV_ENC_ERR_DEVICE_NOT_EXIST => Self::DeviceNotExist, 157 | NVENCSTATUS::NV_ENC_ERR_INVALID_PTR => Self::InvalidPtr, 158 | NVENCSTATUS::NV_ENC_ERR_INVALID_EVENT => Self::InvalidEvent, 159 | NVENCSTATUS::NV_ENC_ERR_INVALID_PARAM => Self::InvalidParam, 160 | NVENCSTATUS::NV_ENC_ERR_INVALID_CALL => Self::InvalidCall, 161 | NVENCSTATUS::NV_ENC_ERR_OUT_OF_MEMORY => Self::OutOfMemory, 162 | NVENCSTATUS::NV_ENC_ERR_ENCODER_NOT_INITIALIZED => Self::EncoderNotInitialized, 163 | NVENCSTATUS::NV_ENC_ERR_UNSUPPORTED_PARAM => Self::UnsupportedParam, 164 | NVENCSTATUS::NV_ENC_ERR_LOCK_BUSY => Self::LockBusy, 165 | NVENCSTATUS::NV_ENC_ERR_NOT_ENOUGH_BUFFER => Self::NotEnoughBuffer, 166 | NVENCSTATUS::NV_ENC_ERR_INVALID_VERSION => Self::InvalidVersion, 167 | NVENCSTATUS::NV_ENC_ERR_MAP_FAILED => Self::MapFailed, 168 | NVENCSTATUS::NV_ENC_ERR_NEED_MORE_INPUT => Self::NeedMoreInput, 169 | NVENCSTATUS::NV_ENC_ERR_ENCODER_BUSY => Self::EncoderBusy, 170 | NVENCSTATUS::NV_ENC_ERR_EVENT_NOT_REGISTERD => Self::EventNotRegistered, 171 | NVENCSTATUS::NV_ENC_ERR_GENERIC => Self::Generic, 172 | NVENCSTATUS::NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY => Self::IncompatibleClientKey, 173 | NVENCSTATUS::NV_ENC_ERR_UNIMPLEMENTED => Self::Unimplemented, 174 | NVENCSTATUS::NV_ENC_ERR_RESOURCE_REGISTER_FAILED => Self::ResourceRegisterFailed, 175 | NVENCSTATUS::NV_ENC_ERR_RESOURCE_NOT_REGISTERED => Self::ResourceNotRegistered, 176 | NVENCSTATUS::NV_ENC_ERR_RESOURCE_NOT_MAPPED => Self::ResourceNotMapped, 177 | NVENCSTATUS::NV_ENC_ERR_NEED_MORE_OUTPUT => Self::NeedMoreOutput, 178 | } 179 | } 180 | } 181 | 182 | impl NVENCSTATUS { 183 | /// Convert an [`NVENCSTATUS`] to a [`Result`]. 184 | /// 185 | /// [`NVENCSTATUS::NV_ENC_SUCCESS`] is converted to `Ok(())`, 186 | /// and all other variants are mapped to the corresponding variant 187 | /// in [`ErrorKind`]. The error type is [`EncodeError`], which has 188 | /// a kind and an optional `String` that might contain additional 189 | /// information about the error. 190 | /// 191 | /// # Errors 192 | /// 193 | /// Returns an error whenever the status is not 194 | /// [`NVENCSTATUS::NV_ENC_SUCCESS`]. 195 | /// 196 | /// # Examples 197 | /// 198 | /// ``` 199 | /// # use cudarc::driver::CudaContext; 200 | /// # use nvidia_video_codec_sdk::{sys::nvEncodeAPI::GUID, EncodeError, Encoder, ErrorKind}; 201 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 202 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 203 | /// // Cause an error by passing in an invalid GUID.
204 | /// // `Encoder::get_supported_input_formats()` uses `.result()` internally 205 | /// let error = encoder 206 | /// .get_supported_input_formats(GUID::default()) 207 | /// .unwrap_err(); 208 | /// // Get the kind. 209 | /// assert_eq!(error.kind(), ErrorKind::InvalidParam); 210 | /// // Get the error message. 211 | /// // Unfortunately, it's not always helpful. 212 | /// assert_eq!(error.string(), Some("EncodeAPI Internal Error.")); 213 | /// ``` 214 | pub fn result(self, encoder: &Encoder) -> Result<(), EncodeError> { 215 | self.result_without_string().map_err(|mut err| { 216 | err.string = match err.kind { 217 | // Avoid getting the string if it is not needed. 218 | ErrorKind::LockBusy 219 | | ErrorKind::EncoderBusy 220 | | ErrorKind::NeedMoreInput 221 | | ErrorKind::OutOfMemory => None, 222 | // Otherwise allocate an owned `String` with the error. 223 | _ => Some( 224 | unsafe { CStr::from_ptr((ENCODE_API.get_last_error_string)(encoder.ptr)) } 225 | .to_string_lossy() 226 | .to_string(), 227 | ), 228 | } 229 | .and_then(|s| if s.is_empty() { None } else { Some(s) }); 230 | err 231 | }) 232 | } 233 | 234 | /// Convert an [`NVENCSTATUS`] to a [`Result`] without 235 | /// using an [`Encoder`]. 236 | /// 237 | /// This function is the same as [`NVENCSTATUS::result`] except 238 | /// it does not get the error string because it does not have access 239 | /// to an [`Encoder`]. This is only useful if you do not have an [`Encoder`] 240 | /// yet, for example when initializing the API. 241 | /// 242 | /// You should always prefer to use [`NVENCSTATUS::result`] when possible. 243 | /// 244 | /// # Errors 245 | /// 246 | /// Returns an error whenever the status is not 247 | /// [`NVENCSTATUS::NV_ENC_SUCCESS`]. 248 | pub fn result_without_string(self) -> Result<(), EncodeError> { 249 | match self { 250 | Self::NV_ENC_SUCCESS => Ok(()), 251 | err => Err(EncodeError { 252 | kind: err.into(), 253 | string: None, 254 | }), 255 | } 256 | } 257 | } 258 | -------------------------------------------------------------------------------- /examples/importing_vulkan_buffers.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::{File, OpenOptions}, 3 | io::Write, 4 | ptr::{self, NonNull}, 5 | sync::Arc, 6 | }; 7 | 8 | use cudarc::driver::CudaContext; 9 | use libc::munmap; 10 | use nvidia_video_codec_sdk::{ 11 | sys::nvEncodeAPI::{ 12 | NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB, 13 | NV_ENC_CODEC_H264_GUID, 14 | NV_ENC_H264_PROFILE_HIGH_GUID, 15 | NV_ENC_PRESET_P1_GUID, 16 | NV_ENC_TUNING_INFO, 17 | }, 18 | Encoder, 19 | EncoderInitParams, 20 | }; 21 | use vulkano::{ 22 | device::{ 23 | physical::PhysicalDeviceType, 24 | Device, 25 | DeviceCreateInfo, 26 | DeviceExtensions, 27 | DeviceFeatures, 28 | QueueCreateInfo, 29 | }, 30 | instance::{Instance, InstanceCreateInfo}, 31 | memory::{ 32 | DeviceMemory, 33 | ExternalMemoryHandleType, 34 | ExternalMemoryHandleTypes, 35 | MappedMemoryRange, 36 | MemoryAllocateInfo, 37 | MemoryMapFlags, 38 | MemoryMapInfo, 39 | MemoryPropertyFlags, 40 | }, 41 | VulkanLibrary, 42 | }; 43 | 44 | /// Returns the color `(b, g, r, alpha)` of a pixel relative to 45 | /// its position on the screen: 46 | /// 47 | /// Top right will be red, 48 | /// bottom left will be green, 49 | /// all colors will shift towards having more blue as `time` increases. 50 | /// 51 | /// # Arguments 52 | /// 53 | /// * `width`, `height` - Width and height of the screen. 54 | /// * `x`, `y` - Coordinates of the pixel on the screen.
55 | /// * `time` - Fraction in `[0, 1]` indicating what part of the animation we are in. 56 | fn get_color(width: u32, height: u32, x: u32, y: u32, time: f32) -> (u8, u8, u8, u8) { 57 | let alpha = 255; 58 | let red = (255 * x / width) as u8; 59 | let green = (255 * y / height) as u8; 60 | let blue = (255. * time) as u8; 61 | (blue, green, red, alpha) 62 | } 63 | 64 | /// Generates test frame inputs and sets `buf` to that input. 65 | /// 66 | /// # Arguments 67 | /// 68 | /// * `buf` - The buffer in which to put the generated input. 69 | /// * `width`, `height` - The size of the frames to generate input for. 70 | /// * `i`, `i_max` - The current frame and total amount of frames. 71 | fn generate_test_input(buf: &mut [u8], width: u32, height: u32, i: u32, i_max: u32) { 72 | assert_eq!(buf.len(), (width * height * 4) as usize); 73 | for y in 0..height { 74 | for x in 0..width { 75 | let pixel = width * y + x; 76 | let index = (pixel * 4) as usize; 77 | let color = get_color(width, height, x, y, i as f32 / i_max as f32); 78 | buf[index] = color.0; 79 | buf[index + 1] = color.1; 80 | buf[index + 2] = color.2; 81 | buf[index + 3] = color.3; 82 | } 83 | } 84 | } 85 | 86 | /// Initialize Vulkan and find the desired memory type index. 87 | /// 88 | /// This function will probably only work on UNIX because we require the 89 | /// `khr_external_memory_fd` extension to export Opaque File Descriptors. 90 | /// 91 | /// The `memory_type_index` corresponds to a memory type which is 92 | /// `HOST_VISIBLE`, which is needed so that we can map device memory later in 93 | /// the example. 94 | fn initialize_vulkan() -> (Arc<Device>, u32) { 95 | // Initialize Vulkan library. 96 | let vulkan_library = VulkanLibrary::new().expect("Vulkan should be installed correctly"); 97 | let instance = Instance::new( 98 | vulkan_library, 99 | InstanceCreateInfo::application_from_cargo_toml(), 100 | ) 101 | .expect("Vulkan should be installed correctly"); 102 | 103 | let (memory_type_index, physical_device) = instance 104 | .enumerate_physical_devices() 105 | .expect("There should be some device capable of encoding") 106 | .filter_map(|pd| { 107 | matches!(pd.properties().device_type, PhysicalDeviceType::DiscreteGpu) 108 | .then_some(()) 109 | .and_then(|()| { 110 | pd.memory_properties() 111 | .memory_types 112 | .iter() 113 | .position(|mt| { 114 | mt.property_flags 115 | .contains(MemoryPropertyFlags::HOST_VISIBLE) 116 | }) 117 | .map(|index| (index as u32, pd)) 118 | }) 119 | }) 120 | .next() 121 | .expect( 122 | "There should be at least one GPU which supports a memory type that is `HOST_VISIBLE`", 123 | ); 124 | 125 | // Create a Vulkan device. 126 | let (vulkan_device, _queues) = Device::new(physical_device, DeviceCreateInfo { 127 | queue_create_infos: vec![QueueCreateInfo::default()], 128 | enabled_extensions: DeviceExtensions { 129 | khr_external_memory_fd: true, 130 | ..Default::default() 131 | }, 132 | enabled_features: DeviceFeatures { 133 | memory_map_placed: true, 134 | ..Default::default() 135 | }, 136 | ..Default::default() 137 | }) 138 | .expect( 139 | "Vulkan should be installed correctly and `Device` should support `khr_external_memory_fd`", 140 | ); 141 | 142 | (vulkan_device, memory_type_index) 143 | } 144 | 145 | /// Creates an encoded bitstream for a 128 frame, 1920x1080 video. 146 | /// This bitstream will be written to `example_output.bin`. 147 | /// To view this bitstream, use a tool like ffmpeg. 148 | /// 149 | /// For ffmpeg, use `ffmpeg -i example_output.bin -vcodec copy example_output.mp4` to 150 | /// remux the video into a playable container without re-encoding.
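/// You can also inspect the raw stream directly, for example with
/// `ffprobe -f h264 example_output.bin` (assuming `ffprobe` from the ffmpeg
/// suite is installed).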
151 | fn main() { 152 | const WIDTH: u32 = 1920; 153 | const HEIGHT: u32 = 1080; 154 | const FRAMES: u32 = 128; 155 | 156 | let (vulkan_device, memory_type_index) = initialize_vulkan(); 157 | 158 | // Create a new CudaContext to interact with CUDA. 159 | let cuda_ctx = CudaContext::new(0).expect("Cuda should be installed correctly."); 160 | 161 | let encoder = Encoder::initialize_with_cuda(cuda_ctx.clone()) 162 | .expect("NVIDIA Video Codec SDK should be installed correctly."); 163 | 164 | // Get all encode guids supported by the GPU. 165 | let encode_guids = encoder 166 | .get_encode_guids() 167 | .expect("The encoder should be able to get the supported guids."); 168 | let encode_guid = NV_ENC_CODEC_H264_GUID; 169 | assert!(encode_guids.contains(&encode_guid)); 170 | 171 | // Get available preset guids based on encode guid. 172 | let preset_guids = encoder 173 | .get_preset_guids(encode_guid) 174 | .expect("The encoder should have a preset for H.264."); 175 | let preset_guid = NV_ENC_PRESET_P1_GUID; 176 | assert!(preset_guids.contains(&preset_guid)); 177 | 178 | // Get available profiles based on encode guid. 179 | let profile_guids = encoder 180 | .get_profile_guids(encode_guid) 181 | .expect("The encoder should have a profile for H.264."); 182 | let profile_guid = NV_ENC_H264_PROFILE_HIGH_GUID; 183 | assert!(profile_guids.contains(&profile_guid)); 184 | 185 | // Get input formats based on the encode guid. 186 | let input_formats = encoder 187 | .get_supported_input_formats(encode_guid) 188 | .expect("The encoder should be able to get supported input buffer formats."); 189 | let buffer_format = NV_ENC_BUFFER_FORMAT_ARGB; 190 | assert!(input_formats.contains(&buffer_format)); 191 | 192 | let tuning_info = NV_ENC_TUNING_INFO::NV_ENC_TUNING_INFO_ULTRA_LOW_LATENCY; 193 | 194 | // Get the preset config based on the selected encode guid (H.264), selected 195 | // preset (`P1`), and tuning info (`ULTRA_LOW_LATENCY`). 196 | let mut preset_config = encoder 197 | .get_preset_config(encode_guid, preset_guid, tuning_info) 198 | .expect("Encoder should be able to create config based on presets."); 199 | 200 | // Initialize a new encoder session based on the `preset_config` 201 | // we generated before. 202 | let mut initialize_params = EncoderInitParams::new(encode_guid, WIDTH, HEIGHT); 203 | initialize_params 204 | .preset_guid(preset_guid) 205 | .tuning_info(tuning_info) 206 | .display_aspect_ratio(16, 9) 207 | .framerate(30, 1) 208 | .enable_picture_type_decision() 209 | .encode_config(&mut preset_config.presetCfg); 210 | let session = encoder 211 | .start_session(buffer_format, initialize_params) 212 | .expect("Encoder should be initialized correctly."); 213 | 214 | // Calculate the number of buffers we need based on the interval of P frames and 215 | // the look ahead depth. 216 | let num_bufs = usize::try_from(preset_config.presetCfg.frameIntervalP) 217 | .expect("frameIntervalP should always be positive.") 218 | + usize::try_from(preset_config.presetCfg.rcParams.lookaheadDepth) 219 | .expect("lookahead depth should always be positive."); 220 | 221 | let mut output_buffers: Vec<_> = (0..num_bufs) 222 | .map(|_| { 223 | session 224 | .create_output_bitstream() 225 | .expect("The encoder should be able to create bitstreams.") 226 | }) 227 | .collect(); 228 | 229 | // Write result to output file "example_output.bin".
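// (`create(true)` combined with `truncate(true)` below means a previous
// run's output is overwritten rather than appended to.)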
230 | let mut out_file = OpenOptions::new() 231 | .write(true) 232 | .create(true) 233 | .truncate(true) 234 | .open("example_output.bin") 235 | .expect("Permissions and available space should allow creating a new file."); 236 | 237 | // Generate each of the frames with Vulkan. 238 | let file_descriptors = (0..FRAMES) 239 | .map(|f| { 240 | create_buffer( 241 | vulkan_device.clone(), 242 | memory_type_index, 243 | WIDTH, 244 | HEIGHT, 245 | f, 246 | FRAMES, 247 | ) 248 | }) 249 | .collect::<Vec<_>>(); 250 | 251 | // Encode each of the frames. 252 | for (i, file_descriptor) in file_descriptors.into_iter().enumerate() { 253 | println!("Encoding frame {:>3} / {FRAMES}", i + 1); 254 | let output_bitstream = &mut output_buffers[i % num_bufs]; 255 | 256 | // Import file descriptor using CUDA. 257 | let external_memory = unsafe { 258 | cuda_ctx.import_external_memory(file_descriptor, (WIDTH * HEIGHT * 4) as u64) 259 | } 260 | .expect("File descriptor should be valid for importing."); 261 | let mapped_buffer = external_memory 262 | .map_all() 263 | .expect("External memory should be mappable."); 264 | 265 | // Register and map with NVENC. 266 | let mut registered_resource = session 267 | .register_cuda_resource(WIDTH * 4, mapped_buffer) 268 | .expect("Buffer should be mapped and available for registration with NVENC."); 269 | 270 | session 271 | .encode_picture( 272 | &mut registered_resource, 273 | output_bitstream, 274 | Default::default(), 275 | ) 276 | .expect("Encoder should be able to encode valid pictures"); 277 | 278 | // Immediately locking is probably inefficient 279 | // (you should encode multiple before locking), 280 | // but for simplicity we just lock immediately. 281 | let lock = output_bitstream 282 | .lock() 283 | .expect("Bitstream lock should be available."); 284 | dbg!(lock.frame_index()); 285 | dbg!(lock.timestamp()); 286 | dbg!(lock.duration()); 287 | dbg!(lock.picture_type()); 288 | 289 | let data = lock.data(); 290 | out_file 291 | .write_all(data) 292 | .expect("Writing should succeed because `out_file` was opened with write permissions."); 293 | } 294 | } 295 | 296 | /// Allocates memory on a Vulkan [`Device`] and returns a [`File`] (file 297 | /// descriptor) to that data. 298 | /// 299 | /// Will be used to create file descriptors for the individual frames. 300 | /// 301 | /// # Arguments 302 | /// 303 | /// * `vulkan_device` - The device where the data should be allocated. 304 | /// * `memory_type_index` - The index of the memory type that should be 305 | /// allocated. 306 | /// * `width`, `height` - The size of data to store. 307 | /// * `i`, `i_max` - The current frame and maximum frame index. 308 | fn create_buffer( 309 | vulkan_device: Arc<Device>, 310 | memory_type_index: u32, 311 | width: u32, 312 | height: u32, 313 | i: u32, 314 | i_max: u32, 315 | ) -> File { 316 | let size = (width * height * 4) as u64; 317 | 318 | // Allocate memory with Vulkan. 319 | let mut memory = DeviceMemory::allocate(vulkan_device, MemoryAllocateInfo { 320 | allocation_size: size, 321 | memory_type_index, 322 | export_handle_types: ExternalMemoryHandleTypes::OPAQUE_FD, 323 | ..Default::default() 324 | }) 325 | .expect("There should be space to allocate Vulkan memory on the device"); 326 | 327 | // Map and write to the memory.
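// (The anonymous `mmap` below only reserves a host address range; `map_placed`
// with `MemoryMapFlags::PLACED` then maps the Vulkan device memory at that
// exact address, which is why the `memory_map_placed` feature was enabled in
// `initialize_vulkan`.)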
328 | let address = unsafe { 329 | libc::mmap( 330 | ptr::null_mut(), 331 | memory.allocation_size() as libc::size_t, 332 | libc::PROT_READ | libc::PROT_WRITE, 333 | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, 334 | -1, 335 | 0, 336 | ) 337 | }; 338 | if address as i64 == -1 { 339 | panic!("There should be memory available to map and write to"); 340 | } 341 | 342 | unsafe { 343 | memory.map_placed( 344 | MemoryMapInfo { 345 | flags: MemoryMapFlags::PLACED, 346 | size: memory.allocation_size(), 347 | ..Default::default() 348 | }, 349 | NonNull::new(address).expect("The mapped address should not be null"), 350 | ) 351 | } 352 | .unwrap(); 353 | 354 | unsafe { 355 | let content = 356 | std::slice::from_raw_parts_mut(address as *mut u8, memory.allocation_size() as usize); 357 | generate_test_input(content, width, height, i, i_max); 358 | memory 359 | .flush_range(MappedMemoryRange { 360 | offset: 0, 361 | size, 362 | ..Default::default() 363 | }) 364 | .expect( 365 | "There should be no other devices writing to this memory and size should also fit \ 366 | within the size", 367 | ); 368 | } 369 | 370 | // unmap from the host side 371 | let result = unsafe { munmap(address, size as libc::size_t) }; 372 | if result == -1 { 373 | panic!("munmap failed"); 374 | } 375 | 376 | // unmap the device memory 377 | memory 378 | .unmap(Default::default()) 379 | .expect("unmap should be successful on host-mapped device"); 380 | 381 | // Export the memory. 382 | memory 383 | .export_fd(ExternalMemoryHandleType::OpaqueFd) 384 | .expect("The memory should be able to be turned into a file handle if we are on UNIX") 385 | } 386 | -------------------------------------------------------------------------------- /src/safe/api.rs: -------------------------------------------------------------------------------- 1 | //! Defines `ENCODE_API`, which is a lazy static of [`EncodeAPI`]. 2 | 3 | use core::ffi::{c_int, c_void}; 4 | 5 | use crate::sys::nvEncodeAPI::{ 6 | NvEncodeAPICreateInstance, 7 | NvEncodeAPIGetMaxSupportedVersion, 8 | GUID, 9 | NVENCAPI_MAJOR_VERSION, 10 | NVENCAPI_MINOR_VERSION, 11 | NVENCSTATUS, 12 | NV_ENCODE_API_FUNCTION_LIST, 13 | NV_ENCODE_API_FUNCTION_LIST_VER, 14 | NV_ENC_BUFFER_FORMAT, 15 | NV_ENC_CAPS_PARAM, 16 | NV_ENC_CREATE_BITSTREAM_BUFFER, 17 | NV_ENC_CREATE_INPUT_BUFFER, 18 | NV_ENC_CREATE_MV_BUFFER, 19 | NV_ENC_CUSTREAM_PTR, 20 | NV_ENC_EVENT_PARAMS, 21 | NV_ENC_INITIALIZE_PARAMS, 22 | NV_ENC_INPUT_PTR, 23 | NV_ENC_LOCK_BITSTREAM, 24 | NV_ENC_LOCK_INPUT_BUFFER, 25 | NV_ENC_LOOKAHEAD_PIC_PARAMS, 26 | NV_ENC_MAP_INPUT_RESOURCE, 27 | NV_ENC_MEONLY_PARAMS, 28 | NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS, 29 | NV_ENC_OUTPUT_PTR, 30 | NV_ENC_PIC_PARAMS, 31 | NV_ENC_PRESET_CONFIG, 32 | NV_ENC_RECONFIGURE_PARAMS, 33 | NV_ENC_REGISTERED_PTR, 34 | NV_ENC_REGISTER_RESOURCE, 35 | NV_ENC_RESTORE_ENCODER_STATE_PARAMS, 36 | NV_ENC_SEQUENCE_PARAM_PAYLOAD, 37 | NV_ENC_STAT, 38 | NV_ENC_TUNING_INFO, 39 | }; 40 | 41 | lazy_static! { 42 | /// A lazy static for the Encoder API. 43 | /// 44 | /// You should not interact with this directly. 45 | /// [`Encoder`](crate::Encoder) exposes much of the functionality and provides a nicer API. 46 | pub static ref ENCODE_API: EncodeAPI = 47 | EncodeAPI::new(); 48 | } 49 | 50 | // Function type aliases to shorten later definitions.
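//
// A rough sketch of how a call through one of these raw pointers looks in the
// rest of the crate (`raw_encoder` is a placeholder for the `*mut c_void`
// obtained when opening an encode session):
//
// let mut count = 0u32;
// unsafe { (ENCODE_API.get_encode_guid_count)(raw_encoder, &mut count) }
//     .result_without_string()?;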
51 | type OpenEncodeSession = unsafe extern "C" fn(*mut c_void, u32, *mut *mut c_void) -> NVENCSTATUS; 52 | type GetEncodeGUIDCount = unsafe extern "C" fn(*mut c_void, *mut u32) -> NVENCSTATUS; 53 | type GetEncodeGUIDs = unsafe extern "C" fn(*mut c_void, *mut GUID, u32, *mut u32) -> NVENCSTATUS; 54 | type GetInputFormatCount = unsafe extern "C" fn(*mut c_void, GUID, *mut u32) -> NVENCSTATUS; 55 | type GetInputFormats = unsafe extern "C" fn( 56 | *mut c_void, 57 | GUID, 58 | *mut NV_ENC_BUFFER_FORMAT, 59 | u32, 60 | *mut u32, 61 | ) -> NVENCSTATUS; 62 | type GetEncodeCaps = 63 | unsafe extern "C" fn(*mut c_void, GUID, *mut NV_ENC_CAPS_PARAM, *mut c_int) -> NVENCSTATUS; 64 | type GetEncodePresetCount = unsafe extern "C" fn(*mut c_void, GUID, *mut u32) -> NVENCSTATUS; 65 | type GetEncodePresetGUIDs = 66 | unsafe extern "C" fn(*mut c_void, GUID, *mut GUID, u32, *mut u32) -> NVENCSTATUS; 67 | type GetEncodeProfileGUIDCount = GetEncodePresetCount; 68 | type GetEncodeProfileGUIDs = GetEncodePresetGUIDs; 69 | type GetEncodePresetConfig = 70 | unsafe extern "C" fn(*mut c_void, GUID, GUID, *mut NV_ENC_PRESET_CONFIG) -> NVENCSTATUS; 71 | type GetEncodePresetConfigEx = unsafe extern "C" fn( 72 | *mut c_void, 73 | GUID, 74 | GUID, 75 | NV_ENC_TUNING_INFO, 76 | *mut NV_ENC_PRESET_CONFIG, 77 | ) -> NVENCSTATUS; 78 | type InitializeEncoder = 79 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_INITIALIZE_PARAMS) -> NVENCSTATUS; 80 | type CreateInputBuffer = 81 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_CREATE_INPUT_BUFFER) -> NVENCSTATUS; 82 | type DestroyInputBuffer = unsafe extern "C" fn(*mut c_void, NV_ENC_INPUT_PTR) -> NVENCSTATUS; 83 | type CreateBitstreamBuffer = 84 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_CREATE_BITSTREAM_BUFFER) -> NVENCSTATUS; 85 | type DestroyBitstreamBuffer = unsafe extern "C" fn(*mut c_void, NV_ENC_OUTPUT_PTR) -> NVENCSTATUS; 86 | type EncodePicture = unsafe extern "C" fn(*mut c_void, *mut NV_ENC_PIC_PARAMS) -> NVENCSTATUS; 87 | type LockBitstream = unsafe extern "C" fn(*mut c_void, *mut NV_ENC_LOCK_BITSTREAM) -> NVENCSTATUS; 88 | type UnlockBitstream = unsafe extern "C" fn(*mut c_void, NV_ENC_OUTPUT_PTR) -> NVENCSTATUS; 89 | type LockInputBuffer = 90 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_LOCK_INPUT_BUFFER) -> NVENCSTATUS; 91 | type UnlockInputBuffer = unsafe extern "C" fn(*mut c_void, NV_ENC_INPUT_PTR) -> NVENCSTATUS; 92 | type GetEncodeStats = unsafe extern "C" fn(*mut c_void, *mut NV_ENC_STAT) -> NVENCSTATUS; 93 | type GetSequenceParams = 94 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_SEQUENCE_PARAM_PAYLOAD) -> NVENCSTATUS; 95 | type RegisterAsyncEvent = 96 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_EVENT_PARAMS) -> NVENCSTATUS; 97 | type UnregisterAsyncEvent = 98 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_EVENT_PARAMS) -> NVENCSTATUS; 99 | type MapInputResource = 100 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_MAP_INPUT_RESOURCE) -> NVENCSTATUS; 101 | type UnmapInputResource = unsafe extern "C" fn(*mut c_void, NV_ENC_INPUT_PTR) -> NVENCSTATUS; 102 | type DestroyEncoder = unsafe extern "C" fn(encoder: *mut c_void) -> NVENCSTATUS; 103 | type InvalidateRefFrames = unsafe extern "C" fn(*mut c_void, u64) -> NVENCSTATUS; 104 | type OpenEncodeSessionEx = unsafe extern "C" fn( 105 | *mut NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS, 106 | *mut *mut c_void, 107 | ) -> NVENCSTATUS; 108 | type RegisterResource = 109 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_REGISTER_RESOURCE) -> NVENCSTATUS; 110 | type UnregisterResource = unsafe extern "C" fn(*mut 
c_void, NV_ENC_REGISTERED_PTR) -> NVENCSTATUS; 111 | type ReconfigureEncoder = 112 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_RECONFIGURE_PARAMS) -> NVENCSTATUS; 113 | type CreateMVBuffer = 114 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_CREATE_MV_BUFFER) -> NVENCSTATUS; 115 | type DestroyMVBuffer = unsafe extern "C" fn(*mut c_void, NV_ENC_OUTPUT_PTR) -> NVENCSTATUS; 116 | type RunMotionEstimationOnly = 117 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_MEONLY_PARAMS) -> NVENCSTATUS; 118 | type GetLastErrorString = unsafe extern "C" fn(encoder: *mut c_void) -> *const ::core::ffi::c_char; 119 | type SetIOCudaStreams = 120 | unsafe extern "C" fn(*mut c_void, NV_ENC_CUSTREAM_PTR, NV_ENC_CUSTREAM_PTR) -> NVENCSTATUS; 121 | type GetSequenceParamEx = unsafe extern "C" fn( 122 | *mut c_void, 123 | *mut NV_ENC_INITIALIZE_PARAMS, 124 | *mut NV_ENC_SEQUENCE_PARAM_PAYLOAD, 125 | ) -> NVENCSTATUS; 126 | type RestoreEncoderState = 127 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_RESTORE_ENCODER_STATE_PARAMS) -> NVENCSTATUS; 128 | type LookaheadPicture = 129 | unsafe extern "C" fn(*mut c_void, *mut NV_ENC_LOOKAHEAD_PIC_PARAMS) -> NVENCSTATUS; 130 | 131 | /// An instance of the `NvEncodeAPI` interface, containing function pointers 132 | /// which should be used to interface with the rest of the Encoder API. 133 | #[allow(dead_code, missing_docs)] 134 | #[derive(Debug, Clone)] 135 | pub struct EncodeAPI { 136 | #[doc(alias = "NvEncOpenEncodeSession")] 137 | pub open_encode_session: OpenEncodeSession, 138 | #[doc(alias = "NvEncOpenEncodeSessionEx")] 139 | pub open_encode_session_ex: OpenEncodeSessionEx, 140 | #[doc(alias = "NvEncInitializeEncoder")] 141 | pub initialize_encoder: InitializeEncoder, 142 | #[doc(alias = "NvEncReconfigureEncoder")] 143 | pub reconfigure_encoder: ReconfigureEncoder, 144 | #[doc(alias = "NvEncDestroyEncoder")] 145 | pub destroy_encoder: DestroyEncoder, 146 | #[doc(alias = "NvEncGetEncodeGuidCount")] 147 | pub get_encode_guid_count: GetEncodeGUIDCount, 148 | #[doc(alias = "NvEncGetEncodeGUIDs")] 149 | pub get_encode_guids: GetEncodeGUIDs, 150 | #[doc(alias = "NvEncGetEncodeProfileGuidCount")] 151 | pub get_encode_profile_guid_count: GetEncodeProfileGUIDCount, 152 | #[doc(alias = "NvEncGetEncodeProfileGUIDs")] 153 | pub get_encode_profile_guids: GetEncodeProfileGUIDs, 154 | #[doc(alias = "NvEncGetInputFormatCount")] 155 | pub get_input_format_count: GetInputFormatCount, 156 | #[doc(alias = "NvEncGetInputFormats")] 157 | pub get_input_formats: GetInputFormats, 158 | #[doc(alias = "NvEncGetEncodePresetCount")] 159 | pub get_encode_preset_count: GetEncodePresetCount, 160 | #[doc(alias = "NvEncGetEncodePresetGUIDs")] 161 | pub get_encode_preset_guids: GetEncodePresetGUIDs, 162 | #[doc(alias = "NvEncGetEncodePresetConfig")] 163 | pub get_encode_preset_config: GetEncodePresetConfig, 164 | #[doc(alias = "NvEncGetEncodePresetConfigEx")] 165 | pub get_encode_preset_config_ex: GetEncodePresetConfigEx, 166 | #[doc(alias = "NvEncGetEncodeCaps")] 167 | pub get_encode_caps: GetEncodeCaps, 168 | #[doc(alias = "NvEncCreateInputBuffer")] 169 | pub create_input_buffer: CreateInputBuffer, 170 | #[doc(alias = "NvEncDestroyInputBuffer")] 171 | pub destroy_input_buffer: DestroyInputBuffer, 172 | #[doc(alias = "NvLockInputBuffer")] 173 | pub lock_input_buffer: LockInputBuffer, 174 | #[doc(alias = "NvUnlockInputBuffer")] 175 | pub unlock_input_buffer: UnlockInputBuffer, 176 | #[doc(alias = "NvEncCreateBitstreamBuffer")] 177 | pub create_bitstream_buffer: CreateBitstreamBuffer, 178 | 
#[doc(alias = "NvEncDestroyBitstreamBuffer")] 179 | pub destroy_bitstream_buffer: DestroyBitstreamBuffer, 180 | #[doc(alias = "NvEncLockBitstream")] 181 | pub lock_bitstream: LockBitstream, 182 | #[doc(alias = "NvEncUnlockBitstream")] 183 | pub unlock_bitstream: UnlockBitstream, 184 | #[doc(alias = "NvEncMapInputResource")] 185 | pub map_input_resource: MapInputResource, 186 | #[doc(alias = "NvEncUnmapInputResource")] 187 | pub unmap_input_resource: UnmapInputResource, 188 | #[doc(alias = "NvEncRegisterResource")] 189 | pub register_resource: RegisterResource, 190 | #[doc(alias = "NvEncUnregisterResource")] 191 | pub unregister_resource: UnregisterResource, 192 | #[doc(alias = "NvEncCreateMVBuffer")] 193 | pub create_mv_buffer: CreateMVBuffer, 194 | #[doc(alias = "NvEncDestroyMVBuffer")] 195 | pub destroy_mv_buffer: DestroyMVBuffer, 196 | #[doc(alias = "NvEncEncodePicture")] 197 | pub encode_picture: EncodePicture, 198 | #[doc(alias = "NvEncGetEncodeStats")] 199 | pub get_encode_stats: GetEncodeStats, 200 | #[doc(alias = "NvEncGetSequenceParams")] 201 | pub get_sequence_params: GetSequenceParams, 202 | #[doc(alias = "NvEncGetSequenceParamEx")] 203 | pub get_sequence_param_ex: GetSequenceParamEx, 204 | #[doc(alias = "NvEncRegisterAsyncEvent")] 205 | pub register_async_event: RegisterAsyncEvent, 206 | #[doc(alias = "NvEncUnregisterAsyncEvent")] 207 | pub unregister_async_event: UnregisterAsyncEvent, 208 | #[doc(alias = "NvEncInvalidateRefFrames")] 209 | pub invalidate_ref_frames: InvalidateRefFrames, 210 | #[doc(alias = "NvEncRunMotionEstimationOnly")] 211 | pub run_motion_estimation_only: RunMotionEstimationOnly, 212 | #[doc(alias = "NvEncGetLastErrorString")] 213 | pub get_last_error_string: GetLastErrorString, 214 | #[doc(alias = "NvEncSetIOCudaStreams")] 215 | pub set_io_cuda_streams: SetIOCudaStreams, 216 | #[doc(alias = "NvEncRestoreEncoderState")] 217 | pub restore_encoder_state: RestoreEncoderState, 218 | #[doc(alias = "NvEncLookaheadPicture")] 219 | pub lookahead_picture: LookaheadPicture, 220 | } 221 | 222 | fn assert_versions_match(max_supported_version: u32) { 223 | let major_version = max_supported_version >> 4; 224 | let minor_version = max_supported_version & 0b1111; 225 | assert!( 226 | (major_version, minor_version) >= (NVENCAPI_MAJOR_VERSION, NVENCAPI_MINOR_VERSION), 227 | "The maximum supported version should be greater or equal than the header version." 228 | ); 229 | } 230 | 231 | impl EncodeAPI { 232 | fn new() -> Self { 233 | const MSG: &str = "The API instance should populate the whole function list."; 234 | 235 | // Check that the driver max supported version matches the version 236 | // from the header files. If they do not match, the bindings should be updated. 237 | let mut version = 0; 238 | unsafe { NvEncodeAPIGetMaxSupportedVersion(&mut version) } 239 | .result_without_string() 240 | .expect("The pointer to the version should be valid."); 241 | assert_versions_match(version); 242 | 243 | // Create empty function buffer. 244 | let mut function_list = NV_ENCODE_API_FUNCTION_LIST { 245 | version: NV_ENCODE_API_FUNCTION_LIST_VER, 246 | ..Default::default() 247 | }; 248 | // Create Encode API Instance (populate function buffer). 
249 | unsafe { NvEncodeAPICreateInstance(&mut function_list) } 250 | .result_without_string() 251 | .expect("The pointer to the function list should be valid."); 252 | 253 | Self { 254 | open_encode_session: function_list.nvEncOpenEncodeSession.expect(MSG), 255 | open_encode_session_ex: function_list.nvEncOpenEncodeSessionEx.expect(MSG), 256 | initialize_encoder: function_list.nvEncInitializeEncoder.expect(MSG), 257 | reconfigure_encoder: function_list.nvEncReconfigureEncoder.expect(MSG), 258 | destroy_encoder: function_list.nvEncDestroyEncoder.expect(MSG), 259 | get_encode_guid_count: function_list.nvEncGetEncodeGUIDCount.expect(MSG), 260 | get_encode_guids: function_list.nvEncGetEncodeGUIDs.expect(MSG), 261 | get_encode_profile_guid_count: function_list.nvEncGetEncodeProfileGUIDCount.expect(MSG), 262 | get_encode_profile_guids: function_list.nvEncGetEncodeProfileGUIDs.expect(MSG), 263 | get_input_format_count: function_list.nvEncGetInputFormatCount.expect(MSG), 264 | get_input_formats: function_list.nvEncGetInputFormats.expect(MSG), 265 | get_encode_preset_count: function_list.nvEncGetEncodePresetCount.expect(MSG), 266 | get_encode_preset_guids: function_list.nvEncGetEncodePresetGUIDs.expect(MSG), 267 | get_encode_preset_config: function_list.nvEncGetEncodePresetConfig.expect(MSG), 268 | get_encode_preset_config_ex: function_list.nvEncGetEncodePresetConfigEx.expect(MSG), 269 | get_encode_caps: function_list.nvEncGetEncodeCaps.expect(MSG), 270 | create_input_buffer: function_list.nvEncCreateInputBuffer.expect(MSG), 271 | destroy_input_buffer: function_list.nvEncDestroyInputBuffer.expect(MSG), 272 | lock_input_buffer: function_list.nvEncLockInputBuffer.expect(MSG), 273 | unlock_input_buffer: function_list.nvEncUnlockInputBuffer.expect(MSG), 274 | create_bitstream_buffer: function_list.nvEncCreateBitstreamBuffer.expect(MSG), 275 | destroy_bitstream_buffer: function_list.nvEncDestroyBitstreamBuffer.expect(MSG), 276 | lock_bitstream: function_list.nvEncLockBitstream.expect(MSG), 277 | unlock_bitstream: function_list.nvEncUnlockBitstream.expect(MSG), 278 | map_input_resource: function_list.nvEncMapInputResource.expect(MSG), 279 | unmap_input_resource: function_list.nvEncUnmapInputResource.expect(MSG), 280 | register_resource: function_list.nvEncRegisterResource.expect(MSG), 281 | unregister_resource: function_list.nvEncUnregisterResource.expect(MSG), 282 | create_mv_buffer: function_list.nvEncCreateMVBuffer.expect(MSG), 283 | destroy_mv_buffer: function_list.nvEncDestroyMVBuffer.expect(MSG), 284 | encode_picture: function_list.nvEncEncodePicture.expect(MSG), 285 | get_encode_stats: function_list.nvEncGetEncodeStats.expect(MSG), 286 | get_sequence_params: function_list.nvEncGetSequenceParams.expect(MSG), 287 | get_sequence_param_ex: function_list.nvEncGetSequenceParamEx.expect(MSG), 288 | register_async_event: function_list.nvEncRegisterAsyncEvent.expect(MSG), 289 | unregister_async_event: function_list.nvEncUnregisterAsyncEvent.expect(MSG), 290 | invalidate_ref_frames: function_list.nvEncInvalidateRefFrames.expect(MSG), 291 | run_motion_estimation_only: function_list.nvEncRunMotionEstimationOnly.expect(MSG), 292 | get_last_error_string: function_list.nvEncGetLastErrorString.expect(MSG), 293 | set_io_cuda_streams: function_list.nvEncSetIOCudaStreams.expect(MSG), 294 | restore_encoder_state: function_list.nvEncRestoreEncoderState.expect(MSG), 295 | lookahead_picture: function_list.nvEncLookaheadPicture.expect(MSG), 296 | } 297 | } 298 | } 299 | 
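// Worked example of the version check above: `assert_versions_match` decodes
// the value from `NvEncodeAPIGetMaxSupportedVersion` as `(major << 4) | minor`,
// so a driver supporting 12.1 would report (12 << 4) | 1 = 0xC1 = 193, which
// splits back into major = 12 and minor = 1.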
-------------------------------------------------------------------------------- /src/safe/encoder.rs: -------------------------------------------------------------------------------- 1 | //! The [`Encoder`] is the main entrypoint for the Encoder API. 2 | //! 3 | //! The [`Encoder`] provides a slightly higher-level abstraction over the 4 | //! encoder API. This module also defines builders for some of the parameter 5 | //! structs used by the interface. 6 | 7 | use std::{ffi::c_void, ptr, sync::Arc}; 8 | 9 | use cudarc::driver::CudaContext; 10 | 11 | use super::{api::ENCODE_API, result::EncodeError, session::Session}; 12 | use crate::sys::nvEncodeAPI::{ 13 | GUID, 14 | NVENCAPI_VERSION, 15 | NV_ENC_BUFFER_FORMAT, 16 | NV_ENC_CONFIG, 17 | NV_ENC_CONFIG_VER, 18 | NV_ENC_DEVICE_TYPE, 19 | NV_ENC_INITIALIZE_PARAMS, 20 | NV_ENC_INITIALIZE_PARAMS_VER, 21 | NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS, 22 | NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER, 23 | NV_ENC_PRESET_CONFIG, 24 | NV_ENC_PRESET_CONFIG_VER, 25 | NV_ENC_TUNING_INFO, 26 | }; 27 | 28 | /// Entrypoint for the Encoder API. 29 | /// 30 | /// The general usage follows these steps: 31 | /// 1. Initialize the encoder. 32 | /// 2. Set up the desired encoding parameters. 33 | /// 3. Allocate or register input and output buffers. 34 | /// 4. Copy frames to input buffers, encode, and read out of the output bitstream. 35 | /// 5. Close the encoding session and clean up. 36 | /// 37 | /// With this wrapper, cleanup is performed automatically. 38 | /// To do the other steps, this struct provides associated functions 39 | /// such as [`Encoder::get_encode_guids`] or 40 | /// [`Encoder::get_supported_input_formats`]. 41 | /// 42 | /// Once the configuration is completed, a session should be initialized with 43 | /// [`Encoder::start_session`] to get a [`Session`]. 44 | /// This type has further functions to create input and output buffers 45 | /// and encode pictures. 46 | /// 47 | /// See [NVIDIA Video Codec SDK - Video Encoder API Programming Guide](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html). 48 | #[derive(Debug)] 49 | pub struct Encoder { 50 | pub(crate) ptr: *mut c_void, 51 | // Used to fetch the device pointer for an externally allocated buffer 52 | pub(crate) ctx: Arc<CudaContext>, 53 | } 54 | 55 | /// The client must flush the encoder before freeing any resources. 56 | /// Do this by sending an EOS encode frame. 57 | /// (This is also done automatically when [`Session`] is dropped.) 58 | /// 59 | /// Sending an EOS frame might still generate data, so if you care 60 | /// about this you should send an EOS frame yourself. 61 | /// 62 | /// The client must free all the input and output resources before 63 | /// destroying the encoder. 64 | /// If using events, they must also be unregistered. 65 | impl Drop for Encoder { 66 | fn drop(&mut self) { 67 | unsafe { (ENCODE_API.destroy_encoder)(self.ptr) } 68 | .result(self) 69 | .expect("The encoder pointer should be valid."); 70 | } 71 | } 72 | 73 | impl Encoder { 74 | /// Create an [`Encoder`] with CUDA as the encode device. 75 | /// 76 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#cuda). 77 | /// 78 | /// # Errors 79 | /// 80 | /// Could error if there was no encode capable device detected 81 | /// or if the encode device was invalid.
82 | /// 83 | /// # Examples 84 | /// 85 | /// ``` 86 | /// # use cudarc::driver::CudaContext; 87 | /// # use nvidia_video_codec_sdk::Encoder; 88 | /// let cuda_ctx = CudaContext::new(0).unwrap(); 89 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 90 | /// ``` 91 | pub fn initialize_with_cuda(cuda_ctx: Arc<CudaContext>) -> Result<Self, EncodeError> { 92 | let mut encoder = ptr::null_mut(); 93 | let mut session_params = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS { 94 | version: NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER, 95 | deviceType: NV_ENC_DEVICE_TYPE::NV_ENC_DEVICE_TYPE_CUDA, 96 | apiVersion: NVENCAPI_VERSION, 97 | // Pass the CUDA Context as the device. 98 | // valid casting since CUcontext is itself a *mut pointer 99 | device: cuda_ctx.cu_ctx().cast::<c_void>(), 100 | ..Default::default() 101 | }; 102 | 103 | if let err @ Err(_) = 104 | unsafe { (ENCODE_API.open_encode_session_ex)(&mut session_params, &mut encoder) } 105 | .result_without_string() 106 | { 107 | // We are required to destroy the encoder if there was an error. 108 | unsafe { (ENCODE_API.destroy_encoder)(encoder) }.result_without_string()?; 109 | err?; 110 | } 111 | 112 | Ok(Self { 113 | ptr: encoder, 114 | ctx: cuda_ctx, 115 | }) 116 | } 117 | 118 | // TODO: 119 | // - Make Encoder generic in Device. 120 | // - Add functions to create Encoder from other encode devices. 121 | 122 | /// Get the encode GUIDs which the encoder supports. 123 | /// 124 | /// You should use this function to check whether your 125 | /// machine supports the video compression standard 126 | /// that you wish to use. 127 | /// 128 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#selecting-encoder-codec-guid). 129 | /// 130 | /// # Errors 131 | /// 132 | /// Could error if we run out of memory. 133 | /// 134 | /// # Examples 135 | /// 136 | /// ``` 137 | /// # use cudarc::driver::CudaContext; 138 | /// # use nvidia_video_codec_sdk::{sys::nvEncodeAPI::NV_ENC_CODEC_H264_GUID, Encoder}; 139 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 140 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 141 | /// let encode_guids = encoder.get_encode_guids().unwrap(); 142 | /// // Confirm that this machine supports encoding to H.264. 143 | /// assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 144 | /// ``` 145 | pub fn get_encode_guids(&self) -> Result<Vec<GUID>, EncodeError> { 146 | // Query number of supported encoder codec GUIDs. 147 | let mut supported_count = 0; 148 | unsafe { (ENCODE_API.get_encode_guid_count)(self.ptr, &mut supported_count) } 149 | .result(self)?; 150 | // Get the supported GUIDs. 151 | let mut encode_guids = vec![GUID::default(); supported_count as usize]; 152 | let mut actual_count = 0; 153 | unsafe { 154 | (ENCODE_API.get_encode_guids)( 155 | self.ptr, 156 | encode_guids.as_mut_ptr(), 157 | supported_count, 158 | &mut actual_count, 159 | ) 160 | } 161 | .result(self)?; 162 | encode_guids.truncate(actual_count as usize); 163 | Ok(encode_guids) 164 | } 165 | 166 | /// Get the encode preset GUIDs which the encoder supports 167 | /// for the given codec GUID. 168 | /// 169 | /// You should use this function to check whether your 170 | /// machine supports the encode preset that you wish to use. 171 | /// 172 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#selecting-encoder-preset-configuration).
173 | /// 174 | /// # Errors 175 | /// 176 | /// Could error if the encode GUID is invalid 177 | /// or if we run out of memory. 178 | /// 179 | /// # Examples 180 | /// 181 | /// ``` 182 | /// # use cudarc::driver::CudaContext; 183 | /// # use nvidia_video_codec_sdk::{ 184 | /// # sys::nvEncodeAPI::{NV_ENC_CODEC_H264_GUID, NV_ENC_PRESET_P1_GUID}, 185 | /// # Encoder, 186 | /// # }; 187 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 188 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 189 | /// 190 | /// //* Check if H.264 encoding is supported. *// 191 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 192 | /// # assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 193 | /// 194 | /// let preset_guids = encoder.get_preset_guids(NV_ENC_CODEC_H264_GUID).unwrap(); 195 | /// // Confirm that H.264 supports the P1 preset (high performance, low quality) on this machine. 196 | /// assert!(preset_guids.contains(&NV_ENC_PRESET_P1_GUID)); 197 | /// ``` 198 | pub fn get_preset_guids(&self, encode_guid: GUID) -> Result<Vec<GUID>, EncodeError> { 199 | // Query the number of preset GUIDs. 200 | let mut preset_count = 0; 201 | unsafe { (ENCODE_API.get_encode_preset_count)(self.ptr, encode_guid, &mut preset_count) } 202 | .result(self)?; 203 | // Get the preset GUIDs. 204 | let mut actual_count = 0; 205 | let mut preset_guids = vec![GUID::default(); preset_count as usize]; 206 | unsafe { 207 | (ENCODE_API.get_encode_preset_guids)( 208 | self.ptr, 209 | encode_guid, 210 | preset_guids.as_mut_ptr(), 211 | preset_count, 212 | &mut actual_count, 213 | ) 214 | } 215 | .result(self)?; 216 | preset_guids.truncate(actual_count as usize); 217 | Ok(preset_guids) 218 | } 219 | 220 | /// Get the encode profile GUIDs which the encoder supports 221 | /// for the given codec GUID. 222 | /// 223 | /// You should use this function to check whether your 224 | /// machine supports the encode profile that you wish to use. 225 | /// 226 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#selecting-an-encoder-profile). 227 | /// 228 | /// # Errors 229 | /// 230 | /// Could error if the encode GUID is invalid 231 | /// or if we run out of memory. 232 | /// 233 | /// # Examples 234 | /// 235 | /// ``` 236 | /// # use cudarc::driver::CudaContext; 237 | /// # use nvidia_video_codec_sdk::{ 238 | /// # sys::nvEncodeAPI::{NV_ENC_CODEC_H264_GUID, NV_ENC_H264_PROFILE_HIGH_GUID}, 239 | /// # Encoder, 240 | /// # }; 241 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 242 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 243 | /// 244 | /// //* Check if H.264 encoding is supported. *// 245 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 246 | /// # assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 247 | /// 248 | /// let profile_guids = encoder.get_profile_guids(NV_ENC_CODEC_H264_GUID).unwrap(); 249 | /// // Confirm that H.264 supports the HIGH profile on this machine. 250 | /// assert!(profile_guids.contains(&NV_ENC_H264_PROFILE_HIGH_GUID)); 251 | /// ``` 252 | pub fn get_profile_guids(&self, encode_guid: GUID) -> Result<Vec<GUID>, EncodeError> { 253 | // Query the number of profile GUIDs. 254 | let mut profile_count = 0; 255 | unsafe { 256 | (ENCODE_API.get_encode_profile_guid_count)(self.ptr, encode_guid, &mut profile_count) 257 | } 258 | .result(self)?; 259 | // Get the profile GUIDs.
260 | let mut profile_guids = vec![GUID::default(); profile_count as usize]; 261 | let mut actual_count = 0; 262 | unsafe { 263 | (ENCODE_API.get_encode_profile_guids)( 264 | self.ptr, 265 | encode_guid, 266 | profile_guids.as_mut_ptr(), 267 | profile_count, 268 | &mut actual_count, 269 | ) 270 | } 271 | .result(self)?; 272 | profile_guids.truncate(actual_count as usize); 273 | Ok(profile_guids) 274 | } 275 | 276 | /// Get the buffer formats which the encoder supports 277 | /// for the given codec GUID. 278 | /// 279 | /// You should use this function to check whether your 280 | /// machine supports the buffer format that you wish to use. 281 | /// 282 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#getting-supported-list-of-input-formats). 283 | /// 284 | /// # Errors 285 | /// 286 | /// Could error if the encode GUID is invalid 287 | /// or if we run out of memory. 288 | /// 289 | /// # Examples 290 | /// 291 | /// ``` 292 | /// # use cudarc::driver::CudaContext; 293 | /// # use nvidia_video_codec_sdk::{ 294 | /// # sys::nvEncodeAPI::{NV_ENC_BUFFER_FORMAT, NV_ENC_CODEC_H264_GUID}, 295 | /// # Encoder, 296 | /// # }; 297 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 298 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 299 | /// 300 | /// //* Check if H.264 encoding is supported. *// 301 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 302 | /// # assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 303 | /// 304 | /// let input_guids = encoder 305 | /// .get_supported_input_formats(NV_ENC_CODEC_H264_GUID) 306 | /// .unwrap(); 307 | /// // Confirm that H.264 supports the `ARGB10` format on this machine. 308 | /// assert!(input_guids.contains(&NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB10)); 309 | /// ``` 310 | pub fn get_supported_input_formats( 311 | &self, 312 | encode_guid: GUID, 313 | ) -> Result<Vec<NV_ENC_BUFFER_FORMAT>, EncodeError> { 314 | // Query the number of supported input formats. 315 | let mut format_count = 0; 316 | unsafe { (ENCODE_API.get_input_format_count)(self.ptr, encode_guid, &mut format_count) } 317 | .result(self)?; 318 | // Get the supported input formats. 319 | let mut supported_input_formats = 320 | vec![NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_UNDEFINED; format_count as usize]; 321 | let mut actual_count = 0; 322 | unsafe { 323 | (ENCODE_API.get_input_formats)( 324 | self.ptr, 325 | encode_guid, 326 | supported_input_formats.as_mut_ptr(), 327 | format_count, 328 | &mut actual_count, 329 | ) 330 | } 331 | .result(self)?; 332 | supported_input_formats.truncate(actual_count as usize); 333 | Ok(supported_input_formats) 334 | } 335 | 336 | /// Get the preset config struct from the given codec GUID, preset GUID, 337 | /// and tuning info. 338 | /// 339 | /// You should use this function to generate a preset config for the 340 | /// encoder session if you want to modify the preset further. 341 | /// 342 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#selecting-encoder-preset-configuration). 343 | /// 344 | /// # Errors 345 | /// 346 | /// Could error if `encode_guid` or `preset_guid` is invalid, 347 | /// if `tuning_info` is set to 348 | /// [`NV_ENC_TUNING_INFO::NV_ENC_TUNING_INFO_UNDEFINED`] or 349 | /// [`NV_ENC_TUNING_INFO::NV_ENC_TUNING_INFO_COUNT`], 350 | /// or if we run out of memory.
351 | /// 352 | /// # Examples 353 | /// 354 | /// ``` 355 | /// # use cudarc::driver::CudaContext; 356 | /// # use nvidia_video_codec_sdk::{ 357 | /// # sys::nvEncodeAPI::{ 358 | /// # NV_ENC_CODEC_H264_GUID, 359 | /// # NV_ENC_PRESET_P1_GUID, 360 | /// # NV_ENC_TUNING_INFO, 361 | /// # }, 362 | /// # Encoder, 363 | /// # }; 364 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 365 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 366 | /// 367 | /// //* Check if H.264 encoding and the P1 preset (highest performance) are supported. *// 368 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 369 | /// # assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 370 | /// # let preset_guids = encoder.get_preset_guids(NV_ENC_CODEC_H264_GUID).unwrap(); 371 | /// # assert!(preset_guids.contains(&NV_ENC_PRESET_P1_GUID)); 372 | /// 373 | /// // Create the preset config. 374 | /// let _preset_config = encoder 375 | /// .get_preset_config( 376 | /// NV_ENC_CODEC_H264_GUID, 377 | /// NV_ENC_PRESET_P1_GUID, 378 | /// NV_ENC_TUNING_INFO::NV_ENC_TUNING_INFO_ULTRA_LOW_LATENCY, 379 | /// ) 380 | /// .unwrap(); 381 | /// ``` 382 | pub fn get_preset_config( 383 | &self, 384 | encode_guid: GUID, 385 | preset_guid: GUID, 386 | tuning_info: NV_ENC_TUNING_INFO, 387 | ) -> Result<NV_ENC_PRESET_CONFIG, EncodeError> { 388 | let mut preset_config = NV_ENC_PRESET_CONFIG { 389 | version: NV_ENC_PRESET_CONFIG_VER, 390 | presetCfg: NV_ENC_CONFIG { 391 | version: NV_ENC_CONFIG_VER, 392 | ..Default::default() 393 | }, 394 | ..Default::default() 395 | }; 396 | unsafe { 397 | (ENCODE_API.get_encode_preset_config_ex)( 398 | self.ptr, 399 | encode_guid, 400 | preset_guid, 401 | tuning_info, 402 | &mut preset_config, 403 | ) 404 | } 405 | .result(self)?; 406 | Ok(preset_config) 407 | } 408 | 409 | /// Initialize an encoder session with the given configuration. 410 | /// 411 | /// You must do this before you can encode a picture. 412 | /// You should use the [`NV_ENC_INITIALIZE_PARAMS`] builder 413 | /// via [`NV_ENC_INITIALIZE_PARAMS::new`]. 414 | /// 415 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#initializing-the-hardware-encoder-session). 416 | /// 417 | /// # Errors 418 | /// 419 | /// Could error if the `initialize_params` are invalid 420 | /// or if we run out of memory. 421 | /// 422 | /// # Examples 423 | /// 424 | /// ``` 425 | /// # use cudarc::driver::CudaContext; 426 | /// # use nvidia_video_codec_sdk::{ 427 | /// # sys::nvEncodeAPI::{ 428 | /// # NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB, 429 | /// # NV_ENC_CODEC_H264_GUID, 430 | /// # }, 431 | /// # Encoder, EncoderInitParams 432 | /// # }; 433 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 434 | /// let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 435 | /// 436 | /// //* Check if `NV_ENC_CODEC_H264_GUID` is supported. *// 437 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 438 | /// # assert!(encode_guids.contains(&NV_ENC_CODEC_H264_GUID)); 439 | /// 440 | /// // Initialize the encoder session.
441 | /// let _session = encoder 442 | /// .start_session( 443 | /// NV_ENC_BUFFER_FORMAT_ARGB, 444 | /// EncoderInitParams::new(NV_ENC_CODEC_H264_GUID, 1920, 1080), 445 | /// ) 446 | /// .unwrap(); 447 | /// ``` 448 | pub fn start_session( 449 | self, 450 | buffer_format: NV_ENC_BUFFER_FORMAT, 451 | mut initialize_params: EncoderInitParams<'_>, 452 | ) -> Result<Session, EncodeError> { 453 | let initialize_params = &mut initialize_params.param; 454 | let width = initialize_params.encodeWidth; 455 | let height = initialize_params.encodeHeight; 456 | unsafe { (ENCODE_API.initialize_encoder)(self.ptr, initialize_params) }.result(&self)?; 457 | Ok(Session { 458 | encoder: self, 459 | width, 460 | height, 461 | buffer_format, 462 | encode_guid: initialize_params.encodeGUID, 463 | }) 464 | } 465 | } 466 | 467 | /// A safe wrapper for [`NV_ENC_INITIALIZE_PARAMS`], which is the encoder 468 | /// initialization parameter struct. 469 | #[derive(Debug)] 470 | pub struct EncoderInitParams<'a> { 471 | param: NV_ENC_INITIALIZE_PARAMS, 472 | marker: std::marker::PhantomData<&'a mut NV_ENC_CONFIG>, 473 | } 474 | 475 | impl<'a> EncoderInitParams<'a> { 476 | /// Create a new builder for [`EncoderInitParams`], which is a wrapper for 477 | /// [`NV_ENC_INITIALIZE_PARAMS`]. 478 | #[must_use] 479 | pub fn new(encode_guid: GUID, width: u32, height: u32) -> Self { 480 | let param = NV_ENC_INITIALIZE_PARAMS { 481 | version: NV_ENC_INITIALIZE_PARAMS_VER, 482 | encodeGUID: encode_guid, 483 | encodeWidth: width, 484 | encodeHeight: height, 485 | ..Default::default() 486 | }; 487 | Self { 488 | param, 489 | marker: std::marker::PhantomData, 490 | } 491 | } 492 | 493 | /// Specifies the preset for encoding. If the preset GUID is set, then 494 | /// the preset configuration will be applied before any other parameter. 495 | pub fn preset_guid(&mut self, preset_guid: GUID) -> &mut Self { 496 | self.param.presetGUID = preset_guid; 497 | self 498 | } 499 | 500 | /// Tuning info for NVENC encoding (`tuning_info` is not applicable to H264 501 | /// and HEVC ME-only mode). 502 | pub fn tuning_info(&mut self, tuning_info: NV_ENC_TUNING_INFO) -> &mut Self { 503 | self.param.tuningInfo = tuning_info; 504 | self 505 | } 506 | 507 | /// Specifies the advanced codec-specific structure. If the client has sent a 508 | /// valid codec config structure, it will override the parameters set by 509 | /// [`EncoderInitParams::preset_guid`]. 510 | /// 511 | /// The client can query the interface for codec-specific parameters 512 | /// using [`Encoder::get_preset_config`](super::encoder::Encoder::get_preset_config). 513 | /// It can then modify (if required) some of the codec config parameters and 514 | /// send down a custom config structure using this method. Even in this 515 | /// case, the client is recommended to pass the same preset GUID it has 516 | /// used to get the config. 517 | pub fn encode_config(&mut self, encode_config: &'a mut NV_ENC_CONFIG) -> &mut Self { 518 | self.param.encodeConfig = encode_config; 519 | self 520 | } 521 | 522 | /// Specifies the display aspect ratio (H264/HEVC) or the render 523 | /// width/height (AV1). 524 | pub fn display_aspect_ratio(&mut self, width: u32, height: u32) -> &mut Self { 525 | self.param.darWidth = width; 526 | self.param.darHeight = height; 527 | self 528 | } 529 | 530 | /// Specifies the framerate in frames per second as a fraction 531 | /// `numerator / denominator`.
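///
/// For example, `framerate(30, 1)` is exactly 30 fps, while
/// `framerate(30000, 1001)` expresses the NTSC rate of roughly 29.97 fps.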
532 | pub fn framerate(&mut self, numerator: u32, denominator: u32) -> &mut Self { 533 | self.param.frameRateNum = numerator; 534 | self.param.frameRateDen = denominator; 535 | self 536 | } 537 | 538 | /// Enable the Picture Type Decision to be taken by the 539 | /// `NvEncodeAPI` interface. 540 | pub fn enable_picture_type_decision(&mut self) -> &mut Self { 541 | self.param.enablePTD = 1; 542 | self 543 | } 544 | } 545 | -------------------------------------------------------------------------------- /src/safe/buffer.rs: -------------------------------------------------------------------------------- 1 | //! Defines traits and types for dealing with input and output buffers. 2 | 3 | use std::{ffi::c_void, ptr}; 4 | 5 | use cudarc::driver::{DevicePtr, MappedBuffer}; 6 | 7 | use super::{api::ENCODE_API, encoder::Encoder, result::EncodeError, session::Session}; 8 | use crate::sys::nvEncodeAPI::{ 9 | NV_ENC_BUFFER_FORMAT, 10 | NV_ENC_CREATE_BITSTREAM_BUFFER, 11 | NV_ENC_CREATE_BITSTREAM_BUFFER_VER, 12 | NV_ENC_CREATE_INPUT_BUFFER, 13 | NV_ENC_CREATE_INPUT_BUFFER_VER, 14 | NV_ENC_INPUT_RESOURCE_TYPE, 15 | NV_ENC_LOCK_BITSTREAM, 16 | NV_ENC_LOCK_BITSTREAM_VER, 17 | NV_ENC_LOCK_INPUT_BUFFER, 18 | NV_ENC_LOCK_INPUT_BUFFER_VER, 19 | NV_ENC_MAP_INPUT_RESOURCE, 20 | NV_ENC_MAP_INPUT_RESOURCE_VER, 21 | NV_ENC_PIC_TYPE, 22 | NV_ENC_REGISTER_RESOURCE, 23 | }; 24 | 25 | /// If a type implements this trait it means it is a valid input buffer 26 | /// for the encoding API. 27 | pub trait EncoderInput { 28 | /// Get the pitch (AKA stride) of the input resource. 29 | fn pitch(&self) -> u32; 30 | 31 | /// Get the handle of the input resource. 32 | fn handle(&mut self) -> *mut c_void; 33 | } 34 | 35 | /// If a type implements this trait it means it is a valid output buffer 36 | /// for the encoding API. 37 | pub trait EncoderOutput { 38 | /// Get the handle of the output resource. 39 | fn handle(&mut self) -> *mut c_void; 40 | } 41 | 42 | /// Functions for creating input and output buffers. 43 | impl Session { 44 | /// Create a [`Buffer`]. 45 | /// 46 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#creating-resources-required-to-hold-inputoutput-data). 47 | /// 48 | /// # Errors 49 | /// 50 | /// Could error if the `width`, `height`, or `buffer_format` is invalid, 51 | /// or if we run out of memory. 52 | /// 53 | /// # Examples 54 | /// 55 | /// ``` 56 | /// # use cudarc::driver::CudaContext; 57 | /// # use nvidia_video_codec_sdk::{ 58 | /// # sys::nvEncodeAPI::{ 59 | /// # NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB, 60 | /// # NV_ENC_CODEC_H264_GUID, 61 | /// # NV_ENC_PIC_PARAMS, 62 | /// # NV_ENC_PIC_STRUCT, 63 | /// # }, 64 | /// # Encoder, EncoderInitParams 65 | /// # }; 66 | /// # const WIDTH: u32 = 1920; 67 | /// # const HEIGHT: u32 = 1080; 68 | /// //* Create encoder. *// 69 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 70 | /// # let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 71 | /// 72 | /// //* Set `encode_guid` and `buffer_format`, and check that H.264 encoding and the ARGB format are supported. 
*//
73 | /// # let encode_guid = NV_ENC_CODEC_H264_GUID;
74 | /// # let encode_guids = encoder.get_encode_guids().unwrap();
75 | /// # assert!(encode_guids.contains(&encode_guid));
76 | /// # let buffer_format = NV_ENC_BUFFER_FORMAT_ARGB;
77 | /// # let input_formats = encoder.get_supported_input_formats(encode_guid).unwrap();
78 | /// # assert!(input_formats.contains(&buffer_format));
79 | ///
80 | /// //* Begin encoder session. *//
81 | /// # let mut initialize_params = EncoderInitParams::new(encode_guid, WIDTH, HEIGHT);
82 | /// # initialize_params.display_aspect_ratio(16, 9)
83 | /// #     .framerate(30, 1)
84 | /// #     .enable_picture_type_decision();
85 | /// # let session = encoder.start_session(
86 | /// #     buffer_format,
87 | /// #     initialize_params,
88 | /// # ).unwrap();
89 | ///
90 | /// // Create an input buffer.
91 | /// let _input_buffer = session
92 | ///     .create_input_buffer()
93 | ///     .unwrap();
94 | /// ```
95 | pub fn create_input_buffer(&self) -> Result<Buffer<'_>, EncodeError> {
96 | let mut create_input_buffer_params = NV_ENC_CREATE_INPUT_BUFFER {
97 | version: NV_ENC_CREATE_INPUT_BUFFER_VER,
98 | width: self.width,
99 | height: self.height,
100 | bufferFmt: self.buffer_format,
101 | inputBuffer: ptr::null_mut(),
102 | ..Default::default()
103 | };
104 | unsafe {
105 | (ENCODE_API.create_input_buffer)(self.encoder.ptr, &mut create_input_buffer_params)
106 | }
107 | .result(&self.encoder)?;
108 | Ok(Buffer {
109 | ptr: create_input_buffer_params.inputBuffer,
110 | pitch: self.width,
111 | encoder: &self.encoder,
112 | })
113 | }
114 |
115 | /// Create a [`Bitstream`].
116 | ///
117 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#creating-resources-required-to-hold-inputoutput-data).
118 | ///
119 | /// # Errors
120 | ///
121 | /// Could error if we run out of memory.
122 | ///
123 | /// # Examples
124 | ///
125 | /// ```
126 | /// # use cudarc::driver::CudaContext;
127 | /// # use nvidia_video_codec_sdk::{
128 | /// #     sys::nvEncodeAPI::{
129 | /// #         NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB,
130 | /// #         NV_ENC_CODEC_H264_GUID,
131 | /// #         NV_ENC_PIC_PARAMS,
132 | /// #         NV_ENC_PIC_STRUCT,
133 | /// #     },
134 | /// #     Encoder, EncoderInitParams
135 | /// # };
136 | /// # const WIDTH: u32 = 1920;
137 | /// # const HEIGHT: u32 = 1080;
138 | /// //* Create encoder. *//
139 | /// # let cuda_ctx = CudaContext::new(0).unwrap();
140 | /// # let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap();
141 | ///
142 | /// //* Set `encode_guid` and `buffer_format`, and check that H.264 encoding and the ARGB format are supported. *//
143 | /// # let encode_guid = NV_ENC_CODEC_H264_GUID;
144 | /// # let encode_guids = encoder.get_encode_guids().unwrap();
145 | /// # assert!(encode_guids.contains(&encode_guid));
146 | /// # let buffer_format = NV_ENC_BUFFER_FORMAT_ARGB;
147 | /// # let input_formats = encoder.get_supported_input_formats(encode_guid).unwrap();
148 | /// # assert!(input_formats.contains(&buffer_format));
149 | ///
150 | /// //* Begin encoder session. *//
151 | /// # let mut initialize_params = EncoderInitParams::new(encode_guid, WIDTH, HEIGHT);
152 | /// # initialize_params.display_aspect_ratio(16, 9)
153 | /// #     .framerate(30, 1)
154 | /// #     .enable_picture_type_decision();
155 | /// # let session = encoder.start_session(
156 | /// #     buffer_format,
157 | /// #     initialize_params,
158 | /// # ).unwrap();
159 | ///
160 | /// // Create an output bitstream buffer.
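/// // The encoder writes its encoded output into this bitstream buffer.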
161 | /// let _output_bitstream = session
162 | ///     .create_output_bitstream()
163 | ///     .unwrap();
164 | /// ```
165 | pub fn create_output_bitstream(&self) -> Result<Bitstream<'_>, EncodeError> {
166 | let mut create_bitstream_buffer_params = NV_ENC_CREATE_BITSTREAM_BUFFER {
167 | version: NV_ENC_CREATE_BITSTREAM_BUFFER_VER,
168 | bitstreamBuffer: ptr::null_mut(),
169 | ..Default::default()
170 | };
171 | unsafe {
172 | (ENCODE_API.create_bitstream_buffer)(
173 | self.encoder.ptr,
174 | &mut create_bitstream_buffer_params,
175 | )
176 | }
177 | .result(&self.encoder)?;
178 | Ok(Bitstream {
179 | ptr: create_bitstream_buffer_params.bitstreamBuffer,
180 | encoder: &self.encoder,
181 | })
182 | }
183 |
184 | /// Create a [`RegisteredResource`] from a [`MappedBuffer`].
185 | ///
186 | /// See [`Session::register_generic_resource`].
187 | ///
188 | /// `pitch` should be set to the value obtained from `cuMemAllocPitch()`,
189 | /// or to the width in **bytes** (if this resource was created by using
190 | /// `cuMemAlloc()`). This value must be a multiple of 4.
191 | ///
192 | /// # Errors
193 | ///
194 | /// Could error if registration or mapping fails,
195 | /// if the resource is invalid, or if we run out of memory.
196 | pub fn register_cuda_resource(
197 | &self,
198 | pitch: u32,
199 | mapped_buffer: MappedBuffer,
200 | ) -> Result<RegisteredResource<'_, MappedBuffer>, EncodeError> {
201 | let stream = self.encoder.ctx.default_stream();
202 | let (device_ptr, _) = mapped_buffer.device_ptr(&stream);
203 | self.register_generic_resource(
204 | mapped_buffer,
205 | NV_ENC_INPUT_RESOURCE_TYPE::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR,
206 | device_ptr as *mut c_void,
207 | pitch,
208 | )
209 | }
210 |
211 | /// Create a [`RegisteredResource`].
212 | ///
213 | /// This function is generic in the marker. This is so that you can
214 | /// optionally put a value on the [`RegisteredResource`] to make sure that
215 | /// value does not get dropped while the resource is registered. You should
216 | /// prefer using specific functions for the resource you are registering,
217 | /// such as [`Session::register_cuda_resource`], when they are available.
218 | ///
219 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#input-buffers-allocated-externally).
220 | ///
221 | /// # Errors
222 | ///
223 | /// Could error if registration or mapping fails,
224 | /// if the resource is invalid, or if we run out of memory.
225 | pub fn register_generic_resource<T>(
226 | &self,
227 | marker: T,
228 | resource_type: NV_ENC_INPUT_RESOURCE_TYPE,
229 | resource_to_register: *mut c_void,
230 | pitch: u32,
231 | ) -> Result<RegisteredResource<'_, T>, EncodeError> {
232 | // Register resource.
233 | let mut register_resource_params = NV_ENC_REGISTER_RESOURCE::new(
234 | resource_type,
235 | self.width,
236 | self.height,
237 | resource_to_register,
238 | self.buffer_format,
239 | )
240 | .pitch(pitch);
241 | unsafe { (ENCODE_API.register_resource)(self.encoder.ptr, &mut register_resource_params) }
242 | .result(&self.encoder)?;
243 | let registered_resource = register_resource_params.registeredResource;
244 |
245 | // Map resource.
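// Registration only tells the encoder about the external memory; mapping it
// below produces the handle that `handle()` later exposes and that is used
// as the encoder input.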
246 | let mut map_input_resource_params = NV_ENC_MAP_INPUT_RESOURCE { 247 | version: NV_ENC_MAP_INPUT_RESOURCE_VER, 248 | registeredResource: registered_resource, 249 | mappedResource: ptr::null_mut(), 250 | mappedBufferFmt: NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_UNDEFINED, 251 | ..Default::default() 252 | }; 253 | unsafe { 254 | (ENCODE_API.map_input_resource)(self.encoder.ptr, &mut map_input_resource_params) 255 | } 256 | .result(&self.encoder)?; 257 | 258 | let mapped_resource = map_input_resource_params.mappedResource; 259 | Ok(RegisteredResource { 260 | reg_ptr: registered_resource, 261 | map_ptr: mapped_resource, 262 | pitch, 263 | encoder: &self.encoder, 264 | _marker: marker, 265 | }) 266 | } 267 | } 268 | 269 | /// Abstraction around input buffer allocated using 270 | /// the NVIDIA Video Encoder API. 271 | /// 272 | /// The buffer is automatically destroyed when dropped. 273 | #[derive(Debug)] 274 | pub struct Buffer<'a> { 275 | pub(crate) ptr: *mut c_void, 276 | pitch: u32, 277 | encoder: &'a Encoder, 278 | } 279 | 280 | unsafe impl Send for Buffer<'_> {} 281 | 282 | impl<'a> Buffer<'a> { 283 | /// Lock the input buffer. 284 | /// 285 | /// On a successful lock you get a [`BufferLock`] which can be used to write 286 | /// data to the input buffer. On drop, [`BufferLock`] will unlock the 287 | /// buffer. 288 | /// 289 | /// This function will block until a lock is acquired. For the non-blocking 290 | /// version see [`Buffer::try_lock`]. 291 | /// 292 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#input-buffers-allocated-through-nvidia-video-encoder-interface). 293 | /// 294 | /// # Errors 295 | /// 296 | /// Could error if we run out of memory. 297 | /// 298 | /// # Examples 299 | /// 300 | /// ``` 301 | /// # use cudarc::driver::CudaContext; 302 | /// # use nvidia_video_codec_sdk::{ 303 | /// # sys::nvEncodeAPI::{ 304 | /// # NV_ENC_BUFFER_FORMAT::NV_ENC_BUFFER_FORMAT_ARGB, 305 | /// # NV_ENC_CODEC_H264_GUID, 306 | /// # NV_ENC_PIC_PARAMS, 307 | /// # NV_ENC_PIC_STRUCT, 308 | /// # }, 309 | /// # Encoder, EncoderInitParams 310 | /// # }; 311 | /// # const WIDTH: u32 = 1920; 312 | /// # const HEIGHT: u32 = 1080; 313 | /// # const DATA_LEN: usize = (WIDTH * HEIGHT * 4) as usize; 314 | /// //* Create encoder. *// 315 | /// # let cuda_ctx = CudaContext::new(0).unwrap(); 316 | /// # let encoder = Encoder::initialize_with_cuda(cuda_ctx).unwrap(); 317 | /// //* Set `encode_guid` and `buffer_format`, and check that H.264 encoding and the ARGB format are supported. *// 318 | /// # let encode_guid = NV_ENC_CODEC_H264_GUID; 319 | /// # let encode_guids = encoder.get_encode_guids().unwrap(); 320 | /// # assert!(encode_guids.contains(&encode_guid)); 321 | /// # let buffer_format = NV_ENC_BUFFER_FORMAT_ARGB; 322 | /// # let input_formats = encoder.get_supported_input_formats(encode_guid).unwrap(); 323 | /// # assert!(input_formats.contains(&buffer_format)); 324 | /// //* Begin encoder session. *// 325 | /// # let mut initialize_params = EncoderInitParams::new(encode_guid, WIDTH, HEIGHT); 326 | /// # initialize_params.display_aspect_ratio(16, 9) 327 | /// # .framerate(30, 1) 328 | /// # .enable_picture_type_decision(); 329 | /// # let session = encoder.start_session( 330 | /// # buffer_format, 331 | /// # initialize_params, 332 | /// # ).unwrap(); 333 | /// 334 | /// // Create an input buffer. 
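/// // Locking the buffer below gives temporary CPU access for writing frame data.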
335 | /// let mut input_buffer = session
336 | ///     .create_input_buffer()
337 | ///     .unwrap();
338 | /// unsafe { input_buffer.lock().unwrap().write(&[0; DATA_LEN]) };
339 | /// ```
340 | pub fn lock<'b>(&'b mut self) -> Result<BufferLock<'b, 'a>, EncodeError> {
341 | self.lock_inner(true)
342 | }
343 |
344 | /// Non-blocking version of [`Buffer::lock`]. See it for more info.
345 | ///
346 | /// This function will return an error with
347 | /// [`ErrorKind::EncoderBusy`](super::ErrorKind::EncoderBusy) or
348 | /// [`ErrorKind::LockBusy`](super::ErrorKind::LockBusy) if the lock is being
349 | /// used. The NVIDIA documentation from the header file is unclear about
350 | /// which of these errors is returned in this case.
351 | ///
352 | /// # Errors
353 | ///
354 | /// Could error if we run out of memory.
355 | ///
356 | /// If this returns an error with
357 | /// [`ErrorKind::EncoderBusy`](super::ErrorKind::EncoderBusy) or
358 | /// [`ErrorKind::LockBusy`](super::ErrorKind::LockBusy) then that means the
359 | /// lock is still busy and the client should retry in a few
360 | /// milliseconds.
361 | pub fn try_lock<'b>(&'b mut self) -> Result<BufferLock<'b, 'a>, EncodeError> {
362 | self.lock_inner(false)
363 | }
364 |
365 | #[inline]
366 | fn lock_inner<'b>(&'b mut self, wait: bool) -> Result<BufferLock<'b, 'a>, EncodeError> {
367 | let mut lock_input_buffer_params = NV_ENC_LOCK_INPUT_BUFFER {
368 | version: NV_ENC_LOCK_INPUT_BUFFER_VER,
369 | inputBuffer: self.ptr,
370 | ..Default::default()
371 | };
372 | if !wait {
373 | lock_input_buffer_params.set_doNotWait(1);
374 | }
375 | unsafe { (ENCODE_API.lock_input_buffer)(self.encoder.ptr, &mut lock_input_buffer_params) }
376 | .result(self.encoder)?;
377 |
378 | let data_ptr = lock_input_buffer_params.bufferDataPtr;
379 | let pitch = lock_input_buffer_params.pitch;
380 | self.pitch = pitch;
381 |
382 | Ok(BufferLock {
383 | buffer: self,
384 | data_ptr,
385 | pitch,
386 | })
387 | }
388 | }
389 |
390 | impl Drop for Buffer<'_> {
391 | fn drop(&mut self) {
392 | unsafe { (ENCODE_API.destroy_input_buffer)(self.encoder.ptr, self.ptr) }
393 | .result(self.encoder)
394 | .expect("The encoder and buffer pointers should be valid.");
395 | }
396 | }
397 |
398 | impl EncoderInput for Buffer<'_> {
399 | fn pitch(&self) -> u32 {
400 | self.pitch
401 | }
402 |
403 | fn handle(&mut self) -> *mut c_void {
404 | self.ptr
405 | }
406 | }
407 |
408 | /// An RAII lock on the input buffer.
409 | ///
410 | /// This type is created via [`Buffer::lock`] or [`Buffer::try_lock`].
411 | /// The purpose of this type is similar to [`std::sync::MutexGuard`] -
412 | /// it automatically unlocks the buffer when the lock goes out of scope.
413 | #[allow(clippy::module_name_repetitions)]
414 | #[derive(Debug)]
415 | pub struct BufferLock<'a, 'b> {
416 | buffer: &'a Buffer<'b>,
417 | data_ptr: *mut c_void,
418 | #[allow(dead_code)]
419 | pitch: u32,
420 | }
421 |
422 | impl BufferLock<'_, '_> {
423 | /// Write data to the buffer.
424 | ///
425 | /// # Safety
426 | ///
427 | /// The size of the data should be less than or equal to the size of the buffer.
428 | /// The size of the buffer depends on the width, height, and buffer format.
429 | ///
430 | /// The user should also account for pitch; the data is written
431 | /// contiguously.
432 | pub unsafe fn write(&mut self, data: &[u8]) {
433 | // TODO: Make this safe by doing checks.
434 | // - Check that length of data fits (depends on format).
435 | // - Write pitched?
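// The copy below is a plain contiguous byte copy of `data.len()` bytes into
// the locked buffer; callers with pitched data must pack it to match the
// reported pitch themselves.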
436 | data.as_ptr()
437 |     .copy_to(self.data_ptr.cast::<u8>(), data.len());
438 | }
439 | }
440 |
441 | impl Drop for BufferLock<'_, '_> {
442 | fn drop(&mut self) {
443 | unsafe { (ENCODE_API.unlock_input_buffer)(self.buffer.encoder.ptr, self.buffer.ptr) }
444 | .result(self.buffer.encoder)
445 | .expect("The encoder and buffer pointers should be valid.");
446 | }
447 | }
448 |
449 | /// Abstraction around the output bitstream buffer that
450 | /// is used as the output of the encoding.
451 | ///
452 | /// The buffer is automatically destroyed when dropped.
453 | #[derive(Debug)]
454 | pub struct Bitstream<'a> {
455 | pub(crate) ptr: *mut c_void,
456 | encoder: &'a Encoder,
457 | }
458 |
459 | unsafe impl Send for Bitstream<'_> {}
460 |
461 | impl Bitstream<'_> {
462 | /// Lock the output bitstream.
463 | ///
464 | /// On a successful lock you get a [`BitstreamLock`] which can be used to
465 | /// access the bitstream data as well as any other information the
466 | /// encoder provides when locking a bitstream.
467 | ///
468 | /// This function will block until a lock is acquired. For the non-blocking
469 | /// version see [`Bitstream::try_lock`].
470 | ///
471 | /// See [NVIDIA docs](https://docs.nvidia.com/video-technologies/video-codec-sdk/12.0/nvenc-video-encoder-api-prog-guide/index.html#retrieving-encoded-output).
472 | ///
473 | /// # Errors
474 | ///
475 | /// Could error if we run out of memory.
476 | pub fn lock(&mut self) -> Result<BitstreamLock<'_, '_>, EncodeError> {
477 | self.lock_inner(true)
478 | }
479 |
480 | /// Non-blocking version of [`Bitstream::lock`]. See it for more info.
481 | ///
482 | /// This function will return an error with
483 | /// [`ErrorKind::LockBusy`](super::ErrorKind::LockBusy) if the
484 | /// lock is currently busy.
485 | ///
486 | /// # Errors
487 | ///
488 | /// Could error if we run out of memory.
489 | ///
490 | /// An error with [`ErrorKind::LockBusy`](super::ErrorKind::LockBusy) could
491 | /// be returned if the lock is currently busy. This is a recoverable
492 | /// error and the client should retry in a few milliseconds.
493 | pub fn try_lock(&mut self) -> Result<BitstreamLock<'_, '_>, EncodeError> {
494 | self.lock_inner(false)
495 | }
496 |
497 | fn lock_inner(&mut self, wait: bool) -> Result<BitstreamLock<'_, '_>, EncodeError> {
498 | // Lock bitstream.
499 | let mut lock_bitstream_buffer_params = NV_ENC_LOCK_BITSTREAM {
500 | version: NV_ENC_LOCK_BITSTREAM_VER,
501 | outputBitstream: self.ptr,
502 | ..Default::default()
503 | };
504 | if !wait {
505 | lock_bitstream_buffer_params.set_doNotWait(1);
506 | }
507 | unsafe { (ENCODE_API.lock_bitstream)(self.encoder.ptr, &mut lock_bitstream_buffer_params) }
508 | .result(self.encoder)?;
509 |
510 | // Get data.
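// `bitstreamSizeInBytes` is the number of valid encoded bytes in the locked
// buffer, so the slice constructed below covers exactly the encoded output.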
511 | let data_ptr = lock_bitstream_buffer_params.bitstreamBufferPtr;
512 | let data_size = lock_bitstream_buffer_params.bitstreamSizeInBytes as usize;
513 | let data = unsafe { std::slice::from_raw_parts_mut(data_ptr.cast::<u8>(), data_size) };
514 |
515 | Ok(BitstreamLock {
516 | bitstream: self,
517 | data,
518 | frame_index: lock_bitstream_buffer_params.frameIdx,
519 | timestamp: lock_bitstream_buffer_params.outputTimeStamp,
520 | duration: lock_bitstream_buffer_params.outputDuration,
521 | picture_type: lock_bitstream_buffer_params.pictureType,
522 | })
523 | }
524 | }
525 |
526 | impl Drop for Bitstream<'_> {
527 | fn drop(&mut self) {
528 | unsafe { (ENCODE_API.destroy_bitstream_buffer)(self.encoder.ptr, self.ptr) }
529 | .result(self.encoder)
530 | .expect("The encoder and bitstream pointers should be valid.");
531 | }
532 | }
533 |
534 | impl EncoderOutput for Bitstream<'_> {
535 | fn handle(&mut self) -> *mut c_void {
536 | self.ptr
537 | }
538 | }
539 |
540 | /// An RAII lock on the output bitstream buffer.
541 | ///
542 | /// This type is created via [`Bitstream::lock`] or [`Bitstream::try_lock`].
543 | /// The purpose of this type is similar to [`std::sync::MutexGuard`] -
544 | /// it automatically unlocks the buffer when the lock goes out of scope.
545 | #[derive(Debug)]
546 | pub struct BitstreamLock<'a, 'b> {
547 | bitstream: &'a Bitstream<'b>,
548 | data: &'a [u8],
549 | // statistics and other info
550 | frame_index: u32,
551 | timestamp: u64,
552 | duration: u64,
553 | picture_type: NV_ENC_PIC_TYPE,
554 | // TODO: other fields
555 | }
556 |
557 | impl BitstreamLock<'_, '_> {
558 | /// Getter for the data contained in the output bitstream.
559 | #[must_use]
560 | pub fn data(&self) -> &[u8] {
561 | self.data
562 | }
563 |
564 | /// Getter for the frame index.
565 | #[must_use]
566 | pub fn frame_index(&self) -> u32 {
567 | self.frame_index
568 | }
569 |
570 | /// Getter for the timestamp.
571 | #[must_use]
572 | pub fn timestamp(&self) -> u64 {
573 | self.timestamp
574 | }
575 |
576 | /// Getter for the duration.
577 | #[must_use]
578 | pub fn duration(&self) -> u64 {
579 | self.duration
580 | }
581 |
582 | /// Getter for the picture type.
583 | #[must_use]
584 | pub fn picture_type(&self) -> NV_ENC_PIC_TYPE {
585 | self.picture_type
586 | }
587 | }
588 |
589 | impl Drop for BitstreamLock<'_, '_> {
590 | fn drop(&mut self) {
591 | unsafe { (ENCODE_API.unlock_bitstream)(self.bitstream.encoder.ptr, self.bitstream.ptr) }
592 | .result(self.bitstream.encoder)
593 | .expect("The encoder and bitstream pointers should be valid.");
594 | }
595 | }
596 |
597 | /// Abstraction for a registered and mapped external resource.
598 | ///
599 | /// The Encoder API exposes a way to use input buffers allocated externally,
600 | /// for example through CUDA or OpenGL.
601 | ///
602 | /// The buffer is automatically unmapped and unregistered when dropped.
603 | /// The external buffer memory should still be properly destroyed by the client.
604 | #[derive(Debug)]
605 | pub struct RegisteredResource<'a, T> {
606 | pub(crate) reg_ptr: *mut c_void,
607 | pub(crate) map_ptr: *mut c_void,
608 | pitch: u32,
609 | encoder: &'a Encoder,
610 | // A generic marker to make sure the external resources are dropped
611 | // after the resource is unregistered.
612 | _marker: T,
613 | }
614 |
615 | unsafe impl Send for RegisteredResource<'_, MappedBuffer> {}
616 |
617 | /// Automatically unmap and unregister the external resource
618 | /// when it goes out of scope.
619 | impl<T> Drop for RegisteredResource<'_, T> {
620 | fn drop(&mut self) {
621 | // Unmapping resource.
622 | unsafe { (ENCODE_API.unmap_input_resource)(self.encoder.ptr, self.map_ptr) }
623 | .result(self.encoder)
624 | .expect("The encoder pointer and map handle should be valid.");
625 | // Unregister resource.
626 | unsafe { (ENCODE_API.unregister_resource)(self.encoder.ptr, self.reg_ptr) }
627 | .result(self.encoder)
628 | .expect("The encoder pointer and resource handle should be valid.");
629 | }
630 | }
631 |
632 | impl<T> EncoderInput for RegisteredResource<'_, T> {
633 | fn pitch(&self) -> u32 {
634 | self.pitch
635 | }
636 |
637 | fn handle(&mut self) -> *mut c_void {
638 | self.map_ptr
639 | }
640 | }
641 |
--------------------------------------------------------------------------------
/src/sys/headers/nvcuvid.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * This copyright notice applies to this header file only:
3 |  *
4 |  * Copyright (c) 2010-2023 NVIDIA Corporation
5 |  *
6 |  * Permission is hereby granted, free of charge, to any person
7 |  * obtaining a copy of this software and associated documentation
8 |  * files (the "Software"), to deal in the Software without
9 |  * restriction, including without limitation the rights to use,
10 |  * copy, modify, merge, publish, distribute, sublicense, and/or sell
11 |  * copies of the software, and to permit persons to whom the
12 |  * software is furnished to do so, subject to the following
13 |  * conditions:
14 |  *
15 |  * The above copyright notice and this permission notice shall be
16 |  * included in all copies or substantial portions of the Software.
17 |  *
18 |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 |  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
20 |  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 |  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
22 |  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
23 |  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 |  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 |  * OTHER DEALINGS IN THE SOFTWARE.
26 |  */
27 |
28 | /********************************************************************************************************************/
29 | //! \file nvcuvid.h
30 | //! NVDECODE API provides video decoding interface to NVIDIA GPU devices.
31 | //! \date 2015-2022
32 | //! This file contains the interface constants, structure definitions and function prototypes.
33 | /********************************************************************************************************************/
34 |
35 | #if !defined(__NVCUVID_H__)
36 | #define __NVCUVID_H__
37 |
38 | #include "cuviddec.h"
39 |
40 | #if defined(__cplusplus)
41 | extern "C" {
42 | #endif /* __cplusplus */
43 |
44 | #define MAX_CLOCK_TS 3
45 |
46 | /***********************************************/
47 | //!
48 | //! High-level helper APIs for video sources
49 | //!
50 | /***********************************************/
51 |
52 | typedef void *CUvideosource;
53 | typedef void *CUvideoparser;
54 | typedef long long CUvideotimestamp;
55 |
56 |
57 | /************************************************************************/
58 | //! \enum cudaVideoState
59 | //! Video source state enums
60 | //!
Used in cuvidSetVideoSourceState and cuvidGetVideoSourceState APIs 61 | /************************************************************************/ 62 | typedef enum { 63 | cudaVideoState_Error = -1, /**< Error state (invalid source) */ 64 | cudaVideoState_Stopped = 0, /**< Source is stopped (or reached end-of-stream) */ 65 | cudaVideoState_Started = 1 /**< Source is running and delivering data */ 66 | } cudaVideoState; 67 | 68 | /************************************************************************/ 69 | //! \enum cudaAudioCodec 70 | //! Audio compression enums 71 | //! Used in CUAUDIOFORMAT structure 72 | /************************************************************************/ 73 | typedef enum { 74 | cudaAudioCodec_MPEG1=0, /**< MPEG-1 Audio */ 75 | cudaAudioCodec_MPEG2, /**< MPEG-2 Audio */ 76 | cudaAudioCodec_MP3, /**< MPEG-1 Layer III Audio */ 77 | cudaAudioCodec_AC3, /**< Dolby Digital (AC3) Audio */ 78 | cudaAudioCodec_LPCM, /**< PCM Audio */ 79 | cudaAudioCodec_AAC, /**< AAC Audio */ 80 | } cudaAudioCodec; 81 | 82 | /************************************************************************/ 83 | //! \ingroup STRUCTS 84 | //! \struct HEVCTIMECODESET 85 | //! Used to store Time code extracted from Time code SEI in HEVC codec 86 | /************************************************************************/ 87 | typedef struct _HEVCTIMECODESET 88 | { 89 | unsigned int time_offset_value; 90 | unsigned short n_frames; 91 | unsigned char clock_timestamp_flag; 92 | unsigned char units_field_based_flag; 93 | unsigned char counting_type; 94 | unsigned char full_timestamp_flag; 95 | unsigned char discontinuity_flag; 96 | unsigned char cnt_dropped_flag; 97 | unsigned char seconds_value; 98 | unsigned char minutes_value; 99 | unsigned char hours_value; 100 | unsigned char seconds_flag; 101 | unsigned char minutes_flag; 102 | unsigned char hours_flag; 103 | unsigned char time_offset_length; 104 | unsigned char reserved; 105 | } HEVCTIMECODESET; 106 | 107 | /************************************************************************/ 108 | //! \ingroup STRUCTS 109 | //! \struct HEVCSEITIMECODE 110 | //! Used to extract Time code SEI in HEVC codec 111 | /************************************************************************/ 112 | typedef struct _HEVCSEITIMECODE 113 | { 114 | HEVCTIMECODESET time_code_set[MAX_CLOCK_TS]; 115 | unsigned char num_clock_ts; 116 | } HEVCSEITIMECODE; 117 | 118 | /**********************************************************************************/ 119 | //! \ingroup STRUCTS 120 | //! \struct CUSEIMESSAGE; 121 | //! Used in CUVIDSEIMESSAGEINFO structure 122 | /**********************************************************************************/ 123 | typedef struct _CUSEIMESSAGE 124 | { 125 | unsigned char sei_message_type; /**< OUT: SEI Message Type */ 126 | unsigned char reserved[3]; 127 | unsigned int sei_message_size; /**< OUT: SEI Message Size */ 128 | } CUSEIMESSAGE; 129 | 130 | /************************************************************************************************/ 131 | //! \ingroup STRUCTS 132 | //! \struct CUVIDEOFORMAT 133 | //! Video format 134 | //! 
Used in cuvidGetSourceVideoFormat API 135 | /************************************************************************************************/ 136 | typedef struct 137 | { 138 | cudaVideoCodec codec; /**< OUT: Compression format */ 139 | /** 140 | * OUT: frame rate = numerator / denominator (for example: 30000/1001) 141 | */ 142 | struct { 143 | /**< OUT: frame rate numerator (0 = unspecified or variable frame rate) */ 144 | unsigned int numerator; 145 | /**< OUT: frame rate denominator (0 = unspecified or variable frame rate) */ 146 | unsigned int denominator; 147 | } frame_rate; 148 | unsigned char progressive_sequence; /**< OUT: 0=interlaced, 1=progressive */ 149 | unsigned char bit_depth_luma_minus8; /**< OUT: high bit depth luma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */ 150 | unsigned char bit_depth_chroma_minus8; /**< OUT: high bit depth chroma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */ 151 | unsigned char min_num_decode_surfaces; /**< OUT: Minimum number of decode surfaces to be allocated for correct 152 | decoding. The client can send this value in ulNumDecodeSurfaces 153 | (in CUVIDDECODECREATEINFO structure). 154 | This guarantees correct functionality and optimal video memory 155 | usage but not necessarily the best performance, which depends on 156 | the design of the overall application. The optimal number of 157 | decode surfaces (in terms of performance and memory utilization) 158 | should be decided by experimentation for each application, but it 159 | cannot go below min_num_decode_surfaces. 160 | If this value is used for ulNumDecodeSurfaces then it must be 161 | returned to parser during sequence callback. */ 162 | unsigned int coded_width; /**< OUT: coded frame width in pixels */ 163 | unsigned int coded_height; /**< OUT: coded frame height in pixels */ 164 | /** 165 | * area of the frame that should be displayed 166 | * typical example: 167 | * coded_width = 1920, coded_height = 1088 168 | * display_area = { 0,0,1920,1080 } 169 | */ 170 | struct { 171 | int left; /**< OUT: left position of display rect */ 172 | int top; /**< OUT: top position of display rect */ 173 | int right; /**< OUT: right position of display rect */ 174 | int bottom; /**< OUT: bottom position of display rect */ 175 | } display_area; 176 | cudaVideoChromaFormat chroma_format; /**< OUT: Chroma format */ 177 | unsigned int bitrate; /**< OUT: video bitrate (bps, 0=unknown) */ 178 | /** 179 | * OUT: Display Aspect Ratio = x:y (4:3, 16:9, etc) 180 | */ 181 | struct { 182 | int x; 183 | int y; 184 | } display_aspect_ratio; 185 | /** 186 | * Video Signal Description 187 | * Refer section E.2.1 (VUI parameters semantics) of H264 spec file 188 | */ 189 | struct { 190 | unsigned char video_format : 3; /**< OUT: 0-Component, 1-PAL, 2-NTSC, 3-SECAM, 4-MAC, 5-Unspecified */ 191 | unsigned char video_full_range_flag : 1; /**< OUT: indicates the black level and luma and chroma range */ 192 | unsigned char reserved_zero_bits : 4; /**< Reserved bits */ 193 | unsigned char color_primaries; /**< OUT: chromaticity coordinates of source primaries */ 194 | unsigned char transfer_characteristics; /**< OUT: opto-electronic transfer characteristic of the source picture */ 195 | unsigned char matrix_coefficients; /**< OUT: used in deriving luma and chroma signals from RGB primaries */ 196 | } video_signal_description; 197 | unsigned int seqhdr_data_length; /**< OUT: Additional bytes following (CUVIDEOFORMATEX) */ 198 | } CUVIDEOFORMAT; 199 | 200 | /****************************************************************/ 201 
| //! \ingroup STRUCTS 202 | //! \struct CUVIDOPERATINGPOINTINFO 203 | //! Operating point information of scalable bitstream 204 | /****************************************************************/ 205 | typedef struct 206 | { 207 | cudaVideoCodec codec; 208 | union 209 | { 210 | struct 211 | { 212 | unsigned char operating_points_cnt; 213 | unsigned char reserved24_bits[3]; 214 | unsigned short operating_points_idc[32]; 215 | } av1; 216 | unsigned char CodecReserved[1024]; 217 | }; 218 | } CUVIDOPERATINGPOINTINFO; 219 | 220 | /**********************************************************************************/ 221 | //! \ingroup STRUCTS 222 | //! \struct CUVIDSEIMESSAGEINFO 223 | //! Used in cuvidParseVideoData API with PFNVIDSEIMSGCALLBACK pfnGetSEIMsg 224 | /**********************************************************************************/ 225 | typedef struct _CUVIDSEIMESSAGEINFO 226 | { 227 | void *pSEIData; /**< OUT: SEI Message Data */ 228 | CUSEIMESSAGE *pSEIMessage; /**< OUT: SEI Message Info */ 229 | unsigned int sei_message_count; /**< OUT: SEI Message Count */ 230 | unsigned int picIdx; /**< OUT: SEI Message Pic Index */ 231 | } CUVIDSEIMESSAGEINFO; 232 | 233 | /****************************************************************/ 234 | //! \ingroup STRUCTS 235 | //! \struct CUVIDAV1SEQHDR 236 | //! AV1 specific sequence header information 237 | /****************************************************************/ 238 | typedef struct { 239 | unsigned int max_width; 240 | unsigned int max_height; 241 | unsigned char reserved[1016]; 242 | } CUVIDAV1SEQHDR; 243 | 244 | /****************************************************************/ 245 | //! \ingroup STRUCTS 246 | //! \struct CUVIDEOFORMATEX 247 | //! Video format including raw sequence header information 248 | //! Used in cuvidGetSourceVideoFormat API 249 | /****************************************************************/ 250 | typedef struct 251 | { 252 | CUVIDEOFORMAT format; /**< OUT: CUVIDEOFORMAT structure */ 253 | union { 254 | CUVIDAV1SEQHDR av1; 255 | unsigned char raw_seqhdr_data[1024]; /**< OUT: Sequence header data */ 256 | }; 257 | } CUVIDEOFORMATEX; 258 | 259 | /****************************************************************/ 260 | //! \ingroup STRUCTS 261 | //! \struct CUAUDIOFORMAT 262 | //! Audio formats 263 | //! Used in cuvidGetSourceAudioFormat API 264 | /****************************************************************/ 265 | typedef struct 266 | { 267 | cudaAudioCodec codec; /**< OUT: Compression format */ 268 | unsigned int channels; /**< OUT: number of audio channels */ 269 | unsigned int samplespersec; /**< OUT: sampling frequency */ 270 | unsigned int bitrate; /**< OUT: For uncompressed, can also be used to determine bits per sample */ 271 | unsigned int reserved1; /**< Reserved for future use */ 272 | unsigned int reserved2; /**< Reserved for future use */ 273 | } CUAUDIOFORMAT; 274 | 275 | 276 | /***************************************************************/ 277 | //! \enum CUvideopacketflags 278 | //! Data packet flags 279 | //! 
Used in CUVIDSOURCEDATAPACKET structure 280 | /***************************************************************/ 281 | typedef enum { 282 | CUVID_PKT_ENDOFSTREAM = 0x01, /**< Set when this is the last packet for this stream */ 283 | CUVID_PKT_TIMESTAMP = 0x02, /**< Timestamp is valid */ 284 | CUVID_PKT_DISCONTINUITY = 0x04, /**< Set when a discontinuity has to be signalled */ 285 | CUVID_PKT_ENDOFPICTURE = 0x08, /**< Set when the packet contains exactly one frame or one field */ 286 | CUVID_PKT_NOTIFY_EOS = 0x10, /**< If this flag is set along with CUVID_PKT_ENDOFSTREAM, an additional (dummy) 287 | display callback will be invoked with null value of CUVIDPARSERDISPINFO which 288 | should be interpreted as end of the stream. */ 289 | } CUvideopacketflags; 290 | 291 | /*****************************************************************************/ 292 | //! \ingroup STRUCTS 293 | //! \struct CUVIDSOURCEDATAPACKET 294 | //! Data Packet 295 | //! Used in cuvidParseVideoData API 296 | //! IN for cuvidParseVideoData 297 | /*****************************************************************************/ 298 | typedef struct _CUVIDSOURCEDATAPACKET 299 | { 300 | unsigned long flags; /**< IN: Combination of CUVID_PKT_XXX flags */ 301 | unsigned long payload_size; /**< IN: number of bytes in the payload (may be zero if EOS flag is set) */ 302 | const unsigned char *payload; /**< IN: Pointer to packet payload data (may be NULL if EOS flag is set) */ 303 | CUvideotimestamp timestamp; /**< IN: Presentation time stamp (10MHz clock), only valid if 304 | CUVID_PKT_TIMESTAMP flag is set */ 305 | } CUVIDSOURCEDATAPACKET; 306 | 307 | // Callback for packet delivery 308 | typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *); 309 | 310 | /**************************************************************************************************************************/ 311 | //! \ingroup STRUCTS 312 | //! \struct CUVIDSOURCEPARAMS 313 | //! Describes parameters needed in cuvidCreateVideoSource API 314 | //! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported 315 | //! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. 316 | /**************************************************************************************************************************/ 317 | typedef struct _CUVIDSOURCEPARAMS 318 | { 319 | unsigned int ulClockRate; /**< IN: Time stamp units in Hz (0=default=10000000Hz) */ 320 | unsigned int bAnnexb : 1; /**< IN: AV1 annexB stream */ 321 | unsigned int uReserved : 31; /**< Reserved for future use - set to zero */ 322 | unsigned int uReserved1[6]; /**< Reserved for future use - set to zero */ 323 | void *pUserData; /**< IN: User private data passed in to the data handlers */ 324 | PFNVIDSOURCECALLBACK pfnVideoDataHandler; /**< IN: Called to deliver video packets */ 325 | PFNVIDSOURCECALLBACK pfnAudioDataHandler; /**< IN: Called to deliver audio packets. */ 326 | void *pvReserved2[8]; /**< Reserved for future use - set to NULL */ 327 | } CUVIDSOURCEPARAMS; 328 | 329 | 330 | /**********************************************/ 331 | //! \ingroup ENUMS 332 | //! \enum CUvideosourceformat_flags 333 | //! CUvideosourceformat_flags 334 | //! 
Used in cuvidGetSourceVideoFormat API 335 | /**********************************************/ 336 | typedef enum { 337 | CUVID_FMT_EXTFORMATINFO = 0x100 /**< Return extended format structure (CUVIDEOFORMATEX) */ 338 | } CUvideosourceformat_flags; 339 | 340 | #if !defined(__APPLE__) 341 | /***************************************************************************************************************************/ 342 | //! \ingroup FUNCTS 343 | //! \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams) 344 | //! Create CUvideosource object. CUvideosource spawns demultiplexer thread that provides two callbacks: 345 | //! pfnVideoDataHandler() and pfnAudioDataHandler() 346 | //! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported 347 | //! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. 348 | /***************************************************************************************************************************/ 349 | CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams); 350 | 351 | /***************************************************************************************************************************/ 352 | //! \ingroup FUNCTS 353 | //! \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams) 354 | //! Create video source 355 | /***************************************************************************************************************************/ 356 | CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams); 357 | 358 | /********************************************************************/ 359 | //! \ingroup FUNCTS 360 | //! \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj) 361 | //! Destroy video source 362 | /********************************************************************/ 363 | CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj); 364 | 365 | /******************************************************************************************/ 366 | //! \ingroup FUNCTS 367 | //! \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state) 368 | //! Set video source state to: 369 | //! cudaVideoState_Started - to signal the source to run and deliver data 370 | //! cudaVideoState_Stopped - to stop the source from delivering the data 371 | //! cudaVideoState_Error - invalid source 372 | /******************************************************************************************/ 373 | CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state); 374 | 375 | /******************************************************************************************/ 376 | //! \ingroup FUNCTS 377 | //! \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj) 378 | //! Get video source state 379 | //! Returns: 380 | //! cudaVideoState_Started - if Source is running and delivering data 381 | //! cudaVideoState_Stopped - if Source is stopped or reached end-of-stream 382 | //! 
cudaVideoState_Error - if Source is in error state 383 | /******************************************************************************************/ 384 | cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj); 385 | 386 | /******************************************************************************************************************/ 387 | //! \ingroup FUNCTS 388 | //! \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags) 389 | //! Gets video source format in pvidfmt, flags is set to combination of CUvideosourceformat_flags as per requirement 390 | /******************************************************************************************************************/ 391 | CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags); 392 | 393 | /**************************************************************************************************************************/ 394 | //! \ingroup FUNCTS 395 | //! \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags) 396 | //! Get audio source format 397 | //! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported 398 | //! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed. 399 | /**************************************************************************************************************************/ 400 | CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags); 401 | 402 | #endif 403 | /**********************************************************************************/ 404 | //! \ingroup STRUCTS 405 | //! \struct CUVIDPARSERDISPINFO 406 | //! Used in cuvidParseVideoData API with PFNVIDDISPLAYCALLBACK pfnDisplayPicture 407 | /**********************************************************************************/ 408 | typedef struct _CUVIDPARSERDISPINFO 409 | { 410 | int picture_index; /**< OUT: Index of the current picture */ 411 | int progressive_frame; /**< OUT: 1 if progressive frame; 0 otherwise */ 412 | int top_field_first; /**< OUT: 1 if top field is displayed first; 0 otherwise */ 413 | int repeat_first_field; /**< OUT: Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, 414 | -1=unpaired field) */ 415 | CUvideotimestamp timestamp; /**< OUT: Presentation time stamp */ 416 | } CUVIDPARSERDISPINFO; 417 | 418 | /***********************************************************************************************************************/ 419 | //! Parser callbacks 420 | //! The parser will call these synchronously from within cuvidParseVideoData(), whenever there is sequence change or a picture 421 | //! is ready to be decoded and/or displayed. First argument in functions is "void *pUserData" member of structure CUVIDSOURCEPARAMS 422 | //! Return values from these callbacks are interpreted as below. If the callbacks return failure, it will be propagated by 423 | //! cuvidParseVideoData() to the application. 424 | //! Parser picks default operating point as 0 and outputAllLayers flag as 0 if PFNVIDOPPOINTCALLBACK is not set or return value is 425 | //! -1 or invalid operating point. 426 | //! PFNVIDSEQUENCECALLBACK : 0: fail, 1: succeeded, > 1: override dpb size of parser (set by CUVIDPARSERPARAMS::ulMaxNumDecodeSurfaces 427 | //! while creating parser) 428 | //! 
PFNVIDDECODECALLBACK : 0: fail, >=1: succeeded 429 | //! PFNVIDDISPLAYCALLBACK : 0: fail, >=1: succeeded 430 | //! PFNVIDOPPOINTCALLBACK : <0: fail, >=0: succeeded (bit 0-9: OperatingPoint, bit 10-10: outputAllLayers, bit 11-30: reserved) 431 | //! PFNVIDSEIMSGCALLBACK : 0: fail, >=1: succeeded 432 | /***********************************************************************************************************************/ 433 | typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *); 434 | typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *); 435 | typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *); 436 | typedef int (CUDAAPI *PFNVIDOPPOINTCALLBACK)(void *, CUVIDOPERATINGPOINTINFO*); 437 | typedef int (CUDAAPI *PFNVIDSEIMSGCALLBACK) (void *, CUVIDSEIMESSAGEINFO *); 438 | 439 | /**************************************/ 440 | //! \ingroup STRUCTS 441 | //! \struct CUVIDPARSERPARAMS 442 | //! Used in cuvidCreateVideoParser API 443 | /**************************************/ 444 | typedef struct _CUVIDPARSERPARAMS 445 | { 446 | cudaVideoCodec CodecType; /**< IN: cudaVideoCodec_XXX */ 447 | unsigned int ulMaxNumDecodeSurfaces; /**< IN: Max # of decode surfaces (parser will cycle through these) */ 448 | unsigned int ulClockRate; /**< IN: Timestamp units in Hz (0=default=10000000Hz) */ 449 | unsigned int ulErrorThreshold; /**< IN: % Error threshold (0-100) for calling pfnDecodePicture (100=always 450 | IN: call pfnDecodePicture even if picture bitstream is fully corrupted) */ 451 | unsigned int ulMaxDisplayDelay; /**< IN: Max display queue delay (improves pipelining of decode with display) 452 | 0=no delay (recommended values: 2..4) */ 453 | unsigned int bAnnexb : 1; /**< IN: AV1 annexB stream */ 454 | unsigned int uReserved : 31; /**< Reserved for future use - set to zero */ 455 | unsigned int uReserved1[4]; /**< IN: Reserved for future use - set to 0 */ 456 | void *pUserData; /**< IN: User data for callbacks */ 457 | PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< IN: Called before decoding frames and/or whenever there is a fmt change */ 458 | PFNVIDDECODECALLBACK pfnDecodePicture; /**< IN: Called when a picture is ready to be decoded (decode order) */ 459 | PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< IN: Called whenever a picture is ready to be displayed (display order) */ 460 | PFNVIDOPPOINTCALLBACK pfnGetOperatingPoint; /**< IN: Called from AV1 sequence header to get operating point of a AV1 461 | scalable bitstream */ 462 | PFNVIDSEIMSGCALLBACK pfnGetSEIMsg; /**< IN: Called when all SEI messages are parsed for particular frame */ 463 | void *pvReserved2[5]; /**< Reserved for future use - set to NULL */ 464 | CUVIDEOFORMATEX *pExtVideoInfo; /**< IN: [Optional] sequence header data from system layer */ 465 | } CUVIDPARSERPARAMS; 466 | 467 | /************************************************************************************************/ 468 | //! \ingroup FUNCTS 469 | //! \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams) 470 | //! Create video parser object and initialize 471 | /************************************************************************************************/ 472 | CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams); 473 | 474 | /************************************************************************************************/ 475 | //! \ingroup FUNCTS 476 | //! 
\fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket) 477 | //! Parse the video data from source data packet in pPacket 478 | //! Extracts parameter sets like SPS, PPS, bitstream etc. from pPacket and 479 | //! calls back pfnDecodePicture with CUVIDPICPARAMS data for kicking of HW decoding 480 | //! calls back pfnSequenceCallback with CUVIDEOFORMAT data for initial sequence header or when 481 | //! the decoder encounters a video format change 482 | //! calls back pfnDisplayPicture with CUVIDPARSERDISPINFO data to display a video frame 483 | /************************************************************************************************/ 484 | CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket); 485 | 486 | /************************************************************************************************/ 487 | //! \ingroup FUNCTS 488 | //! \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj) 489 | //! Destroy the video parser 490 | /************************************************************************************************/ 491 | CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj); 492 | 493 | /**********************************************************************************************/ 494 | 495 | #if defined(__cplusplus) 496 | } 497 | #endif /* __cplusplus */ 498 | 499 | #endif // __NVCUVID_H__ 500 | 501 | 502 | --------------------------------------------------------------------------------